/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"
#include <lnet/lnet_rdma.h>

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args
struct osc_setattr_args {
	struct obdo		*sa_oa;
	obd_enqueue_update_f	 sa_upcall;
	void			*sa_cookie;
};

struct osc_fsync_args {
	struct osc_object	*fa_obj;
	struct obdo		*fa_oa;
	obd_enqueue_update_f	 fa_upcall;
	void			*fa_cookie;
};

struct osc_ladvise_args {
	struct obdo		*la_oa;
	obd_enqueue_update_f	 la_upcall;
	void			*la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
			 void *args, int rc);

void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
	struct ost_body *body;

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}
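
/*
 * The getattr/setattr handlers below follow the common synchronous OST RPC
 * pattern: allocate a request on the import, pack it for the OST_* opcode,
 * fill the OST body via osc_pack_req_body(), size the reply, send with
 * ptlrpc_queue_wait(), and finally unpack the returned obdo with
 * lustre_get_wire_obdo(). On the error paths, ptlrpc_request_free() is used
 * before the request has been sent and ptlrpc_req_finished() afterwards.
 */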
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	ENTRY;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

	oa->o_blksize = cli_brw_size(exp->exp_obd);
	oa->o_valid |= OBD_MD_FLBLKSZ;

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	ENTRY;
	LASSERT(oa->o_valid & OBD_MD_FLGROUP);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int osc_setattr_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req, void *args, int rc)
{
	struct osc_setattr_args *sa = args;
	struct ost_body *body;

	ENTRY;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
			     &body->oa);
out:
	rc = sa->sa_upcall(sa->sa_cookie, rc);

	RETURN(rc);
}

int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
		      obd_enqueue_update_f upcall, void *cookie,
		      struct ptlrpc_request_set *rqset)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	int rc;

	ENTRY;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);

	ptlrpc_request_set_replen(req);

	/* do mds to ost setattr asynchronously */
	if (!rqset) {
		/* Do not wait for response. */
		ptlrpcd_add_req(req);
	} else {
		req->rq_interpret_reply = osc_setattr_interpret;

		sa = ptlrpc_req_async_args(sa, req);
		sa->sa_oa = oa;
		sa->sa_upcall = upcall;
		sa->sa_cookie = cookie;

		ptlrpc_set_add_req(rqset, req);
	}

	RETURN(0);
}

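/*
 * Note the convention shared by osc_setattr_async() above and
 * osc_ladvise_base() below: a NULL rqset means "fire and forget" via the
 * ptlrpcd daemon (ptlrpcd_add_req()), while a non-NULL rqset hands the
 * request to the caller's set and delivers the result through the
 * upcall/cookie pair stored in the per-request async args.
 */
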
static int osc_ladvise_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req,
				 void *arg, int rc)
{
	struct osc_ladvise_args *la = arg;
	struct ost_body *body;

	ENTRY;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	*la->la_oa = body->oa;
out:
	rc = la->la_upcall(la->la_cookie, rc);
	RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for the response. Upcall and cookie may also
 * be NULL in this case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
		     struct ladvise_hdr *ladvise_hdr,
		     obd_enqueue_update_f upcall, void *cookie,
		     struct ptlrpc_request_set *rqset)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_ladvise_args *la;
	int rc;
	struct lu_ladvise *req_ladvise;
	struct lu_ladvise *ladvise = ladvise_hdr->lah_advise;
	int num_advise = ladvise_hdr->lah_count;
	struct ladvise_hdr *req_ladvise_hdr;

	ENTRY;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
			     num_advise * sizeof(*ladvise));
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
	if (rc != 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	req->rq_request_portal = OST_IO_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
			     oa);

	req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
						 &RMF_OST_LADVISE_HDR);
	memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

	req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
	memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
	ptlrpc_request_set_replen(req);

	if (rqset == NULL) {
		/* Do not wait for response. */
		ptlrpcd_add_req(req);
		RETURN(0);
	}

	req->rq_interpret_reply = osc_ladvise_interpret;
	la = ptlrpc_req_async_args(la, req);
	la->la_oa = oa;
	la->la_upcall = upcall;
	la->la_cookie = cookie;

	ptlrpc_set_add_req(rqset, req);

	RETURN(0);
}

static int osc_create(const struct lu_env *env, struct obd_export *exp,
		      struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	ENTRY;
	LASSERT(oa != NULL);
	LASSERT(oa->o_valid & OBD_MD_FLGROUP);
	LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out_req, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out_req, rc = -EPROTO);

	CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

	oa->o_blksize = cli_brw_size(exp->exp_obd);
	oa->o_valid |= OBD_MD_FLBLKSZ;

	CDEBUG(D_HA, "transno: %lld\n",
	       lustre_msg_get_transno(req->rq_repmsg));
out_req:
	ptlrpc_req_finished(req);
out:
	RETURN(rc);
}

int osc_punch_send(struct obd_export *exp, struct obdo *oa,
		   obd_enqueue_update_f upcall, void *cookie)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	struct obd_import *imp = class_exp2cliimp(exp);
	struct ost_body *body;
	int rc;

	ENTRY;

	req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_set_io_portal(req);

	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

	lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_setattr_interpret;
	sa = ptlrpc_req_async_args(sa, req);
	sa->sa_oa = oa;
	sa->sa_upcall = upcall;
	sa->sa_cookie = cookie;

	ptlrpcd_add_req(req);

	RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);
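
/*
 * OST_PUNCH reuses struct osc_setattr_args and osc_setattr_interpret() for
 * its completion path: as with a setattr, the reply carries an updated obdo
 * that is copied back into sa_oa before the caller's upcall runs.
 */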
/**
 * osc_fallocate_base() - Handles fallocate request.
 *
 * @exp:	Export structure
 * @oa:		Attributes passed to OSS from client (obdo structure)
 * @upcall:	Completion callback invoked when the RPC finishes
 * @cookie:	Opaque argument passed through to @upcall
 * @mode:	Operation done on given range.
 *
 * Handles fallocate requests only. Only block allocation or the standard
 * preallocate operation is supported currently; other mode flags are not
 * supported yet. ftruncate(2) and truncate(2) are supported via a SETATTR
 * request.
 *
 * Return: Non-zero on failure and 0 on success.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
		       obd_enqueue_update_f upcall, void *cookie, int mode)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	struct ost_body *body;
	struct obd_import *imp = class_exp2cliimp(exp);
	int rc;
	ENTRY;

	oa->o_falloc_mode = mode;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_OST_FALLOCATE);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
	if (rc != 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_setattr_interpret;
	BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
	sa = ptlrpc_req_async_args(sa, req);
	sa->sa_oa = oa;
	sa->sa_upcall = upcall;
	sa->sa_cookie = cookie;

	ptlrpcd_add_req(req);

	RETURN(0);
}
EXPORT_SYMBOL(osc_fallocate_base);
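
/*
 * A hypothetical caller of osc_fallocate_base() (names and the completion
 * mechanism here are illustrative only, not part of this file) would
 * collect the result in its upcall, along the lines of:
 *
 *	static int my_falloc_upcall(void *cookie, int rc)
 *	{
 *		complete((struct completion *)cookie);	// illustrative
 *		return rc;
 *	}
 *
 *	rc = osc_fallocate_base(exp, oa, my_falloc_upcall, &done,
 *				FALLOC_FL_KEEP_SIZE);
 *
 * The RPC itself is always sent through ptlrpcd, so the upcall runs in
 * interpret context, not in the caller's thread.
 */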
static int osc_sync_interpret(const struct lu_env *env,
			      struct ptlrpc_request *req, void *args, int rc)
{
	struct osc_fsync_args *fa = args;
	struct ost_body *body;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	unsigned long valid = 0;
	struct cl_object *obj;
	ENTRY;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL) {
		CERROR("can't unpack ost_body\n");
		GOTO(out, rc = -EPROTO);
	}

	*fa->fa_oa = body->oa;
	obj = osc2cl(fa->fa_obj);

	/* Update osc object's blocks attribute */
	cl_object_attr_lock(obj);
	if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
		attr->cat_blocks = body->oa.o_blocks;
		valid |= CAT_BLOCKS;
	}

	if (valid != 0)
		cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

out:
	rc = fa->fa_upcall(fa->fa_cookie, rc);
	RETURN(rc);
}

int osc_sync_base(struct osc_object *obj, struct obdo *oa,
		  obd_enqueue_update_f upcall, void *cookie,
		  struct ptlrpc_request_set *rqset)
{
	struct obd_export *exp = osc_export(obj);
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_fsync_args *fa;
	int rc;

	ENTRY;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	/* overload the size and blocks fields in the oa with start/end */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_sync_interpret;

	fa = ptlrpc_req_async_args(fa, req);
	fa->fa_obj = obj;
	fa->fa_oa = oa;
	fa->fa_upcall = upcall;
	fa->fa_cookie = cookie;

	ptlrpc_set_add_req(rqset, req);

	RETURN(0);
}

/* Find and cancel locally locks matched by @mode in the resource found by
 * @objid. Found locks are added into the @cancels list. Returns the number
 * of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
				   struct list_head *cancels,
				   enum ldlm_mode mode, __u64 lock_flags)
{
	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
	struct ldlm_res_id res_id;
	struct ldlm_resource *res;
	int count;
	ENTRY;

	/* Return, i.e. cancel nothing, only if ELC is supported (flag in
	 * export) but disabled through procfs (flag in NS).
	 *
	 * This distinguishes from a case when ELC is not supported originally,
	 * when we still want to cancel locks in advance and just cancel them
	 * locally, without sending any RPC. */
	if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
		RETURN(0);

	ostid_build_res_name(&oa->o_oi, &res_id);
	res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
	if (IS_ERR(res))
		RETURN(0);

	LDLM_RESOURCE_ADDREF(res);
	count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
					   lock_flags, 0, NULL);
	LDLM_RESOURCE_DELREF(res);
	ldlm_resource_putref(res);
	RETURN(count);
}

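/*
 * This is the client side of "early lock cancel" (ELC): DLM locks that
 * would conflict with the destroy below are cancelled locally and their
 * handles are packed into the OST_DESTROY request itself via
 * ldlm_prep_elc_req() in osc_destroy(), saving a separate cancel RPC.
 */
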
static int osc_destroy_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req, void *args, int rc)
{
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

	atomic_dec(&cli->cl_destroy_in_flight);
	wake_up(&cli->cl_destroy_waitq);

	return 0;
}

static int osc_can_send_destroy(struct client_obd *cli)
{
	if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
	    cli->cl_max_rpcs_in_flight) {
		/* The destroy request can be sent */
		return 1;
	}
	if (atomic_dec_return(&cli->cl_destroy_in_flight) <
	    cli->cl_max_rpcs_in_flight) {
		/*
		 * The counter has been modified between the two atomic
		 * operations.
		 */
		wake_up(&cli->cl_destroy_waitq);
	}
	return 0;
}
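
/*
 * osc_can_send_destroy() implements a lock-free throttle: it optimistically
 * takes a slot with atomic_inc_return() and, on failure, gives the slot
 * back with atomic_dec_return(). If the counter dropped below the limit
 * between the two operations, another waiter may have missed its wakeup,
 * so the waitqueue is signalled defensively before returning 0.
 */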
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	struct ptlrpc_request *req;
	struct ost_body *body;
	LIST_HEAD(cancels);
	int rc, count;
	ENTRY;

	if (!oa) {
		CDEBUG(D_INFO, "oa NULL\n");
		RETURN(-EINVAL);
	}

	count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
					LDLM_FL_DISCARD_DATA);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
	if (req == NULL) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		RETURN(-ENOMEM);
	}

	rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
			       0, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_destroy_interpret;
	if (!osc_can_send_destroy(cli)) {
		/*
		 * Wait until the number of on-going destroy RPCs drops
		 * under max_rpc_in_flight
		 */
		rc = l_wait_event_abortable_exclusive(
			cli->cl_destroy_waitq,
			osc_can_send_destroy(cli));
		if (rc) {
			ptlrpc_req_finished(req);
			RETURN(-EINTR);
		}
	}

	/* Do not wait for response */
	ptlrpcd_add_req(req);
	RETURN(0);
}

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
				long writing_bytes)
{
	u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

	LASSERT(!(oa->o_valid & bits));

	oa->o_valid |= bits;
	spin_lock(&cli->cl_loi_list_lock);
	if (cli->cl_ocd_grant_param)
		oa->o_dirty = cli->cl_dirty_grant;
	else
		oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
	if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
		CERROR("dirty %lu > dirty_max %lu\n",
		       cli->cl_dirty_pages,
		       cli->cl_dirty_max_pages);
		oa->o_undirty = 0;
	} else if (unlikely(atomic_long_read(&obd_dirty_pages) >
			    (long)(obd_max_dirty_pages + 1))) {
		/* The atomic_read() and the atomic_inc() are not covered by a
		 * lock, thus they may safely race and trip this CERROR()
		 * unless we add in a small fudge factor (+1). */
		CERROR("%s: dirty %ld > system dirty_max %ld\n",
		       cli_name(cli), atomic_long_read(&obd_dirty_pages),
		       obd_max_dirty_pages);
		oa->o_undirty = 0;
	} else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
			    0x7fffffff)) {
		CERROR("dirty %lu - dirty_max %lu too big???\n",
		       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
		oa->o_undirty = 0;
	} else {
		unsigned long nrpages;
		unsigned long undirty;

		nrpages = cli->cl_max_pages_per_rpc;
		nrpages *= cli->cl_max_rpcs_in_flight + 1;
		nrpages = max(nrpages, cli->cl_dirty_max_pages);
		undirty = nrpages << PAGE_SHIFT;
		if (cli->cl_ocd_grant_param) {
			int nrextents;

			/* take extent tax into account when asking for more
			 * grant space */
			nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
				    cli->cl_max_extent_pages;
			undirty += nrextents * cli->cl_grant_extent_tax;
		}
		/* Do not ask for more than OBD_MAX_GRANT - a margin for server
		 * to add extent tax, etc.
		 */
		oa->o_undirty = min(undirty, OBD_MAX_GRANT &
				    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
	}
	oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
	/* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
	if (cli->cl_lost_grant > INT_MAX) {
		CDEBUG(D_CACHE,
		       "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
		       cli_name(cli), cli->cl_lost_grant);
		oa->o_dropped = INT_MAX;
	} else {
		oa->o_dropped = cli->cl_lost_grant;
	}
	cli->cl_lost_grant -= oa->o_dropped;
	spin_unlock(&cli->cl_loi_list_lock);
	CDEBUG(D_CACHE,
	       "%s: dirty: %llu undirty: %u dropped %u grant: %llu cl_lost_grant %lu\n",
	       cli_name(cli), oa->o_dirty, oa->o_undirty, oa->o_dropped,
	       oa->o_grant, cli->cl_lost_grant);
}
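
/*
 * Illustrative o_undirty sizing (example numbers, not defaults): with
 * cl_max_pages_per_rpc = 256 (1 MiB RPCs on 4 KiB pages) and
 * cl_max_rpcs_in_flight = 8, nrpages = 256 * (8 + 1) = 2304, so the client
 * asks for about 9 MiB of grant headroom, plus one cl_grant_extent_tax per
 * cl_max_extent_pages-sized extent when the server supports GRANT_PARAM,
 * all capped below OBD_MAX_GRANT.
 */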
void osc_update_next_shrink(struct client_obd *cli)
{
	cli->cl_next_shrink_grant = ktime_get_seconds() +
				    cli->cl_grant_shrink_interval;

	CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
	       cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
	spin_lock(&cli->cl_loi_list_lock);
	cli->cl_avail_grant += grant;
	spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
	if (body->oa.o_valid & OBD_MD_FLGRANT) {
		CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
		__osc_update_grant(cli, body->oa.o_grant);
	}
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
	struct list_head	gtd_clients;
	struct mutex		gtd_mutex;
	unsigned long		gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
				      struct ptlrpc_request *req,
				      void *args, int rc)
{
	struct osc_grant_args *aa = args;
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
	struct ost_body *body;

	if (rc != 0) {
		__osc_update_grant(cli, aa->aa_oa->o_grant);
		GOTO(out, rc);
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	osc_update_grant(cli, body);
out:
	OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
	aa->aa_oa = NULL;

	return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
	spin_lock(&cli->cl_loi_list_lock);
	oa->o_grant = cli->cl_avail_grant / 4;
	cli->cl_avail_grant -= oa->o_grant;
	spin_unlock(&cli->cl_loi_list_lock);
	if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags = 0;
	}
	oa->o_flags |= OBD_FL_SHRINK_GRANT;
	osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC. This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
			     (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

	spin_lock(&cli->cl_loi_list_lock);
	if (cli->cl_avail_grant <= target_bytes)
		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
	spin_unlock(&cli->cl_loi_list_lock);

	return osc_shrink_grant_to_target(cli, target_bytes);
}
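
/*
 * Example of the shrink target above (illustrative values): with 8 RPCs in
 * flight and 1 MiB RPCs, target_bytes starts at (8 + 1) * 1 MiB = 9 MiB;
 * once cl_avail_grant is already at or below that, the target collapses to
 * a single RPC's worth (1 MiB), matching the two-stage policy described in
 * the comment above.
 */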
int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
	int rc = 0;
	struct ost_body *body;
	ENTRY;

	spin_lock(&cli->cl_loi_list_lock);
	/* Don't shrink if we are already above or below the desired limit
	 * We don't want to shrink below a single RPC, as that will negatively
	 * impact block allocation and long-term performance. */
	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

	if (target_bytes >= cli->cl_avail_grant) {
		spin_unlock(&cli->cl_loi_list_lock);
		RETURN(0);
	}
	spin_unlock(&cli->cl_loi_list_lock);

	OBD_ALLOC_PTR(body);
	if (!body)
		RETURN(-ENOMEM);

	osc_announce_cached(cli, &body->oa, 0);

	spin_lock(&cli->cl_loi_list_lock);
	if (target_bytes >= cli->cl_avail_grant) {
		/* available grant has changed since target calculation */
		spin_unlock(&cli->cl_loi_list_lock);
		GOTO(out_free, rc = 0);
	}
	body->oa.o_grant = cli->cl_avail_grant - target_bytes;
	cli->cl_avail_grant = target_bytes;
	spin_unlock(&cli->cl_loi_list_lock);
	if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
		body->oa.o_valid |= OBD_MD_FLFLAGS;
		body->oa.o_flags = 0;
	}
	body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
	osc_update_next_shrink(cli);

	rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
				sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
				sizeof(*body), body, NULL);
	if (rc != 0)
		__osc_update_grant(cli, body->oa.o_grant);
out_free:
	OBD_FREE_PTR(body);
	RETURN(rc);
}

static int osc_should_shrink_grant(struct client_obd *client)
{
	time64_t next_shrink = client->cl_next_shrink_grant;

	if (client->cl_import == NULL)
		return 0;

	if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
	    client->cl_import->imp_grant_shrink_disabled) {
		osc_update_next_shrink(client);
		return 0;
	}

	if (ktime_get_seconds() >= next_shrink - 5) {
		/* Get the current RPC size directly, instead of going via:
		 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
		 * Keep comment here so that it can be found by searching. */
		int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

		if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
		    client->cl_avail_grant > brw_size)
			return 1;

		osc_update_next_shrink(client);
	}
	return 0;
}

#define GRANT_SHRINK_RPC_BATCH 100

static struct delayed_work work;

static void osc_grant_work_handler(struct work_struct *data)
{
	struct client_obd *cli;
	int rpc_sent;
	bool init_next_shrink = true;
	time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

	rpc_sent = 0;
	mutex_lock(&client_gtd.gtd_mutex);
	list_for_each_entry(cli, &client_gtd.gtd_clients,
			    cl_grant_chain) {
		if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
		    osc_should_shrink_grant(cli)) {
			osc_shrink_grant(cli);
			rpc_sent++;
		}

		if (!init_next_shrink) {
			if (cli->cl_next_shrink_grant < next_shrink &&
			    cli->cl_next_shrink_grant > ktime_get_seconds())
				next_shrink = cli->cl_next_shrink_grant;
		} else {
			init_next_shrink = false;
			next_shrink = cli->cl_next_shrink_grant;
		}
	}
	mutex_unlock(&client_gtd.gtd_mutex);

	if (client_gtd.gtd_stopped == 1)
		return;

	if (next_shrink > ktime_get_seconds()) {
		time64_t delay = next_shrink - ktime_get_seconds();

		schedule_delayed_work(&work, cfs_time_seconds(delay));
	} else {
		schedule_work(&work.work);
	}
}

void osc_schedule_grant_work(void)
{
	cancel_delayed_work_sync(&work);
	schedule_work(&work.work);
}

/**
 * Start grant thread for returning grant to server for idle clients.
 */
static int osc_start_grant_work(void)
{
	client_gtd.gtd_stopped = 0;
	mutex_init(&client_gtd.gtd_mutex);
	INIT_LIST_HEAD(&client_gtd.gtd_clients);

	INIT_DELAYED_WORK(&work, osc_grant_work_handler);
	schedule_work(&work.work);

	return 0;
}

static void osc_stop_grant_work(void)
{
	client_gtd.gtd_stopped = 1;
	cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
	mutex_lock(&client_gtd.gtd_mutex);
	list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
	mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
	if (list_empty(&client->cl_grant_chain))
		return;

	mutex_lock(&client_gtd.gtd_mutex);
	list_del_init(&client->cl_grant_chain);
	mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
	/*
	 * ocd_grant is the total grant amount we expect to hold: if we've
	 * been evicted, it's the new avail_grant amount, and cl_dirty_pages
	 * will drop to 0 as inflight RPCs fail out; otherwise, it's
	 * avail_grant + dirty.
	 *
	 * race is tolerable here: if we're evicted, but imp_state already
	 * left EVICTED state, then cl_dirty_pages must be 0 already.
	 */
	spin_lock(&cli->cl_loi_list_lock);
	cli->cl_avail_grant = ocd->ocd_grant;
	if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
		unsigned long consumed = cli->cl_reserved_grant;

		if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
			consumed += cli->cl_dirty_grant;
		else
			consumed += cli->cl_dirty_pages << PAGE_SHIFT;
		if (cli->cl_avail_grant < consumed) {
			CERROR("%s: granted %ld but already consumed %ld\n",
			       cli_name(cli), cli->cl_avail_grant, consumed);
			cli->cl_avail_grant = 0;
		} else {
			cli->cl_avail_grant -= consumed;
		}
	}

	if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
		u64 size;
		int chunk_mask;

		/* overhead for each extent insertion */
		cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
		/* determine the appropriate chunk size used by osc_extent. */
		cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
					  ocd->ocd_grant_blkbits);
		/* max_pages_per_rpc must be chunk aligned */
		chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
		cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
					     ~chunk_mask) & chunk_mask;
		/* determine maximum extent size, in #pages */
		size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
		cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
		cli->cl_ocd_grant_param = 1;
	} else {
		cli->cl_ocd_grant_param = 0;
		cli->cl_grant_extent_tax = 0;
		cli->cl_chunkbits = PAGE_SHIFT;
		cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
	}
	spin_unlock(&cli->cl_loi_list_lock);

	CDEBUG(D_CACHE,
	       "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
	       cli_name(cli),
	       cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
	       cli->cl_max_extent_pages);

	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
		osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);
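
/*
 * Chunk sizing example for the GRANT_PARAM path above (illustrative): if
 * the server reports ocd_grant_blkbits = 16 (64 KiB blocks) on a client
 * with 4 KiB pages, cl_chunkbits = 16 and max_pages_per_rpc is rounded up
 * to a multiple of 1 << (16 - 12) = 16 pages, so every RPC stays aligned
 * to the server's grant accounting unit.
 */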
/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
			      struct brw_page **pga)
{
	char *ptr;
	int i = 0;

	/* skip bytes read OK */
	while (nob_read > 0) {
		LASSERT(page_count > 0);

		if (pga[i]->count > nob_read) {
			/* EOF inside this page */
			ptr = kmap(pga[i]->pg) +
				(pga[i]->off & ~PAGE_MASK);
			memset(ptr + nob_read, 0, pga[i]->count - nob_read);
			kunmap(pga[i]->pg);
			page_count--;
			i++;
			break;
		}

		nob_read -= pga[i]->count;
		page_count--;
		i++;
	}

	/* zero remaining pages */
	while (page_count-- > 0) {
		ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
		memset(ptr, 0, pga[i]->count);
		kunmap(pga[i]->pg);
		i++;
	}
}

static int check_write_rcs(struct ptlrpc_request *req,
			   int requested_nob, int niocount,
			   size_t page_count, struct brw_page **pga)
{
	int i;
	__u32 *remote_rcs;

	remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
						  sizeof(*remote_rcs) *
						  niocount);
	if (remote_rcs == NULL) {
		CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
		return -EPROTO;
	}

	/* return error if any niobuf was in error */
	for (i = 0; i < niocount; i++) {
		if ((int)remote_rcs[i] < 0) {
			CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
			       i, remote_rcs[i], req);
			return remote_rcs[i];
		}

		if (remote_rcs[i] != 0) {
			CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
			       i, remote_rcs[i], req);
			return -EPROTO;
		}
	}
	if (req->rq_bulk != NULL &&
	    req->rq_bulk->bd_nob_transferred != requested_nob) {
		CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
		       req->rq_bulk->bd_nob_transferred, requested_nob);
		return -EPROTO;
	}

	return 0;
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
	if (p1->flag != p2->flag) {
		unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
				  OBD_BRW_SYNC | OBD_BRW_ASYNC |
				  OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC |
				  OBD_BRW_SYS_RESOURCE);

		/* warn if we try to combine flags that we don't know to be
		 * safe to combine */
		if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
			CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at https://jira.whamcloud.com/\n",
			      p1->flag, p2->flag);
		}
		return 0;
	}

	return (p1->off + p1->count == p2->off);
}
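
/*
 * Example: two brw_pages covering [0, 4096) and [4096, 8192) with
 * compatible flags merge into a single niobuf_remote of length 8192 in
 * osc_brw_prep_request(); a hole between them, or a flag mismatch outside
 * the ignorable mask above, starts a new niobuf instead.
 */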
#if IS_ENABLED(CONFIG_CRC_T10DIF)
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
				   size_t pg_count, struct brw_page **pga,
				   int opc, obd_dif_csum_fn *fn,
				   int sector_size,
				   u32 *check_sum, bool resend)
{
	struct ahash_request *req;
	/* Use Adler as the default checksum type on top of DIF tags */
	unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
	struct page *__page;
	unsigned char *buffer;
	__u16 *guard_start;
	unsigned int bufsize;
	int guard_number;
	int used_number = 0;
	int used;
	u32 cksum;
	int rc = 0;
	int i = 0;

	LASSERT(pg_count > 0);

	__page = alloc_page(GFP_KERNEL);
	if (__page == NULL)
		return -ENOMEM;

	req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
	if (IS_ERR(req)) {
		rc = PTR_ERR(req);
		CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
		       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
		GOTO(out, rc);
	}

	buffer = kmap(__page);
	guard_start = (__u16 *)buffer;
	guard_number = PAGE_SIZE / sizeof(*guard_start);
	CDEBUG(D_PAGE | (resend ? D_HA : 0),
	       "GRD tags per page=%u, resend=%u, bytes=%u, pages=%zu\n",
	       guard_number, resend, nob, pg_count);

	while (nob > 0 && pg_count > 0) {
		unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

		/* corrupt the data before we compute the checksum, to
		 * simulate an OST->client data error */
		if (unlikely(i == 0 && opc == OST_READ &&
			     OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
			unsigned char *ptr = kmap(pga[i]->pg);
			int off = pga[i]->off & ~PAGE_MASK;

			memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
			kunmap(pga[i]->pg);
		}

		/*
		 * The left guard number should be able to hold checksums of a
		 * whole page
		 */
		rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
						  pga[i]->off & ~PAGE_MASK,
						  count,
						  guard_start + used_number,
						  guard_number - used_number,
						  &used, sector_size, fn);
		if (unlikely(resend))
			CDEBUG(D_PAGE | D_HA,
			       "pga[%u]: used %u off %llu+%u gen checksum: %*phN\n",
			       i, used, pga[i]->off & ~PAGE_MASK, count,
			       (int)(used * sizeof(*guard_start)),
			       guard_start + used_number);
		if (rc)
			break;

		used_number += used;
		if (used_number == guard_number) {
			cfs_crypto_hash_update_page(req, __page, 0,
				used_number * sizeof(*guard_start));
			used_number = 0;
		}

		nob -= pga[i]->count;
		pg_count--;
		i++;
	}
	kunmap(__page);
	if (rc)
		GOTO(out, rc);

	if (used_number != 0)
		cfs_crypto_hash_update_page(req, __page, 0,
			used_number * sizeof(*guard_start));

	bufsize = sizeof(cksum);
	cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

	/* For sending we only compute the wrong checksum instead
	 * of corrupting the data so it is still correct on a redo */
	if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
		cksum++;

	*check_sum = cksum;
out:
	__free_page(__page);
	return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum, re) \
	-EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

static int osc_checksum_bulk(int nob, size_t pg_count,
			     struct brw_page **pga, int opc,
			     enum cksum_types cksum_type,
			     u32 *cksum)
{
	int i = 0;
	struct ahash_request *req;
	unsigned int bufsize;
	unsigned char cfs_alg = cksum_obd2cfs(cksum_type);

	LASSERT(pg_count > 0);

	req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
	if (IS_ERR(req)) {
		CERROR("Unable to initialize checksum hash %s\n",
		       cfs_crypto_hash_name(cfs_alg));
		return PTR_ERR(req);
	}

	while (nob > 0 && pg_count > 0) {
		unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

		/* corrupt the data before we compute the checksum, to
		 * simulate an OST->client data error */
		if (i == 0 && opc == OST_READ &&
		    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
			unsigned char *ptr = kmap(pga[i]->pg);
			int off = pga[i]->off & ~PAGE_MASK;

			memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
			kunmap(pga[i]->pg);
		}
		cfs_crypto_hash_update_page(req, pga[i]->pg,
					    pga[i]->off & ~PAGE_MASK,
					    count);
		LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
			       (int)(pga[i]->off & ~PAGE_MASK));

		nob -= pga[i]->count;
		pg_count--;
		i++;
	}

	bufsize = sizeof(*cksum);
	cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

	/* For sending we only compute the wrong checksum instead
	 * of corrupting the data so it is still correct on a redo */
	if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
		(*cksum)++;

	return 0;
}

static int osc_checksum_bulk_rw(const char *obd_name,
				enum cksum_types cksum_type,
				int nob, size_t pg_count,
				struct brw_page **pga, int opc,
				u32 *check_sum, bool resend)
{
	obd_dif_csum_fn *fn = NULL;
	int sector_size = 0;
	int rc;

	ENTRY;
	obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

	if (fn)
		rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
					     opc, fn, sector_size, check_sum,
					     resend);
	else
		rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
				       check_sum);

	RETURN(rc);
}

static inline void osc_release_bounce_pages(struct brw_page **pga,
					    u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
	int i;

	for (i = 0; i < page_count; i++) {
		/* Bounce pages allocated by a call to
		 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
		 * are identified thanks to the PageChecked flag.
		 */
		if (PageChecked(pga[i]->pg))
			llcrypt_finalize_bounce_page(&pga[i]->pg);
		pga[i]->count -= pga[i]->bp_count_diff;
		pga[i]->off += pga[i]->bp_off_diff;
	}
#endif
}

static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
		     u32 page_count, struct brw_page **pga,
		     struct ptlrpc_request **reqp, int resend)
{
	struct ptlrpc_request *req;
	struct ptlrpc_bulk_desc *desc;
	struct ost_body *body;
	struct obd_ioobj *ioobj;
	struct niobuf_remote *niobuf;
	int niocount, i, requested_nob, opc, rc, short_io_size = 0;
	struct osc_brw_async_args *aa;
	struct req_capsule *pill;
	struct brw_page *pg_prev;
	void *short_io_buf;
	const char *obd_name = cli->cl_import->imp_obd->obd_name;
	struct inode *inode = NULL;
	bool directio = false;
	bool enable_checksum = true;

	ENTRY;
	if (pga[0]->pg) {
		inode = page2inode(pga[0]->pg);
		if (inode == NULL) {
			/* Try to get reference to inode from cl_page if we are
			 * dealing with direct IO, as handled pages are not
			 * actual page cache pages.
			 */
			struct osc_async_page *oap = brw_page2oap(pga[0]);
			struct cl_page *clpage = oap2cl_page(oap);

			inode = clpage->cp_inode;
			if (inode)
				directio = true;
		}
	}
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
		RETURN(-ENOMEM); /* Recoverable */
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
		RETURN(-EINVAL); /* Fatal */

	if ((cmd & OBD_BRW_WRITE) != 0) {
		opc = OST_WRITE;
		req = ptlrpc_request_alloc_pool(cli->cl_import,
						osc_rq_pool,
						&RQF_OST_BRW_WRITE);
	} else {
		opc = OST_READ;
		req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
	}
	if (req == NULL)
		RETURN(-ENOMEM);
	if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
		for (i = 0; i < page_count; i++) {
			struct brw_page *pg = pga[i];
			struct page *data_page = NULL;
			bool retried = false;
			bool lockedbymyself;
			u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
			struct address_space *map_orig = NULL;
			pgoff_t index_orig;

retry_encrypt:
			if (nunits & ~LUSTRE_ENCRYPTION_MASK)
				nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
					LUSTRE_ENCRYPTION_UNIT_SIZE;
			/* The page can already be locked when we arrive here.
			 * This is possible when cl_page_assume/vvp_page_assume
			 * is stuck on wait_on_page_writeback with page lock
			 * held. In this case there is no risk for the lock to
			 * be released while we are doing our encryption
			 * processing, because writeback against that page will
			 * end in vvp_page_completion_write/cl_page_completion,
			 * which means only once the page is fully processed.
			 */
			lockedbymyself = trylock_page(pg->pg);
			if (directio) {
				map_orig = pg->pg->mapping;
				pg->pg->mapping = inode->i_mapping;
				index_orig = pg->pg->index;
				pg->pg->index = pg->off >> PAGE_SHIFT;
			}
			data_page =
				llcrypt_encrypt_pagecache_blocks(pg->pg,
								 nunits, 0,
								 GFP_NOFS);
			if (directio) {
				pg->pg->mapping = map_orig;
				pg->pg->index = index_orig;
			}
			if (lockedbymyself)
				unlock_page(pg->pg);
			if (IS_ERR(data_page)) {
				rc = PTR_ERR(data_page);
				if (rc == -ENOMEM && !retried) {
					retried = true;
					rc = 0;
					goto retry_encrypt;
				}
				ptlrpc_request_free(req);
				RETURN(rc);
			}
			/* Set PageChecked flag on bounce page for
			 * disambiguation in osc_release_bounce_pages().
			 */
			SetPageChecked(data_page);
			pg->pg = data_page;
			/* there should be no gap in the middle of page array */
			if (i == page_count - 1) {
				struct osc_async_page *oap = brw_page2oap(pg);

				oa->o_size = oap->oap_count +
					oap->oap_obj_off + oap->oap_page_off;
			}
			/* len is forced to nunits, and relative offset to 0
			 * so store the old, clear text info
			 */
			pg->bp_count_diff = nunits - pg->count;
			pg->count = nunits;
			pg->bp_off_diff = pg->off & ~PAGE_MASK;
			pg->off = pg->off & PAGE_MASK;
		}
	} else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
		for (i = 0; i < page_count; i++) {
			struct brw_page *pg = pga[i];
			u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;

			if (nunits & ~LUSTRE_ENCRYPTION_MASK)
				nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
					LUSTRE_ENCRYPTION_UNIT_SIZE;
			/* count/off are forced to cover the whole encryption
			 * unit size so that all encrypted data is stored on
			 * the OST, so adjust bp_{count,off}_diff for the size
			 * of the clear text.
			 */
			pg->bp_count_diff = nunits - pg->count;
			pg->count = nunits;
			pg->bp_off_diff = pg->off & ~PAGE_MASK;
			pg->off = pg->off & PAGE_MASK;
		}
	}

	for (niocount = i = 1; i < page_count; i++) {
		if (!can_merge_pages(pga[i - 1], pga[i]))
			niocount++;
	}

	pill = &req->rq_pill;
	req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
			     sizeof(*ioobj));
	req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
			     niocount * sizeof(*niobuf));

	for (i = 0; i < page_count; i++) {
		short_io_size += pga[i]->count;
		if (!inode || !IS_ENCRYPTED(inode)) {
			pga[i]->bp_count_diff = 0;
			pga[i]->bp_off_diff = 0;
		}
	}

	if (lnet_is_rdma_only_page(pga[0]->pg)) {
		enable_checksum = false;
		short_io_size = 0;
	}

	/* Check if read/write is small enough to be a short io. */
	if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
	    !imp_connect_shortio(cli->cl_import))
		short_io_size = 0;

	/* If this is an empty RPC to old server, just ignore it */
	if (!short_io_size && !pga[0]->pg) {
		ptlrpc_request_free(req);
		RETURN(-ENODATA);
	}

	req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
			     opc == OST_READ ? 0 : short_io_size);
	if (opc == OST_READ)
		req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
				     short_io_size);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	osc_set_io_portal(req);

	ptlrpc_at_set_req_timeout(req);
	/* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
	 * retry logic */
	req->rq_no_retry_einprogress = 1;

	if (short_io_size != 0) {
		desc = NULL;
		short_io_buf = NULL;
		goto no_bulk;
	}

	desc = ptlrpc_prep_bulk_imp(req, page_count,
		cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
		(opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
			PTLRPC_BULK_PUT_SINK),
		OST_BULK_PORTAL,
		&ptlrpc_bulk_kiov_pin_ops);

	if (desc == NULL)
		GOTO(out, rc = -ENOMEM);
	/* NB request now owns desc and will free it when it gets freed */
no_bulk:
	body = req_capsule_client_get(pill, &RMF_OST_BODY);
	ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
	niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
	LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	/* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
	 * and from_kgid(), because they are asynchronous. Fortunately, the
	 * variable oa contains valid o_uid and o_gid in these two operations.
	 * Besides, filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
	 * OBD_MD_FLUID and OBD_MD_FLGID are not set in order to avoid breaking
	 * other process logic */
	body->oa.o_uid = oa->o_uid;
	body->oa.o_gid = oa->o_gid;

	obdo_to_ioobj(oa, ioobj);
	ioobj->ioo_bufcnt = niocount;
	/* The high bits of ioo_max_brw tell the server the _maximum_ number of
	 * bulks that might be sent for this request. The actual number is
	 * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
	 * sends "max - 1" for old client compatibility sending "0", and also
	 * so the actual maximum is a power-of-two number, not one less. LU-1431 */
	if (desc != NULL)
		ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
	else /* short io */
		ioobj_max_brw_set(ioobj, 0);

	if (short_io_size != 0) {
		if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
			body->oa.o_valid |= OBD_MD_FLFLAGS;
			body->oa.o_flags = 0;
		}
		body->oa.o_flags |= OBD_FL_SHORT_IO;
		CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
		       short_io_size);
		if (opc == OST_WRITE) {
			short_io_buf = req_capsule_client_get(pill,
							      &RMF_SHORT_IO);
			LASSERT(short_io_buf != NULL);
		}
	}

	LASSERT(page_count > 0);
	pg_prev = pga[0];
	for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
		struct brw_page *pg = pga[i];
		int poff = pg->off & ~PAGE_MASK;

		LASSERT(pg->count > 0);
		/* make sure there is no gap in the middle of page array */
		LASSERTF(page_count == 1 ||
			 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
			  ergo(i > 0 && i < page_count - 1,
			       poff == 0 && pg->count == PAGE_SIZE) &&
			  ergo(i == page_count - 1, poff == 0)),
			 "i: %d/%d pg: %p off: %llu, count: %u\n",
			 i, page_count, pg, pg->off, pg->count);
		LASSERTF(i == 0 || pg->off > pg_prev->off,
			 "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n",
			 i, page_count,
			 pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
			 pg_prev->pg, page_private(pg_prev->pg),
			 pg_prev->pg->index, pg_prev->off);
		LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
			(pg->flag & OBD_BRW_SRVLOCK));
		if (short_io_size != 0 && opc == OST_WRITE) {
			unsigned char *ptr = kmap_atomic(pg->pg);

			LASSERT(short_io_size >= requested_nob + pg->count);
			memcpy(short_io_buf + requested_nob,
			       ptr + poff,
			       pg->count);
			kunmap_atomic(ptr);
		} else if (short_io_size == 0) {
			desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
							 pg->count);
		}
		requested_nob += pg->count;

		if (i > 0 && can_merge_pages(pg_prev, pg)) {
			niobuf--;
			niobuf->rnb_len += pg->count;
		} else {
			niobuf->rnb_offset = pg->off;
			niobuf->rnb_len    = pg->count;
			niobuf->rnb_flags  = pg->flag;
		}
		pg_prev = pg;
	}

	LASSERTF((void *)(niobuf - niocount) ==
		req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
		"want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
		&RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

	osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);
	if (resend) {
		if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
			body->oa.o_valid |= OBD_MD_FLFLAGS;
			body->oa.o_flags = 0;
		}
		body->oa.o_flags |= OBD_FL_RECOV_RESEND;
	}

	if (osc_should_shrink_grant(cli))
		osc_shrink_grant_local(cli, &body->oa);

	if (!cli->cl_checksum || sptlrpc_flavor_has_bulk(&req->rq_flvr))
		enable_checksum = false;

	/* size[REQ_REC_OFF] still sizeof (*body) */
	if (opc == OST_WRITE) {
		if (enable_checksum) {
			/* store cl_cksum_type in a local variable since
			 * it can be changed via lprocfs */
			enum cksum_types cksum_type = cli->cl_cksum_type;

			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
				body->oa.o_flags = 0;

			body->oa.o_flags |= obd_cksum_type_pack(obd_name,
								cksum_type);
			body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;

			rc = osc_checksum_bulk_rw(obd_name, cksum_type,
						  requested_nob, page_count,
						  pga, OST_WRITE,
						  &body->oa.o_cksum, resend);
			if (rc < 0) {
				CDEBUG(D_PAGE, "failed to checksum: rc = %d\n",
				       rc);
				GOTO(out, rc);
			}
			CDEBUG(D_PAGE | (resend ? D_HA : 0),
			       "checksum at write origin: %x (%x)\n",
			       body->oa.o_cksum, cksum_type);

			/* save this in 'oa', too, for later checking */
			oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
			oa->o_flags |= obd_cksum_type_pack(obd_name,
							   cksum_type);
		} else {
			/* clear out the checksum flag, in case this is a
			 * resend but cl_checksum is no longer set. b=11238 */
			oa->o_valid &= ~OBD_MD_FLCKSUM;
		}
		oa->o_cksum = body->oa.o_cksum;
		/* 1 RC per niobuf */
		req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
				     sizeof(__u32) * niocount);
	} else {
		if (enable_checksum) {
			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
				body->oa.o_flags = 0;
			body->oa.o_flags |= obd_cksum_type_pack(obd_name,
				cli->cl_cksum_type);
			body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
		}

		/* Client cksum has been already copied to wire obdo in previous
		 * lustre_set_wire_obdo(), and in the case a bulk-read is being
		 * resent due to cksum error, this will allow Server to
		 * check+dump pages on its side */
	}
	ptlrpc_request_set_replen(req);

	aa = ptlrpc_req_async_args(aa, req);
	aa->aa_oa = oa;
	aa->aa_requested_nob = requested_nob;
	aa->aa_nio_count = niocount;
	aa->aa_page_count = page_count;
	aa->aa_resends = 0;
	aa->aa_ppga = pga;
	aa->aa_cli = cli;
	INIT_LIST_HEAD(&aa->aa_oaps);

	*reqp = req;
	niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
	CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
	       req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
	       niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
	RETURN(0);

out:
	ptlrpc_req_finished(req);
	RETURN(rc);
}
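
/*
 * To summarize the request built above: the capsule carries an ost_body
 * (obdo), one obd_ioobj, and niocount niobuf_remote descriptors; the data
 * itself travels either inline in the RMF_SHORT_IO buffer or as a bulk
 * descriptor with one kiov fragment per page. The osc_brw_async_args
 * stashed in rq_async_args is what brw_interpret()/osc_brw_fini_request()
 * later use to verify and complete the transfer.
 */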
char dbgcksum_file_name[PATH_MAX];

static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
				struct brw_page **pga, __u32 server_cksum,
				__u32 client_cksum)
{
	struct file *filp;
	int rc, i;
	unsigned int len;
	char *buf;

	/* will only keep dump of pages on first error for the same range in
	 * file/fid, not during the resends/retries. */
	snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
		 "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
		 (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
			libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
		 pga[0]->off,
		 pga[page_count-1]->off + pga[page_count-1]->count - 1,
		 client_cksum, server_cksum);
	CWARN("dumping checksum data to %s\n", dbgcksum_file_name);
	filp = filp_open(dbgcksum_file_name,
			 O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		if (rc == -EEXIST)
			CDEBUG(D_INFO,
			       "%s: can't open to dump pages with checksum error: rc = %d\n",
			       dbgcksum_file_name, rc);
		else
			CERROR("%s: can't open to dump pages with checksum error: rc = %d\n",
			       dbgcksum_file_name, rc);
		return;
	}

	for (i = 0; i < page_count; i++) {
		len = pga[i]->count;
		buf = kmap(pga[i]->pg);
		while (len != 0) {
			rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
			if (rc < 0) {
				CERROR("%s: wanted to write %u but got %d error\n",
				       dbgcksum_file_name, len, rc);
				break;
			}
			len -= rc;
			buf += rc;
		}
		kunmap(pga[i]->pg);
	}

	rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
	if (rc)
		CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
	filp_close(filp, NULL);

	libcfs_debug_dumplog();
}

static int
check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
		     __u32 client_cksum, __u32 server_cksum,
		     struct osc_brw_async_args *aa)
{
	const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
	enum cksum_types cksum_type;
	obd_dif_csum_fn *fn = NULL;
	int sector_size = 0;
	__u32 new_cksum;
	char *msg;
	int rc;

	if (server_cksum == client_cksum) {
		CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
		return 0;
	}

	if (aa->aa_cli->cl_checksum_dump)
		dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
				    server_cksum, client_cksum);

	cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
					   oa->o_flags : 0);

	switch (cksum_type) {
	case OBD_CKSUM_T10IP512:
		fn = obd_dif_ip_fn;
		sector_size = 512;
		break;
	case OBD_CKSUM_T10IP4K:
		fn = obd_dif_ip_fn;
		sector_size = 4096;
		break;
	case OBD_CKSUM_T10CRC512:
		fn = obd_dif_crc_fn;
		sector_size = 512;
		break;
	case OBD_CKSUM_T10CRC4K:
		fn = obd_dif_crc_fn;
		sector_size = 4096;
		break;
	default:
		break;
	}

	if (fn)
		rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
					     aa->aa_page_count, aa->aa_ppga,
					     OST_WRITE, fn, sector_size,
					     &new_cksum, true);
	else
		rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
				       aa->aa_ppga, OST_WRITE, cksum_type,
				       &new_cksum);

	if (rc < 0)
		msg = "failed to calculate the client write checksum";
	else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
		msg = "the server did not use the checksum type specified in the original request - likely a protocol problem";
	else if (new_cksum == server_cksum)
		msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)";
	else if (new_cksum == client_cksum)
		msg = "changed in transit before arrival at OST";
	else
		msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)";

	LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "DFID" object "DOSTID" extent [%llu-%llu], original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
			   obd_name, msg, libcfs_nid2str(peer->nid),
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
			   POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
			   aa->aa_ppga[aa->aa_page_count - 1]->off +
			   aa->aa_ppga[aa->aa_page_count-1]->count - 1,
			   client_cksum,
			   obd_cksum_type_unpack(aa->aa_oa->o_flags),
			   server_cksum, cksum_type, new_cksum);
	return 1;
}

/* Note rc enters this function as number of bytes transferred */
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
{
	struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
	struct client_obd *cli = aa->aa_cli;
	const char *obd_name = cli->cl_import->imp_obd->obd_name;
	const struct lnet_process_id *peer =
		&req->rq_import->imp_connection->c_peer;
	struct ost_body *body;
	u32 client_cksum = 0;
	struct inode *inode;
	unsigned int blockbits = 0, blocksize = 0;

	ENTRY;

	if (rc < 0 && rc != -EDQUOT) {
		DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
		RETURN(rc);
	}

	LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL) {
		DEBUG_REQ(D_INFO, req, "cannot unpack body");
		RETURN(-EPROTO);
	}

	/* set/clear over quota flag for a uid/gid/projid */
	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
	    body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
		unsigned qid[LL_MAXQUOTAS] = {
					 body->oa.o_uid, body->oa.o_gid,
					 body->oa.o_projid };
		CDEBUG(D_QUOTA,
		       "setdq for [%u %u %u] with valid %#llx, flags %x\n",
		       body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
		       body->oa.o_valid, body->oa.o_flags);
		osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
				body->oa.o_flags);
	}

	osc_update_grant(cli, body);

	if (rc < 0)
		RETURN(rc);

	if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
		client_cksum = aa->aa_oa->o_cksum; /* save for later */

	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
		if (rc > 0) {
			CERROR("%s: unexpected positive size %d\n",
			       obd_name, rc);
			RETURN(-EPROTO);
		}

		if (req->rq_bulk != NULL &&
		    sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
			RETURN(-EAGAIN);

		if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
		    check_write_checksum(&body->oa, peer, client_cksum,
					 body->oa.o_cksum, aa))
			RETURN(-EAGAIN);

		rc = check_write_rcs(req, aa->aa_requested_nob,
				     aa->aa_nio_count, aa->aa_page_count,
				     aa->aa_ppga);
		GOTO(out, rc);
	}

	/* The rest of this function executes only for OST_READs */

	if (req->rq_bulk == NULL) {
		/* short io */
		rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
					  RCL_SERVER);
		LASSERT(rc == req->rq_status);
	} else {
		/* if unwrap_bulk failed, return -EAGAIN to retry */
		rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
	}
	if (rc < 0)
		GOTO(out, rc = -EAGAIN);

	if (rc > aa->aa_requested_nob) {
		CERROR("%s: unexpected size %d, requested %d\n", obd_name,
		       rc, aa->aa_requested_nob);
		RETURN(-EPROTO);
	}

	if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
		CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
		       rc, req->rq_bulk->bd_nob_transferred);
		RETURN(-EPROTO);
	}

	if (req->rq_bulk == NULL) {
		/* short io */
		int nob, pg_count, i = 0;
		unsigned char *buf;

		CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
		pg_count = aa->aa_page_count;
		buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
						   rc);
		nob = rc;
		while (nob > 0 && pg_count > 0) {
			unsigned char *ptr;
			int count = aa->aa_ppga[i]->count > nob ?
				    nob : aa->aa_ppga[i]->count;

			CDEBUG(D_CACHE, "page %p count %d\n",
			       aa->aa_ppga[i]->pg, count);
			ptr = kmap_atomic(aa->aa_ppga[i]->pg);
			memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
			       count);
			kunmap_atomic((void *) ptr);

			buf += count;
			nob -= count;
			i++;
			pg_count--;
		}
	}

	if (rc < aa->aa_requested_nob)
		handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);

	if (body->oa.o_valid & OBD_MD_FLCKSUM) {
		static int cksum_counter;
		u32 server_cksum = body->oa.o_cksum;
		int nob = rc;
		char *via = "";
		char *router = "";
		enum cksum_types cksum_type;
		u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
			      body->oa.o_flags : 0;

		cksum_type = obd_cksum_type_unpack(o_flags);
		rc = osc_checksum_bulk_rw(obd_name, cksum_type, nob,
					  aa->aa_page_count, aa->aa_ppga,
					  OST_READ, &client_cksum, false);
		if (rc < 0)
			GOTO(out, rc);

		if (req->rq_bulk != NULL &&
		    peer->nid != req->rq_bulk->bd_sender) {
			via = " via ";
			router = libcfs_nid2str(req->rq_bulk->bd_sender);
		}

		if (server_cksum != client_cksum) {
			struct ost_body *clbody;
			__u32 client_cksum2;
			u32 page_count = aa->aa_page_count;

			osc_checksum_bulk_rw(obd_name, cksum_type, nob,
					     page_count, aa->aa_ppga,
					     OST_READ, &client_cksum2, true);
			clbody = req_capsule_client_get(&req->rq_pill,
							&RMF_OST_BODY);
			if (cli->cl_checksum_dump)
				dump_all_bulk_pages(&clbody->oa, page_count,
						    aa->aa_ppga, server_cksum,
						    client_cksum);

			LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode "DFID" object "DOSTID" extent [%llu-%llu], client %x/%x, server %x, cksum_type %x\n",
					   obd_name,
					   libcfs_nid2str(peer->nid),
					   via, router,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_seq : 0ULL,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_oid : 0,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_ver : 0,
					   POSTID(&body->oa.o_oi),
					   aa->aa_ppga[0]->off,
					   aa->aa_ppga[page_count-1]->off +
					   aa->aa_ppga[page_count-1]->count - 1,
					   client_cksum, client_cksum2,
					   server_cksum, cksum_type);
			cksum_counter = 0;
			aa->aa_oa->o_cksum = client_cksum;
			rc = -EAGAIN;
		} else {
			cksum_counter++;
			CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
			rc = 0;
		}
	} else if (unlikely(client_cksum)) {
		static int cksum_missed;

		cksum_missed++;
		if ((cksum_missed & (-cksum_missed)) == cksum_missed)
			CERROR("%s: checksum %u requested from %s but not sent\n",
			       obd_name, cksum_missed,
			       libcfs_nid2str(peer->nid));
	} else {
		rc = 0;
	}

	inode = page2inode(aa->aa_ppga[0]->pg);
	if (inode == NULL) {
		/* Try to get reference to inode from cl_page if we are
		 * dealing with direct IO, as handled pages are not
		 * actual page cache pages.
		 */
		struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);

		inode = oap2cl_page(oap)->cp_inode;
		if (inode) {
			blockbits = inode->i_blkbits;
			blocksize = 1 << blockbits;
		}
	}
	if (inode && IS_ENCRYPTED(inode)) {
		int idx;

		if (!llcrypt_has_encryption_key(inode)) {
			CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
			GOTO(out, rc);
		}
		for (idx = 0; idx < aa->aa_page_count; idx++) {
			struct brw_page *pg = aa->aa_ppga[idx];
			unsigned int offs = 0;

			while (offs < PAGE_SIZE) {
				/* do not decrypt if page is all 0s */
				if (memchr_inv(page_address(pg->pg) + offs, 0,
					  LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
					/* if page is empty forward info to
					 * upper layers (ll_io_zero_page) by
					 * clearing PagePrivate2
					 */
					if (!offs)
						ClearPagePrivate2(pg->pg);
					break;
				}

				if (blockbits) {
					/* This is direct IO case. Directly call
					 * decrypt function that takes inode as
					 * input parameter. Page does not need
					 * to be locked.
					 */
					u64 lblk_num =
						((u64)(pg->off >> PAGE_SHIFT) <<
						     (PAGE_SHIFT - blockbits)) +
						       (offs >> blockbits);
					unsigned int i;

					for (i = offs;
					     i < offs +
						    LUSTRE_ENCRYPTION_UNIT_SIZE;
					     i += blocksize, lblk_num++) {
						rc =
						  llcrypt_decrypt_block_inplace(
							  inode, pg->pg,
							  blocksize, i,
							  lblk_num);
						if (rc)
							break;
					}
				} else {
					rc = llcrypt_decrypt_pagecache_blocks(
						pg->pg,
						LUSTRE_ENCRYPTION_UNIT_SIZE,
						offs);
				}
				if (rc)
					GOTO(out, rc);

				offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
			}
		}
	}

out:
	if (rc >= 0)
		lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
				     aa->aa_oa, &body->oa);

	RETURN(rc);
}

static int osc_brw_redo_request(struct ptlrpc_request *request,
				struct osc_brw_async_args *aa, int rc)
{
	struct ptlrpc_request *new_req;
	struct osc_brw_async_args *new_aa;
	struct osc_async_page *oap;
	ENTRY;

	/* The below message is checked in replay-ost-single.sh test_8ae*/
	DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
		  "redo for recoverable error %d", rc);

	rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
				  OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
				  aa->aa_cli, aa->aa_oa, aa->aa_page_count,
				  aa->aa_ppga, &new_req, 1);
	if (rc)
		RETURN(rc);

	list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
		if (oap->oap_request != NULL) {
			LASSERTF(request == oap->oap_request,
				 "request %p != oap_request %p\n",
				 request, oap->oap_request);
		}
	}
	/*
	 * New request takes over pga and oaps from old request.
	 * Note that copying a list_head doesn't work, need to move it...
	 */
	aa->aa_resends++;
	new_req->rq_interpret_reply = request->rq_interpret_reply;
	new_req->rq_async_args = request->rq_async_args;
	new_req->rq_commit_cb = request->rq_commit_cb;
	/* cap resend delay to the current request timeout, this is similar to
	 * what ptlrpc does (see after_reply()) */
	if (aa->aa_resends > new_req->rq_timeout)
		new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
	else
		new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
	new_req->rq_generation_set = 1;
	new_req->rq_import_generation = request->rq_import_generation;

	new_aa = ptlrpc_req_async_args(new_aa, new_req);

	INIT_LIST_HEAD(&new_aa->aa_oaps);
	list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
	INIT_LIST_HEAD(&new_aa->aa_exts);
	list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
	new_aa->aa_resends = aa->aa_resends;

	list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
		if (oap->oap_request) {
			ptlrpc_req_finished(oap->oap_request);
			oap->oap_request = ptlrpc_request_addref(new_req);
		}
	}

	/* XXX: This code will run into problem if we're going to support
	 * to add a series of BRW RPCs into a self-defined ptlrpc_request_set
	 * and wait for all of them to be finished. We should inherit request
	 * set from old request. */
	ptlrpcd_add_req(new_req);

	DEBUG_REQ(D_INFO, new_req, "new request");
	RETURN(0);
}

/* ugh, we want disk allocation on the target to happen in offset order. We'll
 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
 * fine for our small page arrays and doesn't require allocation. It's an
 * insertion sort that swaps elements that are strides apart, shrinking the
 * stride down until it's '1' and the array is sorted. */
static void sort_brw_pages(struct brw_page **array, int num)
{
	int stride, i, j;
	struct brw_page *tmp;

	if (num == 1)
		return;
	for (stride = 1; stride < num; stride = (stride * 3) + 1)
		;
	do {
		stride /= 3;
		for (i = stride; i < num; i++) {
			tmp = array[i];
			j = i;
			while (j >= stride && array[j - stride]->off > tmp->off) {
				array[j] = array[j - stride];
				j -= stride;
			}
			array[j] = tmp;
		}
	} while (stride > 1);
}
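#if 0	/* Illustrative sketch, not part of the original file: the same
	 * 3x+1 (Knuth) stride sequence as sort_brw_pages() above, shown on
	 * plain integers so it can be compiled stand-alone in userspace. */
#include <stdio.h>

static void shellsort_offsets(unsigned long long *off, int num)
{
	unsigned long long tmp;
	int stride, i, j;

	if (num == 1)
		return;
	/* for num = 5 this generates 1, 4, 13 and stops at 13 */
	for (stride = 1; stride < num; stride = (stride * 3) + 1)
		;
	do {
		stride /= 3;	/* passes then run with strides 4, then 1 */
		for (i = stride; i < num; i++) {
			tmp = off[i];
			for (j = i; j >= stride && off[j - stride] > tmp;
			     j -= stride)
				off[j] = off[j - stride];
			off[j] = tmp;
		}
	} while (stride > 1);
}

int main(void)
{
	unsigned long long off[] = { 40960, 0, 8192, 4096, 12288 };
	int i;

	shellsort_offsets(off, 5);
	for (i = 0; i < 5; i++)
		printf("%llu ", off[i]);	/* 0 4096 8192 12288 40960 */
	printf("\n");
	return 0;
}
#endif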
static void osc_release_ppga(struct brw_page **ppga, size_t count)
{
	LASSERT(ppga != NULL);
	OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
}
static int brw_interpret(const struct lu_env *env,
			 struct ptlrpc_request *req, void *args, int rc)
{
	struct osc_brw_async_args *aa = args;
	struct osc_extent *ext;
	struct osc_extent *tmp;
	struct client_obd *cli = aa->aa_cli;
	unsigned long transferred = 0;

	ENTRY;
	rc = osc_brw_fini_request(req, rc);
	CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);

	/* restore clear text pages */
	osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);

	/*
	 * When the server returns -EINPROGRESS, the client should always
	 * retry regardless of the number of times the bulk was resent
	 * already.
	 */
	if (osc_recoverable_error(rc) && !req->rq_no_delay) {
		if (req->rq_import_generation !=
		    req->rq_import->imp_generation) {
			CDEBUG(D_HA, "%s: resend cross eviction for object: "
			       ""DOSTID", rc = %d.\n",
			       req->rq_import->imp_obd->obd_name,
			       POSTID(&aa->aa_oa->o_oi), rc);
		} else if (rc == -EINPROGRESS ||
			   client_should_resend(aa->aa_resends, aa->aa_cli)) {
			rc = osc_brw_redo_request(req, aa, rc);
		} else {
			CERROR("%s: too many resent retries for object: "
			       "%llu:%llu, rc = %d.\n",
			       req->rq_import->imp_obd->obd_name,
			       POSTID(&aa->aa_oa->o_oi), rc);
		}

		if (rc == 0)
			RETURN(0);
		else if (rc == -EAGAIN || rc == -EINPROGRESS)
			rc = -EIO;
	}

	if (rc == 0) {
		struct obdo *oa = aa->aa_oa;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;
		unsigned long valid = 0;
		struct cl_object *obj;
		struct osc_async_page *last;

		last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
		obj = osc2cl(last->oap_obj);

		cl_object_attr_lock(obj);
		if (oa->o_valid & OBD_MD_FLBLOCKS) {
			attr->cat_blocks = oa->o_blocks;
			valid |= CAT_BLOCKS;
		}
		if (oa->o_valid & OBD_MD_FLMTIME) {
			attr->cat_mtime = oa->o_mtime;
			valid |= CAT_MTIME;
		}
		if (oa->o_valid & OBD_MD_FLATIME) {
			attr->cat_atime = oa->o_atime;
			valid |= CAT_ATIME;
		}
		if (oa->o_valid & OBD_MD_FLCTIME) {
			attr->cat_ctime = oa->o_ctime;
			valid |= CAT_CTIME;
		}

		if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
			struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
			loff_t last_off = last->oap_count + last->oap_obj_off +
				last->oap_page_off;

			/* Change file size if this is an out of quota or
			 * direct IO write and it extends the file size */
			if (loi->loi_lvb.lvb_size < last_off) {
				attr->cat_size = last_off;
				valid |= CAT_SIZE;
			}
			/* Extend KMS if it's not a lockless write */
			if (loi->loi_kms < last_off &&
			    oap2osc_page(last)->ops_srvlock == 0) {
				attr->cat_kms = last_off;
				valid |= CAT_KMS;
			}
		}

		if (valid != 0)
			cl_object_attr_update(env, obj, attr, valid);
		cl_object_attr_unlock(obj);
	}
	OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
	aa->aa_oa = NULL;

	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
		osc_inc_unstable_pages(req);

	list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
		list_del_init(&ext->oe_link);
		osc_extent_finish(env, ext, 1,
				  rc && req->rq_no_delay ? -EAGAIN : rc);
	}
	LASSERT(list_empty(&aa->aa_exts));
	LASSERT(list_empty(&aa->aa_oaps));

	transferred = (req->rq_bulk == NULL ? /* short io */
		       aa->aa_requested_nob :
		       req->rq_bulk->bd_nob_transferred);

	osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
	ptlrpc_lprocfs_brw(req, transferred);

	spin_lock(&cli->cl_loi_list_lock);
	/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
	 * is called so we know whether to go to sync BRWs or wait for more
	 * RPCs to complete */
	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
		cli->cl_w_in_flight--;
	else
		cli->cl_r_in_flight--;
	osc_wake_cache_waiters(cli);
	spin_unlock(&cli->cl_loi_list_lock);

	osc_io_unplug(env, cli, NULL);
	RETURN(rc);
}
static void brw_commit(struct ptlrpc_request *req)
{
	/* If osc_inc_unstable_pages (via osc_extent_finish) races with
	 * this called via the rq_commit_cb, I need to ensure
	 * osc_dec_unstable_pages is still called. Otherwise unstable
	 * pages may be leaked. */
	spin_lock(&req->rq_lock);
	if (likely(req->rq_unstable)) {
		req->rq_unstable = 0;
		spin_unlock(&req->rq_lock);

		osc_dec_unstable_pages(req);
	} else {
		req->rq_committed = 1;
		spin_unlock(&req->rq_lock);
	}
}

/**
 * Build an RPC by the list of extent @ext_list. The caller must ensure
 * that the total pages in this list are NOT over max pages per RPC.
 * Extents in the list must be in OES_RPC state.
 */
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
		  struct list_head *ext_list, int cmd)
{
	struct ptlrpc_request		*req = NULL;
	struct osc_extent		*ext;
	struct brw_page			**pga = NULL;
	struct osc_brw_async_args	*aa = NULL;
	struct obdo			*oa = NULL;
	struct osc_async_page		*oap;
	struct osc_object		*obj = NULL;
	struct cl_req_attr		*crattr = NULL;
	loff_t				starting_offset = OBD_OBJECT_EOF;
	loff_t				ending_offset = 0;
	/* '1' for consistency with code that checks !mpflag to restore */
	unsigned int mpflag = 1;
	int mem_tight = 0;
	int page_count = 0;
	bool soft_sync = false;
	bool ndelay = false;
	int i;
	int grant = 0;
	int rc;
	__u32 layout_version = 0;
	LIST_HEAD(rpc_list);
	struct ost_body *body;

	ENTRY;
	LASSERT(!list_empty(ext_list));

	/* add pages into rpc_list to build BRW rpc */
	list_for_each_entry(ext, ext_list, oe_link) {
		LASSERT(ext->oe_state == OES_RPC);
		mem_tight |= ext->oe_memalloc;
		grant += ext->oe_grants;
		page_count += ext->oe_nr_pages;
		layout_version = max(layout_version, ext->oe_layout_version);
		if (obj == NULL)
			obj = ext->oe_obj;
	}

	soft_sync = osc_over_unstable_soft_limit(cli);
	if (mem_tight)
		mpflag = memalloc_noreclaim_save();

	OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
	if (pga == NULL)
		GOTO(out, rc = -ENOMEM);

	OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
	if (oa == NULL)
		GOTO(out, rc = -ENOMEM);

	i = 0;
	list_for_each_entry(ext, ext_list, oe_link) {
		list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
			if (mem_tight)
				oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
			if (soft_sync)
				oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
			pga[i] = &oap->oap_brw_page;
			pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
			i++;

			list_add_tail(&oap->oap_rpc_item, &rpc_list);
			if (starting_offset == OBD_OBJECT_EOF ||
			    starting_offset > oap->oap_obj_off)
				starting_offset = oap->oap_obj_off;
			else
				LASSERT(oap->oap_page_off == 0);
			if (ending_offset < oap->oap_obj_off + oap->oap_count)
				ending_offset = oap->oap_obj_off +
						oap->oap_count;
			else
				LASSERT(oap->oap_page_off + oap->oap_count ==
					PAGE_SIZE);
		}
		if (ext->oe_ndelay)
			ndelay = true;
	}

	/* first page in the list */
	oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);

	crattr = &osc_env_info(env)->oti_req_attr;
	memset(crattr, 0, sizeof(*crattr));
	crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
	crattr->cra_flags = ~0ULL;
	crattr->cra_page = oap2cl_page(oap);
	crattr->cra_oa = oa;
	cl_req_attr_set(env, osc2cl(obj), crattr);

	if (cmd == OBD_BRW_WRITE) {
		oa->o_grant_used = grant;
		if (layout_version > 0) {
			CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
			       PFID(&oa->o_oi.oi_fid), layout_version);

			oa->o_layout_version = layout_version;
			oa->o_valid |= OBD_MD_LAYOUT_VERSION;
		}
	}

	sort_brw_pages(pga, page_count);
	rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
	if (rc != 0) {
		CERROR("prep_req failed: %d\n", rc);
		GOTO(out, rc);
	}

	req->rq_commit_cb = brw_commit;
	req->rq_interpret_reply = brw_interpret;
	req->rq_memalloc = mem_tight != 0;
	oap->oap_request = ptlrpc_request_addref(req);
	if (ndelay) {
		req->rq_no_resend = req->rq_no_delay = 1;
		/* probably set a shorter timeout value
		 * to handle ETIMEDOUT in brw_interpret() correctly. */
		/* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
	}

	/* Need to update the timestamps after the request is built in case
	 * we race with setattr (locally or in queue at OST). If OST gets
	 * later setattr before earlier BRW (as determined by the request xid),
	 * the OST will not use BRW timestamps. Sadly, there is no obvious
	 * way to do this in a single call. bug 10150 */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	crattr->cra_oa = &body->oa;
	crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
	cl_req_attr_set(env, osc2cl(obj), crattr);
	lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);

	aa = ptlrpc_req_async_args(aa, req);
	INIT_LIST_HEAD(&aa->aa_oaps);
	list_splice_init(&rpc_list, &aa->aa_oaps);
	INIT_LIST_HEAD(&aa->aa_exts);
	list_splice_init(ext_list, &aa->aa_exts);

	spin_lock(&cli->cl_loi_list_lock);
	starting_offset >>= PAGE_SHIFT;
	if (cmd == OBD_BRW_READ) {
		cli->cl_r_in_flight++;
		lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
		lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
		lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
				      starting_offset + 1);
	} else {
		cli->cl_w_in_flight++;
		lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
		lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
		lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
				      starting_offset + 1);
	}
	spin_unlock(&cli->cl_loi_list_lock);

	DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
		  page_count, aa, cli->cl_r_in_flight,
		  cli->cl_w_in_flight);
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);

	ptlrpcd_add_req(req);
	rc = 0;
	EXIT;

out:
	if (mem_tight)
		memalloc_noreclaim_restore(mpflag);

	if (rc != 0) {
		LASSERT(req == NULL);

		if (oa)
			OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
		if (pga) {
			osc_release_bounce_pages(pga, page_count);
			osc_release_ppga(pga, page_count);
		}
		/* this should happen rarely and is pretty bad, it makes the
		 * pending list not follow the dirty order
		 */
		while ((ext = list_first_entry_or_null(ext_list,
						       struct osc_extent,
						       oe_link)) != NULL) {
			list_del_init(&ext->oe_link);
			osc_extent_finish(env, ext, 0, rc);
		}
	}
	RETURN(rc);
}
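/*
 * Worked example (illustrative only, assuming lprocfs_oh_tally_log2()
 * files a value v into the smallest bucket b with 2^b >= v): a write RPC
 * starting at file offset 1MiB with 4KiB pages has starting_offset
 * shifted down to page index 256, and tallying 256 + 1 = 257 lands in
 * offset-histogram bucket 9 (2^9 = 512 >= 257). The "+ 1" keeps an RPC
 * that starts at offset 0 countable in bucket 0.
 */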
/* This is to refresh our lock in the face of no RPCs. */
void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
{
	struct ptlrpc_request *req;
	struct obdo oa;
	struct brw_page bpg = { .off = start, .count = 1 };
	struct brw_page *pga = &bpg;
	int rc;

	memset(&oa, 0, sizeof(oa));
	oa.o_oi = osc->oo_oinfo->loi_oi;
	oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
	/* For updated servers - don't do a read */
	oa.o_flags = OBD_FL_NORPC;

	rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
				  &req, 0);

	/* If we succeeded we ship it off, if not there's no point in doing
	 * anything. Also no resends.
	 * No interpret callback, no commit callback.
	 */
	if (!rc) {
		req->rq_no_resend = 1;
		ptlrpcd_add_req(req);
	}
}
static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
{
	int set = 0;

	LASSERT(lock != NULL);
	lock_res_and_lock(lock);
	if (lock->l_ast_data == NULL)
		lock->l_ast_data = data;
	if (lock->l_ast_data == data)
		set = 1;
	unlock_res_and_lock(lock);

	return set;
}
int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
		     void *cookie, struct lustre_handle *lockh,
		     enum ldlm_mode mode, __u64 *flags, bool speculative,
		     int errcode)
{
	bool intent = *flags & LDLM_FL_HAS_INTENT;
	int rc;

	ENTRY;
	/* The request was created before ldlm_cli_enqueue call. */
	if (intent && errcode == ELDLM_LOCK_ABORTED) {
		struct ldlm_reply *rep;

		rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
		LASSERT(rep != NULL);

		rep->lock_policy_res1 =
			ptlrpc_status_ntoh(rep->lock_policy_res1);
		if (rep->lock_policy_res1)
			errcode = rep->lock_policy_res1;
		if (!speculative)
			*flags |= LDLM_FL_LVB_READY;
	} else if (errcode == ELDLM_OK) {
		*flags |= LDLM_FL_LVB_READY;
	}

	/* Call the update callback. */
	rc = (*upcall)(cookie, lockh, errcode);

	/* release the reference taken in ldlm_cli_enqueue() */
	if (errcode == ELDLM_LOCK_MATCHED)
		errcode = ELDLM_OK;
	if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
		ldlm_lock_decref(lockh, mode);

	RETURN(rc);
}
int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
			  void *args, int rc)
{
	struct osc_enqueue_args *aa = args;
	struct ldlm_lock *lock;
	struct lustre_handle *lockh = &aa->oa_lockh;
	enum ldlm_mode mode = aa->oa_mode;
	struct ost_lvb *lvb = aa->oa_lvb;
	__u32 lvb_len = sizeof(*lvb);
	__u64 flags = 0;
	struct ldlm_enqueue_info einfo = {
		.ei_type = aa->oa_type,
		.ei_mode = mode,
	};

	ENTRY;
	/* ldlm_cli_enqueue is holding a reference on the lock, so it must
	 * be valid. */
	lock = ldlm_handle2lock(lockh);
	LASSERTF(lock != NULL,
		 "lockh %#llx, req %p, aa %p - client evicted?\n",
		 lockh->cookie, req, aa);

	/* Take an additional reference so that a blocking AST that
	 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
	 * to arrive after an upcall has been executed by
	 * osc_enqueue_fini(). */
	ldlm_lock_addref(lockh, mode);

	/* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
	OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);

	/* Let the CP AST grant the lock first. */
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);

	if (aa->oa_speculative) {
		LASSERT(aa->oa_lvb == NULL);
		LASSERT(aa->oa_flags == NULL);
		aa->oa_flags = &flags;
	}

	/* Complete obtaining the lock procedure. */
	rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
				   lvb, lvb_len, lockh, rc);
	/* Complete osc stuff. */
	rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
			      aa->oa_flags, aa->oa_speculative, rc);

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);

	ldlm_lock_decref(lockh, mode);
	LDLM_LOCK_PUT(lock);
	RETURN(rc);
}
/* When enqueuing asynchronously, locks are not ordered, so we can obtain a
 * lock from the 2nd OSC before a lock from the 1st one. This does not deadlock
 * with other synchronous requests; however, keeping some locks while trying to
 * obtain others may take a considerable amount of time in the case of OST
 * failure, and when other sync requests do not get a released lock from a
 * client, the client is evicted from the cluster -- such scenarios make life
 * difficult, so release locks just after they are obtained. */
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
		     __u64 *flags, union ldlm_policy_data *policy,
		     struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
		     void *cookie, struct ldlm_enqueue_info *einfo,
		     struct ptlrpc_request_set *rqset, int async,
		     bool speculative)
{
	struct obd_device *obd = exp->exp_obd;
	struct lustre_handle lockh = { 0 };
	struct ptlrpc_request *req = NULL;
	int intent = *flags & LDLM_FL_HAS_INTENT;
	__u64 match_flags = *flags;
	enum ldlm_mode mode;
	int rc;

	ENTRY;
	/* Filesystem lock extents are extended to page boundaries so that
	 * dealing with the page cache is a little smoother. */
	policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
	policy->l_extent.end |= ~PAGE_MASK;
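	/*
	 * Worked example (illustrative only): with 4KiB pages, PAGE_MASK is
	 * ~0xfff, so a byte range [5000, 6000] is widened to the page-aligned
	 * extent [4096, 8191]:
	 *   start: 5000 - (5000 & 0xfff) = 5000 - 904 = 4096
	 *   end:   6000 | 0xfff                       = 8191
	 */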
	/* Next, search for already existing extent locks that will cover us */
	/* If we're trying to read, we also search for an existing PW lock. The
	 * VFS and page cache already protect us locally, so lots of readers/
	 * writers can share a single PW lock.
	 *
	 * There are problems with conversion deadlocks, so instead of
	 * converting a read lock to a write lock, we'll just enqueue a new
	 * one.
	 *
	 * At some point we should cancel the read lock instead of making them
	 * send us a blocking callback, but there are problems with canceling
	 * locks out from other users right now, too. */
	mode = einfo->ei_mode;
	if (einfo->ei_mode == LCK_PR)
		mode |= LCK_PW;
	/* Normal lock requests must wait for the LVB to be ready before
	 * matching a lock; speculative lock requests do not need to,
	 * because they will not actually use the lock. */
	if (!speculative)
		match_flags |= LDLM_FL_LVB_READY;
	if (intent != 0)
		match_flags |= LDLM_FL_BLOCK_GRANTED;
	mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
			       einfo->ei_type, policy, mode, &lockh);
	if (mode) {
		struct ldlm_lock *matched;

		if (*flags & LDLM_FL_TEST_LOCK)
			RETURN(ELDLM_OK);

		matched = ldlm_handle2lock(&lockh);
		if (speculative) {
			/* This DLM lock request is speculative, and does not
			 * have an associated IO request. Therefore if there
			 * is already a DLM lock, it will just inform the
			 * caller to cancel the request for this stripe. */
			lock_res_and_lock(matched);
			if (ldlm_extent_equal(&policy->l_extent,
					      &matched->l_policy_data.l_extent))
				rc = -EEXIST;
			else
				rc = -ECANCELED;
			unlock_res_and_lock(matched);

			ldlm_lock_decref(&lockh, mode);
			LDLM_LOCK_PUT(matched);
			RETURN(rc);
		} else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
			*flags |= LDLM_FL_LVB_READY;

			/* We already have a lock, and it's referenced. */
			(*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);

			ldlm_lock_decref(&lockh, mode);
			LDLM_LOCK_PUT(matched);
			RETURN(ELDLM_OK);
		} else {
			ldlm_lock_decref(&lockh, mode);
			LDLM_LOCK_PUT(matched);
		}
	}

	if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
		RETURN(-ENOLCK);

	/* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
	*flags &= ~LDLM_FL_BLOCK_GRANTED;

	rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
			      sizeof(*lvb), LVB_T_OST, &lockh, async);
	if (async) {
		if (!rc) {
			struct osc_enqueue_args *aa;

			aa = ptlrpc_req_async_args(aa, req);
			aa->oa_exp = exp;
			aa->oa_mode = einfo->ei_mode;
			aa->oa_type = einfo->ei_type;
			lustre_handle_copy(&aa->oa_lockh, &lockh);
			aa->oa_upcall = upcall;
			aa->oa_cookie = cookie;
			aa->oa_speculative = speculative;
			if (!speculative) {
				aa->oa_flags = flags;
				aa->oa_lvb = lvb;
			} else {
				/* speculative locks are essentially to enqueue
				 * a DLM lock in advance, so we don't care
				 * about the result of the enqueue. */
				aa->oa_lvb = NULL;
				aa->oa_flags = NULL;
			}

			req->rq_interpret_reply = osc_enqueue_interpret;
			ptlrpc_set_add_req(rqset, req);
		}
		RETURN(rc);
	}

	rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
			      flags, speculative, rc);

	RETURN(rc);
}
int osc_match_base(const struct lu_env *env, struct obd_export *exp,
		   struct ldlm_res_id *res_id, enum ldlm_type type,
		   union ldlm_policy_data *policy, enum ldlm_mode mode,
		   __u64 *flags, struct osc_object *obj,
		   struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
{
	struct obd_device *obd = exp->exp_obd;
	__u64 lflags = *flags;
	enum ldlm_mode rc;

	ENTRY;
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
		RETURN(-EIO);

	/* Filesystem lock extents are extended to page boundaries so that
	 * dealing with the page cache is a little smoother */
	policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
	policy->l_extent.end |= ~PAGE_MASK;

	/* Next, search for already existing extent locks that will cover us */
	rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
				       res_id, type, policy, mode, lockh,
				       match_flags);
	if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
		RETURN(rc);

	if (obj != NULL) {
		struct ldlm_lock *lock = ldlm_handle2lock(lockh);

		LASSERT(lock != NULL);
		if (osc_set_lock_data(lock, obj)) {
			lock_res_and_lock(lock);
			if (!ldlm_is_lvb_cached(lock)) {
				LASSERT(lock->l_ast_data == obj);
				osc_lock_lvb_update(env, obj, lock, NULL);
				ldlm_set_lvb_cached(lock);
			}
			unlock_res_and_lock(lock);
		} else {
			ldlm_lock_decref(lockh, rc);
			rc = 0;
		}
		LDLM_LOCK_PUT(lock);
	}
	RETURN(rc);
}
static int osc_statfs_interpret(const struct lu_env *env,
				struct ptlrpc_request *req, void *args, int rc)
{
	struct osc_async_args *aa = args;
	struct obd_statfs *msfs;

	ENTRY;
	if (rc == -EBADR)
		/*
		 * The request has in fact never been sent due to issues at
		 * a higher level (LOV). Exit immediately since the caller
		 * is aware of the problem and takes care of the clean up.
		 */
		RETURN(rc);
	if ((rc == -ENOTCONN || rc == -EAGAIN) &&
	    (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
		GOTO(out, rc = 0);
	if (rc != 0)
		GOTO(out, rc);
	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
	if (msfs == NULL)
		GOTO(out, rc = -EPROTO);
	*aa->aa_oi->oi_osfs = *msfs;
out:
	rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
	RETURN(rc);
}
static int osc_statfs_async(struct obd_export *exp,
			    struct obd_info *oinfo, time64_t max_age,
			    struct ptlrpc_request_set *rqset)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req;
	struct osc_async_args *aa;
	int rc;

	ENTRY;
	if (obd->obd_osfs_age >= max_age) {
		CDEBUG(D_SUPER,
		       "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
		       obd->obd_name, &obd->obd_osfs,
		       obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
		       obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
		spin_lock(&obd->obd_osfs_lock);
		memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
		spin_unlock(&obd->obd_osfs_lock);
		oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
		if (oinfo->oi_cb_up)
			oinfo->oi_cb_up(oinfo, 0);

		RETURN(0);
	}

	/* We could possibly pass max_age in the request (as an absolute
	 * timestamp or a "seconds.usec ago") so the target can avoid doing
	 * extra calls into the filesystem if that isn't necessary (e.g.
	 * during mount that would help a bit). Having relative timestamps
	 * is not so great if request processing is slow, while absolute
	 * timestamps are not ideal because they need time synchronization. */
	req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	ptlrpc_request_set_replen(req);
	req->rq_request_portal = OST_CREATE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
		/* procfs requests should not wait on an unresponsive server,
		 * to avoid deadlock: no resend, no delay */
		req->rq_no_resend = 1;
		req->rq_no_delay = 1;
	}

	req->rq_interpret_reply = osc_statfs_interpret;
	aa = ptlrpc_req_async_args(aa, req);
	aa->aa_oi = oinfo;

	ptlrpc_set_add_req(rqset, req);
	RETURN(0);
}
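/*
 * Usage note (illustrative, the call site below is assumed rather than
 * taken from this file): max_age is an absolute "oldest acceptable"
 * timestamp, so a caller content with statfs data up to 5 seconds old
 * would do something like
 *
 *	rc = osc_statfs_async(exp, oinfo, ktime_get_seconds() - 5, rqset);
 *
 * and the cached obd_osfs is served whenever obd_osfs_age >= max_age,
 * without sending an OST_STATFS RPC at all.
 */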
static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
		      struct obd_statfs *osfs, time64_t max_age, __u32 flags)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct obd_statfs *msfs;
	struct ptlrpc_request *req;
	struct obd_import *imp, *imp0;
	int rc;

	ENTRY;
	/*
	 * Since the request might also come from lprocfs, we need to
	 * sync this with client_disconnect_export (Bug 15684).
	 */
	with_imp_locked(obd, imp0, rc)
		imp = class_import_get(imp0);
	if (rc)
		RETURN(rc);

	/* We could possibly pass max_age in the request (as an absolute
	 * timestamp or a "seconds.usec ago") so the target can avoid doing
	 * extra calls into the filesystem if that isn't necessary (e.g.
	 * during mount that would help a bit). Having relative timestamps
	 * is not so great if request processing is slow, while absolute
	 * timestamps are not ideal because they need time synchronization. */
	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);

	class_import_put(imp);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	ptlrpc_request_set_replen(req);
	req->rq_request_portal = OST_CREATE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	if (flags & OBD_STATFS_NODELAY) {
		/* procfs requests should not wait on an unresponsive server,
		 * to avoid deadlock: no resend, no delay */
		req->rq_no_resend = 1;
		req->rq_no_delay = 1;
	}

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
	if (msfs == NULL)
		GOTO(out, rc = -EPROTO);

	*osfs = *msfs;

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}
static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
			 void *karg, void __user *uarg)
{
	struct obd_device *obd = exp->exp_obd;
	struct obd_ioctl_data *data = karg;
	int rc = 0;

	ENTRY;
	if (!try_module_get(THIS_MODULE)) {
		CERROR("%s: cannot get module '%s'\n", obd->obd_name,
		       module_name(THIS_MODULE));
		return -EINVAL;
	}
	switch (cmd) {
	case OBD_IOC_CLIENT_RECOVER:
		rc = ptlrpc_recover_import(obd->u.cli.cl_import,
					   data->ioc_inlbuf1, 0);
		if (rc > 0)
			rc = 0;
		break;
	case IOC_OSC_SET_ACTIVE:
		rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
					      data->ioc_offset);
		break;
	default:
		rc = -ENOTTY;
		CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
		       obd->obd_name, cmd, current->comm, rc);
		break;
	}

	module_put(THIS_MODULE);
	return rc;
}
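#if 0	/* Illustrative sketch, not part of the original file: how a caller
	 * might drive IOC_OSC_SET_ACTIVE through the method table above via
	 * the generic obd_iocontrol() wrapper; this caller-side helper is an
	 * assumption, not code from this module. */
static int example_set_active(struct obd_export *exp, bool active)
{
	struct obd_ioctl_data data = {
		/* osc_iocontrol() reads the on/off value from ioc_offset */
		.ioc_offset = active ? 1 : 0,
	};

	return obd_iocontrol(IOC_OSC_SET_ACTIVE, exp, sizeof(data),
			     &data, NULL);
}
#endif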
int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
		       u32 keylen, void *key, u32 vallen, void *val,
		       struct ptlrpc_request_set *set)
{
	struct ptlrpc_request *req;
	struct obd_device *obd = exp->exp_obd;
	struct obd_import *imp = class_exp2cliimp(exp);
	char *tmp;
	int rc;

	ENTRY;
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);

	if (KEY_IS(KEY_CHECKSUM)) {
		if (vallen != sizeof(int))
			RETURN(-EINVAL);
		exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
		RETURN(0);
	}

	if (KEY_IS(KEY_SPTLRPC_CONF)) {
		sptlrpc_conf_client_adapt(obd);
		RETURN(0);
	}

	if (KEY_IS(KEY_FLUSH_CTX)) {
		sptlrpc_import_flush_my_ctx(imp);
		RETURN(0);
	}

	if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
		struct client_obd *cli = &obd->u.cli;
		long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
		long target = *(long *)val;

		nr = osc_lru_shrink(env, cli, min(nr, target), true);
		*(long *)val -= nr;
		RETURN(0);
	}

	if (!set && !KEY_IS(KEY_GRANT_SHRINK))
		RETURN(-EINVAL);

	/* We pass all other commands directly to OST. Since nobody calls osc
	 * methods directly and everybody is supposed to go through LOV, we
	 * assume LOV checked invalid values for us.
	 * The only recognised values so far are evict_by_nid and mds_conn.
	 * Even if something bad goes through, we'd get a -EINVAL from OST
	 * anyway. */
	req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
				   &RQF_OST_SET_GRANT_INFO :
				   &RQF_OBD_SET_INFO);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
			     RCL_CLIENT, keylen);
	if (!KEY_IS(KEY_GRANT_SHRINK))
		req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
				     RCL_CLIENT, vallen);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
	memcpy(tmp, key, keylen);
	tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
							&RMF_OST_BODY :
							&RMF_SETINFO_VAL);
	memcpy(tmp, val, vallen);

	if (KEY_IS(KEY_GRANT_SHRINK)) {
		struct osc_grant_args *aa;
		struct obdo *oa;

		aa = ptlrpc_req_async_args(aa, req);
		OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
		if (!oa) {
			ptlrpc_req_finished(req);
			RETURN(-ENOMEM);
		}
		*oa = ((struct ost_body *)val)->oa;
		aa->aa_oa = oa;
		req->rq_interpret_reply = osc_shrink_grant_interpret;
	}

	ptlrpc_request_set_replen(req);
	if (!KEY_IS(KEY_GRANT_SHRINK)) {
		LASSERT(set != NULL);
		ptlrpc_set_add_req(set, req);
		ptlrpc_check_set(NULL, set);
	} else {
		ptlrpcd_add_req(req);
	}

	RETURN(0);
}
3285 EXPORT_SYMBOL(osc_set_info_async);
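#if 0	/* Illustrative sketch, not part of the original file: toggling the
	 * per-OSC checksum flag through the generic set_info path handled by
	 * osc_set_info_async() above. obd_set_info_async() is the standard
	 * OBD wrapper; this particular call site is an assumption. */
static int example_enable_checksums(const struct lu_env *env,
				    struct obd_export *exp)
{
	int on = 1;

	/* KEY_IS(KEY_CHECKSUM) matches on the key string and length */
	return obd_set_info_async(env, exp, sizeof(KEY_CHECKSUM),
				  KEY_CHECKSUM, sizeof(on), &on, NULL);
}
#endif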
int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
		  struct obd_device *obd, struct obd_uuid *cluuid,
		  struct obd_connect_data *data, void *localdata)
{
	struct client_obd *cli = &obd->u.cli;

	if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
		long lost_grant;
		long grant;

		spin_lock(&cli->cl_loi_list_lock);
		grant = cli->cl_avail_grant + cli->cl_reserved_grant;
		if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
			/* restore ocd_grant_blkbits as client page bits */
			data->ocd_grant_blkbits = PAGE_SHIFT;
			grant += cli->cl_dirty_grant;
		} else {
			grant += cli->cl_dirty_pages << PAGE_SHIFT;
		}
		data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
		lost_grant = cli->cl_lost_grant;
		cli->cl_lost_grant = 0;
		spin_unlock(&cli->cl_loi_list_lock);

		CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
		       " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
		       data->ocd_version, data->ocd_grant, lost_grant);
	}

	RETURN(0);
}
3318 EXPORT_SYMBOL(osc_reconnect);
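/*
 * Worked example (illustrative only): a client reconnecting with
 * cl_avail_grant = 1MiB, cl_reserved_grant = 0 and 256 dirty pages of
 * 4KiB (without OBD_CONNECT_GRANT_PARAM) asks the server to honour
 * ocd_grant = 1MiB + 256 * 4KiB = 2MiB; if all counters are zero, the
 * "grant ? :" fallback requests 2 * cli_brw_size(obd) so new IO can
 * start immediately after reconnect.
 */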
int osc_disconnect(struct obd_export *exp)
{
	struct obd_device *obd = class_exp2obd(exp);
	int rc;

	rc = client_disconnect_export(exp);
	/**
	 * Initially we put del_shrink_grant before disconnect_export, but it
	 * causes the following problem if setup (connect) and cleanup
	 * (disconnect) are tangled together.
	 *      connect p1                     disconnect p2
	 *   ptlrpc_connect_import
	 *     ...............               class_manual_cleanup
	 *                                     osc_disconnect
	 *                                     del_shrink_grant
	 *   ptlrpc_connect_interrupt
	 *     ptlrpc_connect_interpret
	 *       add this client to shrink list
	 *                                      cleanup_osc
	 * Bang! The grant shrink thread triggers the shrink. BUG18662
	 */
	osc_del_grant_list(&obd->u.cli);
	return rc;
}
3344 EXPORT_SYMBOL(osc_disconnect);
int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				 struct hlist_node *hnode, void *arg)
{
	struct lu_env *env = arg;
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);
	struct ldlm_lock *lock;
	struct osc_object *osc = NULL;

	ENTRY;
	lock_res(res);
	list_for_each_entry(lock, &res->lr_granted, l_res_link) {
		if (lock->l_ast_data != NULL && osc == NULL) {
			osc = lock->l_ast_data;
			cl_object_get(osc2cl(osc));
		}

		/* clear LDLM_FL_CLEANED flag to make sure it will be canceled
		 * by the 2nd round of ldlm_namespace_clean() call in
		 * osc_import_event(). */
		ldlm_clear_cleaned(lock);
	}
	unlock_res(res);

	if (osc != NULL) {
		osc_object_invalidate(env, osc);
		cl_object_put(env, osc2cl(osc));
	}

	RETURN(0);
}
3376 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
static int osc_import_event(struct obd_device *obd,
			    struct obd_import *imp,
			    enum obd_import_event event)
{
	struct client_obd *cli;
	int rc = 0;

	ENTRY;
	LASSERT(imp->imp_obd == obd);

	switch (event) {
	case IMP_EVENT_DISCON: {
		cli = &obd->u.cli;
		spin_lock(&cli->cl_loi_list_lock);
		cli->cl_avail_grant = 0;
		cli->cl_lost_grant = 0;
		spin_unlock(&cli->cl_loi_list_lock);
		break;
	}
	case IMP_EVENT_INACTIVE: {
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
		break;
	}
	case IMP_EVENT_INVALIDATE: {
		struct ldlm_namespace *ns = obd->obd_namespace;
		struct lu_env *env;
		__u16 refcheck;

		ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);

		env = cl_env_get(&refcheck);
		if (!IS_ERR(env)) {
			osc_io_unplug(env, &obd->u.cli, NULL);

			cfs_hash_for_each_nolock(ns->ns_rs_hash,
						 osc_ldlm_resource_invalidate,
						 env, 0);
			cl_env_put(env, &refcheck);

			ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
		} else {
			rc = PTR_ERR(env);
		}
		break;
	}
	case IMP_EVENT_ACTIVE: {
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
		break;
	}
	case IMP_EVENT_OCD: {
		struct obd_connect_data *ocd = &imp->imp_connect_data;

		if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
			osc_init_grant(&obd->u.cli, ocd);

		if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
			imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;

		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
		break;
	}
	case IMP_EVENT_DEACTIVATE: {
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
		break;
	}
	case IMP_EVENT_ACTIVATE: {
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
		break;
	}
	default:
		CERROR("Unknown import event %d\n", event);
		LBUG();
	}
	RETURN(rc);
}

/**
 * Determine whether the lock can be canceled before replaying the lock
 * during recovery, see bug16774 for detailed information.
 *
 * \retval zero	the lock can't be canceled
 * \retval other	ok to cancel
 */
static int osc_cancel_weight(struct ldlm_lock *lock)
{
	/*
	 * Cancel all unused and granted extent locks.
	 */
	if (lock->l_resource->lr_type == LDLM_EXTENT &&
	    ldlm_is_granted(lock) &&
	    osc_ldlm_weigh_ast(lock) == 0)
		RETURN(1);

	RETURN(0);
}
static int brw_queue_work(const struct lu_env *env, void *data)
{
	struct client_obd *cli = data;

	CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
	osc_io_unplug(env, cli, NULL);
	RETURN(0);
}
int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
{
	struct client_obd *cli = &obd->u.cli;
	void *handler;
	int rc;

	ENTRY;
	rc = ptlrpcd_addref();
	if (rc)
		RETURN(rc);

	rc = client_obd_setup(obd, lcfg);
	if (rc)
		GOTO(out_ptlrpcd, rc);

	handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
	if (IS_ERR(handler))
		GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
	cli->cl_writeback_work = handler;

	handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
	if (IS_ERR(handler))
		GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
	cli->cl_lru_work = handler;

	rc = osc_quota_setup(obd);
	if (rc)
		GOTO(out_ptlrpcd_work, rc);

	cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
	cli->cl_root_squash = 0;
	osc_update_next_shrink(cli);

	RETURN(rc);

out_ptlrpcd_work:
	if (cli->cl_writeback_work != NULL) {
		ptlrpcd_destroy_work(cli->cl_writeback_work);
		cli->cl_writeback_work = NULL;
	}
	if (cli->cl_lru_work != NULL) {
		ptlrpcd_destroy_work(cli->cl_lru_work);
		cli->cl_lru_work = NULL;
	}
	client_obd_cleanup(obd);
out_ptlrpcd:
	ptlrpcd_decref();
	RETURN(rc);
}
3535 EXPORT_SYMBOL(osc_setup_common);
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
	struct client_obd *cli = &obd->u.cli;
	int adding;
	int added;
	int req_count;
	int rc;

	ENTRY;
	rc = osc_setup_common(obd, lcfg);
	if (rc < 0)
		RETURN(rc);

	rc = osc_tunables_init(obd);
	if (rc)
		RETURN(rc);

	/*
	 * We try to control the total number of requests with an upper limit
	 * osc_reqpool_maxreqcount. There might be some race which will cause
	 * over-limit allocation, but it is fine.
	 */
	req_count = atomic_read(&osc_pool_req_count);
	if (req_count < osc_reqpool_maxreqcount) {
		adding = cli->cl_max_rpcs_in_flight + 2;
		if (req_count + adding > osc_reqpool_maxreqcount)
			adding = osc_reqpool_maxreqcount - req_count;

		added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
		atomic_add(added, &osc_pool_req_count);
	}

	ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
	spin_lock(&osc_shrink_lock);
	list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
	spin_unlock(&osc_shrink_lock);
	cli->cl_import->imp_idle_timeout = osc_idle_timeout;
	cli->cl_import->imp_idle_debug = D_HA;

	RETURN(0);
}
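/*
 * Worked example (illustrative only): with osc_reqpool_maxreqcount = 80,
 * req_count = 75 and cl_max_rpcs_in_flight = 8, "adding" starts at
 * 8 + 2 = 10 and is clamped to 80 - 75 = 5, so at most 5 requests are
 * added and the shared pool never grows far past its cap (modulo the
 * benign race noted in the comment above).
 */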
int osc_precleanup_common(struct obd_device *obd)
{
	struct client_obd *cli = &obd->u.cli;

	ENTRY;
	/*
	 * for echo client, export may be on zombie list, wait for
	 * zombie thread to cull it, because cli.cl_import will be
	 * cleared in client_disconnect_export():
	 *   class_export_destroy() -> obd_cleanup() ->
	 *   echo_device_free() -> echo_client_cleanup() ->
	 *   obd_disconnect() -> osc_disconnect() ->
	 *   client_disconnect_export()
	 */
	obd_zombie_barrier();
	if (cli->cl_writeback_work) {
		ptlrpcd_destroy_work(cli->cl_writeback_work);
		cli->cl_writeback_work = NULL;
	}

	if (cli->cl_lru_work) {
		ptlrpcd_destroy_work(cli->cl_lru_work);
		cli->cl_lru_work = NULL;
	}

	obd_cleanup_client_import(obd);
	RETURN(0);
}
3609 EXPORT_SYMBOL(osc_precleanup_common);
static int osc_precleanup(struct obd_device *obd)
{
	ENTRY;
	osc_precleanup_common(obd);
	ptlrpc_lprocfs_unregister_obd(obd);
	RETURN(0);
}
int osc_cleanup_common(struct obd_device *obd)
{
	struct client_obd *cli = &obd->u.cli;
	int rc;

	ENTRY;
	spin_lock(&osc_shrink_lock);
	list_del(&cli->cl_shrink_list);
	spin_unlock(&osc_shrink_lock);

	/* lru cleanup */
	if (cli->cl_cache != NULL) {
		LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
		spin_lock(&cli->cl_cache->ccc_lru_lock);
		list_del_init(&cli->cl_lru_osc);
		spin_unlock(&cli->cl_cache->ccc_lru_lock);
		cli->cl_lru_left = NULL;
		cl_cache_decref(cli->cl_cache);
		cli->cl_cache = NULL;
	}

	/* free memory of osc quota cache */
	osc_quota_cleanup(obd);

	rc = client_obd_cleanup(obd);

	ptlrpcd_decref();
	RETURN(rc);
}
3651 EXPORT_SYMBOL(osc_cleanup_common);
3653 static const struct obd_ops osc_obd_ops = {
3654 .o_owner = THIS_MODULE,
3655 .o_setup = osc_setup,
3656 .o_precleanup = osc_precleanup,
3657 .o_cleanup = osc_cleanup_common,
3658 .o_add_conn = client_import_add_conn,
3659 .o_del_conn = client_import_del_conn,
3660 .o_connect = client_connect_import,
3661 .o_reconnect = osc_reconnect,
3662 .o_disconnect = osc_disconnect,
3663 .o_statfs = osc_statfs,
3664 .o_statfs_async = osc_statfs_async,
3665 .o_create = osc_create,
3666 .o_destroy = osc_destroy,
3667 .o_getattr = osc_getattr,
3668 .o_setattr = osc_setattr,
3669 .o_iocontrol = osc_iocontrol,
3670 .o_set_info_async = osc_set_info_async,
3671 .o_import_event = osc_import_event,
	.o_quotactl		= osc_quotactl,
};
3675 LIST_HEAD(osc_shrink_list);
3676 DEFINE_SPINLOCK(osc_shrink_lock);
#ifdef HAVE_SHRINKER_COUNT
static struct shrinker osc_cache_shrinker = {
	.count_objects	= osc_cache_shrink_count,
	.scan_objects	= osc_cache_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};
#else
static int osc_cache_shrink(struct shrinker *shrinker,
			    struct shrink_control *sc)
{
	(void)osc_cache_shrink_scan(shrinker, sc);

	return osc_cache_shrink_count(shrinker, sc);
}

static struct shrinker osc_cache_shrinker = {
	.shrink = osc_cache_shrink,
	.seeks = DEFAULT_SEEKS,
};
#endif
static int __init osc_init(void)
{
	unsigned int reqpool_size;
	unsigned int reqsize;
	int rc;

	ENTRY;
	/* print an address of _any_ initialized kernel symbol from this
	 * module, to allow debugging with gdb that doesn't support data
	 * symbols from modules. */
	CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);

	rc = lu_kmem_init(osc_caches);
	if (rc)
		RETURN(rc);

	rc = class_register_type(&osc_obd_ops, NULL, true,
				 LUSTRE_OSC_NAME, &osc_device_type);
	if (rc)
		GOTO(out_kmem, rc);

	rc = register_shrinker(&osc_cache_shrinker);
	if (rc)
		GOTO(out_type, rc);

	/* This is obviously too much memory, only prevent overflow here */
	if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
		GOTO(out_shrinker, rc = -EINVAL);

	reqpool_size = osc_reqpool_mem_max << 20;

	reqsize = 1;
	while (reqsize < OST_IO_MAXREQSIZE)
		reqsize = reqsize << 1;

	/*
	 * We don't enlarge the request count in OSC pool according to
	 * cl_max_rpcs_in_flight. The allocation from the pool will only be
	 * tried after normal allocation failed. So a small OSC pool won't
	 * cause much performance degradation in most cases.
	 */
	osc_reqpool_maxreqcount = reqpool_size / reqsize;
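	/*
	 * Worked example (illustrative only): with the default
	 * osc_reqpool_mem_max of 5, reqpool_size is 5 << 20 bytes (5MiB);
	 * if OST_IO_MAXREQSIZE were, say, 40000, reqsize rounds up to the
	 * next power of two (65536) and osc_reqpool_maxreqcount becomes
	 * (5 << 20) / 65536 = 80 requests.
	 */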
	atomic_set(&osc_pool_req_count, 0);
	osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
					  ptlrpc_add_rqs_to_pool);
	if (osc_rq_pool == NULL)
		GOTO(out_shrinker, rc = -ENOMEM);

	rc = osc_start_grant_work();
	if (rc != 0)
		GOTO(out_req_pool, rc);

	RETURN(rc);

out_req_pool:
	ptlrpc_free_rq_pool(osc_rq_pool);
out_shrinker:
	unregister_shrinker(&osc_cache_shrinker);
out_type:
	class_unregister_type(LUSTRE_OSC_NAME);
out_kmem:
	lu_kmem_fini(osc_caches);

	RETURN(rc);
}
static void __exit osc_exit(void)
{
3769 osc_stop_grant_work();
3770 unregister_shrinker(&osc_cache_shrinker);
3771 class_unregister_type(LUSTRE_OSC_NAME);
3772 lu_kmem_fini(osc_caches);
3773 ptlrpc_free_rq_pool(osc_rq_pool);
3776 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3777 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3778 MODULE_VERSION(LUSTRE_VERSION_STRING);
3779 MODULE_LICENSE("GPL");
3781 module_init(osc_init);
3782 module_exit(osc_exit);