/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"
atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);
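
/*
 * Usage note (illustrative, not part of the original code): with the
 * permissions above, osc_reqpool_mem_max (0444) is read-only at runtime
 * and must be set at module load time, e.g. "modprobe osc
 * osc_reqpool_mem_max=10", while osc_idle_timeout (0644) can also be
 * changed later through /sys/module/osc/parameters/osc_idle_timeout.
 */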

#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
	struct obdo		*sa_oa;
	obd_enqueue_update_f	 sa_upcall;
	void			*sa_cookie;
};

struct osc_fsync_args {
	struct osc_object	*fa_obj;
	struct obdo		*fa_oa;
	obd_enqueue_update_f	 fa_upcall;
	void			*fa_cookie;
};

struct osc_ladvise_args {
	struct obdo		*la_oa;
	obd_enqueue_update_f	 la_upcall;
	void			*la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
			 void *data, int rc);

void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
	struct ost_body *body;

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	ENTRY;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

	oa->o_blksize = cli_brw_size(exp->exp_obd);
	oa->o_valid |= OBD_MD_FLBLKSZ;

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	ENTRY;
	LASSERT(oa->o_valid & OBD_MD_FLGROUP);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

	EXIT;
out:
	ptlrpc_req_finished(req);
	RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req, void *args, int rc)
{
	struct osc_setattr_args *sa = args;
	struct ost_body *body;

	ENTRY;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
			     &body->oa);
out:
	rc = sa->sa_upcall(sa->sa_cookie, rc);
	RETURN(rc);
}

int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
		      obd_enqueue_update_f upcall, void *cookie,
		      struct ptlrpc_request_set *rqset)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	int rc;

	ENTRY;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_pack_req_body(req, oa);

	ptlrpc_request_set_replen(req);

	/* do mds to ost setattr asynchronously */
	if (!rqset) {
		/* Do not wait for response. */
		ptlrpcd_add_req(req);
	} else {
		req->rq_interpret_reply = osc_setattr_interpret;

		sa = ptlrpc_req_async_args(sa, req);
		sa->sa_oa = oa;
		sa->sa_upcall = upcall;
		sa->sa_cookie = cookie;

		ptlrpc_set_add_req(rqset, req);
	}

	RETURN(0);
}

static int osc_ladvise_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req,
				 void *arg, int rc)
{
	struct osc_ladvise_args *la = arg;
	struct ost_body *body;

	ENTRY;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	*la->la_oa = body->oa;
out:
	rc = la->la_upcall(la->la_cookie, rc);
	RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for response. Upcall and cookie could also
 * be NULL in this case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
		     struct ladvise_hdr *ladvise_hdr,
		     obd_enqueue_update_f upcall, void *cookie,
		     struct ptlrpc_request_set *rqset)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_ladvise_args *la;
	int rc;
	struct lu_ladvise *req_ladvise;
	struct lu_ladvise *ladvise = ladvise_hdr->lah_advise;
	int num_advise = ladvise_hdr->lah_count;
	struct ladvise_hdr *req_ladvise_hdr;

	ENTRY;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
			     num_advise * sizeof(*ladvise));
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
	if (rc != 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = OST_IO_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
			     oa);

	req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
						 &RMF_OST_LADVISE_HDR);
	memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

	req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
	memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
	ptlrpc_request_set_replen(req);

	if (rqset == NULL) {
		/* Do not wait for response. */
		ptlrpcd_add_req(req);
		RETURN(0);
	}

	req->rq_interpret_reply = osc_ladvise_interpret;
	la = ptlrpc_req_async_args(la, req);
	la->la_oa = oa;
	la->la_upcall = upcall;
	la->la_cookie = cookie;

	ptlrpc_set_add_req(rqset, req);

	RETURN(0);
}

static int osc_create(const struct lu_env *env, struct obd_export *exp,
		      struct obdo *oa)
{
	struct ptlrpc_request *req;
	struct ost_body *body;
	int rc;

	ENTRY;
	LASSERT(oa != NULL);
	LASSERT(oa->o_valid & OBD_MD_FLGROUP);
	LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out_req, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out_req, rc = -EPROTO);

	CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

	oa->o_blksize = cli_brw_size(exp->exp_obd);
	oa->o_valid |= OBD_MD_FLBLKSZ;

	CDEBUG(D_HA, "transno: %lld\n",
	       lustre_msg_get_transno(req->rq_repmsg));
out_req:
	ptlrpc_req_finished(req);
out:
	RETURN(rc);
}

int osc_punch_send(struct obd_export *exp, struct obdo *oa,
		   obd_enqueue_update_f upcall, void *cookie)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	struct obd_import *imp = class_exp2cliimp(exp);
	struct ost_body *body;
	int rc;

	ENTRY;

	req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	osc_set_io_portal(req);

	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

	lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_setattr_interpret;
	sa = ptlrpc_req_async_args(sa, req);
	sa->sa_oa = oa;
	sa->sa_upcall = upcall;
	sa->sa_cookie = cookie;

	ptlrpcd_add_req(req);

	RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);

/**
 * osc_fallocate_base() - Handles fallocate request.
 *
 * @exp:	Export structure
 * @oa:		Attributes passed to OSS from client (obdo structure)
 * @upcall:	Completion callback invoked once the request finishes
 * @cookie:	Exclusive identifier
 * @mode:	Operation done on given range.
 *
 * osc_fallocate_base() handles fallocate requests only. Only block
 * allocation or standard preallocate operation is supported currently.
 * Other mode flags are not supported yet. ftruncate(2) or truncate(2)
 * is supported via a SETATTR request.
 *
 * Return: Non-zero on failure and 0 on success.
 */
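/*
 * Illustrative caller-side check (an assumption mirroring the comment
 * above, not code from this file): since only plain preallocation is
 * handled here, a caller would reject unsupported flags before calling
 * in, e.g.:
 *
 *	if (mode & ~FALLOC_FL_KEEP_SIZE)
 *		return -EOPNOTSUPP;
 */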
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
		       obd_enqueue_update_f upcall, void *cookie, int mode)
{
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	struct ost_body *body;
	struct obd_import *imp = class_exp2cliimp(exp);
	int rc;

	ENTRY;

	oa->o_falloc_mode = mode;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_OST_FALLOCATE);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
	if (rc != 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_setattr_interpret;
	BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
	sa = ptlrpc_req_async_args(sa, req);
	sa->sa_oa = oa;
	sa->sa_upcall = upcall;
	sa->sa_cookie = cookie;

	ptlrpcd_add_req(req);

	RETURN(0);
}

static int osc_sync_interpret(const struct lu_env *env,
			      struct ptlrpc_request *req, void *args, int rc)
{
	struct osc_fsync_args *fa = args;
	struct ost_body *body;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
	unsigned long valid = 0;
	struct cl_object *obj;

	ENTRY;

	if (rc != 0)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL) {
		CERROR("can't unpack ost_body\n");
		GOTO(out, rc = -EPROTO);
	}

	*fa->fa_oa = body->oa;
	obj = osc2cl(fa->fa_obj);

	/* Update osc object's blocks attribute */
	cl_object_attr_lock(obj);
	if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
		attr->cat_blocks = body->oa.o_blocks;
		valid |= CAT_BLOCKS;
	}

	if (valid != 0)
		cl_object_attr_update(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

out:
	rc = fa->fa_upcall(fa->fa_cookie, rc);
	RETURN(rc);
}

int osc_sync_base(struct osc_object *obj, struct obdo *oa,
		  obd_enqueue_update_f upcall, void *cookie,
		  struct ptlrpc_request_set *rqset)
{
	struct obd_export *exp = osc_export(obj);
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_fsync_args *fa;
	int rc;

	ENTRY;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	/* overload the size and blocks fields in the oa with start/end */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_sync_interpret;

	fa = ptlrpc_req_async_args(fa, req);
	fa->fa_obj = obj;
	fa->fa_oa = oa;
	fa->fa_upcall = upcall;
	fa->fa_cookie = cookie;

	ptlrpc_set_add_req(rqset, req);

	RETURN(0);
}

/* Find and cancel locally locks matched by @mode in the resource found by
 * @objid. Found locks are added into @cancels list. Returns the amount of
 * locks added to @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
				   struct list_head *cancels,
				   enum ldlm_mode mode, __u64 lock_flags)
{
	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
	struct ldlm_res_id res_id;
	struct ldlm_resource *res;
	int count;

	ENTRY;

	/* Return, i.e. cancel nothing, only if ELC is supported (flag in
	 * export) but disabled through procfs (flag in NS).
	 *
	 * This distinguishes from a case when ELC is not supported originally,
	 * when we still want to cancel locks in advance and just cancel them
	 * locally, without sending any RPC. */
	if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
		RETURN(0);

	ostid_build_res_name(&oa->o_oi, &res_id);
	res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
	if (IS_ERR(res))
		RETURN(0);

	LDLM_RESOURCE_ADDREF(res);
	count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
					   lock_flags, 0, NULL);
	LDLM_RESOURCE_DELREF(res);
	ldlm_resource_putref(res);
	RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req, void *args, int rc)
{
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

	atomic_dec(&cli->cl_destroy_in_flight);
	wake_up(&cli->cl_destroy_waitq);

	return 0;
}

static int osc_can_send_destroy(struct client_obd *cli)
{
	if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
	    cli->cl_max_rpcs_in_flight) {
		/* The destroy request can be sent */
		return 1;
	}
	if (atomic_dec_return(&cli->cl_destroy_in_flight) <
	    cli->cl_max_rpcs_in_flight) {
		/*
		 * The counter has been modified between the two atomic
		 * operations.
		 */
		wake_up(&cli->cl_destroy_waitq);
	}
	return 0;
}

static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	struct ptlrpc_request *req;
	struct ost_body *body;
	LIST_HEAD(cancels);
	int rc, count;

	ENTRY;

	if (!oa) {
		CDEBUG(D_INFO, "oa NULL\n");
		RETURN(-EINVAL);
	}

	count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
					LDLM_FL_DISCARD_DATA);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
	if (req == NULL) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		RETURN(-ENOMEM);
	}

	rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
			       0, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = osc_destroy_interpret;
	if (!osc_can_send_destroy(cli)) {
		/*
		 * Wait until the number of on-going destroy RPCs drops
		 * under max_rpc_in_flight.
		 */
		rc = l_wait_event_abortable_exclusive(
			cli->cl_destroy_waitq,
			osc_can_send_destroy(cli));
		if (rc) {
			ptlrpc_req_finished(req);
			RETURN(-EINTR);
		}
	}

	/* Do not wait for response */
	ptlrpcd_add_req(req);
	RETURN(0);
}

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
				long writing_bytes)
{
	u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

	LASSERT(!(oa->o_valid & bits));

	oa->o_valid |= bits;
	spin_lock(&cli->cl_loi_list_lock);
	if (cli->cl_ocd_grant_param)
		oa->o_dirty = cli->cl_dirty_grant;
	else
		oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
	if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
		CERROR("dirty %lu > dirty_max %lu\n",
		       cli->cl_dirty_pages,
		       cli->cl_dirty_max_pages);
		oa->o_undirty = 0;
	} else if (unlikely(atomic_long_read(&obd_dirty_pages) >
			    (long)(obd_max_dirty_pages + 1))) {
		/* The atomic_read() allowing the atomic_inc() are
		 * not covered by a lock thus they may safely race and trip
		 * this CERROR() unless we add in a small fudge factor (+1). */
		CERROR("%s: dirty %ld > system dirty_max %ld\n",
		       cli_name(cli), atomic_long_read(&obd_dirty_pages),
		       obd_max_dirty_pages);
		oa->o_undirty = 0;
	} else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
			    0x7fffffff)) {
		CERROR("dirty %lu - dirty_max %lu too big???\n",
		       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
		oa->o_undirty = 0;
	} else {
		unsigned long nrpages;
		unsigned long undirty;

		nrpages = cli->cl_max_pages_per_rpc;
		nrpages *= cli->cl_max_rpcs_in_flight + 1;
		nrpages = max(nrpages, cli->cl_dirty_max_pages);
		undirty = nrpages << PAGE_SHIFT;
		if (cli->cl_ocd_grant_param) {
			int nrextents;

			/* take extent tax into account when asking for more
			 * grant space */
			nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
				    cli->cl_max_extent_pages;
			undirty += nrextents * cli->cl_grant_extent_tax;
		}
		/* Do not ask for more than OBD_MAX_GRANT - a margin for server
		 * to add extent tax, etc.
		 */
		oa->o_undirty = min(undirty, OBD_MAX_GRANT &
				    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
	}
	oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
	/* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
	if (cli->cl_lost_grant > INT_MAX) {
		CDEBUG(D_CACHE,
		       "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
		       cli_name(cli), cli->cl_lost_grant);
		oa->o_dropped = INT_MAX;
	} else {
		oa->o_dropped = cli->cl_lost_grant;
	}
	cli->cl_lost_grant -= oa->o_dropped;
	spin_unlock(&cli->cl_loi_list_lock);
	CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
	       " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
	       oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
}

void osc_update_next_shrink(struct client_obd *cli)
{
	cli->cl_next_shrink_grant = ktime_get_seconds() +
				    cli->cl_grant_shrink_interval;

	CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
	       cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
	spin_lock(&cli->cl_loi_list_lock);
	cli->cl_avail_grant += grant;
	spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
	if (body->oa.o_valid & OBD_MD_FLGRANT) {
		CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
		__osc_update_grant(cli, body->oa.o_grant);
	}
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
	struct list_head	gtd_clients;
	struct mutex		gtd_mutex;
	unsigned long		gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
				      struct ptlrpc_request *req,
				      void *args, int rc)
{
	struct osc_grant_args *aa = args;
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
	struct ost_body *body;

	if (rc != 0) {
		__osc_update_grant(cli, aa->aa_oa->o_grant);
		GOTO(out, rc);
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	osc_update_grant(cli, body);
out:
	OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
	aa->aa_oa = NULL;

	return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
	spin_lock(&cli->cl_loi_list_lock);
	oa->o_grant = cli->cl_avail_grant / 4;
	cli->cl_avail_grant -= oa->o_grant;
	spin_unlock(&cli->cl_loi_list_lock);
	if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags = 0;
	}
	oa->o_flags |= OBD_FL_SHRINK_GRANT;
	osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC. This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
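/*
 * Worked example (illustrative values, not from this file): with
 * cl_max_rpcs_in_flight = 8 and 4 MiB RPCs, the first shrink targets
 * (8 + 1) * 4 MiB = 36 MiB of grant; once avail_grant is already at or
 * below that, the next shrink targets a single 4 MiB RPC.
 */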
static int osc_shrink_grant(struct client_obd *cli)
{
	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
			     (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

	spin_lock(&cli->cl_loi_list_lock);
	if (cli->cl_avail_grant <= target_bytes)
		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
	spin_unlock(&cli->cl_loi_list_lock);

	return osc_shrink_grant_to_target(cli, target_bytes);
}

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
	int rc = 0;
	struct ost_body *body;

	ENTRY;

	spin_lock(&cli->cl_loi_list_lock);
	/* Don't shrink if we are already above or below the desired limit
	 * We don't want to shrink below a single RPC, as that will negatively
	 * impact block allocation and long-term performance. */
	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

	if (target_bytes >= cli->cl_avail_grant) {
		spin_unlock(&cli->cl_loi_list_lock);
		RETURN(0);
	}
	spin_unlock(&cli->cl_loi_list_lock);

	OBD_ALLOC_PTR(body);
	if (!body)
		RETURN(-ENOMEM);

	osc_announce_cached(cli, &body->oa, 0);

	spin_lock(&cli->cl_loi_list_lock);
	if (target_bytes >= cli->cl_avail_grant) {
		/* available grant has changed since target calculation */
		spin_unlock(&cli->cl_loi_list_lock);
		GOTO(out_free, rc = 0);
	}
	body->oa.o_grant = cli->cl_avail_grant - target_bytes;
	cli->cl_avail_grant = target_bytes;
	spin_unlock(&cli->cl_loi_list_lock);
	if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
		body->oa.o_valid |= OBD_MD_FLFLAGS;
		body->oa.o_flags = 0;
	}
	body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
	osc_update_next_shrink(cli);

	rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
				sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
				sizeof(*body), body, NULL);
	if (rc != 0)
		__osc_update_grant(cli, body->oa.o_grant);
out_free:
	OBD_FREE_PTR(body);
	RETURN(rc);
}

static int osc_should_shrink_grant(struct client_obd *client)
{
	time64_t next_shrink = client->cl_next_shrink_grant;

	if (client->cl_import == NULL)
		return 0;

	if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
	    client->cl_import->imp_grant_shrink_disabled) {
		osc_update_next_shrink(client);
		return 0;
	}

	if (ktime_get_seconds() >= next_shrink - 5) {
		/* Get the current RPC size directly, instead of going via:
		 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
		 * Keep comment here so that it can be found by searching. */
		int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

		if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
		    client->cl_avail_grant > brw_size)
			return 1;

		osc_update_next_shrink(client);
	}
	return 0;
}

#define GRANT_SHRINK_RPC_BATCH 100

static struct delayed_work work;

static void osc_grant_work_handler(struct work_struct *data)
{
	struct client_obd *cli;
	int rpc_sent;
	bool init_next_shrink = true;
	time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

	rpc_sent = 0;
	mutex_lock(&client_gtd.gtd_mutex);
	list_for_each_entry(cli, &client_gtd.gtd_clients,
			    cl_grant_chain) {
		if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
		    osc_should_shrink_grant(cli)) {
			osc_shrink_grant(cli);
			rpc_sent++;
		}

		if (!init_next_shrink) {
			if (cli->cl_next_shrink_grant < next_shrink &&
			    cli->cl_next_shrink_grant > ktime_get_seconds())
				next_shrink = cli->cl_next_shrink_grant;
		} else {
			init_next_shrink = false;
			next_shrink = cli->cl_next_shrink_grant;
		}
	}
	mutex_unlock(&client_gtd.gtd_mutex);

	if (client_gtd.gtd_stopped == 1)
		return;

	if (next_shrink > ktime_get_seconds()) {
		time64_t delay = next_shrink - ktime_get_seconds();

		schedule_delayed_work(&work, cfs_time_seconds(delay));
	} else {
		schedule_work(&work.work);
	}
}

void osc_schedule_grant_work(void)
{
	cancel_delayed_work_sync(&work);
	schedule_work(&work.work);
}

/**
 * Start grant thread for returning grant to server for idle clients.
 */
static int osc_start_grant_work(void)
{
	client_gtd.gtd_stopped = 0;
	mutex_init(&client_gtd.gtd_mutex);
	INIT_LIST_HEAD(&client_gtd.gtd_clients);

	INIT_DELAYED_WORK(&work, osc_grant_work_handler);
	schedule_work(&work.work);

	return 0;
}

static void osc_stop_grant_work(void)
{
	client_gtd.gtd_stopped = 1;
	cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
	mutex_lock(&client_gtd.gtd_mutex);
	list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
	mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
	if (list_empty(&client->cl_grant_chain))
		return;

	mutex_lock(&client_gtd.gtd_mutex);
	list_del_init(&client->cl_grant_chain);
	mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
	/*
	 * ocd_grant is the total grant amount we're expected to hold: if we've
	 * been evicted, it's the new avail_grant amount, cl_dirty_pages will
	 * drop to 0 as inflight RPCs fail out; otherwise, it's avail_grant +
	 * dirty.
	 *
	 * race is tolerable here: if we're evicted, but imp_state already
	 * left EVICTED state, then cl_dirty_pages must be 0 already.
	 */
	spin_lock(&cli->cl_loi_list_lock);
	cli->cl_avail_grant = ocd->ocd_grant;
	if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
		unsigned long consumed = cli->cl_reserved_grant;

		if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
			consumed += cli->cl_dirty_grant;
		else
			consumed += cli->cl_dirty_pages << PAGE_SHIFT;
		if (cli->cl_avail_grant < consumed) {
			CERROR("%s: granted %ld but already consumed %ld\n",
			       cli_name(cli), cli->cl_avail_grant, consumed);
			cli->cl_avail_grant = 0;
		} else {
			cli->cl_avail_grant -= consumed;
		}
	}

	if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
		u64 size;
		int chunk_mask;

		/* overhead for each extent insertion */
		cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
		/* determine the appropriate chunk size used by osc_extent. */
		cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
					  ocd->ocd_grant_blkbits);
		/* max_pages_per_rpc must be chunk aligned */
		chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
		cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
					     ~chunk_mask) & chunk_mask;
		/* determine maximum extent size, in #pages */
		size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
		cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
		cli->cl_ocd_grant_param = 1;
	} else {
		cli->cl_ocd_grant_param = 0;
		cli->cl_grant_extent_tax = 0;
		cli->cl_chunkbits = PAGE_SHIFT;
		cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
	}
	spin_unlock(&cli->cl_loi_list_lock);

	CDEBUG(D_CACHE,
	       "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
	       cli_name(cli),
	       cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
	       cli->cl_max_extent_pages);

	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
		osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);
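
/*
 * Worked example for the chunk alignment above (illustrative values):
 * with PAGE_SHIFT = 12 and ocd_grant_blkbits = 16, cl_chunkbits is 16,
 * so a chunk spans 16 pages; chunk_mask is ~15, and a cl_max_pages_per_rpc
 * of 100 is rounded up via (100 + 15) & ~15 = 112 pages, keeping each RPC
 * chunk aligned.
 */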

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
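/*
 * Illustrative case (an assumption, not from this file): for a 3-page
 * read that returns nob_read = PAGE_SIZE + 100, page 0 is kept intact,
 * page 1 is zeroed from byte 100 onward, and page 2 is zeroed entirely.
 */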
static void handle_short_read(int nob_read, size_t page_count,
			      struct brw_page **pga)
{
	char *ptr;
	int i = 0;

	/* skip bytes read OK */
	while (nob_read > 0) {
		LASSERT(page_count > 0);

		if (pga[i]->count > nob_read) {
			/* EOF inside this page */
			ptr = kmap(pga[i]->pg) +
			      (pga[i]->off & ~PAGE_MASK);
			memset(ptr + nob_read, 0, pga[i]->count - nob_read);
			kunmap(pga[i]->pg);
			page_count--;
			i++;
			break;
		}

		nob_read -= pga[i]->count;
		page_count--;
		i++;
	}

	/* zero remaining pages */
	while (page_count-- > 0) {
		ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
		memset(ptr, 0, pga[i]->count);
		kunmap(pga[i]->pg);
		i++;
	}
}

static int check_write_rcs(struct ptlrpc_request *req,
			   int requested_nob, int niocount,
			   size_t page_count, struct brw_page **pga)
{
	int i;
	__u32 *remote_rcs;

	remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
						  sizeof(*remote_rcs) *
						  niocount);
	if (remote_rcs == NULL) {
		CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
		return -EPROTO;
	}

	/* return error if any niobuf was in error */
	for (i = 0; i < niocount; i++) {
		if ((int)remote_rcs[i] < 0) {
			CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
			       i, remote_rcs[i], req);
			return remote_rcs[i];
		}

		if (remote_rcs[i] != 0) {
			CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
			       i, remote_rcs[i], req);
			return -EPROTO;
		}
	}
	if (req->rq_bulk != NULL &&
	    req->rq_bulk->bd_nob_transferred != requested_nob) {
		CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
		       req->rq_bulk->bd_nob_transferred, requested_nob);
		return -EPROTO;
	}

	return 0;
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
	if (p1->flag != p2->flag) {
		unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
				  OBD_BRW_SYNC | OBD_BRW_ASYNC |
				  OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);

		/* warn if we try to combine flags that we don't know to be
		 * safe to combine */
		if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
			CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
			      "report this at https://jira.whamcloud.com/\n",
			      p1->flag, p2->flag);
			return 0;
		}
	}

	return (p1->off + p1->count == p2->off);
}
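
/*
 * Illustrative merge (not from this file): two pages covering
 * [0, PAGE_SIZE) and [PAGE_SIZE, 2 * PAGE_SIZE) with compatible flags
 * are contiguous (off + count == next off), so they collapse into a
 * single niobuf when osc_brw_prep_request() builds the request.
 */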

#if IS_ENABLED(CONFIG_CRC_T10DIF)
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
				   size_t pg_count, struct brw_page **pga,
				   int opc, obd_dif_csum_fn *fn,
				   int sector_size, u32 *check_sum)
{
	struct ahash_request *req;
	/* Used Adler as the default checksum type on top of DIF tags */
	unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
	struct page *__page;
	unsigned char *buffer;
	__u16 *guard_start;
	unsigned int bufsize;
	int guard_number;
	int used_number = 0;
	int used;
	u32 cksum;
	int rc = 0;
	int i = 0;

	LASSERT(pg_count > 0);

	__page = alloc_page(GFP_KERNEL);
	if (__page == NULL)
		return -ENOMEM;

	req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
	if (IS_ERR(req)) {
		rc = PTR_ERR(req);
		CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
		       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
		GOTO(out, rc);
	}

	buffer = kmap(__page);
	guard_start = (__u16 *)buffer;
	guard_number = PAGE_SIZE / sizeof(*guard_start);
	while (nob > 0 && pg_count > 0) {
		unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

		/* corrupt the data before we compute the checksum, to
		 * simulate an OST->client data error */
		if (unlikely(i == 0 && opc == OST_READ &&
			     OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
			unsigned char *ptr = kmap(pga[i]->pg);
			int off = pga[i]->off & ~PAGE_MASK;

			memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
			kunmap(pga[i]->pg);
		}

		/*
		 * The left guard number should be able to hold checksums of a
		 * whole page.
		 */
		rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
						  pga[i]->off & ~PAGE_MASK,
						  count,
						  guard_start + used_number,
						  guard_number - used_number,
						  &used, sector_size, fn);
		if (rc)
			break;

		used_number += used;
		if (used_number == guard_number) {
			cfs_crypto_hash_update_page(req, __page, 0,
				used_number * sizeof(*guard_start));
			used_number = 0;
		}

		nob -= pga[i]->count;
		pg_count--;
		i++;
	}
	kunmap(__page);
	if (rc)
		GOTO(out, rc);

	if (used_number != 0)
		cfs_crypto_hash_update_page(req, __page, 0,
			used_number * sizeof(*guard_start));

	bufsize = sizeof(cksum);
	cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

	/* For sending we only compute the wrong checksum instead
	 * of corrupting the data so it is still correct on a redo */
	if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
		cksum++;

	*check_sum = cksum;
out:
	__free_page(__page);
	return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum) \
	-EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

static int osc_checksum_bulk(int nob, size_t pg_count,
			     struct brw_page **pga, int opc,
			     enum cksum_types cksum_type,
			     u32 *cksum)
{
	int i = 0;
	struct ahash_request *req;
	unsigned int bufsize;
	unsigned char cfs_alg = cksum_obd2cfs(cksum_type);

	LASSERT(pg_count > 0);

	req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
	if (IS_ERR(req)) {
		CERROR("Unable to initialize checksum hash %s\n",
		       cfs_crypto_hash_name(cfs_alg));
		return PTR_ERR(req);
	}

	while (nob > 0 && pg_count > 0) {
		unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

		/* corrupt the data before we compute the checksum, to
		 * simulate an OST->client data error */
		if (i == 0 && opc == OST_READ &&
		    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
			unsigned char *ptr = kmap(pga[i]->pg);
			int off = pga[i]->off & ~PAGE_MASK;

			memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
			kunmap(pga[i]->pg);
		}
		cfs_crypto_hash_update_page(req, pga[i]->pg,
					    pga[i]->off & ~PAGE_MASK,
					    count);
		LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
			       (int)(pga[i]->off & ~PAGE_MASK));

		nob -= pga[i]->count;
		pg_count--;
		i++;
	}

	bufsize = sizeof(*cksum);
	cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

	/* For sending we only compute the wrong checksum instead
	 * of corrupting the data so it is still correct on a redo */
	if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
		(*cksum)++;

	return 0;
}

static int osc_checksum_bulk_rw(const char *obd_name,
				enum cksum_types cksum_type,
				int nob, size_t pg_count,
				struct brw_page **pga, int opc,
				u32 *check_sum)
{
	obd_dif_csum_fn *fn = NULL;
	int sector_size = 0;
	int rc;

	ENTRY;
	obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

	if (fn)
		rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
					     opc, fn, sector_size, check_sum);
	else
		rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
				       check_sum);

	RETURN(rc);
}

static inline void osc_release_bounce_pages(struct brw_page **pga,
					    u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
	int i;

	for (i = 0; i < page_count; i++) {
		/* Bounce pages allocated by a call to
		 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
		 * are identified thanks to the PageChecked flag.
		 */
		if (PageChecked(pga[i]->pg))
			llcrypt_finalize_bounce_page(&pga[i]->pg);
		pga[i]->count -= pga[i]->bp_count_diff;
		pga[i]->off += pga[i]->bp_off_diff;
	}
#endif
}

static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
		     u32 page_count, struct brw_page **pga,
		     struct ptlrpc_request **reqp, int resend)
{
	struct ptlrpc_request *req;
	struct ptlrpc_bulk_desc *desc;
	struct ost_body *body;
	struct obd_ioobj *ioobj;
	struct niobuf_remote *niobuf;
	int niocount, i, requested_nob, opc, rc, short_io_size = 0;
	struct osc_brw_async_args *aa;
	struct req_capsule *pill;
	struct brw_page *pg_prev;
	void *short_io_buf;
	const char *obd_name = cli->cl_import->imp_obd->obd_name;
	struct inode *inode = NULL;
	bool directio = false;

	ENTRY;
	if (pga[0]->pg) {
		inode = page2inode(pga[0]->pg);
		if (inode == NULL) {
			/* Try to get reference to inode from cl_page if we are
			 * dealing with direct IO, as handled pages are not
			 * actual page cache pages.
			 */
			struct osc_async_page *oap = brw_page2oap(pga[0]);
			struct cl_page *clpage = oap2cl_page(oap);

			inode = clpage->cp_inode;
			if (inode)
				directio = true;
		}
	}
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
		RETURN(-ENOMEM); /* Recoverable */
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
		RETURN(-EINVAL); /* Fatal */

	if ((cmd & OBD_BRW_WRITE) != 0) {
		opc = OST_WRITE;
		req = ptlrpc_request_alloc_pool(cli->cl_import,
						osc_rq_pool,
						&RQF_OST_BRW_WRITE);
	} else {
		opc = OST_READ;
		req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
	}
	if (req == NULL)
		RETURN(-ENOMEM);

	if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
		for (i = 0; i < page_count; i++) {
			struct brw_page *pg = pga[i];
			struct page *data_page = NULL;
			bool retried = false;
			bool lockedbymyself;
			u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
			struct address_space *map_orig = NULL;
			pgoff_t index_orig;

retry_encrypt:
			if (nunits & ~LUSTRE_ENCRYPTION_MASK)
				nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
					 LUSTRE_ENCRYPTION_UNIT_SIZE;
			/* The page can already be locked when we arrive here.
			 * This is possible when cl_page_assume/vvp_page_assume
			 * is stuck on wait_on_page_writeback with page lock
			 * held. In this case there is no risk for the lock to
			 * be released while we are doing our encryption
			 * processing, because writeback against that page will
			 * end in vvp_page_completion_write/cl_page_completion,
			 * which means only once the page is fully processed.
			 */
			lockedbymyself = trylock_page(pg->pg);
			if (directio) {
				map_orig = pg->pg->mapping;
				pg->pg->mapping = inode->i_mapping;
				index_orig = pg->pg->index;
				pg->pg->index = pg->off >> PAGE_SHIFT;
			}
			data_page =
				llcrypt_encrypt_pagecache_blocks(pg->pg,
								 nunits, 0,
								 GFP_NOFS);
			if (directio) {
				pg->pg->mapping = map_orig;
				pg->pg->index = index_orig;
			}
			if (lockedbymyself)
				unlock_page(pg->pg);
			if (IS_ERR(data_page)) {
				rc = PTR_ERR(data_page);
				if (rc == -ENOMEM && !retried) {
					retried = true;
					rc = 0;
					goto retry_encrypt;
				}
				ptlrpc_request_free(req);
				RETURN(rc);
			}
			/* Set PageChecked flag on bounce page for
			 * disambiguation in osc_release_bounce_pages().
			 */
			SetPageChecked(data_page);
			pg->pg = data_page;
			/* there should be no gap in the middle of page array */
			if (i == page_count - 1) {
				struct osc_async_page *oap = brw_page2oap(pg);

				oa->o_size = oap->oap_count +
					oap->oap_obj_off + oap->oap_page_off;
			}
			/* len is forced to nunits, and relative offset to 0
			 * so store the old, clear text info
			 */
			pg->bp_count_diff = nunits - pg->count;
			pg->count = nunits;
			pg->bp_off_diff = pg->off & ~PAGE_MASK;
			pg->off = pg->off & PAGE_MASK;
		}
	} else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
		for (i = 0; i < page_count; i++) {
			struct brw_page *pg = pga[i];
			u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;

			if (nunits & ~LUSTRE_ENCRYPTION_MASK)
				nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
					 LUSTRE_ENCRYPTION_UNIT_SIZE;
			/* count/off are forced to cover the whole encryption
			 * unit size so that all encrypted data is stored on the
			 * OST, so adjust bp_{count,off}_diff for the size of
			 * the clear text.
			 */
			pg->bp_count_diff = nunits - pg->count;
			pg->count = nunits;
			pg->bp_off_diff = pg->off & ~PAGE_MASK;
			pg->off = pg->off & PAGE_MASK;
		}
	}

	for (niocount = i = 1; i < page_count; i++) {
		if (!can_merge_pages(pga[i - 1], pga[i]))
			niocount++;
	}

	pill = &req->rq_pill;
	req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
			     sizeof(*ioobj));
	req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
			     niocount * sizeof(*niobuf));

	for (i = 0; i < page_count; i++) {
		short_io_size += pga[i]->count;
		if (!inode || !IS_ENCRYPTED(inode)) {
			pga[i]->bp_count_diff = 0;
			pga[i]->bp_off_diff = 0;
		}
	}

	/* Check if read/write is small enough to be a short io. */
	if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
	    !imp_connect_shortio(cli->cl_import))
		short_io_size = 0;

	req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
			     opc == OST_READ ? 0 : short_io_size);
	if (opc == OST_READ)
		req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
				     short_io_size);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	osc_set_io_portal(req);

	ptlrpc_at_set_req_timeout(req);
	/* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
	 * retry logic */
	req->rq_no_retry_einprogress = 1;

	if (short_io_size != 0) {
		desc = NULL;
		short_io_buf = NULL;
		goto no_bulk;
	}

	desc = ptlrpc_prep_bulk_imp(req, page_count,
		cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
		(opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
			PTLRPC_BULK_PUT_SINK),
		OST_BULK_PORTAL,
		&ptlrpc_bulk_kiov_pin_ops);

	if (desc == NULL)
		GOTO(out, rc = -ENOMEM);
	/* NB request now owns desc and will free it when it gets freed */
no_bulk:
	body = req_capsule_client_get(pill, &RMF_OST_BODY);
	ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
	niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
	LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	/* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
	 * and from_kgid(), because they are asynchronous. Fortunately, variable
	 * oa contains valid o_uid and o_gid in these two operations.
	 * Besides, filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
	 * OBD_MD_FLUID and OBD_MD_FLGID is not set in order to avoid breaking
	 * other process logic */
	body->oa.o_uid = oa->o_uid;
	body->oa.o_gid = oa->o_gid;

	obdo_to_ioobj(oa, ioobj);
	ioobj->ioo_bufcnt = niocount;
	/* The high bits of ioo_max_brw tells server _maximum_ number of bulks
	 * that might be sent for this request. The actual number is decided
	 * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
	 * "max - 1" for old client compatibility sending "0", and also so that
	 * the actual maximum is a power-of-two number, not one less. LU-1431 */
	if (desc != NULL)
		ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
	else /* short io */
		ioobj_max_brw_set(ioobj, 0);
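
	/*
	 * Example of the encoding above (an assumption based on the comment,
	 * with illustrative values): if the bulk may need up to 4 MDs, a
	 * maximum of 4 is stored as "3" in the high bits of ioo_max_brw and
	 * the server adds 1 back, so "0" from an old client still means one
	 * bulk and the decoded maximum stays a power of two.
	 */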

	if (short_io_size != 0) {
		if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
			body->oa.o_valid |= OBD_MD_FLFLAGS;
			body->oa.o_flags = 0;
		}
		body->oa.o_flags |= OBD_FL_SHORT_IO;
		CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
		       short_io_size);
		if (opc == OST_WRITE) {
			short_io_buf = req_capsule_client_get(pill,
							      &RMF_SHORT_IO);
			LASSERT(short_io_buf != NULL);
		}
	}

	LASSERT(page_count > 0);
	pg_prev = pga[0];
	for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
		struct brw_page *pg = pga[i];
		int poff = pg->off & ~PAGE_MASK;

		LASSERT(pg->count > 0);
		/* make sure there is no gap in the middle of page array */
		LASSERTF(page_count == 1 ||
			 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
			  ergo(i > 0 && i < page_count - 1,
			       poff == 0 && pg->count == PAGE_SIZE) &&
			  ergo(i == page_count - 1, poff == 0)),
			 "i: %d/%d pg: %p off: %llu, count: %u\n",
			 i, page_count, pg, pg->off, pg->count);
		LASSERTF(i == 0 || pg->off > pg_prev->off,
			 "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
			 " prev_pg %p [pri %lu ind %lu] off %llu\n",
			 i, page_count,
			 pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
			 pg_prev->pg, page_private(pg_prev->pg),
			 pg_prev->pg->index, pg_prev->off);
		LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
			(pg->flag & OBD_BRW_SRVLOCK));
		if (short_io_size != 0 && opc == OST_WRITE) {
			unsigned char *ptr = kmap_atomic(pg->pg);

			LASSERT(short_io_size >= requested_nob + pg->count);
			memcpy(short_io_buf + requested_nob,
			       ptr + poff,
			       pg->count);
			kunmap_atomic(ptr);
		} else if (short_io_size == 0) {
			desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
							 pg->count);
		}
		requested_nob += pg->count;

		if (i > 0 && can_merge_pages(pg_prev, pg)) {
			niobuf--;
			niobuf->rnb_len += pg->count;
		} else {
			niobuf->rnb_offset = pg->off;
			niobuf->rnb_len = pg->count;
			niobuf->rnb_flags = pg->flag;
		}
		pg_prev = pg;
	}

	LASSERTF((void *)(niobuf - niocount) ==
		 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
		 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
		 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

	osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);
	if (resend) {
		if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
			body->oa.o_valid |= OBD_MD_FLFLAGS;
			body->oa.o_flags = 0;
		}
		body->oa.o_flags |= OBD_FL_RECOV_RESEND;
	}

	if (osc_should_shrink_grant(cli))
		osc_shrink_grant_local(cli, &body->oa);

	/* size[REQ_REC_OFF] still sizeof (*body) */
	if (opc == OST_WRITE) {
		if (cli->cl_checksum &&
		    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
			/* store cl_cksum_type in a local variable since
			 * it can be changed via lprocfs */
			enum cksum_types cksum_type = cli->cl_cksum_type;

			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
				body->oa.o_flags = 0;

			body->oa.o_flags |= obd_cksum_type_pack(obd_name,
								cksum_type);
			body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;

			rc = osc_checksum_bulk_rw(obd_name, cksum_type,
						  requested_nob, page_count,
						  pga, OST_WRITE,
						  &body->oa.o_cksum);
			if (rc < 0) {
				CDEBUG(D_PAGE, "failed to checksum, rc = %d\n",
				       rc);
				GOTO(out, rc);
			}
			CDEBUG(D_PAGE, "checksum at write origin: %x\n",
			       body->oa.o_cksum);

			/* save this in 'oa', too, for later checking */
			oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
			oa->o_flags |= obd_cksum_type_pack(obd_name,
							   cksum_type);
		} else {
			/* clear out the checksum flag, in case this is a
			 * resend but cl_checksum is no longer set. b=11238 */
			oa->o_valid &= ~OBD_MD_FLCKSUM;
		}
		oa->o_cksum = body->oa.o_cksum;
		/* 1 RC per niobuf */
		req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
				     sizeof(__u32) * niocount);
	} else {
		if (cli->cl_checksum &&
		    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
				body->oa.o_flags = 0;
			body->oa.o_flags |= obd_cksum_type_pack(obd_name,
								cli->cl_cksum_type);
			body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
		}

		/* Client cksum has been already copied to wire obdo in previous
		 * lustre_set_wire_obdo(), and in the case a bulk-read is being
		 * resent due to cksum error, this will allow Server to
		 * check+dump pages on its side */
	}
	ptlrpc_request_set_replen(req);

	aa = ptlrpc_req_async_args(aa, req);
	aa->aa_oa = oa;
	aa->aa_requested_nob = requested_nob;
	aa->aa_nio_count = niocount;
	aa->aa_page_count = page_count;
	aa->aa_resends = 0;
	aa->aa_ppga = pga;
	aa->aa_cli = cli;
	INIT_LIST_HEAD(&aa->aa_oaps);

	*reqp = req;
	niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
	CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
	       req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
	       niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
	RETURN(0);

out:
	ptlrpc_req_finished(req);
	RETURN(rc);
}

char dbgcksum_file_name[PATH_MAX];

static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
				struct brw_page **pga, __u32 server_cksum,
				__u32 client_cksum)
{
	struct file *filp;
	int rc, i;
	unsigned int len;
	char *buf;

	/* will only keep dump of pages on first error for the same range in
	 * file/fid, not during the resends/retries. */
	snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
		 "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
		 (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
		  libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
		 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
		 pga[0]->off,
		 pga[page_count-1]->off + pga[page_count-1]->count - 1,
		 client_cksum, server_cksum);
	filp = filp_open(dbgcksum_file_name,
			 O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		if (rc == -EEXIST)
			CDEBUG(D_INFO, "%s: can't open to dump pages with "
			       "checksum error: rc = %d\n", dbgcksum_file_name,
			       rc);
		else
			CERROR("%s: can't open to dump pages with checksum "
			       "error: rc = %d\n", dbgcksum_file_name, rc);
		return;
	}

	for (i = 0; i < page_count; i++) {
		len = pga[i]->count;
		buf = kmap(pga[i]->pg);
		while (len != 0) {
			rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
			if (rc < 0) {
				CERROR("%s: wanted to write %u but got %d "
				       "error\n", dbgcksum_file_name, len, rc);
				break;
			}
			len -= rc;
			buf += rc;
			CDEBUG(D_INFO, "%s: wrote %d bytes\n",
			       dbgcksum_file_name, rc);
		}
		kunmap(pga[i]->pg);
	}

	rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
	if (rc)
		CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
	filp_close(filp, NULL);
}

static int
check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
		     __u32 client_cksum, __u32 server_cksum,
		     struct osc_brw_async_args *aa)
{
	const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
	enum cksum_types cksum_type;
	obd_dif_csum_fn *fn = NULL;
	int sector_size = 0;
	__u32 new_cksum;
	char *msg;
	int rc;

	if (server_cksum == client_cksum) {
		CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
		return 0;
	}

	if (aa->aa_cli->cl_checksum_dump)
		dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
				    server_cksum, client_cksum);

	cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
					   oa->o_flags : 0);

	switch (cksum_type) {
	case OBD_CKSUM_T10IP512:
		fn = obd_dif_ip_fn;
		sector_size = 512;
		break;
	case OBD_CKSUM_T10IP4K:
		fn = obd_dif_ip_fn;
		sector_size = 4096;
		break;
	case OBD_CKSUM_T10CRC512:
		fn = obd_dif_crc_fn;
		sector_size = 512;
		break;
	case OBD_CKSUM_T10CRC4K:
		fn = obd_dif_crc_fn;
		sector_size = 4096;
		break;
	default:
		break;
	}

	if (fn)
		rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
					     aa->aa_page_count, aa->aa_ppga,
					     OST_WRITE, fn, sector_size,
					     &new_cksum);
	else
		rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
				       aa->aa_ppga, OST_WRITE, cksum_type,
				       &new_cksum);

	if (rc < 0)
		msg = "failed to calculate the client write checksum";
	else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
		msg = "the server did not use the checksum type specified in "
		      "the original request - likely a protocol problem";
	else if (new_cksum == server_cksum)
		msg = "changed on the client after we checksummed it - "
		      "likely false positive due to mmap IO (bug 11742)";
	else if (new_cksum == client_cksum)
		msg = "changed in transit before arrival at OST";
	else
		msg = "changed in transit AND doesn't match the original - "
		      "likely false positive due to mmap IO (bug 11742)";

	LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
			   DFID " object "DOSTID" extent [%llu-%llu], original "
			   "client csum %x (type %x), server csum %x (type %x),"
			   " client csum now %x\n",
			   obd_name, msg, libcfs_nid2str(peer->nid),
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
			   POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
			   aa->aa_ppga[aa->aa_page_count - 1]->off +
			   aa->aa_ppga[aa->aa_page_count-1]->count - 1,
			   client_cksum,
			   obd_cksum_type_unpack(aa->aa_oa->o_flags),
			   server_cksum, cksum_type, new_cksum);
	return 1;
}

/* Note rc enters this function as number of bytes transferred */
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
{
	struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
	struct client_obd *cli = aa->aa_cli;
	const char *obd_name = cli->cl_import->imp_obd->obd_name;
	const struct lnet_process_id *peer =
		&req->rq_import->imp_connection->c_peer;
	struct ost_body *body;
	u32 client_cksum = 0;
	struct inode *inode;
	unsigned int blockbits = 0, blocksize = 0;

	ENTRY;

	if (rc < 0 && rc != -EDQUOT) {
		DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
		RETURN(rc);
	}

	LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL) {
		DEBUG_REQ(D_INFO, req, "cannot unpack body");
		RETURN(-EPROTO);
	}

	/* set/clear over quota flag for a uid/gid/projid */
	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
	    body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
		unsigned qid[LL_MAXQUOTAS] = {
					 body->oa.o_uid, body->oa.o_gid,
					 body->oa.o_projid };
		CDEBUG(D_QUOTA,
		       "setdq for [%u %u %u] with valid %#llx, flags %x\n",
		       body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
		       body->oa.o_valid, body->oa.o_flags);
		osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
				body->oa.o_flags);
	}

	osc_update_grant(cli, body);

	if (rc < 0)
		RETURN(rc);

	if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
		client_cksum = aa->aa_oa->o_cksum; /* save for later */

	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
		if (rc > 0) {
			CERROR("%s: unexpected positive size %d\n",
			       obd_name, rc);
			RETURN(-EPROTO);
		}

		if (req->rq_bulk != NULL &&
		    sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
			RETURN(-EAGAIN);

		if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
		    check_write_checksum(&body->oa, peer, client_cksum,
					 body->oa.o_cksum, aa))
			RETURN(-EAGAIN);

		rc = check_write_rcs(req, aa->aa_requested_nob,
				     aa->aa_nio_count, aa->aa_page_count,
				     aa->aa_ppga);
		GOTO(out, rc);
	}

	/* The rest of this function executes only for OST_READs */

	if (req->rq_bulk == NULL) {
		rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
					  RCL_SERVER);
		LASSERT(rc == req->rq_status);
	} else {
		/* if unwrap_bulk failed, return -EAGAIN to retry */
		rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
	}
	if (rc < 0)
		GOTO(out, rc = -EAGAIN);

	if (rc > aa->aa_requested_nob) {
		CERROR("%s: unexpected size %d, requested %d\n", obd_name,
		       rc, aa->aa_requested_nob);
		RETURN(-EPROTO);
	}

	if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
		CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
		       rc, req->rq_bulk->bd_nob_transferred);
		RETURN(-EPROTO);
	}

	if (req->rq_bulk == NULL) {
		/* short io */
		int nob, pg_count, i = 0;
		unsigned char *buf;

		CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
		pg_count = aa->aa_page_count;
		buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
						   rc);
		nob = rc;
		while (nob > 0 && pg_count > 0) {
			unsigned char *ptr;
			int count = aa->aa_ppga[i]->count > nob ?
				    nob : aa->aa_ppga[i]->count;

			CDEBUG(D_CACHE, "page %p count %d\n",
			       aa->aa_ppga[i]->pg, count);
			ptr = kmap_atomic(aa->aa_ppga[i]->pg);
			memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
			       count);
			kunmap_atomic((void *) ptr);

			buf += count;
			nob -= count;
			i++;
			pg_count--;
		}
	}

	if (rc < aa->aa_requested_nob)
		handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);

	if (body->oa.o_valid & OBD_MD_FLCKSUM) {
		static int cksum_counter;
		u32 server_cksum = body->oa.o_cksum;
		char *via = "";
		char *router = "";
		enum cksum_types cksum_type;
		u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
			      body->oa.o_flags : 0;

		cksum_type = obd_cksum_type_unpack(o_flags);
		rc = osc_checksum_bulk_rw(obd_name, cksum_type, rc,
					  aa->aa_page_count, aa->aa_ppga,
					  OST_READ, &client_cksum);
		if (rc < 0)
			GOTO(out, rc);

		if (req->rq_bulk != NULL &&
		    peer->nid != req->rq_bulk->bd_sender) {
			via = " via ";
			router = libcfs_nid2str(req->rq_bulk->bd_sender);
		}

		if (server_cksum != client_cksum) {
			struct ost_body *clbody;
			u32 page_count = aa->aa_page_count;

			clbody = req_capsule_client_get(&req->rq_pill,
							&RMF_OST_BODY);
			if (cli->cl_checksum_dump)
				dump_all_bulk_pages(&clbody->oa, page_count,
						    aa->aa_ppga, server_cksum,
						    client_cksum);

			LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
					   "%s%s%s inode "DFID" object "DOSTID
					   " extent [%llu-%llu], client %x, "
					   "server %x, cksum_type %x\n",
					   obd_name,
					   libcfs_nid2str(peer->nid),
					   via, router,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_seq : 0ULL,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_oid : 0,
					   clbody->oa.o_valid & OBD_MD_FLFID ?
						clbody->oa.o_parent_ver : 0,
					   POSTID(&body->oa.o_oi),
					   aa->aa_ppga[0]->off,
					   aa->aa_ppga[page_count-1]->off +
					   aa->aa_ppga[page_count-1]->count - 1,
					   client_cksum, server_cksum,
					   cksum_type);
			cksum_counter = 0;
			aa->aa_oa->o_cksum = client_cksum;
			rc = -EAGAIN;
		} else {
			cksum_counter++;
			CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
			rc = 0;
		}
	} else if (unlikely(client_cksum)) {
		static int cksum_missed;

		cksum_missed++;
		if ((cksum_missed & (-cksum_missed)) == cksum_missed)
			CERROR("%s: checksum %u requested from %s but not sent\n",
			       obd_name, cksum_missed,
			       libcfs_nid2str(peer->nid));
	} else {
		rc = 0;
	}

	inode = page2inode(aa->aa_ppga[0]->pg);
	if (inode == NULL) {
		/* Try to get reference to inode from cl_page if we are
		 * dealing with direct IO, as handled pages are not
		 * actual page cache pages.
		 */
		struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);

		inode = oap2cl_page(oap)->cp_inode;
		if (inode) {
			blockbits = inode->i_blkbits;
			blocksize = 1 << blockbits;
		}
	}
	if (inode && IS_ENCRYPTED(inode)) {
		int idx;

		if (!llcrypt_has_encryption_key(inode)) {
			CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
			GOTO(out, rc);
		}
		for (idx = 0; idx < aa->aa_page_count; idx++) {
			struct brw_page *pg = aa->aa_ppga[idx];
			unsigned int offs = 0;

			while (offs < PAGE_SIZE) {
				/* do not decrypt if page is all 0s */
				if (memchr_inv(page_address(pg->pg) + offs, 0,
				      LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
					/* if page is empty forward info to
					 * upper layers (ll_io_zero_page) by
					 * clearing PagePrivate2
					 */
					if (!offs)
						ClearPagePrivate2(pg->pg);
					break;
				}
				if (blockbits) {
					/* This is direct IO case. Directly call
					 * decrypt function that takes inode as
					 * input parameter. Page does not need
					 * to be locked.
					 */
					u64 lblk_num =
					  ((u64)(pg->off >> PAGE_SHIFT) <<
						(PAGE_SHIFT - blockbits)) +
					       (offs >> blockbits);
					unsigned int i;

					for (i = offs;
					     i < offs +
						 LUSTRE_ENCRYPTION_UNIT_SIZE;
					     i += blocksize, lblk_num++) {
						rc =
						  llcrypt_decrypt_block_inplace(
							  inode, pg->pg,
							  blocksize, i,
							  lblk_num);
						if (rc)
							break;
					}
				} else {
					rc = llcrypt_decrypt_pagecache_blocks(
						pg->pg,
						LUSTRE_ENCRYPTION_UNIT_SIZE,
						offs);
				}
				if (rc)
					GOTO(out, rc);

				offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
			}
		}
	}

out:
	if (rc >= 0)
		lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
				     aa->aa_oa, &body->oa);

	RETURN(rc);
}

static int osc_brw_redo_request(struct ptlrpc_request *request,
				struct osc_brw_async_args *aa, int rc)
{
	struct ptlrpc_request *new_req;
	struct osc_brw_async_args *new_aa;
	struct osc_async_page *oap;

	ENTRY;

	/* The below message is checked in replay-ost-single.sh test_8ae */
	DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
		  "redo for recoverable error %d", rc);

	rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
				  OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
				  aa->aa_cli, aa->aa_oa, aa->aa_page_count,
				  aa->aa_ppga, &new_req, 1);
	if (rc)
		RETURN(rc);

	list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
		if (oap->oap_request != NULL) {
			LASSERTF(request == oap->oap_request,
				 "request %p != oap_request %p\n",
				 request, oap->oap_request);
		}
	}
	/*
	 * New request takes over pga and oaps from old request.
	 * Note that copying a list_head doesn't work, need to move it...
	 */
	aa->aa_resends++;
	new_req->rq_interpret_reply = request->rq_interpret_reply;
	new_req->rq_async_args = request->rq_async_args;
	new_req->rq_commit_cb = request->rq_commit_cb;
	/* cap resend delay to the current request timeout, this is similar to
	 * what ptlrpc does (see after_reply()) */
	if (aa->aa_resends > new_req->rq_timeout)
		new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
	else
		new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
	new_req->rq_generation_set = 1;
	new_req->rq_import_generation = request->rq_import_generation;

	new_aa = ptlrpc_req_async_args(new_aa, new_req);

	INIT_LIST_HEAD(&new_aa->aa_oaps);
	list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
	INIT_LIST_HEAD(&new_aa->aa_exts);
	list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
	new_aa->aa_resends = aa->aa_resends;

	list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
		if (oap->oap_request) {
			ptlrpc_req_finished(oap->oap_request);
			oap->oap_request = ptlrpc_request_addref(new_req);
		}
	}

	/* XXX: This code will run into problem if we're going to support
	 * to add a series of BRW RPCs into a self-defined ptlrpc_request_set
	 * and wait for all of them to be finished. We should inherit request
	 * set from old request. */
	ptlrpcd_add_req(new_req);

	DEBUG_REQ(D_INFO, new_req, "new request");
	RETURN(0);
}
2265 /* Ugh, we want disk allocation on the target to happen in offset order.
2266 * We'll follow Sedgewick's advice and stick to the dead-simple shellsort --
2267 * it'll do fine for our small page arrays and doesn't require allocation.
2268 * It's an insertion sort that swaps elements that are strides apart,
2269 * shrinking the stride down until it's 1 and the array is sorted. */
2271 static void sort_brw_pages(struct brw_page **array, int num)
2274 struct brw_page *tmp;
2278 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2283 for (i = stride ; i < num ; i++) {
2286 while (j >= stride && array[j - stride]->off > tmp->off) {
2287 array[j] = array[j - stride];
2292 } while (stride > 1);
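/*
 * Illustrative sketch (not part of the original file): a hypothetical,
 * never-called helper showing how sort_brw_pages() above orders a small
 * brw_page array by file offset.
 */
static void __maybe_unused sort_brw_pages_example(void)
{
	struct brw_page pages[3] = {
		{ .off = 2 * PAGE_SIZE },
		{ .off = 0 },
		{ .off = 1 * PAGE_SIZE },
	};
	struct brw_page *pga[] = { &pages[0], &pages[1], &pages[2] };

	sort_brw_pages(pga, 3);
	/* pga[] now references offsets 0, PAGE_SIZE, 2 * PAGE_SIZE in order */
	LASSERT(pga[0]->off == 0 && pga[2]->off == 2 * PAGE_SIZE);
}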
2295 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2297 LASSERT(ppga != NULL);
2298 OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2301 static int brw_interpret(const struct lu_env *env,
2302 struct ptlrpc_request *req, void *args, int rc)
2304 struct osc_brw_async_args *aa = args;
2305 struct osc_extent *ext;
2306 struct osc_extent *tmp;
2307 struct client_obd *cli = aa->aa_cli;
2308 unsigned long transferred = 0;
2312 rc = osc_brw_fini_request(req, rc);
2313 CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2315 /* restore clear text pages */
2316 osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2319 * When the server returns -EINPROGRESS, the client should always retry
2320 * regardless of the number of times the bulk was resent already. */
2322 if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2323 if (req->rq_import_generation !=
2324 req->rq_import->imp_generation) {
2325 CDEBUG(D_HA, "%s: resend cross eviction for object: "
2326 ""DOSTID", rc = %d.\n",
2327 req->rq_import->imp_obd->obd_name,
2328 POSTID(&aa->aa_oa->o_oi), rc);
2329 } else if (rc == -EINPROGRESS ||
2330 client_should_resend(aa->aa_resends, aa->aa_cli)) {
2331 rc = osc_brw_redo_request(req, aa, rc);
2333 CERROR("%s: too many resent retries for object: "
2334 "%llu:%llu, rc = %d.\n",
2335 req->rq_import->imp_obd->obd_name,
2336 POSTID(&aa->aa_oa->o_oi), rc);
2341 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2346 struct obdo *oa = aa->aa_oa;
2347 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2348 unsigned long valid = 0;
2349 struct cl_object *obj;
2350 struct osc_async_page *last;
2352 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2353 obj = osc2cl(last->oap_obj);
2355 cl_object_attr_lock(obj);
2356 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2357 attr->cat_blocks = oa->o_blocks;
2358 valid |= CAT_BLOCKS;
2360 if (oa->o_valid & OBD_MD_FLMTIME) {
2361 attr->cat_mtime = oa->o_mtime;
2364 if (oa->o_valid & OBD_MD_FLATIME) {
2365 attr->cat_atime = oa->o_atime;
2368 if (oa->o_valid & OBD_MD_FLCTIME) {
2369 attr->cat_ctime = oa->o_ctime;
2373 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2374 struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2375 loff_t last_off = last->oap_count + last->oap_obj_off +
2378 /* Change file size if this is an out of quota or
2379 * direct IO write and it extends the file size */
2380 if (loi->loi_lvb.lvb_size < last_off) {
2381 attr->cat_size = last_off;
2384 /* Extend KMS if it's not a lockless write */
2385 if (loi->loi_kms < last_off &&
2386 oap2osc_page(last)->ops_srvlock == 0) {
2387 attr->cat_kms = last_off;
2393 cl_object_attr_update(env, obj, attr, valid);
2394 cl_object_attr_unlock(obj);
2396 OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2399 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2400 osc_inc_unstable_pages(req);
2402 list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2403 list_del_init(&ext->oe_link);
2404 osc_extent_finish(env, ext, 1,
2405 rc && req->rq_no_delay ? -EAGAIN : rc);
2407 LASSERT(list_empty(&aa->aa_exts));
2408 LASSERT(list_empty(&aa->aa_oaps));
2410 transferred = (req->rq_bulk == NULL ? /* short io */
2411 aa->aa_requested_nob :
2412 req->rq_bulk->bd_nob_transferred);
2414 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2415 ptlrpc_lprocfs_brw(req, transferred);
2417 spin_lock(&cli->cl_loi_list_lock);
2418 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2419 * is called so we know whether to go to sync BRWs or wait for more
2420 * RPCs to complete */
2421 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2422 cli->cl_w_in_flight--;
2424 cli->cl_r_in_flight--;
2425 osc_wake_cache_waiters(cli);
2426 spin_unlock(&cli->cl_loi_list_lock);
2428 osc_io_unplug(env, cli, NULL);
2432 static void brw_commit(struct ptlrpc_request *req)
2434 /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2435 * this callback, invoked via rq_commit_cb, we need to ensure
2436 * osc_dec_unstable_pages is still called. Otherwise unstable
2437 * pages may be leaked. */
2438 spin_lock(&req->rq_lock);
2439 if (likely(req->rq_unstable)) {
2440 req->rq_unstable = 0;
2441 spin_unlock(&req->rq_lock);
2443 osc_dec_unstable_pages(req);
2445 req->rq_committed = 1;
2446 spin_unlock(&req->rq_lock);
2451 * Build an RPC from the list of extents @ext_list. The caller must ensure
2452 * that the total number of pages in this list does not exceed the max
2453 * pages per RPC. Extents in the list must be in OES_RPC state. */
2455 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2456 struct list_head *ext_list, int cmd)
2458 struct ptlrpc_request *req = NULL;
2459 struct osc_extent *ext;
2460 struct brw_page **pga = NULL;
2461 struct osc_brw_async_args *aa = NULL;
2462 struct obdo *oa = NULL;
2463 struct osc_async_page *oap;
2464 struct osc_object *obj = NULL;
2465 struct cl_req_attr *crattr = NULL;
2466 loff_t starting_offset = OBD_OBJECT_EOF;
2467 loff_t ending_offset = 0;
2468 /* '1' for consistency with code that checks !mpflag to restore */
2472 bool soft_sync = false;
2473 bool ndelay = false;
2477 __u32 layout_version = 0;
2478 LIST_HEAD(rpc_list);
2479 struct ost_body *body;
2481 LASSERT(!list_empty(ext_list));
2483 /* add pages into rpc_list to build BRW rpc */
2484 list_for_each_entry(ext, ext_list, oe_link) {
2485 LASSERT(ext->oe_state == OES_RPC);
2486 mem_tight |= ext->oe_memalloc;
2487 grant += ext->oe_grants;
2488 page_count += ext->oe_nr_pages;
2489 layout_version = max(layout_version, ext->oe_layout_version);
2494 soft_sync = osc_over_unstable_soft_limit(cli);
2496 mpflag = memalloc_noreclaim_save();
2498 OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2500 GOTO(out, rc = -ENOMEM);
2502 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2504 GOTO(out, rc = -ENOMEM);
2507 list_for_each_entry(ext, ext_list, oe_link) {
2508 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2510 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2512 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2513 pga[i] = &oap->oap_brw_page;
2514 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2517 list_add_tail(&oap->oap_rpc_item, &rpc_list);
2518 if (starting_offset == OBD_OBJECT_EOF ||
2519 starting_offset > oap->oap_obj_off)
2520 starting_offset = oap->oap_obj_off;
2522 LASSERT(oap->oap_page_off == 0);
2523 if (ending_offset < oap->oap_obj_off + oap->oap_count)
2524 ending_offset = oap->oap_obj_off +
2527 LASSERT(oap->oap_page_off + oap->oap_count ==
2534 /* first page in the list */
2535 oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
2537 crattr = &osc_env_info(env)->oti_req_attr;
2538 memset(crattr, 0, sizeof(*crattr));
2539 crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2540 crattr->cra_flags = ~0ULL;
2541 crattr->cra_page = oap2cl_page(oap);
2542 crattr->cra_oa = oa;
2543 cl_req_attr_set(env, osc2cl(obj), crattr);
2545 if (cmd == OBD_BRW_WRITE) {
2546 oa->o_grant_used = grant;
2547 if (layout_version > 0) {
2548 CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2549 PFID(&oa->o_oi.oi_fid), layout_version);
2551 oa->o_layout_version = layout_version;
2552 oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2556 sort_brw_pages(pga, page_count);
2557 rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2559 CERROR("prep_req failed: %d\n", rc);
2563 req->rq_commit_cb = brw_commit;
2564 req->rq_interpret_reply = brw_interpret;
2565 req->rq_memalloc = mem_tight != 0;
2566 oap->oap_request = ptlrpc_request_addref(req);
2568 req->rq_no_resend = req->rq_no_delay = 1;
2569 /* probably set a shorter timeout value to handle ETIMEDOUT
2570 * in brw_interpret() correctly. */
2571 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2574 /* Need to update the timestamps after the request is built in case
2575 * we race with setattr (locally or in queue at the OST). If the OST
2576 * gets a later setattr before an earlier BRW (as determined by the
2577 * request xid), the OST will not use the BRW timestamps. Sadly, there
2578 * is no obvious way to do this in a single call. bug 10150 */
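/*
 * Illustrative example (not in the original code): if a setattr with
 * xid 101 reaches the OST before a BRW with xid 100, the OST notices
 * the BRW's lower xid and does not let the BRW timestamps overwrite
 * the setattr's newer values.
 */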
2579 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2580 crattr->cra_oa = &body->oa;
2581 crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2582 cl_req_attr_set(env, osc2cl(obj), crattr);
2583 lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2585 aa = ptlrpc_req_async_args(aa, req);
2586 INIT_LIST_HEAD(&aa->aa_oaps);
2587 list_splice_init(&rpc_list, &aa->aa_oaps);
2588 INIT_LIST_HEAD(&aa->aa_exts);
2589 list_splice_init(ext_list, &aa->aa_exts);
2591 spin_lock(&cli->cl_loi_list_lock);
2592 starting_offset >>= PAGE_SHIFT;
2593 if (cmd == OBD_BRW_READ) {
2594 cli->cl_r_in_flight++;
2595 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2596 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2597 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2598 starting_offset + 1);
2600 cli->cl_w_in_flight++;
2601 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2602 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2603 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2604 starting_offset + 1);
2606 spin_unlock(&cli->cl_loi_list_lock);
2608 DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2609 page_count, aa, cli->cl_r_in_flight,
2610 cli->cl_w_in_flight);
2611 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2613 ptlrpcd_add_req(req);
2619 memalloc_noreclaim_restore(mpflag);
2622 LASSERT(req == NULL);
2625 OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2627 osc_release_bounce_pages(pga, page_count);
2628 osc_release_ppga(pga, page_count);
2630 /* this should happen rarely and is pretty bad; it makes the
2631 * pending list not follow the dirty order */
2632 while (!list_empty(ext_list)) {
2633 ext = list_entry(ext_list->next, struct osc_extent,
2635 list_del_init(&ext->oe_link);
2636 osc_extent_finish(env, ext, 0, rc);
2642 /* This is to refresh our lock in the face of no RPCs. */
2643 void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
2645 struct ptlrpc_request *req;
2647 struct brw_page bpg = { .off = start, .count = 1};
2648 struct brw_page *pga = &bpg;
2651 memset(&oa, 0, sizeof(oa));
2652 oa.o_oi = osc->oo_oinfo->loi_oi;
2653 oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
2654 /* For updated servers - don't do a read */
2655 oa.o_flags = OBD_FL_NORPC;
2657 rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
2660 /* If we succeeded we ship it off; if not, there's no point in doing
2661 * anything. Also no resends.
2662 * No interpret callback, no commit callback. */
2665 req->rq_no_resend = 1;
2666 ptlrpcd_add_req(req);
2670 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2674 LASSERT(lock != NULL);
2676 lock_res_and_lock(lock);
2678 if (lock->l_ast_data == NULL)
2679 lock->l_ast_data = data;
2680 if (lock->l_ast_data == data)
2683 unlock_res_and_lock(lock);
2688 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2689 void *cookie, struct lustre_handle *lockh,
2690 enum ldlm_mode mode, __u64 *flags, bool speculative,
2693 bool intent = *flags & LDLM_FL_HAS_INTENT;
2697 /* The request was created before the ldlm_cli_enqueue() call. */
2698 if (intent && errcode == ELDLM_LOCK_ABORTED) {
2699 struct ldlm_reply *rep;
2701 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2702 LASSERT(rep != NULL);
2704 rep->lock_policy_res1 =
2705 ptlrpc_status_ntoh(rep->lock_policy_res1);
2706 if (rep->lock_policy_res1)
2707 errcode = rep->lock_policy_res1;
2709 *flags |= LDLM_FL_LVB_READY;
2710 } else if (errcode == ELDLM_OK) {
2711 *flags |= LDLM_FL_LVB_READY;
2714 /* Call the update callback. */
2715 rc = (*upcall)(cookie, lockh, errcode);
2717 /* release the reference taken in ldlm_cli_enqueue() */
2718 if (errcode == ELDLM_LOCK_MATCHED)
2720 if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2721 ldlm_lock_decref(lockh, mode);
2726 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2729 struct osc_enqueue_args *aa = args;
2730 struct ldlm_lock *lock;
2731 struct lustre_handle *lockh = &aa->oa_lockh;
2732 enum ldlm_mode mode = aa->oa_mode;
2733 struct ost_lvb *lvb = aa->oa_lvb;
2734 __u32 lvb_len = sizeof(*lvb);
2736 struct ldlm_enqueue_info einfo = {
2737 .ei_type = aa->oa_type,
2743 /* ldlm_cli_enqueue is holding a reference on the lock, so it must be valid. */
2745 lock = ldlm_handle2lock(lockh);
2746 LASSERTF(lock != NULL,
2747 "lockh %#llx, req %p, aa %p - client evicted?\n",
2748 lockh->cookie, req, aa);
2750 /* Take an additional reference so that a blocking AST that
2751 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
2752 * to arrive after an upcall has been executed by
2753 * osc_enqueue_fini(). */
2754 ldlm_lock_addref(lockh, mode);
2756 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2757 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2759 /* Let the CP AST grant the lock first. */
2760 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2762 if (aa->oa_speculative) {
2763 LASSERT(aa->oa_lvb == NULL);
2764 LASSERT(aa->oa_flags == NULL);
2765 aa->oa_flags = &flags;
2768 /* Complete obtaining the lock procedure. */
2769 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2770 lvb, lvb_len, lockh, rc);
2771 /* Complete osc stuff. */
2772 rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2773 aa->oa_flags, aa->oa_speculative, rc);
2775 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2777 ldlm_lock_decref(lockh, mode);
2778 LDLM_LOCK_PUT(lock);
2782 /* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
2783 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
2784 * other synchronous requests, but keeping some locks while trying to obtain
2785 * others may take a considerable amount of time in the case of OST failure;
2786 * and when other sync requests cannot get a lock released by a client, that
2787 * client is evicted from the cluster -- such scenarios make life difficult, so
2788 * release locks just after they are obtained. */
2789 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2790 __u64 *flags, union ldlm_policy_data *policy,
2791 struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2792 void *cookie, struct ldlm_enqueue_info *einfo,
2793 struct ptlrpc_request_set *rqset, int async,
2796 struct obd_device *obd = exp->exp_obd;
2797 struct lustre_handle lockh = { 0 };
2798 struct ptlrpc_request *req = NULL;
2799 int intent = *flags & LDLM_FL_HAS_INTENT;
2800 __u64 match_flags = *flags;
2801 enum ldlm_mode mode;
2805 /* Filesystem lock extents are extended to page boundaries so that
2806 * dealing with the page cache is a little smoother. */
2807 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2808 policy->l_extent.end |= ~PAGE_MASK;
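/*
 * Worked example (illustrative): with 4 KiB pages, ~PAGE_MASK is 0xfff,
 * so a byte range [5000, 9000] is widened to full pages:
 * start = 5000 - (5000 & 0xfff) = 4096 and end = 9000 | 0xfff = 12287.
 */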
2810 /* Next, search for already existing extent locks that will cover us */
2811 /* If we're trying to read, we also search for an existing PW lock. The
2812 * VFS and page cache already protect us locally, so lots of readers/
2813 * writers can share a single PW lock.
2815 * There are problems with conversion deadlocks, so instead of
2816 * converting a read lock to a write lock, we'll just enqueue a new one.
2819 * At some point we should cancel the read lock instead of making them
2820 * send us a blocking callback, but there are problems with canceling
2821 * locks out from other users right now, too. */
2822 mode = einfo->ei_mode;
2823 if (einfo->ei_mode == LCK_PR)
2825 /* Normal lock requests must wait for the LVB to be ready before
2826 * matching a lock; speculative lock requests do not need to,
2827 * because they will not actually use the lock. */
2829 match_flags |= LDLM_FL_LVB_READY;
2831 match_flags |= LDLM_FL_BLOCK_GRANTED;
2832 mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2833 einfo->ei_type, policy, mode, &lockh);
2835 struct ldlm_lock *matched;
2837 if (*flags & LDLM_FL_TEST_LOCK)
2840 matched = ldlm_handle2lock(&lockh);
2842 /* This DLM lock request is speculative, and does not
2843 * have an associated IO request. Therefore if there
2844 * is already a DLM lock, it will just inform the
2845 * caller to cancel the request for this stripe. */
2846 lock_res_and_lock(matched);
2847 if (ldlm_extent_equal(&policy->l_extent,
2848 &matched->l_policy_data.l_extent))
2852 unlock_res_and_lock(matched);
2854 ldlm_lock_decref(&lockh, mode);
2855 LDLM_LOCK_PUT(matched);
2857 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2858 *flags |= LDLM_FL_LVB_READY;
2860 /* We already have a lock, and it's referenced. */
2861 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2863 ldlm_lock_decref(&lockh, mode);
2864 LDLM_LOCK_PUT(matched);
2867 ldlm_lock_decref(&lockh, mode);
2868 LDLM_LOCK_PUT(matched);
2872 if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2875 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2876 *flags &= ~LDLM_FL_BLOCK_GRANTED;
2878 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2879 sizeof(*lvb), LVB_T_OST, &lockh, async);
2882 struct osc_enqueue_args *aa;
2883 aa = ptlrpc_req_async_args(aa, req);
2885 aa->oa_mode = einfo->ei_mode;
2886 aa->oa_type = einfo->ei_type;
2887 lustre_handle_copy(&aa->oa_lockh, &lockh);
2888 aa->oa_upcall = upcall;
2889 aa->oa_cookie = cookie;
2890 aa->oa_speculative = speculative;
2892 aa->oa_flags = flags;
2895 /* speculative locks essentially enqueue a DLM
2896 * lock in advance, so we don't care about the
2897 * result of the enqueue. */
2899 aa->oa_flags = NULL;
2902 req->rq_interpret_reply = osc_enqueue_interpret;
2903 ptlrpc_set_add_req(rqset, req);
2908 rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2909 flags, speculative, rc);
2914 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2915 struct ldlm_res_id *res_id, enum ldlm_type type,
2916 union ldlm_policy_data *policy, enum ldlm_mode mode,
2917 __u64 *flags, struct osc_object *obj,
2918 struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2920 struct obd_device *obd = exp->exp_obd;
2921 __u64 lflags = *flags;
2925 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2928 /* Filesystem lock extents are extended to page boundaries so that
2929 * dealing with the page cache is a little smoother */
2930 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2931 policy->l_extent.end |= ~PAGE_MASK;
2933 /* Next, search for already existing extent locks that will cover us */
2934 rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
2935 res_id, type, policy, mode, lockh,
2937 if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2941 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2943 LASSERT(lock != NULL);
2944 if (osc_set_lock_data(lock, obj)) {
2945 lock_res_and_lock(lock);
2946 if (!ldlm_is_lvb_cached(lock)) {
2947 LASSERT(lock->l_ast_data == obj);
2948 osc_lock_lvb_update(env, obj, lock, NULL);
2949 ldlm_set_lvb_cached(lock);
2951 unlock_res_and_lock(lock);
2953 ldlm_lock_decref(lockh, rc);
2956 LDLM_LOCK_PUT(lock);
2961 static int osc_statfs_interpret(const struct lu_env *env,
2962 struct ptlrpc_request *req, void *args, int rc)
2964 struct osc_async_args *aa = args;
2965 struct obd_statfs *msfs;
2970 * The request has in fact never been sent due to issues at
2971 * a higher level (LOV). Exit immediately since the caller
2972 * is aware of the problem and takes care of the cleanup.
2976 if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2977 (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
2983 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2985 GOTO(out, rc = -EPROTO);
2987 *aa->aa_oi->oi_osfs = *msfs;
2989 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2994 static int osc_statfs_async(struct obd_export *exp,
2995 struct obd_info *oinfo, time64_t max_age,
2996 struct ptlrpc_request_set *rqset)
2998 struct obd_device *obd = class_exp2obd(exp);
2999 struct ptlrpc_request *req;
3000 struct osc_async_args *aa;
3004 if (obd->obd_osfs_age >= max_age) {
3006 "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
3007 obd->obd_name, &obd->obd_osfs,
3008 obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
3009 obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
3010 spin_lock(&obd->obd_osfs_lock);
3011 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
3012 spin_unlock(&obd->obd_osfs_lock);
3013 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
3014 if (oinfo->oi_cb_up)
3015 oinfo->oi_cb_up(oinfo, 0);
3020 /* We could possibly pass max_age in the request (as an absolute
3021 * timestamp or a "seconds.usec ago") so the target can avoid doing
3022 * extra calls into the filesystem if that isn't necessary (e.g.
3023 * during mount that would help a bit). Having relative timestamps
3024 * is not so great if request processing is slow, while absolute
3025 * timestamps are not ideal because they need time synchronization. */
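/*
 * Illustrative example (not in the original code): a caller that can
 * tolerate statfs data up to 1 second old passes
 * max_age = ktime_get_seconds() - 1; the cached obd_osfs is served by
 * the check above only if obd_osfs_age, the time of the last refresh,
 * is at least that recent.
 */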
3026 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3030 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3032 ptlrpc_request_free(req);
3035 ptlrpc_request_set_replen(req);
3036 req->rq_request_portal = OST_CREATE_PORTAL;
3037 ptlrpc_at_set_req_timeout(req);
3039 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3040 /* procfs requests should not be delayed or resent, to avoid deadlock */
3041 req->rq_no_resend = 1;
3042 req->rq_no_delay = 1;
3045 req->rq_interpret_reply = osc_statfs_interpret;
3046 aa = ptlrpc_req_async_args(aa, req);
3049 ptlrpc_set_add_req(rqset, req);
3053 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3054 struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3056 struct obd_device *obd = class_exp2obd(exp);
3057 struct obd_statfs *msfs;
3058 struct ptlrpc_request *req;
3059 struct obd_import *imp, *imp0;
3063 /* Since the request might also come from lprocfs, we need to
3064 * sync this with client_disconnect_export() (bug 15684). */
3066 with_imp_locked(obd, imp0, rc)
3067 imp = class_import_get(imp0);
3071 /* We could possibly pass max_age in the request (as an absolute
3072 * timestamp or a "seconds.usec ago") so the target can avoid doing
3073 * extra calls into the filesystem if that isn't necessary (e.g.
3074 * during mount that would help a bit). Having relative timestamps
3075 * is not so great if request processing is slow, while absolute
3076 * timestamps are not ideal because they need time synchronization. */
3077 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3079 class_import_put(imp);
3084 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3086 ptlrpc_request_free(req);
3089 ptlrpc_request_set_replen(req);
3090 req->rq_request_portal = OST_CREATE_PORTAL;
3091 ptlrpc_at_set_req_timeout(req);
3093 if (flags & OBD_STATFS_NODELAY) {
3094 /* procfs requests should not be delayed or resent, to avoid deadlock */
3095 req->rq_no_resend = 1;
3096 req->rq_no_delay = 1;
3099 rc = ptlrpc_queue_wait(req);
3103 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3105 GOTO(out, rc = -EPROTO);
3111 ptlrpc_req_finished(req);
3115 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3116 void *karg, void __user *uarg)
3118 struct obd_device *obd = exp->exp_obd;
3119 struct obd_ioctl_data *data = karg;
3123 if (!try_module_get(THIS_MODULE)) {
3124 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3125 module_name(THIS_MODULE));
3129 case OBD_IOC_CLIENT_RECOVER:
3130 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3131 data->ioc_inlbuf1, 0);
3135 case IOC_OSC_SET_ACTIVE:
3136 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3141 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3142 obd->obd_name, cmd, current->comm, rc);
3146 module_put(THIS_MODULE);
3150 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3151 u32 keylen, void *key, u32 vallen, void *val,
3152 struct ptlrpc_request_set *set)
3154 struct ptlrpc_request *req;
3155 struct obd_device *obd = exp->exp_obd;
3156 struct obd_import *imp = class_exp2cliimp(exp);
3161 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3163 if (KEY_IS(KEY_CHECKSUM)) {
3164 if (vallen != sizeof(int))
3166 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3170 if (KEY_IS(KEY_SPTLRPC_CONF)) {
3171 sptlrpc_conf_client_adapt(obd);
3175 if (KEY_IS(KEY_FLUSH_CTX)) {
3176 sptlrpc_import_flush_my_ctx(imp);
3180 if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3181 struct client_obd *cli = &obd->u.cli;
3182 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3183 long target = *(long *)val;
3185 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3190 if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3193 /* We pass all other commands directly to OST. Since nobody calls osc
3194 methods directly and everybody is supposed to go through LOV, we
3195 assume lov checked invalid values for us.
3196 The only recognised values so far are evict_by_nid and mds_conn.
3197 Even if something bad goes through, we'd get a -EINVAL from OST anyway. */
3200 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3201 &RQF_OST_SET_GRANT_INFO :
3206 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3207 RCL_CLIENT, keylen);
3208 if (!KEY_IS(KEY_GRANT_SHRINK))
3209 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3210 RCL_CLIENT, vallen);
3211 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3213 ptlrpc_request_free(req);
3217 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3218 memcpy(tmp, key, keylen);
3219 tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3222 memcpy(tmp, val, vallen);
3224 if (KEY_IS(KEY_GRANT_SHRINK)) {
3225 struct osc_grant_args *aa;
3228 aa = ptlrpc_req_async_args(aa, req);
3229 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3231 ptlrpc_req_finished(req);
3234 *oa = ((struct ost_body *)val)->oa;
3236 req->rq_interpret_reply = osc_shrink_grant_interpret;
3239 ptlrpc_request_set_replen(req);
3240 if (!KEY_IS(KEY_GRANT_SHRINK)) {
3241 LASSERT(set != NULL);
3242 ptlrpc_set_add_req(set, req);
3243 ptlrpc_check_set(NULL, set);
3245 ptlrpcd_add_req(req);
3250 EXPORT_SYMBOL(osc_set_info_async);
3252 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3253 struct obd_device *obd, struct obd_uuid *cluuid,
3254 struct obd_connect_data *data, void *localdata)
3256 struct client_obd *cli = &obd->u.cli;
3258 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3262 spin_lock(&cli->cl_loi_list_lock);
3263 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3264 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3265 /* restore ocd_grant_blkbits as client page bits */
3266 data->ocd_grant_blkbits = PAGE_SHIFT;
3267 grant += cli->cl_dirty_grant;
3269 grant += cli->cl_dirty_pages << PAGE_SHIFT;
3271 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
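/*
 * Illustrative example (hypothetical numbers): with 1 MiB of available
 * grant, no reserved grant and 256 dirty pages on a 4 KiB-page client,
 * the client asks to keep grant = 1 MiB + 256 * 4 KiB = 2 MiB; with no
 * grant at all it falls back to twice the BRW size.
 */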
3272 lost_grant = cli->cl_lost_grant;
3273 cli->cl_lost_grant = 0;
3274 spin_unlock(&cli->cl_loi_list_lock);
3276 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3277 " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3278 data->ocd_version, data->ocd_grant, lost_grant);
3283 EXPORT_SYMBOL(osc_reconnect);
3285 int osc_disconnect(struct obd_export *exp)
3287 struct obd_device *obd = class_exp2obd(exp);
3290 rc = client_disconnect_export(exp);
3292 * Initially we put del_shrink_grant before disconnect_export, but it
3293 * causes the following problem if setup (connect) and cleanup
3294 * (disconnect) are tangled together.
3295 * connect p1 disconnect p2
3296 * ptlrpc_connect_import
3297 * ............... class_manual_cleanup
3300 * ptlrpc_connect_interrupt
3302 * add this client to shrink list
3304 * Bang! the grant shrink thread triggers the shrink. BUG18662
3306 osc_del_grant_list(&obd->u.cli);
3309 EXPORT_SYMBOL(osc_disconnect);
3311 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3312 struct hlist_node *hnode, void *arg)
3314 struct lu_env *env = arg;
3315 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3316 struct ldlm_lock *lock;
3317 struct osc_object *osc = NULL;
3321 list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3322 if (lock->l_ast_data != NULL && osc == NULL) {
3323 osc = lock->l_ast_data;
3324 cl_object_get(osc2cl(osc));
3327 /* clear LDLM_FL_CLEANED flag to make sure it will be canceled
3328 * by the 2nd round of ldlm_namespace_clean() call in
3329 * osc_import_event(). */
3330 ldlm_clear_cleaned(lock);
3335 osc_object_invalidate(env, osc);
3336 cl_object_put(env, osc2cl(osc));
3341 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3343 static int osc_import_event(struct obd_device *obd,
3344 struct obd_import *imp,
3345 enum obd_import_event event)
3347 struct client_obd *cli;
3351 LASSERT(imp->imp_obd == obd);
3354 case IMP_EVENT_DISCON: {
3356 spin_lock(&cli->cl_loi_list_lock);
3357 cli->cl_avail_grant = 0;
3358 cli->cl_lost_grant = 0;
3359 spin_unlock(&cli->cl_loi_list_lock);
3362 case IMP_EVENT_INACTIVE: {
3363 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3366 case IMP_EVENT_INVALIDATE: {
3367 struct ldlm_namespace *ns = obd->obd_namespace;
3371 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3373 env = cl_env_get(&refcheck);
3375 osc_io_unplug(env, &obd->u.cli, NULL);
3377 cfs_hash_for_each_nolock(ns->ns_rs_hash,
3378 osc_ldlm_resource_invalidate,
3380 cl_env_put(env, &refcheck);
3382 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3387 case IMP_EVENT_ACTIVE: {
3388 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3391 case IMP_EVENT_OCD: {
3392 struct obd_connect_data *ocd = &imp->imp_connect_data;
3394 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3395 osc_init_grant(&obd->u.cli, ocd);
3398 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3399 imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3401 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3404 case IMP_EVENT_DEACTIVATE: {
3405 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3408 case IMP_EVENT_ACTIVATE: {
3409 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3413 CERROR("Unknown import event %d\n", event);
3420 * Determine whether the lock can be canceled before replaying the lock
3421 * during recovery; see bug 16774 for detailed information.
3423 * \retval zero the lock can't be canceled
3424 * \retval other ok to cancel
3426 static int osc_cancel_weight(struct ldlm_lock *lock)
3429 * Cancel all unused and granted extent locks.
3431 if (lock->l_resource->lr_type == LDLM_EXTENT &&
3432 ldlm_is_granted(lock) &&
3433 osc_ldlm_weigh_ast(lock) == 0)
3439 static int brw_queue_work(const struct lu_env *env, void *data)
3441 struct client_obd *cli = data;
3443 CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3445 osc_io_unplug(env, cli, NULL);
3449 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3451 struct client_obd *cli = &obd->u.cli;
3457 rc = ptlrpcd_addref();
3461 rc = client_obd_setup(obd, lcfg);
3463 GOTO(out_ptlrpcd, rc);
3466 handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3467 if (IS_ERR(handler))
3468 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3469 cli->cl_writeback_work = handler;
3471 handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3472 if (IS_ERR(handler))
3473 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3474 cli->cl_lru_work = handler;
3476 rc = osc_quota_setup(obd);
3478 GOTO(out_ptlrpcd_work, rc);
3480 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3481 osc_update_next_shrink(cli);
3486 if (cli->cl_writeback_work != NULL) {
3487 ptlrpcd_destroy_work(cli->cl_writeback_work);
3488 cli->cl_writeback_work = NULL;
3490 if (cli->cl_lru_work != NULL) {
3491 ptlrpcd_destroy_work(cli->cl_lru_work);
3492 cli->cl_lru_work = NULL;
3494 client_obd_cleanup(obd);
3499 EXPORT_SYMBOL(osc_setup_common);
3501 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3503 struct client_obd *cli = &obd->u.cli;
3511 rc = osc_setup_common(obd, lcfg);
3515 rc = osc_tunables_init(obd);
3520 * We try to control the total number of requests with an upper limit,
3521 * osc_reqpool_maxreqcount. There might be some race that causes
3522 * over-limit allocation, but it is fine.
3524 req_count = atomic_read(&osc_pool_req_count);
3525 if (req_count < osc_reqpool_maxreqcount) {
3526 adding = cli->cl_max_rpcs_in_flight + 2;
3527 if (req_count + adding > osc_reqpool_maxreqcount)
3528 adding = osc_reqpool_maxreqcount - req_count;
3530 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3531 atomic_add(added, &osc_pool_req_count);
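/*
 * Illustrative example (hypothetical numbers): with
 * osc_reqpool_maxreqcount = 5 and 4 requests already pooled, a client
 * with cl_max_rpcs_in_flight = 8 would ask for 8 + 2 = 10 more requests
 * but is capped to 5 - 4 = 1, keeping the pool at its global limit.
 */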
3534 ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3536 spin_lock(&osc_shrink_lock);
3537 list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3538 spin_unlock(&osc_shrink_lock);
3539 cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3540 cli->cl_import->imp_idle_debug = D_HA;
3545 int osc_precleanup_common(struct obd_device *obd)
3547 struct client_obd *cli = &obd->u.cli;
3551 * for echo client, export may be on zombie list, wait for
3552 * zombie thread to cull it, because cli.cl_import will be
3553 * cleared in client_disconnect_export():
3554 * class_export_destroy() -> obd_cleanup() ->
3555 * echo_device_free() -> echo_client_cleanup() ->
3556 * obd_disconnect() -> osc_disconnect() ->
3557 * client_disconnect_export()
3559 obd_zombie_barrier();
3560 if (cli->cl_writeback_work) {
3561 ptlrpcd_destroy_work(cli->cl_writeback_work);
3562 cli->cl_writeback_work = NULL;
3565 if (cli->cl_lru_work) {
3566 ptlrpcd_destroy_work(cli->cl_lru_work);
3567 cli->cl_lru_work = NULL;
3570 obd_cleanup_client_import(obd);
3573 EXPORT_SYMBOL(osc_precleanup_common);
3575 static int osc_precleanup(struct obd_device *obd)
3579 osc_precleanup_common(obd);
3581 ptlrpc_lprocfs_unregister_obd(obd);
3585 int osc_cleanup_common(struct obd_device *obd)
3587 struct client_obd *cli = &obd->u.cli;
3592 spin_lock(&osc_shrink_lock);
3593 list_del(&cli->cl_shrink_list);
3594 spin_unlock(&osc_shrink_lock);
3597 if (cli->cl_cache != NULL) {
3598 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3599 spin_lock(&cli->cl_cache->ccc_lru_lock);
3600 list_del_init(&cli->cl_lru_osc);
3601 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3602 cli->cl_lru_left = NULL;
3603 cl_cache_decref(cli->cl_cache);
3604 cli->cl_cache = NULL;
3607 /* free memory of osc quota cache */
3608 osc_quota_cleanup(obd);
3610 rc = client_obd_cleanup(obd);
3615 EXPORT_SYMBOL(osc_cleanup_common);
3617 static const struct obd_ops osc_obd_ops = {
3618 .o_owner = THIS_MODULE,
3619 .o_setup = osc_setup,
3620 .o_precleanup = osc_precleanup,
3621 .o_cleanup = osc_cleanup_common,
3622 .o_add_conn = client_import_add_conn,
3623 .o_del_conn = client_import_del_conn,
3624 .o_connect = client_connect_import,
3625 .o_reconnect = osc_reconnect,
3626 .o_disconnect = osc_disconnect,
3627 .o_statfs = osc_statfs,
3628 .o_statfs_async = osc_statfs_async,
3629 .o_create = osc_create,
3630 .o_destroy = osc_destroy,
3631 .o_getattr = osc_getattr,
3632 .o_setattr = osc_setattr,
3633 .o_iocontrol = osc_iocontrol,
3634 .o_set_info_async = osc_set_info_async,
3635 .o_import_event = osc_import_event,
3636 .o_quotactl = osc_quotactl,
3639 LIST_HEAD(osc_shrink_list);
3640 DEFINE_SPINLOCK(osc_shrink_lock);
3642 #ifdef HAVE_SHRINKER_COUNT
3643 static struct shrinker osc_cache_shrinker = {
3644 .count_objects = osc_cache_shrink_count,
3645 .scan_objects = osc_cache_shrink_scan,
3646 .seeks = DEFAULT_SEEKS,
3649 static int osc_cache_shrink(struct shrinker *shrinker,
3650 struct shrink_control *sc)
3652 (void)osc_cache_shrink_scan(shrinker, sc);
3654 return osc_cache_shrink_count(shrinker, sc);
3657 static struct shrinker osc_cache_shrinker = {
3658 .shrink = osc_cache_shrink,
3659 .seeks = DEFAULT_SEEKS,
3663 static int __init osc_init(void)
3665 unsigned int reqpool_size;
3666 unsigned int reqsize;
3670 /* print the address of _any_ initialized kernel symbol from this
3671 * module, to allow debugging with a gdb that doesn't support data
3672 * symbols from modules. */
3673 CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3675 rc = lu_kmem_init(osc_caches);
3679 rc = class_register_type(&osc_obd_ops, NULL, true,
3680 LUSTRE_OSC_NAME, &osc_device_type);
3684 rc = register_shrinker(&osc_cache_shrinker);
3688 /* This is obviously too much memory; we only prevent overflow here */
3689 if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3690 GOTO(out_shrinker, rc = -EINVAL);
3692 reqpool_size = osc_reqpool_mem_max << 20;
3695 while (reqsize < OST_IO_MAXREQSIZE)
3696 reqsize = reqsize << 1;
3699 * We don't enlarge the request count in the OSC pool according to
3700 * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3701 * after normal allocation fails, so a small OSC pool won't cause
3702 * much performance degradation in most cases.
3704 osc_reqpool_maxreqcount = reqpool_size / reqsize;
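/*
 * Worked example (illustrative; assumes OST_IO_MAXREQSIZE rounds up to
 * a 1 MiB power-of-two reqsize): the default osc_reqpool_mem_max of
 * 5 MB gives reqpool_size = 5 << 20 bytes, so
 * osc_reqpool_maxreqcount = (5 << 20) / (1 << 20) = 5 requests.
 */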
3706 atomic_set(&osc_pool_req_count, 0);
3707 osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3708 ptlrpc_add_rqs_to_pool);
3710 if (osc_rq_pool == NULL)
3711 GOTO(out_shrinker, rc = -ENOMEM);
3713 rc = osc_start_grant_work();
3715 GOTO(out_req_pool, rc);
3720 ptlrpc_free_rq_pool(osc_rq_pool);
3722 unregister_shrinker(&osc_cache_shrinker);
3724 class_unregister_type(LUSTRE_OSC_NAME);
3726 lu_kmem_fini(osc_caches);
3731 static void __exit osc_exit(void)
3733 osc_stop_grant_work();
3734 unregister_shrinker(&osc_cache_shrinker);
3735 class_unregister_type(LUSTRE_OSC_NAME);
3736 lu_kmem_fini(osc_caches);
3737 ptlrpc_free_rq_pool(osc_rq_pool);
3740 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3741 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3742 MODULE_VERSION(LUSTRE_VERSION_STRING);
3743 MODULE_LICENSE("GPL");
3745 module_init(osc_init);
3746 module_exit(osc_exit);