/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>
#include "osc_internal.h"

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);
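/* parameter description for modinfo(8); wording mirrors the comment above */
MODULE_PARM_DESC(osc_reqpool_mem_max, "max memory used for request pool (MB)");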
static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);
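/* parameter description for modinfo(8); assumed semantics: the number of
 * seconds an OSC connection may sit idle before it is disconnected */
MODULE_PARM_DESC(osc_idle_timeout, "idle timeout in seconds before disconnect");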
#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        obd_enqueue_update_f     sa_upcall;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        obd_enqueue_update_f     fa_upcall;
};

struct osc_ladvise_args {
        obd_enqueue_update_f     la_upcall;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *args, int rc);
void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
        struct ptlrpc_request *req;
        struct ost_body *body;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
                ptlrpc_request_free(req);

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        ptlrpc_req_finished(req);

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
        struct ptlrpc_request *req;
        struct ost_body *body;

        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
                ptlrpc_request_free(req);

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        ptlrpc_req_finished(req);
static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
        struct osc_setattr_args *sa = args;
        struct ost_body *body;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
        rc = sa->sa_upcall(sa->sa_cookie, rc);

int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
                ptlrpc_request_free(req);

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        /* Do not wait for response. */
        ptlrpcd_add_req(req);

        req->rq_interpret_reply = osc_setattr_interpret;

        sa = ptlrpc_req_async_args(sa, req);
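        /* ptlrpc_req_async_args() hands back storage embedded in the request
         * itself (rq_async_args), so no separate allocation is needed and the
         * args live exactly as long as the request does. */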
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;

        rc = la->la_upcall(la->la_cookie, rc);

/*
 * If rqset is NULL, do not wait for response. Upcall and cookie could also
 * be NULL in this case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct osc_ladvise_args *la;
        struct lu_ladvise *req_ladvise;
        struct lu_ladvise *ladvise = ladvise_hdr->lah_advise;
        int num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr *req_ladvise_hdr;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
                ptlrpc_request_free(req);

        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        /* Do not wait for response. */
        ptlrpcd_add_req(req);

        req->rq_interpret_reply = osc_ladvise_interpret;
        la = ptlrpc_req_async_args(la, req);

        la->la_upcall = upcall;
        la->la_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);
static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
        struct ptlrpc_request *req;
        struct ost_body *body;

        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
                ptlrpc_request_free(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));

        ptlrpc_req_finished(req);

int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
                ptlrpc_request_free(req);

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        sa = ptlrpc_req_async_args(sa, req);

        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

EXPORT_SYMBOL(osc_punch_send);
/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:    Export structure
 * @oa:     Attributes passed to OSS from client (obdo structure)
 * @upcall: Completion callback invoked when the request finishes
 * @cookie: Caller-private identifier passed back to @upcall
 * @mode:   Operation done on given range.
 *
 * Only block allocation or the standard preallocate operation is supported
 * currently. Other mode flags are not supported yet. ftruncate(2) or
 * truncate(2) is supported via a SETATTR request.
 *
 * Return: Non-zero on failure and 0 on success.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
                       obd_enqueue_update_f upcall, void *cookie, int mode)
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        struct obd_import *imp = class_exp2cliimp(exp);

        oa->o_falloc_mode = mode;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_OST_FALLOCATE);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
                ptlrpc_request_free(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(sa, req);

        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);
static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
        struct osc_fsync_args *fa = args;
        struct ost_body *body;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned long valid = 0;
        struct cl_object *obj;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;

        cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

        rc = fa->fa_upcall(fa->fa_cookie, rc);

int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
        struct obd_export *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct osc_fsync_args *fa;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
                ptlrpc_request_free(req);

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        fa = ptlrpc_req_async_args(fa, req);

        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);
/* Find and cancel locally the locks matched by @mode in the resource derived
 * from @oa. Found locks are added into the @cancels list. Returns the number
 * of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This distinguishes it from the case when ELC is not supported
         * originally, when we still want to cancel locks in advance and just
         * cancel them locally, without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);

static int osc_can_send_destroy(struct client_obd *cli)
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */

        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
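/*
 * The inc/dec pair in osc_can_send_destroy() may be seen concurrently by
 * several threads: a thread that fails the first check undoes its increment,
 * and the wake_up() covers the window where a waiter observed the transient
 * over-limit value. Callers follow the pattern used in osc_destroy() below:
 *
 *      if (!osc_can_send_destroy(cli))
 *              rc = l_wait_event_abortable_exclusive(cli->cl_destroy_waitq,
 *                                                    osc_can_send_destroy(cli));
 */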
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body *body;

                CDEBUG(D_INFO, "oa NULL\n");

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
                ldlm_lock_list_put(&cancels, l_bl_ast, count);

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
                ptlrpc_request_free(req);

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below cl_max_rpcs_in_flight.
                 */
                rc = l_wait_event_abortable_exclusive(
                        cli->cl_destroy_waitq,
                        osc_can_send_destroy(cli));
                        ptlrpc_req_finished(req);

        /* Do not wait for response */
        ptlrpcd_add_req(req);

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_ocd_grant_param)
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() and atomic_inc() are not covered by a
                 * lock, thus they may safely race and trip this CERROR()
                 * unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       obd_max_dirty_pages);
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
                if (cli->cl_ocd_grant_param) {
                        u64 nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                    cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;

                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc. */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT &
                                    ~(PTLRPC_MAX_BRW_SIZE * 4UL));

        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
        if (cli->cl_lost_grant > INT_MAX) {
                CERROR("%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
                       cli_name(cli), cli->cl_lost_grant);
                oa->o_dropped = INT_MAX;
        } else {
                oa->o_dropped = cli->cl_lost_grant;

        cli->cl_lost_grant -= oa->o_dropped;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
               " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
               oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
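/*
 * The o_dirty/o_undirty/o_grant/o_dropped values packed above piggy-back the
 * client's cache and grant state on outgoing requests; the server is assumed
 * to use them when sizing the next grant it returns, which the client then
 * consumes on reply in osc_update_grant() below.
 */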
void osc_update_next_shrink(struct client_obd *cli)
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);

static void __osc_update_grant(struct client_obd *cli, u64 grant)
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head gtd_clients;
        struct mutex gtd_mutex;
        unsigned long gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
        struct osc_grant_args *aa = args;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

                __osc_update_grant(cli, aa->aa_oa->o_grant);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);

        osc_update_grant(cli, body);

        OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;

        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC. This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
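/*
 * For example, with cl_max_rpcs_in_flight = 8 and 4 MiB RPCs the target above
 * is (8 + 1) * 4 MiB = 36 MiB; once the available grant is already at or
 * below that, the next shrink drops the target to a single RPC worth (4 MiB).
 */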
int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
        struct ost_body *body;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);

        spin_unlock(&cli->cl_loi_list_lock);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);

        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;

        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
                __osc_update_grant(cli, body->oa.o_grant);

static int osc_should_shrink_grant(struct client_obd *client)
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)

        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
            client->cl_import->imp_grant_shrink_disabled) {
                osc_update_next_shrink(client);

        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);

#define GRANT_SHRINK_RPC_BATCH 100
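/*
 * At most GRANT_SHRINK_RPC_BATCH shrink RPCs are issued per pass of the grant
 * work handler below, so one sweep over gtd_clients cannot flood the wire
 * even when many idle clients are due to shrink at the same time.
 */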
static struct delayed_work work;

static void osc_grant_work_handler(struct work_struct *data)
        struct client_obd *cli;
        int rpc_sent = 0;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli)) {
                        osc_shrink_grant(cli);

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }

        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds()) {
                time64_t delay = next_shrink - ktime_get_seconds();

                schedule_delayed_work(&work, cfs_time_seconds(delay));
        } else {
                schedule_work(&work.work);
        }

void osc_schedule_grant_work(void)
        cancel_delayed_work_sync(&work);
        schedule_work(&work.work);

/**
 * Start grant thread for returning grant to server for idle clients.
 */
static int osc_start_grant_work(void)
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

static void osc_stop_grant_work(void)
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);

static void osc_add_grant_list(struct client_obd *client)
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);

static void osc_del_grant_list(struct client_obd *client)
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
        /*
         * ocd_grant is the total grant amount we're expected to hold: if we've
         * been evicted, it's the new avail_grant amount, cl_dirty_pages will
         * drop to 0 as inflight RPCs fail out; otherwise, it's avail_grant +
         * dirty.
         *
         * race is tolerable here: if we're evicted, but imp_state already
         * left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                unsigned long consumed = cli->cl_reserved_grant;

                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        consumed += cli->cl_dirty_grant;
                else
                        consumed += cli->cl_dirty_pages << PAGE_SHIFT;
                if (cli->cl_avail_grant < consumed) {
                        CERROR("%s: granted %ld but already consumed %ld\n",
                               cli_name(cli), cli->cl_avail_grant, consumed);
                        cli->cl_avail_grant = 0;
                } else {
                        cli->cl_avail_grant -= consumed;
                }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
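                /*
                 * Example: with PAGE_SHIFT = 12 and ocd_grant_blkbits = 16,
                 * cl_chunkbits is 16 (a 16-page chunk), chunk_mask is ~15UL,
                 * and the add-and-mask above rounds cl_max_pages_per_rpc up
                 * to the next multiple of 16 pages.
                 */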
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
                cli->cl_ocd_grant_param = 1;
        } else {
                cli->cl_ocd_grant_param = 0;
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE,
               "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
               cli_name(cli),
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
               cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
EXPORT_SYMBOL(osc_init_grant);

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                              (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);

                nob_read -= pga[i]->count;

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0) {
                        CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
                               i, remote_rcs[i], req);
                        return remote_rcs[i];

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], req);

        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC | OBD_BRW_ASYNC |
                                  OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);

        return (p1->off + p1->count == p2->off);
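/*
 * That is, two brw_pages merge into a single niobuf only when they are
 * byte-contiguous (the first ends exactly where the second starts) and no
 * "meaningful" flags differ: e.g. 4096 bytes at offset 0 followed by 4096
 * bytes at offset 4096 merge into one 8192-byte niobuf.
 */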
#if IS_ENABLED(CONFIG_CRC_T10DIF)
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size, u32 *check_sum)
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __u16 *guard_start;
        int guard_number;
        unsigned int bufsize;
        u32 cksum;
        int used_number = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
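        /*
         * The per-sector DIF guard tags are not hashed directly; they are
         * accumulated in the bounce page mapped above and fed to the hash in
         * batches of guard_number 16-bit tags (PAGE_SIZE / 2, i.e. 2048 tags
         * per batch on 4K pages).
         */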
        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));

                /*
                 * The left guard number should be able to hold checksums of a
                 * whole page.
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  guard_start + used_number,
                                                  guard_number - used_number,

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));

                nob -= pga[i]->count;

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))

        __free_page(__page);

#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum) \
        -EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */
static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
        struct ahash_request *req;
        unsigned int bufsize;
        unsigned char cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));

                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))

static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum)
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;

        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);
static inline void osc_release_bounce_pages(struct brw_page **pga,
                                            u32 page_count)
#ifdef HAVE_LUSTRE_CRYPTO
        int i;

        for (i = 0; i < page_count; i++) {
                /* Bounce pages allocated by a call to
                 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
                 * are identified thanks to the PageChecked flag.
                 */
                if (PageChecked(pga[i]->pg))
                        llcrypt_finalize_bounce_page(&pga[i]->pg);
                pga[i]->count -= pga[i]->bp_count_diff;
                pga[i]->off += pga[i]->bp_off_diff;

static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                     u32 page_count, struct brw_page **pga,
                     struct ptlrpc_request **reqp, int resend)
        struct ptlrpc_request *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body *body;
        struct obd_ioobj *ioobj;
        struct niobuf_remote *niobuf;
        int niocount, i, requested_nob, opc, rc, short_io_size = 0;
        struct osc_brw_async_args *aa;
        struct req_capsule *pill;
        struct brw_page *pg_prev;
        const char *obd_name = cli->cl_import->imp_obd->obd_name;
        struct inode *inode;
        bool directio = false;

        inode = page2inode(pga[0]->pg);
        if (inode == NULL) {
                /* Try to get reference to inode from cl_page if we are
                 * dealing with direct IO, as handled pages are not
                 * actual page cache pages.
                 */
                struct osc_async_page *oap = brw_page2oap(pga[0]);
                struct cl_page *clpage = oap2cl_page(oap);

                inode = clpage->cp_inode;

        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                osc_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }

        if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
                for (i = 0; i < page_count; i++) {
                        struct brw_page *pg = pga[i];
                        struct page *data_page = NULL;
                        bool retried = false;
                        bool lockedbymyself;
                        u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
                        struct address_space *map_orig = NULL;
                        pgoff_t index_orig;
                        if (nunits & ~LUSTRE_ENCRYPTION_MASK)
                                nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
                        /* The page can already be locked when we arrive here.
                         * This is possible when cl_page_assume/vvp_page_assume
                         * is stuck on wait_on_page_writeback with page lock
                         * held. In this case there is no risk for the lock to
                         * be released while we are doing our encryption
                         * processing, because writeback against that page will
                         * end in vvp_page_completion_write/cl_page_completion,
                         * which can only happen once the page is fully
                         * processed.
                         */
                        lockedbymyself = trylock_page(pg->pg);
                                map_orig = pg->pg->mapping;
                                pg->pg->mapping = inode->i_mapping;
                                index_orig = pg->pg->index;
                                pg->pg->index = pg->off >> PAGE_SHIFT;
                        data_page =
                                llcrypt_encrypt_pagecache_blocks(pg->pg,

                                pg->pg->mapping = map_orig;
                                pg->pg->index = index_orig;

                                unlock_page(pg->pg);
                        if (IS_ERR(data_page)) {
                                rc = PTR_ERR(data_page);
                                if (rc == -ENOMEM && !retried) {

                                ptlrpc_request_free(req);

                        /* Set PageChecked flag on bounce page for
                         * disambiguation in osc_release_bounce_pages().
                         */
                        SetPageChecked(data_page);

                        /* there should be no gap in the middle of page array */
                        if (i == page_count - 1) {
                                struct osc_async_page *oap = brw_page2oap(pg);

                                oa->o_size = oap->oap_count +
                                             oap->oap_obj_off + oap->oap_page_off;

                        /* len is forced to nunits, and relative offset to 0
                         * so store the old, clear text info
                         */
                        pg->bp_count_diff = nunits - pg->count;

                        pg->bp_off_diff = pg->off & ~PAGE_MASK;
                        pg->off = pg->off & PAGE_MASK;

        } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
                for (i = 0; i < page_count; i++) {
                        struct brw_page *pg = pga[i];
                        u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;

                        if (nunits & ~LUSTRE_ENCRYPTION_MASK)
                                nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
                        /* count/off are forced to cover the whole encryption
                         * unit size so that all encrypted data is stored on
                         * the OST, so adjust bp_{count,off}_diff for the size
                         * of the clear text data.
                         */
                        pg->bp_count_diff = nunits - pg->count;

                        pg->bp_off_diff = pg->off & ~PAGE_MASK;
                        pg->off = pg->off & PAGE_MASK;

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))
                        niocount++;

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
                             sizeof(*ioobj));
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));

        for (i = 0; i < page_count; i++) {
                short_io_size += pga[i]->count;
                if (!inode || !IS_ENCRYPTED(inode)) {
                        pga[i]->bp_count_diff = 0;
                        pga[i]->bp_off_diff = 0;

        /* Check if read/write is small enough to be a short io. */
        if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
            !imp_connect_shortio(cli->cl_import))

        req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
                             opc == OST_READ ? 0 : short_io_size);
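        /*
         * Short I/O: when a transfer fits in cl_max_short_io_bytes and
         * coalesces into a single niobuf, the data travels inline in the
         * RMF_SHORT_IO buffer sized above rather than through a bulk
         * descriptor, saving a bulk transfer round trip for tiny I/Os.
         */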
        if (opc == OST_READ)
                req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
                                     short_io_size);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
                ptlrpc_request_free(req);

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);
        /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
         * resend logic */
        req->rq_no_retry_einprogress = 1;

        if (short_io_size != 0) {
                short_io_buf = NULL;

        desc = ptlrpc_prep_bulk_imp(req, page_count,
                cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
                (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
                 PTLRPC_BULK_PUT_SINK),
                OST_BULK_PORTAL,
                &ptlrpc_bulk_kiov_pin_ops);

                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
         * and from_kgid(), because they are asynchronous. Fortunately, variable
         * oa contains valid o_uid and o_gid in these two operations.
         * Besides, filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
         * OBD_MD_FLUID and OBD_MD_FLGID are not set in order to avoid breaking
         * other process logic */
        body->oa.o_uid = oa->o_uid;
        body->oa.o_gid = oa->o_gid;

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        /* The high bits of ioo_max_brw tell the server the _maximum_ number of
         * bulks that might be sent for this request. The actual number is
         * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
         * sends "max - 1" for old client compatibility sending "0", and also
         * so that the actual maximum is a power-of-two number, not one less.
         * LU-1431 */
        if (desc != NULL)
                ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
        else
                ioobj_max_brw_set(ioobj, 0);

        if (short_io_size != 0) {
                if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                        body->oa.o_valid |= OBD_MD_FLFLAGS;
                        body->oa.o_flags = 0;

                body->oa.o_flags |= OBD_FL_SHORT_IO;
                CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
                       short_io_size);
                if (opc == OST_WRITE) {
                        short_io_buf = req_capsule_client_get(pill,
                                                              &RMF_SHORT_IO);
                        LASSERT(short_io_buf != NULL);

        LASSERT(page_count > 0);
        pg_prev = pga[0];
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = pga[i];
                int poff = pg->off & ~PAGE_MASK;

                LASSERT(pg->count > 0);
                /* make sure there is no gap in the middle of page array */
                LASSERTF(page_count == 1 ||
                         (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
                               poff == 0 && pg->count == PAGE_SIZE) &&
                          ergo(i == page_count - 1, poff == 0)),
                         "i: %d/%d pg: %p off: %llu, count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
                         " prev_pg %p [pri %lu ind %lu] off %llu\n",
                         i, page_count,
                         pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                         pg_prev->pg, page_private(pg_prev->pg),
                         pg_prev->pg->index, pg_prev->off);
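                /*
                 * The assertions above enforce that the array describes one
                 * contiguous, strictly ascending byte range: only the first
                 * page may start mid-page, only the last may end early, and
                 * every interior page covers a full PAGE_SIZE.
                 */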
                LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                        (pg->flag & OBD_BRW_SRVLOCK));
                if (short_io_size != 0 && opc == OST_WRITE) {
                        unsigned char *ptr = kmap_atomic(pg->pg);

                        LASSERT(short_io_size >= requested_nob + pg->count);
                        memcpy(short_io_buf + requested_nob,
                               ptr + poff, pg->count);
                        kunmap_atomic(ptr);
                } else if (short_io_size == 0) {
                        desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
                                                         pg->count);

                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf->rnb_len += pg->count;
                } else {
                        niobuf->rnb_offset = pg->off;
                        niobuf->rnb_len = pg->count;
                        niobuf->rnb_flags = pg->flag;
                }
                pg_prev = pg;

        LASSERTF((void *)(niobuf - niocount) ==
                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);

        if (resend) {
                if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                        body->oa.o_valid |= OBD_MD_FLFLAGS;
                        body->oa.o_flags = 0;
                }
                body->oa.o_flags |= OBD_FL_RECOV_RESEND;
        }

        if (osc_should_shrink_grant(cli))
                osc_shrink_grant_local(cli, &body->oa);

        /* size[REQ_REC_OFF] still sizeof (*body) */
        if (opc == OST_WRITE) {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        /* store cl_cksum_type in a local variable since
                         * it can be changed via lprocfs */
                        enum cksum_types cksum_type = cli->cl_cksum_type;

                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;

                        body->oa.o_flags |= obd_cksum_type_pack(obd_name,
                                                                cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;

                        rc = osc_checksum_bulk_rw(obd_name, cksum_type,
                                                  requested_nob, page_count,
                                                  pga, OST_WRITE,
                                                  &body->oa.o_cksum);
                                CDEBUG(D_PAGE, "failed to checksum, rc = %d\n",
                                       rc);

                        CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                               body->oa.o_cksum);

                        /* save this in 'oa', too, for later checking */
                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        oa->o_flags |= obd_cksum_type_pack(obd_name,
                                                           cksum_type);
                } else {
                        /* clear out the checksum flag, in case this is a
                         * resend but cl_checksum is no longer set. b=11238 */
                        oa->o_valid &= ~OBD_MD_FLCKSUM;

                oa->o_cksum = body->oa.o_cksum;
                /* 1 RC per niobuf */
                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
                                     sizeof(__u32) * niocount);
        } else {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;

                        body->oa.o_flags |= obd_cksum_type_pack(obd_name,
                                                                cli->cl_cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;

        /* Client cksum has already been copied to wire obdo in previous
         * lustre_set_wire_obdo(), and in the case a bulk-read is being
         * resent due to cksum error, this will allow Server to
         * check+dump pages on its side */

        ptlrpc_request_set_replen(req);

        aa = ptlrpc_req_async_args(aa, req);

        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = niocount;
        aa->aa_page_count = page_count;

        INIT_LIST_HEAD(&aa->aa_oaps);

        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
               req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
               niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);

        ptlrpc_req_finished(req);
char dbgcksum_file_name[PATH_MAX];

static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
                                struct brw_page **pga, __u32 server_cksum,
                                __u32 client_cksum)
        /* We only keep the dump of pages on the first error for the same
         * range in file/fid, not during the resends/retries. */
        snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
                 "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
                 (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
                  libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
                 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
                 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
                 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
                 pga[0]->off,
                 pga[page_count-1]->off + pga[page_count-1]->count - 1,
                 client_cksum, server_cksum);
        filp = filp_open(dbgcksum_file_name,
                         O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);

                CDEBUG(D_INFO, "%s: can't open to dump pages with "
                       "checksum error: rc = %d\n", dbgcksum_file_name,
                       rc);
                CERROR("%s: can't open to dump pages with checksum "
                       "error: rc = %d\n", dbgcksum_file_name, rc);

        for (i = 0; i < page_count; i++) {
                len = pga[i]->count;
                buf = kmap(pga[i]->pg);

                rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
                        CERROR("%s: wanted to write %u but got %d "
                               "error\n", dbgcksum_file_name, len, rc);

                CDEBUG(D_INFO, "%s: wrote %d bytes\n",
                       dbgcksum_file_name, rc);

        rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
                CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
        filp_close(filp, NULL);
static int
check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
                     __u32 client_cksum, __u32 server_cksum,
                     struct osc_brw_async_args *aa)
        const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
        enum cksum_types cksum_type;
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        u32 new_cksum;
        const char *msg;

        if (server_cksum == client_cksum) {
                CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);

        if (aa->aa_cli->cl_checksum_dump)
                dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
                                    server_cksum, client_cksum);

        cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
                                           oa->o_flags : 0);

        switch (cksum_type) {
        case OBD_CKSUM_T10IP512:
                fn = obd_dif_ip_fn;
                sector_size = 512;
                break;
        case OBD_CKSUM_T10IP4K:
                fn = obd_dif_ip_fn;
                sector_size = 4096;
                break;
        case OBD_CKSUM_T10CRC512:
                fn = obd_dif_crc_fn;
                sector_size = 512;
                break;
        case OBD_CKSUM_T10CRC4K:
                fn = obd_dif_crc_fn;
                sector_size = 4096;
                break;
        default:
                break;
        }

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
                                             aa->aa_page_count, aa->aa_ppga,
                                             OST_WRITE, fn, sector_size,
                                             &new_cksum);
        else
                rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
                                       aa->aa_ppga, OST_WRITE, cksum_type,
                                       &new_cksum);

        if (rc < 0)
                msg = "failed to calculate the client write checksum";
        else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
                msg = "the server did not use the checksum type specified in "
                      "the original request - likely a protocol problem";
        else if (new_cksum == server_cksum)
                msg = "changed on the client after we checksummed it - "
                      "likely false positive due to mmap IO (bug 11742)";
        else if (new_cksum == client_cksum)
                msg = "changed in transit before arrival at OST";
        else
                msg = "changed in transit AND doesn't match the original - "
                      "likely false positive due to mmap IO (bug 11742)";

        LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
                           DFID " object "DOSTID" extent [%llu-%llu], original "
                           "client csum %x (type %x), server csum %x (type %x),"
                           " client csum now %x\n",
                           obd_name, msg, libcfs_nid2str(peer->nid),
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
                           POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
                           aa->aa_ppga[aa->aa_page_count - 1]->off +
                           aa->aa_ppga[aa->aa_page_count-1]->count - 1,
                           client_cksum,
                           obd_cksum_type_unpack(aa->aa_oa->o_flags),
                           server_cksum, cksum_type, new_cksum);
/* Note rc enters this function as number of bytes transferred */
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
        struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
        struct client_obd *cli = aa->aa_cli;
        const char *obd_name = cli->cl_import->imp_obd->obd_name;
        const struct lnet_process_id *peer =
                &req->rq_import->imp_connection->c_peer;
        struct ost_body *body;
        u32 client_cksum = 0;
        struct inode *inode;
        unsigned int blockbits = 0, blocksize = 0;

        if (rc < 0 && rc != -EDQUOT) {
                DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);

        LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
                DEBUG_REQ(D_INFO, req, "cannot unpack body");

        /* set/clear over quota flag for a uid/gid/projid */
        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
            body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
                unsigned qid[LL_MAXQUOTAS] = {
                        body->oa.o_uid, body->oa.o_gid,
                        body->oa.o_projid };
                CDEBUG(D_QUOTA,
                       "setdq for [%u %u %u] with valid %#llx, flags %x\n",
                       body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
                       body->oa.o_valid, body->oa.o_flags);
                osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
                                body->oa.o_flags);

        osc_update_grant(cli, body);

        if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
                client_cksum = aa->aa_oa->o_cksum; /* save for later */

        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
                if (rc > 0) {
                        CERROR("%s: unexpected positive size %d\n",
                               obd_name, rc);

                if (req->rq_bulk != NULL &&
                    sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
                        GOTO(out, rc = -EAGAIN);

                if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
                    check_write_checksum(&body->oa, peer, client_cksum,
                                         body->oa.o_cksum, aa))
                        GOTO(out, rc = -EAGAIN);

                rc = check_write_rcs(req, aa->aa_requested_nob,
                                     aa->aa_nio_count, aa->aa_page_count,
                                     aa->aa_ppga);
                GOTO(out, rc);

        /* The rest of this function executes only for OST_READs */

        if (req->rq_bulk == NULL) {
                rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
                                          RCL_SERVER);
                LASSERT(rc == req->rq_status);
        } else {
                /* if unwrap_bulk failed, return -EAGAIN to retry */
                rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
        }
                GOTO(out, rc = -EAGAIN);

        if (rc > aa->aa_requested_nob) {
                CERROR("%s: unexpected size %d, requested %d\n", obd_name,
                       rc, aa->aa_requested_nob);

        if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
                CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
                       rc, req->rq_bulk->bd_nob_transferred);
        if (req->rq_bulk == NULL) {
                /* short io */
                int nob, pg_count, i = 0;
                unsigned char *buf;

                CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
                pg_count = aa->aa_page_count;
                buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
                                                   rc);
                nob = rc;
                while (nob > 0 && pg_count > 0) {
                        unsigned char *ptr;
                        int count = aa->aa_ppga[i]->count > nob ?
                                    nob : aa->aa_ppga[i]->count;

                        CDEBUG(D_CACHE, "page %p count %d\n",
                               aa->aa_ppga[i]->pg, count);
                        ptr = kmap_atomic(aa->aa_ppga[i]->pg);
                        memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
                               count);
                        kunmap_atomic((void *) ptr);

        if (rc < aa->aa_requested_nob)
                handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);

        if (body->oa.o_valid & OBD_MD_FLCKSUM) {
                static int cksum_counter;
                u32 server_cksum = body->oa.o_cksum;
                enum cksum_types cksum_type;
                u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
                              body->oa.o_flags : 0;

                cksum_type = obd_cksum_type_unpack(o_flags);
                rc = osc_checksum_bulk_rw(obd_name, cksum_type, rc,
                                          aa->aa_page_count, aa->aa_ppga,
                                          OST_READ, &client_cksum);

                if (req->rq_bulk != NULL &&
                    peer->nid != req->rq_bulk->bd_sender) {
                        router = libcfs_nid2str(req->rq_bulk->bd_sender);

                if (server_cksum != client_cksum) {
                        struct ost_body *clbody;
                        u32 page_count = aa->aa_page_count;

                        clbody = req_capsule_client_get(&req->rq_pill,
                                                        &RMF_OST_BODY);
                        if (cli->cl_checksum_dump)
                                dump_all_bulk_pages(&clbody->oa, page_count,
                                                    aa->aa_ppga, server_cksum,
                                                    client_cksum);

                        LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
                                           "%s%s%s inode "DFID" object "DOSTID
                                           " extent [%llu-%llu], client %x, "
                                           "server %x, cksum_type %x\n",
                                           obd_name,
                                           libcfs_nid2str(peer->nid),
                                           clbody->oa.o_valid & OBD_MD_FLFID ?
                                                clbody->oa.o_parent_seq : 0ULL,
                                           clbody->oa.o_valid & OBD_MD_FLFID ?
                                                clbody->oa.o_parent_oid : 0,
                                           clbody->oa.o_valid & OBD_MD_FLFID ?
                                                clbody->oa.o_parent_ver : 0,
                                           POSTID(&body->oa.o_oi),
                                           aa->aa_ppga[0]->off,
                                           aa->aa_ppga[page_count-1]->off +
                                           aa->aa_ppga[page_count-1]->count - 1,
                                           client_cksum, server_cksum,
                                           cksum_type);
                        aa->aa_oa->o_cksum = client_cksum;

                        CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);

        } else if (unlikely(client_cksum)) {
                static int cksum_missed;

                if ((cksum_missed & (-cksum_missed)) == cksum_missed)
                        CERROR("%s: checksum %u requested from %s but not sent\n",
                               obd_name, cksum_missed,
                               libcfs_nid2str(peer->nid));
        inode = page2inode(aa->aa_ppga[0]->pg);
        if (inode == NULL) {
                /* Try to get reference to inode from cl_page if we are
                 * dealing with direct IO, as handled pages are not
                 * actual page cache pages.
                 */
                struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);

                inode = oap2cl_page(oap)->cp_inode;
                        blockbits = inode->i_blkbits;
                        blocksize = 1 << blockbits;

        if (inode && IS_ENCRYPTED(inode)) {
                if (!llcrypt_has_encryption_key(inode)) {
                        CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);

                for (idx = 0; idx < aa->aa_page_count; idx++) {
                        struct brw_page *pg = aa->aa_ppga[idx];
                        unsigned int offs = 0;

                        while (offs < PAGE_SIZE) {
                                /* do not decrypt if page is all 0s */
                                if (memchr_inv(page_address(pg->pg) + offs, 0,
                                               LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
                                        /* if page is empty forward info to
                                         * upper layers (ll_io_zero_page) by
                                         * clearing PagePrivate2
                                         */
                                        ClearPagePrivate2(pg->pg);

                                if (directio) {
                                        /* This is the direct IO case. Directly
                                         * call the decrypt function that takes
                                         * inode as input parameter. Page does
                                         * not need to be locked.
                                         */
                                        u64 lblk_num =
                                                ((u64)(pg->off >> PAGE_SHIFT) <<
                                                 (PAGE_SHIFT - blockbits)) +
                                                (offs >> blockbits);

                                        for (i = offs;
                                             i < offs +
                                                 LUSTRE_ENCRYPTION_UNIT_SIZE;
                                             i += blocksize, lblk_num++) {
                                                llcrypt_decrypt_block_inplace(

                                        rc = llcrypt_decrypt_pagecache_blocks(
                                                LUSTRE_ENCRYPTION_UNIT_SIZE,

                                offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
2194 static int osc_brw_redo_request(struct ptlrpc_request *request,
2195 struct osc_brw_async_args *aa, int rc)
2196 {
2197 struct ptlrpc_request *new_req;
2198 struct osc_brw_async_args *new_aa;
2199 struct osc_async_page *oap;
2200 ENTRY;
2201 
2202 /* The below message is checked in replay-ost-single.sh test_8ae */
2203 DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2204 "redo for recoverable error %d", rc);
2205 
2206 rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2207 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2208 aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2209 aa->aa_ppga, &new_req, 1);
2210 if (rc)
2211 RETURN(rc);
2212 
2213 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2214 if (oap->oap_request != NULL) {
2215 LASSERTF(request == oap->oap_request,
2216 "request %p != oap_request %p\n",
2217 request, oap->oap_request);
2218 }
2219 }
2220 /*
2221  * New request takes over pga and oaps from old request.
2222  * Note that copying a list_head doesn't work, need to move it...
2223  */
2224 aa->aa_resends++;
2225 new_req->rq_interpret_reply = request->rq_interpret_reply;
2226 new_req->rq_async_args = request->rq_async_args;
2227 new_req->rq_commit_cb = request->rq_commit_cb;
2228 /* cap resend delay to the current request timeout, this is similar to
2229  * what ptlrpc does (see after_reply()) */
2230 if (aa->aa_resends > new_req->rq_timeout)
2231 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2232 else
2233 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
2234 new_req->rq_generation_set = 1;
2235 new_req->rq_import_generation = request->rq_import_generation;
2236 
2237 new_aa = ptlrpc_req_async_args(new_aa, new_req);
2238 
2239 INIT_LIST_HEAD(&new_aa->aa_oaps);
2240 list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2241 INIT_LIST_HEAD(&new_aa->aa_exts);
2242 list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2243 new_aa->aa_resends = aa->aa_resends;
2244 
2245 list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2246 if (oap->oap_request) {
2247 ptlrpc_req_finished(oap->oap_request);
2248 oap->oap_request = ptlrpc_request_addref(new_req);
2249 }
2250 }
2251 
2252 /* XXX: This code will run into problems if we ever want to add a
2253  * series of BRW RPCs into a self-defined ptlrpc_request_set
2254  * and wait for all of them to finish. We should inherit the request
2255  * set from the old request. */
2256 ptlrpcd_add_req(new_req);
2257 
2258 DEBUG_REQ(D_INFO, new_req, "new request");
2259 RETURN(0);
2260 }
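/*
 * The resend delay above grows linearly with the retry count and is
 * clamped at the request timeout. A worked example with hypothetical
 * values: with rq_timeout = 30 s, the 3rd resend is sent 3 s in the
 * future, the 10th resend 10 s, and anything past the 30th resend is
 * capped at 30 s, mirroring what after_reply() does in ptlrpc.
 */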
2262 /*
2263  * ugh, we want disk allocation on the target to happen in offset order. We'll
2264  * follow Sedgewick's advice and stick to the dead-simple shellsort -- it'll do
2265  * fine for our small page arrays and doesn't require allocation. It's an
2266  * insertion sort that swaps elements that are strides apart, shrinking the
2267  * stride down until it's '1' and the array is sorted.
2268  */
2269 static void sort_brw_pages(struct brw_page **array, int num)
2270 {
2271 int stride;
2272 struct brw_page *tmp;
2273 int i;
2274 int j;
2275 
2276 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2277 ;
2278 
2279 do {
2280 stride /= 3;
2281 for (i = stride ; i < num ; i++) {
2282 tmp = array[i];
2283 j = i;
2284 while (j >= stride && array[j - stride]->off > tmp->off) {
2285 array[j] = array[j - stride];
2286 j -= stride;
2287 }
2288 array[j] = tmp;
2289 }
2290 } while (stride > 1);
2291 }
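/*
 * A minimal sketch of the same Knuth-sequence shellsort on a plain
 * integer array, kept purely for illustration (shellsort_ints is a
 * hypothetical helper, not part of the OSC code path). The gap
 * sequence grows by h = 3h + 1 (1, 4, 13, 40, ...) and shrinks by
 * h /= 3, so the final pass with stride 1 is a plain insertion sort.
 */
static inline void shellsort_ints(int *a, int n)
{
	int stride, i, j, tmp;

	/* grow the stride past n using the 3h+1 sequence */
	for (stride = 1; stride < n; stride = (stride * 3) + 1)
		;
	do {
		stride /= 3;
		/* gapped insertion sort for the current stride */
		for (i = stride; i < n; i++) {
			tmp = a[i];
			for (j = i; j >= stride && a[j - stride] > tmp;
			     j -= stride)
				a[j] = a[j - stride];
			a[j] = tmp;
		}
	} while (stride > 1);
}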
2293 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2294 {
2295 LASSERT(ppga != NULL);
2296 OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2297 }
2299 static int brw_interpret(const struct lu_env *env,
2300 struct ptlrpc_request *req, void *args, int rc)
2301 {
2302 struct osc_brw_async_args *aa = args;
2303 struct osc_extent *ext;
2304 struct osc_extent *tmp;
2305 struct client_obd *cli = aa->aa_cli;
2306 unsigned long transferred = 0;
2307 
2308 ENTRY;
2309 
2310 rc = osc_brw_fini_request(req, rc);
2311 CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2313 /* restore clear text pages */
2314 osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2316 /*
2317  * When server returns -EINPROGRESS, client should always retry
2318  * regardless of the number of times the bulk was resent already.
2319  */
2320 if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2321 if (req->rq_import_generation !=
2322 req->rq_import->imp_generation) {
2323 CDEBUG(D_HA, "%s: resend cross eviction for object: "
2324 ""DOSTID", rc = %d.\n",
2325 req->rq_import->imp_obd->obd_name,
2326 POSTID(&aa->aa_oa->o_oi), rc);
2327 } else if (rc == -EINPROGRESS ||
2328 client_should_resend(aa->aa_resends, aa->aa_cli)) {
2329 rc = osc_brw_redo_request(req, aa, rc);
2330 } else {
2331 CERROR("%s: too many resent retries for object: "
2332 "%llu:%llu, rc = %d.\n",
2333 req->rq_import->imp_obd->obd_name,
2334 POSTID(&aa->aa_oa->o_oi), rc);
2335 }
2336 
2337 if (rc == 0)
2338 RETURN(0);
2339 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2340 rc = -EIO;
2341 }
2342 
2343 if (rc == 0) {
2344 struct obdo *oa = aa->aa_oa;
2345 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2346 unsigned long valid = 0;
2347 struct cl_object *obj;
2348 struct osc_async_page *last;
2350 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2351 obj = osc2cl(last->oap_obj);
2353 cl_object_attr_lock(obj);
2354 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2355 attr->cat_blocks = oa->o_blocks;
2356 valid |= CAT_BLOCKS;
2357 }
2358 if (oa->o_valid & OBD_MD_FLMTIME) {
2359 attr->cat_mtime = oa->o_mtime;
2360 valid |= CAT_MTIME;
2361 }
2362 if (oa->o_valid & OBD_MD_FLATIME) {
2363 attr->cat_atime = oa->o_atime;
2364 valid |= CAT_ATIME;
2365 }
2366 if (oa->o_valid & OBD_MD_FLCTIME) {
2367 attr->cat_ctime = oa->o_ctime;
2368 valid |= CAT_CTIME;
2369 }
2370 
2371 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2372 struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2373 loff_t last_off = last->oap_count + last->oap_obj_off +
2374 last->oap_page_off;
2375 
2376 /* Change file size if this is an out of quota or
2377  * direct IO write and it extends the file size */
2378 if (loi->loi_lvb.lvb_size < last_off) {
2379 attr->cat_size = last_off;
2380 valid |= CAT_SIZE;
2381 }
2382 /* Extend KMS if it's not a lockless write */
2383 if (loi->loi_kms < last_off &&
2384 oap2osc_page(last)->ops_srvlock == 0) {
2385 attr->cat_kms = last_off;
2386 valid |= CAT_KMS;
2387 }
2388 }
2389 
2390 if (valid != 0)
2391 cl_object_attr_update(env, obj, attr, valid);
2392 cl_object_attr_unlock(obj);
2393 }
2394 OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2395 aa->aa_oa = NULL;
2397 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2398 osc_inc_unstable_pages(req);
2400 list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2401 list_del_init(&ext->oe_link);
2402 osc_extent_finish(env, ext, 1,
2403 rc && req->rq_no_delay ? -EAGAIN : rc);
2404 }
2405 LASSERT(list_empty(&aa->aa_exts));
2406 LASSERT(list_empty(&aa->aa_oaps));
2407 
2408 transferred = (req->rq_bulk == NULL ? /* short io */
2409 aa->aa_requested_nob :
2410 req->rq_bulk->bd_nob_transferred);
2411 
2412 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2413 ptlrpc_lprocfs_brw(req, transferred);
2414 
2415 spin_lock(&cli->cl_loi_list_lock);
2416 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2417  * is called so we know whether to go to sync BRWs or wait for more
2418  * RPCs to complete */
2419 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2420 cli->cl_w_in_flight--;
2421 else
2422 cli->cl_r_in_flight--;
2423 osc_wake_cache_waiters(cli);
2424 spin_unlock(&cli->cl_loi_list_lock);
2425 
2426 osc_io_unplug(env, cli, NULL);
2427 RETURN(rc);
2428 }
2430 static void brw_commit(struct ptlrpc_request *req)
2431 {
2432 /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2433  * this called via the rq_commit_cb, we need to ensure
2434  * osc_dec_unstable_pages is still called. Otherwise unstable
2435  * pages may be leaked. */
2436 spin_lock(&req->rq_lock);
2437 if (likely(req->rq_unstable)) {
2438 req->rq_unstable = 0;
2439 spin_unlock(&req->rq_lock);
2440 
2441 osc_dec_unstable_pages(req);
2442 } else {
2443 req->rq_committed = 1;
2444 spin_unlock(&req->rq_lock);
2445 }
2446 }
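/*
 * In outline, both possible interleavings stay balanced: if
 * osc_inc_unstable_pages() (via osc_extent_finish) wins the race,
 * rq_unstable is already set and this callback performs the matching
 * decrement; if the commit callback wins, it records rq_committed so
 * that the increment side can observe the commit and balance the count
 * itself. Either way osc_dec_unstable_pages() runs exactly once.
 */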
2448 /**
2449  * Build an RPC by the list of extent @ext_list. The caller must ensure
2450  * that the total pages in this list are NOT over max pages per RPC.
2451  * Extents in the list must be in OES_RPC state.
2452  */
2453 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2454 struct list_head *ext_list, int cmd)
2456 struct ptlrpc_request *req = NULL;
2457 struct osc_extent *ext;
2458 struct brw_page **pga = NULL;
2459 struct osc_brw_async_args *aa = NULL;
2460 struct obdo *oa = NULL;
2461 struct osc_async_page *oap;
2462 struct osc_object *obj = NULL;
2463 struct cl_req_attr *crattr = NULL;
2464 loff_t starting_offset = OBD_OBJECT_EOF;
2465 loff_t ending_offset = 0;
2466 /* '1' for consistency with code that checks !mpflag to restore */
2467 unsigned int mpflag = 1;
2468 int mem_tight = 0;
2469 int page_count = 0;
2470 bool soft_sync = false;
2471 bool ndelay = false;
2472 int i;
2473 int grant = 0;
2474 int rc;
2475 __u32 layout_version = 0;
2476 LIST_HEAD(rpc_list);
2477 struct ost_body *body;
2478 ENTRY;
2479 LASSERT(!list_empty(ext_list));
2481 /* add pages into rpc_list to build BRW rpc */
2482 list_for_each_entry(ext, ext_list, oe_link) {
2483 LASSERT(ext->oe_state == OES_RPC);
2484 mem_tight |= ext->oe_memalloc;
2485 grant += ext->oe_grants;
2486 page_count += ext->oe_nr_pages;
2487 layout_version = max(layout_version, ext->oe_layout_version);
2488 if (obj == NULL)
2489 obj = ext->oe_obj;
2490 }
2491 
2492 soft_sync = osc_over_unstable_soft_limit(cli);
2493 if (mem_tight)
2494 mpflag = memalloc_noreclaim_save();
2495 
2496 OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2497 if (pga == NULL)
2498 GOTO(out, rc = -ENOMEM);
2499 
2500 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2501 if (oa == NULL)
2502 GOTO(out, rc = -ENOMEM);
2503 
2504 i = 0;
2505 list_for_each_entry(ext, ext_list, oe_link) {
2506 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2507 if (mem_tight)
2508 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2509 if (soft_sync)
2510 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2511 pga[i] = &oap->oap_brw_page;
2512 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2513 i++;
2514 
2515 list_add_tail(&oap->oap_rpc_item, &rpc_list);
2516 if (starting_offset == OBD_OBJECT_EOF ||
2517 starting_offset > oap->oap_obj_off)
2518 starting_offset = oap->oap_obj_off;
2519 else
2520 LASSERT(oap->oap_page_off == 0);
2521 if (ending_offset < oap->oap_obj_off + oap->oap_count)
2522 ending_offset = oap->oap_obj_off +
2523 oap->oap_count;
2524 else
2525 LASSERT(oap->oap_page_off + oap->oap_count ==
2526 PAGE_SIZE);
2527 }
2528 }
2532 /* first page in the list */
2533 oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
2535 crattr = &osc_env_info(env)->oti_req_attr;
2536 memset(crattr, 0, sizeof(*crattr));
2537 crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2538 crattr->cra_flags = ~0ULL;
2539 crattr->cra_page = oap2cl_page(oap);
2540 crattr->cra_oa = oa;
2541 cl_req_attr_set(env, osc2cl(obj), crattr);
2543 if (cmd == OBD_BRW_WRITE) {
2544 oa->o_grant_used = grant;
2545 if (layout_version > 0) {
2546 CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2547 PFID(&oa->o_oi.oi_fid), layout_version);
2548 
2549 oa->o_layout_version = layout_version;
2550 oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2551 }
2552 }
2553 
2554 sort_brw_pages(pga, page_count);
2555 rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2556 if (rc != 0) {
2557 CERROR("prep_req failed: %d\n", rc);
2558 GOTO(out, rc);
2559 }
2560 
2561 req->rq_commit_cb = brw_commit;
2562 req->rq_interpret_reply = brw_interpret;
2563 req->rq_memalloc = mem_tight != 0;
2564 oap->oap_request = ptlrpc_request_addref(req);
2565 if (ndelay) {
2566 req->rq_no_resend = req->rq_no_delay = 1;
2567 /* probably set a shorter timeout value to handle
2568  * ETIMEDOUT in brw_interpret() correctly. */
2569 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2570 }
2572 /* Need to update the timestamps after the request is built in case
2573 * we race with setattr (locally or in queue at OST). If OST gets
2574 * later setattr before earlier BRW (as determined by the request xid),
2575 * the OST will not use BRW timestamps. Sadly, there is no obvious
2576 * way to do this in a single call. bug 10150 */
2577 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2578 crattr->cra_oa = &body->oa;
2579 crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2580 cl_req_attr_set(env, osc2cl(obj), crattr);
2581 lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2583 aa = ptlrpc_req_async_args(aa, req);
2584 INIT_LIST_HEAD(&aa->aa_oaps);
2585 list_splice_init(&rpc_list, &aa->aa_oaps);
2586 INIT_LIST_HEAD(&aa->aa_exts);
2587 list_splice_init(ext_list, &aa->aa_exts);
2589 spin_lock(&cli->cl_loi_list_lock);
2590 starting_offset >>= PAGE_SHIFT;
2591 if (cmd == OBD_BRW_READ) {
2592 cli->cl_r_in_flight++;
2593 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2594 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2595 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2596 starting_offset + 1);
2597 } else {
2598 cli->cl_w_in_flight++;
2599 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2600 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2601 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2602 starting_offset + 1);
2603 }
2604 spin_unlock(&cli->cl_loi_list_lock);
2605 
2606 DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2607 page_count, aa, cli->cl_r_in_flight,
2608 cli->cl_w_in_flight);
2609 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2610 
2611 ptlrpcd_add_req(req);
2612 rc = 0;
2613 EXIT;
2614 
2615 out:
2616 if (mem_tight)
2617 memalloc_noreclaim_restore(mpflag);
2618 
2619 if (rc != 0) {
2620 LASSERT(req == NULL);
2621 
2622 if (oa)
2623 OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2624 if (pga) {
2625 osc_release_bounce_pages(pga, page_count);
2626 osc_release_ppga(pga, page_count);
2627 }
2628 /* this should happen rarely and is pretty bad, it makes the
2629  * pending list not follow the dirty order */
2630 while (!list_empty(ext_list)) {
2631 ext = list_entry(ext_list->next, struct osc_extent,
2632 oe_link);
2633 list_del_init(&ext->oe_link);
2634 osc_extent_finish(env, ext, 0, rc);
2635 }
2636 }
2637 RETURN(rc);
2638 }
2640 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2641 {
2642 int set = 0;
2643 
2644 LASSERT(lock != NULL);
2645 
2646 lock_res_and_lock(lock);
2647 
2648 if (lock->l_ast_data == NULL)
2649 lock->l_ast_data = data;
2650 if (lock->l_ast_data == data)
2651 set = 1;
2652 
2653 unlock_res_and_lock(lock);
2654 
2655 return set;
2656 }
2658 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2659 void *cookie, struct lustre_handle *lockh,
2660 enum ldlm_mode mode, __u64 *flags, bool speculative,
2661 int errcode)
2662 {
2663 bool intent = *flags & LDLM_FL_HAS_INTENT;
2664 int rc;
2665 ENTRY;
2666 
2667 /* The request was created before ldlm_cli_enqueue call. */
2668 if (intent && errcode == ELDLM_LOCK_ABORTED) {
2669 struct ldlm_reply *rep;
2670 
2671 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2672 LASSERT(rep != NULL);
2673 
2674 rep->lock_policy_res1 =
2675 ptlrpc_status_ntoh(rep->lock_policy_res1);
2676 if (rep->lock_policy_res1)
2677 errcode = rep->lock_policy_res1;
2678 if (!speculative)
2679 *flags |= LDLM_FL_LVB_READY;
2680 } else if (errcode == ELDLM_OK) {
2681 *flags |= LDLM_FL_LVB_READY;
2682 }
2683 
2684 /* Call the update callback. */
2685 rc = (*upcall)(cookie, lockh, errcode);
2686 
2687 /* release the reference taken in ldlm_cli_enqueue() */
2688 if (errcode == ELDLM_LOCK_MATCHED)
2689 errcode = ELDLM_OK;
2690 if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2691 ldlm_lock_decref(lockh, mode);
2692 
2693 RETURN(rc);
2694 }
2696 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2697 void *args, int rc)
2698 {
2699 struct osc_enqueue_args *aa = args;
2700 struct ldlm_lock *lock;
2701 struct lustre_handle *lockh = &aa->oa_lockh;
2702 enum ldlm_mode mode = aa->oa_mode;
2703 struct ost_lvb *lvb = aa->oa_lvb;
2704 __u32 lvb_len = sizeof(*lvb);
2705 __u64 flags = 0;
2706 struct ldlm_enqueue_info einfo = {
2707 .ei_type = aa->oa_type,
2708 .ei_mode = mode,
2709 };
2710 
2711 ENTRY;
2712 
2713 /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2714  * be valid. */
2715 lock = ldlm_handle2lock(lockh);
2716 LASSERTF(lock != NULL,
2717 "lockh %#llx, req %p, aa %p - client evicted?\n",
2718 lockh->cookie, req, aa);
2720 /* Take an additional reference so that a blocking AST that
2721 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
2722 * to arrive after an upcall has been executed by
2723 * osc_enqueue_fini(). */
2724 ldlm_lock_addref(lockh, mode);
2726 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2727 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2729 /* Let CP AST to grant the lock first. */
2730 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2732 if (aa->oa_speculative) {
2733 LASSERT(aa->oa_lvb == NULL);
2734 LASSERT(aa->oa_flags == NULL);
2735 aa->oa_flags = &flags;
2736 }
2737 
2738 /* Complete obtaining the lock procedure. */
2739 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2740 lvb, lvb_len, lockh, rc);
2741 /* Complete osc stuff. */
2742 rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2743 aa->oa_flags, aa->oa_speculative, rc);
2744 
2745 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2746 
2747 ldlm_lock_decref(lockh, mode);
2748 LDLM_LOCK_PUT(lock);
2749 
2750 RETURN(rc);
2751 }
2752 /* When enqueuing asynchronously, locks are not ordered, we can obtain a lock
2753 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
2754 * other synchronous requests, however keeping some locks and trying to obtain
2755 * others may take a considerable amount of time in a case of ost failure; and
2756 * when other sync requests do not get released lock from a client, the client
2757  * is evicted from the cluster -- such scenarios make life difficult, so
2758 * release locks just after they are obtained. */
2759 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2760 __u64 *flags, union ldlm_policy_data *policy,
2761 struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2762 void *cookie, struct ldlm_enqueue_info *einfo,
2763 struct ptlrpc_request_set *rqset, int async,
2764 int speculative)
2765 {
2766 struct obd_device *obd = exp->exp_obd;
2767 struct lustre_handle lockh = { 0 };
2768 struct ptlrpc_request *req = NULL;
2769 int intent = *flags & LDLM_FL_HAS_INTENT;
2770 __u64 match_flags = *flags;
2771 enum ldlm_mode mode;
2772 int rc;
2773 ENTRY;
2775 /* Filesystem lock extents are extended to page boundaries so that
2776 * dealing with the page cache is a little smoother. */
2777 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2778 policy->l_extent.end |= ~PAGE_MASK;
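/*
 * Worked example (hypothetical values, 4 KiB pages): PAGE_MASK is
 * ~(PAGE_SIZE - 1), so a requested extent [5000, 9000] is widened to
 * [4096, 12287]: start loses its in-page remainder (5000 & 4095 = 904,
 * 5000 - 904 = 4096) and end is rounded up to the last byte of its
 * page (9000 | 4095 = 12287).
 */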
2780 /* Next, search for already existing extent locks that will cover us */
2781 /* If we're trying to read, we also search for an existing PW lock. The
2782 * VFS and page cache already protect us locally, so lots of readers/
2783 * writers can share a single PW lock.
2784  *
2785  * There are problems with conversion deadlocks, so instead of
2786  * converting a read lock to a write lock, we'll just enqueue a new
2787  * one.
2788  *
2789  * At some point we should cancel the read lock instead of making them
2790 * send us a blocking callback, but there are problems with canceling
2791 * locks out from other users right now, too. */
2792 mode = einfo->ei_mode;
2793 if (einfo->ei_mode == LCK_PR)
2794 mode |= LCK_PW;
2795 /* Normal lock requests must wait for the LVB to be ready before
2796 * matching a lock; speculative lock requests do not need to,
2797 * because they will not actually use the lock. */
2798 if (!speculative)
2799 match_flags |= LDLM_FL_LVB_READY;
2800 if (intent != 0)
2801 match_flags |= LDLM_FL_BLOCK_GRANTED;
2802 mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2803 einfo->ei_type, policy, mode, &lockh);
2804 if (mode) {
2805 struct ldlm_lock *matched;
2806 
2807 if (*flags & LDLM_FL_TEST_LOCK)
2808 RETURN(ELDLM_OK);
2810 matched = ldlm_handle2lock(&lockh);
2811 if (speculative) {
2812 /* This DLM lock request is speculative, and does not
2813  * have an associated IO request. Therefore if there
2814  * is already a DLM lock, it will just inform the
2815  * caller to cancel the request for this stripe. */
2816 lock_res_and_lock(matched);
2817 if (ldlm_extent_equal(&policy->l_extent,
2818 &matched->l_policy_data.l_extent))
2819 rc = -EEXIST;
2820 else
2821 rc = 0;
2822 unlock_res_and_lock(matched);
2823 
2824 ldlm_lock_decref(&lockh, mode);
2825 LDLM_LOCK_PUT(matched);
2826 RETURN(rc);
2827 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2828 *flags |= LDLM_FL_LVB_READY;
2829 
2830 /* We already have a lock, and it's referenced. */
2831 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2832 
2833 ldlm_lock_decref(&lockh, mode);
2834 LDLM_LOCK_PUT(matched);
2835 RETURN(ELDLM_OK);
2836 } else {
2837 ldlm_lock_decref(&lockh, mode);
2838 LDLM_LOCK_PUT(matched);
2839 }
2840 }
2841 
2842 if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2843 RETURN(-ENOLCK);
2845 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2846 *flags &= ~LDLM_FL_BLOCK_GRANTED;
2848 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2849 sizeof(*lvb), LVB_T_OST, &lockh, async);
2850 if (async) {
2851 if (!rc) {
2852 struct osc_enqueue_args *aa;
2853 aa = ptlrpc_req_async_args(aa, req);
2854 aa->oa_exp = exp;
2855 aa->oa_mode = einfo->ei_mode;
2856 aa->oa_type = einfo->ei_type;
2857 lustre_handle_copy(&aa->oa_lockh, &lockh);
2858 aa->oa_upcall = upcall;
2859 aa->oa_cookie = cookie;
2860 aa->oa_speculative = speculative;
2861 if (!speculative) {
2862 aa->oa_flags = flags;
2863 aa->oa_lvb = lvb;
2864 } else {
2865 /* speculative locks are essentially to enqueue
2866  * a DLM lock in advance, so we don't care
2867  * about the result of the enqueue. */
2868 aa->oa_lvb = NULL;
2869 aa->oa_flags = NULL;
2870 }
2871 
2872 req->rq_interpret_reply = osc_enqueue_interpret;
2873 ptlrpc_set_add_req(rqset, req);
2874 }
2875 RETURN(rc);
2876 }
2878 rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2879 flags, speculative, rc);
2880 ptlrpc_req_finished(req);
2881 
2882 RETURN(rc);
2883 }
2884 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2885 struct ldlm_res_id *res_id, enum ldlm_type type,
2886 union ldlm_policy_data *policy, enum ldlm_mode mode,
2887 __u64 *flags, struct osc_object *obj,
2888 struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2889 {
2890 struct obd_device *obd = exp->exp_obd;
2891 __u64 lflags = *flags;
2892 enum ldlm_mode rc;
2893 ENTRY;
2894 
2895 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2896 RETURN(-EIO);
2898 /* Filesystem lock extents are extended to page boundaries so that
2899 * dealing with the page cache is a little smoother */
2900 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2901 policy->l_extent.end |= ~PAGE_MASK;
2903 /* Next, search for already existing extent locks that will cover us */
2904 rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
2905 res_id, type, policy, mode, lockh,
2906 match_flags);
2907 if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2908 RETURN(rc);
2909 
2910 if (obj != NULL) {
2911 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2913 LASSERT(lock != NULL);
2914 if (osc_set_lock_data(lock, obj)) {
2915 lock_res_and_lock(lock);
2916 if (!ldlm_is_lvb_cached(lock)) {
2917 LASSERT(lock->l_ast_data == obj);
2918 osc_lock_lvb_update(env, obj, lock, NULL);
2919 ldlm_set_lvb_cached(lock);
2921 unlock_res_and_lock(lock);
2922 } else {
2923 ldlm_lock_decref(lockh, rc);
2924 rc = 0;
2925 }
2926 LDLM_LOCK_PUT(lock);
2927 }
2928 RETURN(rc);
2929 }
2931 static int osc_statfs_interpret(const struct lu_env *env,
2932 struct ptlrpc_request *req, void *args, int rc)
2933 {
2934 struct osc_async_args *aa = args;
2935 struct obd_statfs *msfs;
2936 
2937 ENTRY;
2938 if (rc == -EBADR)
2939 /*
2940  * The request has in fact never been sent due to issues at
2941  * a higher level (LOV). Exit immediately since the caller
2942  * is aware of the problem and takes care of the clean up.
2943  */
2944 RETURN(rc);
2945 
2946 if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2947 (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
2948 GOTO(out, rc = 0);
2949 
2950 if (rc != 0)
2951 GOTO(out, rc);
2952 
2953 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2954 if (msfs == NULL)
2955 GOTO(out, rc = -EPROTO);
2956 
2957 *aa->aa_oi->oi_osfs = *msfs;
2958 out:
2959 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2960 
2961 RETURN(rc);
2962 }
2964 static int osc_statfs_async(struct obd_export *exp,
2965 struct obd_info *oinfo, time64_t max_age,
2966 struct ptlrpc_request_set *rqset)
2967 {
2968 struct obd_device *obd = class_exp2obd(exp);
2969 struct ptlrpc_request *req;
2970 struct osc_async_args *aa;
2971 int rc = 0;
2972 ENTRY;
2974 if (obd->obd_osfs_age >= max_age) {
2976 "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
2977 obd->obd_name, &obd->obd_osfs,
2978 obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
2979 obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
2980 spin_lock(&obd->obd_osfs_lock);
2981 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
2982 spin_unlock(&obd->obd_osfs_lock);
2983 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
2984 if (oinfo->oi_cb_up)
2985 oinfo->oi_cb_up(oinfo, 0);
2986 
2987 RETURN(0);
2988 }
2990 /* We could possibly pass max_age in the request (as an absolute
2991 * timestamp or a "seconds.usec ago") so the target can avoid doing
2992 * extra calls into the filesystem if that isn't necessary (e.g.
2993 * during mount that would help a bit). Having relative timestamps
2994 * is not so great if request processing is slow, while absolute
2995 * timestamps are not ideal because they need time synchronization. */
2996 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
2997 if (req == NULL)
2998 RETURN(-ENOMEM);
2999 
3000 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3001 if (rc) {
3002 ptlrpc_request_free(req);
3003 RETURN(rc);
3004 }
3005 ptlrpc_request_set_replen(req);
3006 req->rq_request_portal = OST_CREATE_PORTAL;
3007 ptlrpc_at_set_req_timeout(req);
3009 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3010 /* procfs requests must not wait or resend, to avoid deadlock */
3011 req->rq_no_resend = 1;
3012 req->rq_no_delay = 1;
3013 }
3015 req->rq_interpret_reply = osc_statfs_interpret;
3016 aa = ptlrpc_req_async_args(aa, req);
3017 aa->aa_oi = oinfo;
3018 
3019 ptlrpc_set_add_req(rqset, req);
3020 RETURN(0);
3021 }
3023 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3024 struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3025 {
3026 struct obd_device *obd = class_exp2obd(exp);
3027 struct obd_statfs *msfs;
3028 struct ptlrpc_request *req;
3029 struct obd_import *imp, *imp0;
3030 int rc;
3031 ENTRY;
3032 
3033 /* Since the request might also come from lprocfs, we need to
3034  * sync it with client_disconnect_export (Bug15684).
3035  */
3036 with_imp_locked(obd, imp0, rc)
3037 imp = class_import_get(imp0);
3038 if (rc)
3039 RETURN(rc);
3041 /* We could possibly pass max_age in the request (as an absolute
3042 * timestamp or a "seconds.usec ago") so the target can avoid doing
3043 * extra calls into the filesystem if that isn't necessary (e.g.
3044 * during mount that would help a bit). Having relative timestamps
3045 * is not so great if request processing is slow, while absolute
3046 * timestamps are not ideal because they need time synchronization. */
3047 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3049 class_import_put(imp);
3050 
3051 if (req == NULL)
3052 RETURN(-ENOMEM);
3054 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3055 if (rc) {
3056 ptlrpc_request_free(req);
3057 RETURN(rc);
3058 }
3059 ptlrpc_request_set_replen(req);
3060 req->rq_request_portal = OST_CREATE_PORTAL;
3061 ptlrpc_at_set_req_timeout(req);
3063 if (flags & OBD_STATFS_NODELAY) {
3064 /* procfs requests must not wait or resend, to avoid deadlock */
3065 req->rq_no_resend = 1;
3066 req->rq_no_delay = 1;
3067 }
3069 rc = ptlrpc_queue_wait(req);
3070 if (rc)
3071 GOTO(out, rc);
3072 
3073 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3074 if (msfs == NULL)
3075 GOTO(out, rc = -EPROTO);
3076 
3077 *osfs = *msfs;
3078 
3079 EXIT;
3080 out:
3081 ptlrpc_req_finished(req);
3082 return rc;
3083 }
3085 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3086 void *karg, void __user *uarg)
3087 {
3088 struct obd_device *obd = exp->exp_obd;
3089 struct obd_ioctl_data *data = karg;
3090 int rc = 0;
3091 
3092 ENTRY;
3093 if (!try_module_get(THIS_MODULE)) {
3094 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3095 module_name(THIS_MODULE));
3096 return -EINVAL;
3097 }
3098 switch (cmd) {
3099 case OBD_IOC_CLIENT_RECOVER:
3100 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3101 data->ioc_inlbuf1, 0);
3102 if (rc > 0)
3103 rc = 0;
3104 break;
3105 case IOC_OSC_SET_ACTIVE:
3106 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3107 data->ioc_offset);
3108 break;
3109 default:
3110 rc = -ENOTTY;
3111 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3112 obd->obd_name, cmd, current->comm, rc);
3113 break;
3114 }
3115 
3116 module_put(THIS_MODULE);
3117 
3118 return rc;
3119 }
3120 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3121 u32 keylen, void *key, u32 vallen, void *val,
3122 struct ptlrpc_request_set *set)
3123 {
3124 struct ptlrpc_request *req;
3125 struct obd_device *obd = exp->exp_obd;
3126 struct obd_import *imp = class_exp2cliimp(exp);
3127 char *tmp;
3128 int rc;
3129 ENTRY;
3130 
3131 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3132 
3133 if (KEY_IS(KEY_CHECKSUM)) {
3134 if (vallen != sizeof(int))
3135 RETURN(-EINVAL);
3136 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3137 RETURN(0);
3138 }
3139 
3140 if (KEY_IS(KEY_SPTLRPC_CONF)) {
3141 sptlrpc_conf_client_adapt(obd);
3142 RETURN(0);
3143 }
3144 
3145 if (KEY_IS(KEY_FLUSH_CTX)) {
3146 sptlrpc_import_flush_my_ctx(imp);
3147 RETURN(0);
3148 }
3150 if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3151 struct client_obd *cli = &obd->u.cli;
3152 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3153 long target = *(long *)val;
3155 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3156 *(long *)val -= nr;
3157 RETURN(0);
3158 }
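/*
 * Example with hypothetical numbers: with 1000 pages on the client LRU
 * list, nr starts at 500 (half the list); if the caller asked for a
 * target of 200, at most 200 pages are shrunk and the remaining target
 * is passed back to the caller through *val.
 */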
3160 if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3161 RETURN(-EINVAL);
3163 /* We pass all other commands directly to OST. Since nobody calls osc
3164  * methods directly and everybody is supposed to go through LOV, we
3165  * assume lov checked invalid values for us.
3166  * The only recognised values so far are evict_by_nid and mds_conn.
3167  * Even if something bad goes through, we'd get a -EINVAL from OST
3168  * anyway. */
3169 
3170 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3171 &RQF_OST_SET_GRANT_INFO :
3172 &RQF_OBD_SET_INFO);
3173 if (req == NULL)
3174 RETURN(-ENOMEM);
3176 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3177 RCL_CLIENT, keylen);
3178 if (!KEY_IS(KEY_GRANT_SHRINK))
3179 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3180 RCL_CLIENT, vallen);
3181 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3182 if (rc) {
3183 ptlrpc_request_free(req);
3184 RETURN(rc);
3185 }
3187 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3188 memcpy(tmp, key, keylen);
3189 tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3190 &RMF_OST_BODY :
3191 &RMF_SETINFO_VAL);
3192 memcpy(tmp, val, vallen);
3194 if (KEY_IS(KEY_GRANT_SHRINK)) {
3195 struct osc_grant_args *aa;
3196 struct obdo *oa;
3197 
3198 aa = ptlrpc_req_async_args(aa, req);
3199 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3200 if (!oa) {
3201 ptlrpc_req_finished(req);
3202 RETURN(-ENOMEM);
3203 }
3204 *oa = ((struct ost_body *)val)->oa;
3205 aa->aa_oa = oa;
3206 req->rq_interpret_reply = osc_shrink_grant_interpret;
3207 }
3209 ptlrpc_request_set_replen(req);
3210 if (!KEY_IS(KEY_GRANT_SHRINK)) {
3211 LASSERT(set != NULL);
3212 ptlrpc_set_add_req(set, req);
3213 ptlrpc_check_set(NULL, set);
3214 } else {
3215 ptlrpcd_add_req(req);
3216 }
3217 
3218 RETURN(0);
3219 }
3220 EXPORT_SYMBOL(osc_set_info_async);
3222 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3223 struct obd_device *obd, struct obd_uuid *cluuid,
3224 struct obd_connect_data *data, void *localdata)
3225 {
3226 struct client_obd *cli = &obd->u.cli;
3227 
3228 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3229 long lost_grant;
3230 long grant;
3232 spin_lock(&cli->cl_loi_list_lock);
3233 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3234 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3235 /* restore ocd_grant_blkbits as client page bits */
3236 data->ocd_grant_blkbits = PAGE_SHIFT;
3237 grant += cli->cl_dirty_grant;
3238 } else {
3239 grant += cli->cl_dirty_pages << PAGE_SHIFT;
3240 }
3241 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
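/*
 * The GNU "?:" shorthand above expands to
 * grant ? grant : 2 * cli_brw_size(obd): if the client holds no grant
 * at reconnect time, it asks the server for two full-sized BRW RPCs
 * worth of space as a starting point.
 */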
3242 lost_grant = cli->cl_lost_grant;
3243 cli->cl_lost_grant = 0;
3244 spin_unlock(&cli->cl_loi_list_lock);
3246 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3247 " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3248 data->ocd_version, data->ocd_grant, lost_grant);
3249 }
3250 
3251 RETURN(0);
3252 }
3253 EXPORT_SYMBOL(osc_reconnect);
3255 int osc_disconnect(struct obd_export *exp)
3256 {
3257 struct obd_device *obd = class_exp2obd(exp);
3258 int rc;
3259 
3260 rc = client_disconnect_export(exp);
3261 /*
3262  * Initially we put del_shrink_grant before disconnect_export, but it
3263  * causes the following problem if setup (connect) and cleanup
3264  * (disconnect) are tangled together.
3265  *	connect p1			disconnect p2
3266  * ptlrpc_connect_import
3267  *	...............		class_manual_cleanup
3268  *					osc_disconnect
3269  *					del_shrink_grant
3270  * ptlrpc_connect_interrupt
3271  *	init_grant_shrink
3272  * add this client to shrink list
3273  *					cleanup_osc
3274  * Bang! grant shrink thread triggers the shrink. BUG18662
3275  */
3276 osc_del_grant_list(&obd->u.cli);
3277 return rc;
3278 }
3279 EXPORT_SYMBOL(osc_disconnect);
3281 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3282 struct hlist_node *hnode, void *arg)
3283 {
3284 struct lu_env *env = arg;
3285 struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3286 struct ldlm_lock *lock;
3287 struct osc_object *osc = NULL;
3288 ENTRY;
3289 
3290 lock_res(res);
3291 list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3292 if (lock->l_ast_data != NULL && osc == NULL) {
3293 osc = lock->l_ast_data;
3294 cl_object_get(osc2cl(osc));
3295 }
3297 /* clear LDLM_FL_CLEANED flag to make sure it will be canceled
3298 * by the 2nd round of ldlm_namespace_clean() call in
3299 * osc_import_event(). */
3300 ldlm_clear_cleaned(lock);
3301 }
3302 unlock_res(res);
3303 
3304 if (osc != NULL) {
3305 osc_object_invalidate(env, osc);
3306 cl_object_put(env, osc2cl(osc));
3307 }
3308 
3309 RETURN(0);
3310 }
3311 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3313 static int osc_import_event(struct obd_device *obd,
3314 struct obd_import *imp,
3315 enum obd_import_event event)
3316 {
3317 struct client_obd *cli;
3318 int rc = 0;
3319 
3320 ENTRY;
3321 LASSERT(imp->imp_obd == obd);
3322 
3323 switch (event) {
3324 case IMP_EVENT_DISCON: {
3325 cli = &obd->u.cli;
3326 spin_lock(&cli->cl_loi_list_lock);
3327 cli->cl_avail_grant = 0;
3328 cli->cl_lost_grant = 0;
3329 spin_unlock(&cli->cl_loi_list_lock);
3330 break;
3331 }
3332 case IMP_EVENT_INACTIVE: {
3333 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3334 break;
3335 }
3336 case IMP_EVENT_INVALIDATE: {
3337 struct ldlm_namespace *ns = obd->obd_namespace;
3338 struct lu_env *env;
3339 __u16 refcheck;
3341 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3343 env = cl_env_get(&refcheck);
3344 if (!IS_ERR(env)) {
3345 osc_io_unplug(env, &obd->u.cli, NULL);
3347 cfs_hash_for_each_nolock(ns->ns_rs_hash,
3348 osc_ldlm_resource_invalidate,
3349 env, 0);
3350 cl_env_put(env, &refcheck);
3351 
3352 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3353 } else
3354 rc = PTR_ERR(env);
3355 break;
3356 }
3357 case IMP_EVENT_ACTIVE: {
3358 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3359 break;
3360 }
3361 case IMP_EVENT_OCD: {
3362 struct obd_connect_data *ocd = &imp->imp_connect_data;
3364 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3365 osc_init_grant(&obd->u.cli, ocd);
3368 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3369 imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3370 
3371 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3372 break;
3373 }
3374 case IMP_EVENT_DEACTIVATE: {
3375 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3376 break;
3377 }
3378 case IMP_EVENT_ACTIVATE: {
3379 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3380 break;
3381 }
3382 default:
3383 CERROR("Unknown import event %d\n", event);
3384 LBUG();
3385 }
3386 RETURN(rc);
3387 }
3389 /**
3390  * Determine whether the lock can be canceled before replaying the lock
3391  * during recovery, see bug16774 for detailed information.
3392  *
3393  * \retval zero the lock can't be canceled
3394  * \retval other ok to cancel
3395  */
3396 static int osc_cancel_weight(struct ldlm_lock *lock)
3397 {
3398 /*
3399  * Cancel all unused and granted extent lock.
3400  */
3401 if (lock->l_resource->lr_type == LDLM_EXTENT &&
3402 ldlm_is_granted(lock) &&
3403 osc_ldlm_weigh_ast(lock) == 0)
3404 RETURN(1);
3405 
3406 RETURN(0);
3407 }
3409 static int brw_queue_work(const struct lu_env *env, void *data)
3410 {
3411 struct client_obd *cli = data;
3412 
3413 CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3414 
3415 osc_io_unplug(env, cli, NULL);
3416 RETURN(0);
3417 }
3419 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3420 {
3421 struct client_obd *cli = &obd->u.cli;
3422 void *handler;
3423 int rc;
3424 
3425 ENTRY;
3426 
3427 rc = ptlrpcd_addref();
3428 if (rc)
3429 RETURN(rc);
3430 
3431 rc = client_obd_setup(obd, lcfg);
3432 if (rc)
3433 GOTO(out_ptlrpcd, rc);
3436 handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3437 if (IS_ERR(handler))
3438 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3439 cli->cl_writeback_work = handler;
3441 handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3442 if (IS_ERR(handler))
3443 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3444 cli->cl_lru_work = handler;
3446 rc = osc_quota_setup(obd);
3447 if (rc)
3448 GOTO(out_ptlrpcd_work, rc);
3450 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3451 osc_update_next_shrink(cli);
3452 RETURN(rc);
3453 
3454 out_ptlrpcd_work:
3456 if (cli->cl_writeback_work != NULL) {
3457 ptlrpcd_destroy_work(cli->cl_writeback_work);
3458 cli->cl_writeback_work = NULL;
3460 if (cli->cl_lru_work != NULL) {
3461 ptlrpcd_destroy_work(cli->cl_lru_work);
3462 cli->cl_lru_work = NULL;
3464 client_obd_cleanup(obd);
3465 out_ptlrpcd:
3466 ptlrpcd_decref();
3467 RETURN(rc);
3468 }
3469 EXPORT_SYMBOL(osc_setup_common);
3471 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3472 {
3473 struct client_obd *cli = &obd->u.cli;
3474 int adding;
3475 int added;
3476 int req_count;
3477 int rc;
3478 
3479 ENTRY;
3480 
3481 rc = osc_setup_common(obd, lcfg);
3482 if (rc < 0)
3483 RETURN(rc);
3484 
3485 rc = osc_tunables_init(obd);
3486 if (rc)
3487 RETURN(rc);
3488 
3489 /*
3490 * We try to control the total number of requests with a upper limit
3491 * osc_reqpool_maxreqcount. There might be some race which will cause
3492  * over-limit allocation, but it is fine.
3493  */
3494 req_count = atomic_read(&osc_pool_req_count);
3495 if (req_count < osc_reqpool_maxreqcount) {
3496 adding = cli->cl_max_rpcs_in_flight + 2;
3497 if (req_count + adding > osc_reqpool_maxreqcount)
3498 adding = osc_reqpool_maxreqcount - req_count;
3500 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3501 atomic_add(added, &osc_pool_req_count);
3502 }
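/*
 * Worked example with hypothetical numbers: if osc_reqpool_maxreqcount
 * is 64, 60 requests are already pooled and cl_max_rpcs_in_flight is 8,
 * then adding starts at 10 (8 + 2) and is clamped to 4 so the global
 * cap is never exceeded; the atomic counter then records how many
 * requests were actually added.
 */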
3504 ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3506 spin_lock(&osc_shrink_lock);
3507 list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3508 spin_unlock(&osc_shrink_lock);
3509 cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3510 cli->cl_import->imp_idle_debug = D_HA;
3511 
3512 RETURN(0);
3513 }
3515 int osc_precleanup_common(struct obd_device *obd)
3516 {
3517 struct client_obd *cli = &obd->u.cli;
3518 ENTRY;
3519 
3520 /*
3521 * for echo client, export may be on zombie list, wait for
3522 * zombie thread to cull it, because cli.cl_import will be
3523 * cleared in client_disconnect_export():
3524 * class_export_destroy() -> obd_cleanup() ->
3525 * echo_device_free() -> echo_client_cleanup() ->
3526 * obd_disconnect() -> osc_disconnect() ->
3527  * client_disconnect_export()
3528  */
3529 obd_zombie_barrier();
3530 if (cli->cl_writeback_work) {
3531 ptlrpcd_destroy_work(cli->cl_writeback_work);
3532 cli->cl_writeback_work = NULL;
3533 }
3535 if (cli->cl_lru_work) {
3536 ptlrpcd_destroy_work(cli->cl_lru_work);
3537 cli->cl_lru_work = NULL;
3538 }
3540 obd_cleanup_client_import(obd);
3541 RETURN(0);
3542 }
3543 EXPORT_SYMBOL(osc_precleanup_common);
3545 static int osc_precleanup(struct obd_device *obd)
3546 {
3547 ENTRY;
3548 
3549 osc_precleanup_common(obd);
3550 
3551 ptlrpc_lprocfs_unregister_obd(obd);
3552 RETURN(0);
3553 }
3555 int osc_cleanup_common(struct obd_device *obd)
3556 {
3557 struct client_obd *cli = &obd->u.cli;
3558 int rc;
3559 
3560 ENTRY;
3561 
3562 spin_lock(&osc_shrink_lock);
3563 list_del(&cli->cl_shrink_list);
3564 spin_unlock(&osc_shrink_lock);
3565 
3566 /* lru cleanup */
3567 if (cli->cl_cache != NULL) {
3568 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3569 spin_lock(&cli->cl_cache->ccc_lru_lock);
3570 list_del_init(&cli->cl_lru_osc);
3571 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3572 cli->cl_lru_left = NULL;
3573 cl_cache_decref(cli->cl_cache);
3574 cli->cl_cache = NULL;
3575 }
3577 /* free memory of osc quota cache */
3578 osc_quota_cleanup(obd);
3580 rc = client_obd_cleanup(obd);
3581 
3582 ptlrpcd_decref();
3583 RETURN(rc);
3584 }
3585 EXPORT_SYMBOL(osc_cleanup_common);
3587 static const struct obd_ops osc_obd_ops = {
3588 .o_owner = THIS_MODULE,
3589 .o_setup = osc_setup,
3590 .o_precleanup = osc_precleanup,
3591 .o_cleanup = osc_cleanup_common,
3592 .o_add_conn = client_import_add_conn,
3593 .o_del_conn = client_import_del_conn,
3594 .o_connect = client_connect_import,
3595 .o_reconnect = osc_reconnect,
3596 .o_disconnect = osc_disconnect,
3597 .o_statfs = osc_statfs,
3598 .o_statfs_async = osc_statfs_async,
3599 .o_create = osc_create,
3600 .o_destroy = osc_destroy,
3601 .o_getattr = osc_getattr,
3602 .o_setattr = osc_setattr,
3603 .o_iocontrol = osc_iocontrol,
3604 .o_set_info_async = osc_set_info_async,
3605 .o_import_event = osc_import_event,
3606 .o_quotactl = osc_quotactl,
3607 };
3609 LIST_HEAD(osc_shrink_list);
3610 DEFINE_SPINLOCK(osc_shrink_lock);
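/*
 * Newer kernels (HAVE_SHRINKER_COUNT) split cache shrinking into a
 * count_objects/scan_objects pair; older kernels expose a single
 * ->shrink() callback, which the fallback below emulates by scanning
 * first and then returning the remaining object count.
 */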
3612 #ifdef HAVE_SHRINKER_COUNT
3613 static struct shrinker osc_cache_shrinker = {
3614 .count_objects = osc_cache_shrink_count,
3615 .scan_objects = osc_cache_shrink_scan,
3616 .seeks = DEFAULT_SEEKS,
3617 };
3618 #else
3619 static int osc_cache_shrink(struct shrinker *shrinker,
3620 struct shrink_control *sc)
3621 {
3622 (void)osc_cache_shrink_scan(shrinker, sc);
3623 
3624 return osc_cache_shrink_count(shrinker, sc);
3625 }
3627 static struct shrinker osc_cache_shrinker = {
3628 .shrink = osc_cache_shrink,
3629 .seeks = DEFAULT_SEEKS,
3630 };
3631 #endif
3633 static int __init osc_init(void)
3634 {
3635 unsigned int reqpool_size;
3636 unsigned int reqsize;
3637 int rc;
3638 
3639 ENTRY;
3640 /* print an address of _any_ initialized kernel symbol from this
3641 * module, to allow debugging with gdb that doesn't support data
3642 * symbols from modules.*/
3643 CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3645 rc = lu_kmem_init(osc_caches);
3646 if (rc)
3647 RETURN(rc);
3649 rc = class_register_type(&osc_obd_ops, NULL, true,
3650 LUSTRE_OSC_NAME, &osc_device_type);
3651 if (rc)
3652 GOTO(out_kmem, rc);
3653 
3654 rc = register_shrinker(&osc_cache_shrinker);
3655 if (rc)
3656 GOTO(out_type, rc);
3658 /* This is obviously too much memory, only prevent overflow here */
3659 if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3660 GOTO(out_shrinker, rc = -EINVAL);
3662 reqpool_size = osc_reqpool_mem_max << 20;
3663 
3664 reqsize = 1;
3665 while (reqsize < OST_IO_MAXREQSIZE)
3666 reqsize = reqsize << 1;
3669 * We don't enlarge the request count in OSC pool according to
3670 * cl_max_rpcs_in_flight. The allocation from the pool will only be
3671 * tried after normal allocation failed. So a small OSC pool won't
3672  * cause much performance degradation in most of cases.
3673  */
3674 osc_reqpool_maxreqcount = reqpool_size / reqsize;
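/*
 * Sizing example with the default osc_reqpool_mem_max of 5 MiB: if
 * OST_IO_MAXREQSIZE were, say, just under 1 MiB, reqsize would round
 * up to the next power of two (1 MiB) and the pool would be capped at
 * 5 MiB / 1 MiB = 5 requests; the real figures depend on the
 * configured maximum BRW size.
 */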
3676 atomic_set(&osc_pool_req_count, 0);
3677 osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3678 ptlrpc_add_rqs_to_pool);
3680 if (osc_rq_pool == NULL)
3681 GOTO(out_shrinker, rc = -ENOMEM);
3683 rc = osc_start_grant_work();
3685 GOTO(out_req_pool, rc);
3686 
3687 RETURN(0);
3688 
3689 out_req_pool:
3690 ptlrpc_free_rq_pool(osc_rq_pool);
3691 out_shrinker:
3692 unregister_shrinker(&osc_cache_shrinker);
3693 out_type:
3694 class_unregister_type(LUSTRE_OSC_NAME);
3695 out_kmem:
3696 lu_kmem_fini(osc_caches);
3697 
3698 RETURN(rc);
3699 }
3701 static void __exit osc_exit(void)
3702 {
3703 osc_stop_grant_work();
3704 unregister_shrinker(&osc_cache_shrinker);
3705 class_unregister_type(LUSTRE_OSC_NAME);
3706 lu_kmem_fini(osc_caches);
3707 ptlrpc_free_rq_pool(osc_rq_pool);
3708 }
3710 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3711 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3712 MODULE_VERSION(LUSTRE_VERSION_STRING);
3713 MODULE_LICENSE("GPL");
3715 module_init(osc_init);
3716 module_exit(osc_exit);