/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Implementation of cl_page for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_OSC

#include <lustre_osc.h>

#include "osc_internal.h"
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
                         struct osc_page *opg);
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
        struct cl_page *page = opg->ops_cl.cpl_page;

        LASSERT(!opg->ops_transfer_pinned);

        lu_ref_add_atomic(&page->cp_reference, label, page);
        opg->ops_transfer_pinned = 1;
}
static void osc_page_transfer_put(const struct lu_env *env,
                                  struct osc_page *opg)
{
        struct cl_page *page = opg->ops_cl.cpl_page;

        if (opg->ops_transfer_pinned) {
                opg->ops_transfer_pinned = 0;
                lu_ref_del(&page->cp_reference, "transfer", page);
                cl_page_put(env, page);
        }
}
/**
 * This is called once for every page when it is submitted for a transfer,
 * either opportunistic (osc_page_cache_add()) or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
                                  struct osc_page *opg, enum cl_req_type crt)
{
        struct osc_object *obj = osc_page_object(opg);

        osc_lru_use(osc_cli(obj), opg);
}
int osc_page_cache_add(const struct lu_env *env, struct osc_object *osc,
                       struct osc_page *opg, struct cl_io *io,
                       cl_commit_cbt cb)
{
        int result;

        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, io, osc, opg, cb);
        if (result != 0)
                osc_page_transfer_put(env, opg);
        else
                osc_page_transfer_add(env, opg, CRT_WRITE);

        return result;
}
void osc_index2policy(union ldlm_policy_data *policy,
                      const struct cl_object *obj, pgoff_t start, pgoff_t end)
{
        memset(policy, 0, sizeof(*policy));
        policy->l_extent.start = start << PAGE_SHIFT;
        policy->l_extent.end = ((end + 1) << PAGE_SHIFT) - 1;
}
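/*
 * A worked example of the mapping above (page size assumed to be 4KiB,
 * i.e. PAGE_SHIFT == 12): osc_index2policy(policy, obj, 3, 5) yields the
 * byte extent [12288, 24575], covering pages 3..5 inclusive, since
 * 3 << 12 == 12288 and ((5 + 1) << 12) - 1 == 24575.
 */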
static int osc_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
{
        struct osc_page *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
        struct osc_object *obj = osc_page_object(opg);
        struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;

        return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p %lu: "
                          "2< %lld %u %u %#x %#x | %p %p > "
                          "4< %d %d %d %lu %c | %c %c %c %c > "
                          "5< %c %c %c %c | %d %c | %d %c %c>\n",
                          list_empty_marker(&oap->oap_pending_item),
                          list_empty_marker(&oap->oap_rpc_item),
                          oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
                          oap->oap_async_flags, oap->oap_brw_flags,
                          opg->ops_transfer_pinned,
                          cli->cl_r_in_flight, cli->cl_w_in_flight,
                          cli->cl_max_rpcs_in_flight,
                          waitqueue_active(&cli->cl_cache_waiters) ? '+' : '-',
                          list_empty_marker(&cli->cl_loi_ready_list),
                          list_empty_marker(&cli->cl_loi_hp_ready_list),
                          list_empty_marker(&cli->cl_loi_write_list),
                          list_empty_marker(&cli->cl_loi_read_list),
                          list_empty_marker(&obj->oo_ready_item),
                          list_empty_marker(&obj->oo_hp_ready_item),
                          list_empty_marker(&obj->oo_write_item),
                          list_empty_marker(&obj->oo_read_item),
                          atomic_read(&obj->oo_nr_reads),
                          list_empty_marker(&obj->oo_reading_exts),
                          atomic_read(&obj->oo_nr_writes),
                          list_empty_marker(&obj->oo_hp_exts),
                          list_empty_marker(&obj->oo_urgent_exts));
}
static void osc_page_delete(const struct lu_env *env,
                            const struct cl_page_slice *slice)
{
        struct osc_page *opg = cl2osc_page(slice);
        struct osc_object *obj = osc_page_object(opg);
        int rc;

        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(env, obj, opg);
        if (rc != 0)
                CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
                              "Trying to teardown failed: %d\n", rc);

        osc_lru_del(osc_cli(obj), opg);

        if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
                void *value = NULL;

                spin_lock(&obj->oo_tree_lock);
                if (opg->ops_intree) {
                        value = radix_tree_delete(&obj->oo_tree,
                                                  osc_index(opg));
                }
                spin_unlock(&obj->oo_tree_lock);

                LASSERT(ergo(value != NULL, value == opg));
        }
}
static void osc_page_clip(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          int from, int to)
{
        struct osc_page *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;

        CDEBUG(D_CACHE, "from %d, to %d\n", from, to);

        opg->ops_from = from;
        /* argument @to is exclusive, but @ops_to is inclusive */
        opg->ops_to = to - 1;
        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}
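/*
 * Illustration of the clip semantics above: the incoming [from, to) range
 * is half-open, while ops_from/ops_to are both inclusive. With a 4KiB page
 * (assumed here), osc_page_clip(env, slice, 0, 4096) leaves ops_from == 0
 * and ops_to == 4095, i.e. the whole page, while clipping a 100-byte write
 * with osc_page_clip(env, slice, 0, 100) leaves ops_to == 99.
 */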
static int osc_page_flush(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          struct cl_io *io)
{
        struct osc_page *opg = cl2osc_page(slice);
        int rc;

        rc = osc_flush_async_page(env, io, opg);

        return rc;
}
static void osc_page_touch(const struct lu_env *env,
                           const struct cl_page_slice *slice, size_t to)
{
        struct osc_page *opg = cl2osc_page(slice);
        struct cl_object *obj = osc2cl(osc_page_object(opg));

        osc_page_touch_at(env, obj, osc_index(opg), to);
}
static const struct cl_page_operations osc_page_ops = {
        .cpo_print      = osc_page_print,
        .cpo_delete     = osc_page_delete,
        .cpo_clip       = osc_page_clip,
        .cpo_flush      = osc_page_flush,
        .cpo_page_touch = osc_page_touch,
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *cl_page, pgoff_t index)
{
        struct osc_object *osc = cl2osc(obj);
        struct osc_page *opg = cl_object_page_slice(obj, cl_page);
        struct osc_io *oio = osc_env_io(env);
        int result;

        opg->ops_from = 0;
        opg->ops_to = PAGE_SIZE - 1;

        INIT_LIST_HEAD(&opg->ops_lru);

        result = osc_prep_async_page(osc, opg, cl_page, index << PAGE_SHIFT);
        if (result != 0)
                return result;

        opg->ops_srvlock = osc_io_srvlock(oio);
        cl_page_slice_add(cl_page, &opg->ops_cl, obj, &osc_page_ops);

        /* reserve an LRU slot for this page */
        if (cl_page->cp_type == CPT_CACHEABLE) {
                result = osc_lru_alloc(env, osc_cli(osc), opg);
                if (result == 0) {
                        result = radix_tree_preload(GFP_NOFS);
                        if (result == 0) {
                                spin_lock(&osc->oo_tree_lock);
                                result = radix_tree_insert(&osc->oo_tree,
                                                           index, opg);
                                spin_unlock(&osc->oo_tree_lock);

                                radix_tree_preload_end();
                        }
                }
        }

        return result;
}
EXPORT_SYMBOL(osc_page_init);
/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
                     enum cl_req_type crt, int brw_flags)
{
        struct osc_io *oio = osc_env_io(env);
        struct osc_async_page *oap = &opg->ops_oap;

        LASSERT(oap->oap_async_flags & ASYNC_READY);
        LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);

        oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
        oap->oap_page_off = opg->ops_from;
        oap->oap_count = opg->ops_to - opg->ops_from + 1;
        oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;

        if (oio->oi_cap_sys_resource)
                oap->oap_brw_flags |= OBD_BRW_SYS_RESOURCE;

        osc_page_transfer_get(opg, "transfer\0imm");
        osc_page_transfer_add(env, opg, crt);
}
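/*
 * Example of how the clipped range turns into transfer parameters above:
 * a page clipped to ops_from == 512 and ops_to == 4095 is submitted with
 * oap_page_off == 512 and oap_count == 4095 - 512 + 1 == 3584 bytes.
 */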
/* --------------- LRU page management ------------------ */

/**
 * The OSC is a natural place to manage LRU pages, as applications are
 * specialized to write OSC by OSC. Ideally, if one OSC is used more
 * frequently it should occupy more LRU slots. On the other hand, we should
 * avoid using up all LRU slots (client_obd::cl_lru_left); otherwise a
 * process has to be put to sleep waiting for free LRU slots, which would be
 * very bad. The algorithm therefore requires each OSC to free slots
 * voluntarily so that a reasonable number of free slots is maintained.
 */

static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
/**
 * LRU pages are freed in batch mode. OSC should at least free this
 * number of pages to avoid running out of LRU slots.
 */
static inline int lru_shrink_min(struct client_obd *cli)
{
        return cli->cl_max_pages_per_rpc * 2;
}
/**
 * Free at most this number of pages in one pass; otherwise it will take
 * too long to finish.
 */
static inline int lru_shrink_max(struct client_obd *cli)
{
        return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
}
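/*
 * For a sense of scale (values assumed for illustration): with
 * cl_max_pages_per_rpc == 256 (1MiB RPCs on 4KiB pages) and
 * cl_max_rpcs_in_flight == 8, lru_shrink_min() is 512 pages and
 * lru_shrink_max() is 2048 pages per shrink pass.
 */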
/**
 * Check if we can free LRU slots from this OSC. If there are LRU waiters,
 * we should free slots aggressively. In this way, slots are freed at a
 * steady pace to maintain fairness among OSCs.
 *
 * Returns how many LRU pages should be freed.
 */
static int osc_cache_too_much(struct client_obd *cli)
{
        struct cl_client_cache *cache = cli->cl_cache;
        long pages = atomic_long_read(&cli->cl_lru_in_list);
        unsigned long budget;

        LASSERT(cache != NULL);
        budget = cache->ccc_lru_max / (refcount_read(&cache->ccc_users) - 2);

        /* if it's going to run out of LRU slots, we should free some, but not
         * too many, to maintain fairness among OSCs. */
        if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
                if (pages >= budget)
                        return lru_shrink_max(cli);
                else if (pages >= budget / 2)
                        return lru_shrink_min(cli);
        } else {
                time64_t duration = ktime_get_real_seconds();
                long timediff;

                /* knock out pages by duration of no IO activity */
                duration -= cli->cl_lru_last_used;
                /*
                 * The difference shouldn't be more than 70 years,
                 * so we can safely cast to a long. Round to
                 * approximately 1 minute.
                 */
                timediff = (long)(duration >> 6);
                if (timediff > 0 && pages >= budget / timediff)
                        return lru_shrink_min(cli);
        }

        return 0;
}
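/*
 * A worked example of the heuristic above (all figures assumed for
 * illustration): with ccc_lru_max == 1000000 and two OSCs sharing the cache
 * (refcount_read(&ccc_users) == 4), each OSC's budget is 500000 pages. If
 * fewer than 250000 global slots remain and this OSC caches 600000 pages,
 * it is asked to free lru_shrink_max() pages. If slots are plentiful but
 * the OSC has been idle for about 17 minutes (duration == 1024s, so
 * timediff == 16), it is asked to free lru_shrink_min() pages once it
 * caches at least budget / 16 == 31250 pages.
 */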
int lru_queue_work(const struct lu_env *env, void *data)
{
        struct client_obd *cli = data;
        int count;

        CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli));
        count = osc_cache_too_much(cli);
        if (count > 0) {
                int rc = osc_lru_shrink(env, cli, count, false);

                CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n",
                       cli_name(cli), rc, count);
                if (osc_cache_too_much(cli) > 0) {
                        CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli));
                        ptlrpcd_queue_work(cli->cl_lru_work);
                }
        }

        RETURN(0);
}
void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
{
        LIST_HEAD(lru);
        struct osc_async_page *oap;
        long npages = 0;

        list_for_each_entry(oap, plist, oap_pending_item) {
                struct osc_page *opg = oap2osc_page(oap);

                if (!opg->ops_in_lru)
                        continue;

                LASSERT(list_empty(&opg->ops_lru));
                list_add(&opg->ops_lru, &lru);
                ++npages;
        }

        if (npages > 0) {
                spin_lock(&cli->cl_lru_list_lock);
                list_splice_tail(&lru, &cli->cl_lru_list);
                atomic_long_sub(npages, &cli->cl_lru_busy);
                atomic_long_add(npages, &cli->cl_lru_in_list);
                cli->cl_lru_last_used = ktime_get_real_seconds();
                spin_unlock(&cli->cl_lru_list_lock);

                if (waitqueue_active(&osc_lru_waitq))
                        (void)ptlrpcd_queue_work(cli->cl_lru_work);
        }
}
static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
        LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0);
        list_del_init(&opg->ops_lru);
        atomic_long_dec(&cli->cl_lru_in_list);
}
/**
 * The page is being destroyed. The page may not be on the LRU list if the
 * transfer never finished (an error occurred).
 */
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
        if (opg->ops_in_lru) {
                spin_lock(&cli->cl_lru_list_lock);
                if (!list_empty(&opg->ops_lru)) {
                        __osc_lru_del(cli, opg);
                } else {
                        LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
                        atomic_long_dec(&cli->cl_lru_busy);
                }
                spin_unlock(&cli->cl_lru_list_lock);

                atomic_long_inc(cli->cl_lru_left);
                /* this is a great place to release more LRU pages if
                 * this osc occupies too many LRU pages and the kernel
                 * is stealing one of them. */
                if (osc_cache_too_much(cli)) {
                        CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli));
                        (void)ptlrpcd_queue_work(cli->cl_lru_work);
                }
                wake_up(&osc_lru_waitq);
        } else {
                LASSERT(list_empty(&opg->ops_lru));
        }
}
/**
 * Delete the page from the LRU list for redirty.
 */
static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
{
        /* If the page is being transferred for the first time,
         * ops_lru should be empty */
        if (opg->ops_in_lru) {
                if (list_empty(&opg->ops_lru))
                        return;

                spin_lock(&cli->cl_lru_list_lock);
                if (!list_empty(&opg->ops_lru)) {
                        __osc_lru_del(cli, opg);
                        atomic_long_inc(&cli->cl_lru_busy);
                }
                spin_unlock(&cli->cl_lru_list_lock);
        }
}
static void discard_cl_pages(const struct lu_env *env, struct cl_io *io,
                             struct cl_page **pvec, int max_index)
{
        struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
        int i;

        ll_folio_batch_init(fbatch, 0);
        for (i = 0; i < max_index; i++) {
                struct cl_page *page = pvec[i];

                LASSERT(cl_page_is_owned(page, io));
                cl_page_discard(env, io, page);
                cl_page_disown(env, io, page);
                cl_batch_put(env, page, fbatch);
        }

        folio_batch_release(fbatch);
}
/**
 * Check if a cl_page can be released, i.e. it's not being used.
 *
 * If unstable accounting is turned on, a bulk transfer may hold one extra
 * refcount for recovery, so we need to check the vmpage refcount as well;
 * otherwise, even if we can destroy the cl_page, the corresponding vmpage
 * can't be reused.
 */
static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
{
        if (cl_page_in_use_noref(page))
                return true;

        if (cli->cl_cache->ccc_unstable_check) {
                struct page *vmpage = cl_page_vmpage(page);

                /* vmpage has two known users: cl_page and the VM page cache */
                if (page_count(vmpage) - page_mapcount(vmpage) > 2)
                        return true;
        }
        return false;
}
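/*
 * Example of the refcount check above: a page referenced only by its
 * cl_page and by the VM page cache has page_count() - page_mapcount() == 2.
 * An additional reference (e.g. a bulk transfer still pinning the page for
 * recovery) pushes the difference above 2, so the page is treated as busy
 * and is skipped by the LRU shrinker.
 */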
/**
 * Drop at most @target pages from the LRU.
 */
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                    long target, bool force)
{
        struct cl_io *io;
        struct cl_object *clobj = NULL;
        struct cl_page **pvec;
        struct osc_page *opg;
        long count = 0;
        int maxscan = 0;
        int index = 0;
        int rc = 0;

        LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0);
        if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
                RETURN(0);

        CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n",
               cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force);
        if (!force) {
                if (atomic_read(&cli->cl_lru_shrinkers) > 0)
                        RETURN(-EBUSY);

                if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
                        atomic_dec(&cli->cl_lru_shrinkers);
                        RETURN(-EBUSY);
                }
        } else {
                atomic_inc(&cli->cl_lru_shrinkers);
        }

        pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
        io = osc_env_thread_io(env);

        spin_lock(&cli->cl_lru_list_lock);
        if (force)
                cli->cl_lru_reclaim++;
        maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
        while (!list_empty(&cli->cl_lru_list)) {
                struct cl_page *page;
                bool will_free = false;

                if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1)
                        break;

                if (--maxscan < 0)
                        break;

                opg = list_first_entry(&cli->cl_lru_list, struct osc_page,
                                       ops_lru);
                page = opg->ops_cl.cpl_page;
                if (lru_page_busy(cli, page)) {
                        list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
                        continue;
                }

                LASSERT(page->cp_obj != NULL);
                if (clobj != page->cp_obj) {
                        struct cl_object *tmp = page->cp_obj;

                        spin_unlock(&cli->cl_lru_list_lock);

                        if (clobj != NULL) {
                                discard_cl_pages(env, io, pvec, index);
                                index = 0;

                                cl_io_fini(env, io);
                                cl_object_put(env, clobj);
                        }

                        clobj = tmp;
                        io->ci_ignore_layout = 1;
                        rc = cl_io_init(env, io, CIT_MISC, clobj);

                        spin_lock(&cli->cl_lru_list_lock);
                        if (rc != 0)
                                break;
                        continue;
                }

                if (cl_page_own_try(env, io, page) == 0) {
                        if (!lru_page_busy(cli, page)) {
                                /* remove it from lru list earlier to avoid
                                 * lock contention */
                                __osc_lru_del(cli, opg);
                                opg->ops_in_lru = 0; /* will be discarded */
                                will_free = true;
                        } else {
                                cl_page_disown(env, io, page);
                        }
                }

                if (!will_free) {
                        list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
                        continue;
                }

                /* Don't discard and free the page with cl_lru_list held */
                pvec[index++] = page;
                if (unlikely(index == OTI_PVEC_SIZE)) {
                        spin_unlock(&cli->cl_lru_list_lock);
                        discard_cl_pages(env, io, pvec, index);
                        index = 0;

                        spin_lock(&cli->cl_lru_list_lock);
                }

                if (++count >= target)
                        break;
        }
        spin_unlock(&cli->cl_lru_list_lock);

        if (clobj != NULL) {
                discard_cl_pages(env, io, pvec, index);

                cl_io_fini(env, io);
                cl_object_put(env, clobj);
        }

        atomic_dec(&cli->cl_lru_shrinkers);
        if (count > 0) {
                atomic_long_add(count, cli->cl_lru_left);
                wake_up(&osc_lru_waitq);
        }
        RETURN(count > 0 ? count : rc);
}
EXPORT_SYMBOL(osc_lru_shrink);
/**
 * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least
 * @npages of LRU slots. For performance considerations, it's better to drop
 * LRU pages in batches, therefore the actual number reclaimed is adjusted to
 * be at least cl_max_pages_per_rpc.
 */
static long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
{
        struct lu_env *env;
        struct cl_client_cache *cache = cli->cl_cache;
        struct client_obd *scan;
        int max_scans;
        __u16 refcheck;
        long rc = 0;

        LASSERT(cache != NULL);

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                RETURN(rc);

        npages = max_t(int, npages, cli->cl_max_pages_per_rpc);
        CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n",
               cli_name(cli), npages);
        rc = osc_lru_shrink(env, cli, npages, true);
        if (rc >= npages) {
                CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n",
                       cli_name(cli), rc, npages);
                if (osc_cache_too_much(cli) > 0)
                        ptlrpcd_queue_work(cli->cl_lru_work);
                GOTO(out, rc);
        }

        CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n",
               cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list),
               atomic_long_read(&cli->cl_lru_busy), npages);

        /* Reclaim LRU slots from other client_obds, as this one can't free
         * enough from its own. This should rarely happen. */
        spin_lock(&cache->ccc_lru_lock);
        LASSERT(!list_empty(&cache->ccc_lru));

        cache->ccc_lru_shrinkers++;
        list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);

        max_scans = refcount_read(&cache->ccc_users) - 2;
        while (--max_scans > 0 &&
               (scan = list_first_entry_or_null(&cache->ccc_lru,
                                                struct client_obd,
                                                cl_lru_osc)) != NULL) {
                CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
                       cli_name(scan), scan,
                       atomic_long_read(&scan->cl_lru_in_list),
                       atomic_long_read(&scan->cl_lru_busy));

                list_move_tail(&scan->cl_lru_osc, &cache->ccc_lru);
                if (osc_cache_too_much(scan) > 0) {
                        spin_unlock(&cache->ccc_lru_lock);

                        rc = osc_lru_shrink(env, scan, npages, true);
                        spin_lock(&cache->ccc_lru_lock);
                        if (rc >= npages)
                                break;
                }
        }
        spin_unlock(&cache->ccc_lru_lock);

out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
               cli_name(cli), cli, rc);
        RETURN(rc);
}
/**
 * osc_lru_alloc() is called to allocate an LRU slot for a cl_page.
 *
 * Usually the LRU slots are reserved in osc_io_iter_rw_init(), which should
 * have reserved enough slots for an IO; only when the LRU slots are in
 * extreme shortage does this function have to wait for slots to be freed.
 */
static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
                         struct osc_page *opg)
{
        struct osc_io *oio = osc_env_io(env);
        int rc = 0;

        if (cli->cl_cache == NULL) /* shall not be in LRU */
                RETURN(0);

        if (oio->oi_lru_reserved > 0) {
                --oio->oi_lru_reserved;
                GOTO(out, rc);
        }

        LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
        while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
                /* run out of LRU slots, try to drop some by itself */
                rc = osc_lru_reclaim(cli, 1);
                if (rc < 0)
                        break;
                if (rc > 0)
                        continue;

                /* IO issued by readahead, don't try hard */
                if (oio->oi_is_readahead) {
                        if (atomic_long_read(cli->cl_lru_left) > 0)
                                continue;
                        rc = -EBUSY;
                        break;
                }

                rc = l_wait_event_abortable(
                        osc_lru_waitq,
                        atomic_long_read(cli->cl_lru_left) > 0);
                if (rc < 0) {
                        rc = -EINTR;
                        break;
                }
        }

out:
        if (rc >= 0) {
                atomic_long_inc(&cli->cl_lru_busy);
                opg->ops_in_lru = 1;
                rc = 0;
        }

        RETURN(rc);
}
/**
 * osc_lru_reserve() is called to reserve enough LRU slots for I/O.
 *
 * The benefit of doing this is to reduce contention on the atomic counter
 * cl_lru_left by changing it from per-page access to per-IO access.
 */
unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages)
{
        unsigned long reserved = 0;
        unsigned long max_pages;
        long c;
        int rc;

        c = atomic_long_read(cli->cl_lru_left);
        if (c < npages && osc_lru_reclaim(cli, npages) > 0)
                c = atomic_long_read(cli->cl_lru_left);

        if (c < npages) {
                /*
                 * Trigger writeback in the hope some LRU slot could
                 * be freed.
                 */
                rc = ptlrpcd_queue_work(cli->cl_writeback_work);
        }

        while (c >= npages) {
                if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
                        reserved = npages;
                        break;
                }
                c = atomic_long_read(cli->cl_lru_left);
        }

        if (reserved != npages) {
                rc = l_wait_event_abortable(
                        osc_lru_waitq,
                        atomic_long_read(cli->cl_lru_left) > 0);
        }

        max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
        if (atomic_long_read(cli->cl_lru_left) < max_pages) {
                /* If there aren't enough pages in the per-OSC LRU then
                 * wake up the LRU thread to try and clear out space, so
                 * we don't block if pages are being dirtied quickly. */
                CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
                       cli_name(cli), atomic_long_read(cli->cl_lru_left),
                       max_pages);
                (void)ptlrpcd_queue_work(cli->cl_lru_work);
        }

        return reserved;
}
/**
 * osc_lru_unreserve() is called to unreserve LRU slots.
 *
 * LRU slots reserved by osc_lru_reserve() may be left over for several
 * reasons, such as the page already existing or an I/O error. Those
 * reserved slots should be released by calling this function.
 */
void osc_lru_unreserve(struct client_obd *cli, unsigned long npages)
{
        atomic_long_add(npages, cli->cl_lru_left);
        wake_up(&osc_lru_waitq);
}
/**
 * Atomic operations are expensive. We accumulate the accounting for the
 * same page zone to get better performance.
 * In practice this works pretty well because the pages in the same RPC
 * are likely from the same page zone.
 */
#ifdef HAVE_NR_UNSTABLE_NFS
/* Old kernels use a separate counter for unstable pages,
 * newer kernels treat them like any other writeback.
 * (see Linux commit: v5.7-467-g8d92890bd6b8)
 */
#define NR_ZONE_WRITE_PENDING ((enum zone_stat_item)NR_UNSTABLE_NFS)
#elif !defined(HAVE_NR_ZONE_WRITE_PENDING)
#define NR_ZONE_WRITE_PENDING ((enum zone_stat_item)NR_WRITEBACK)
#endif
static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
                                            int factor)
{
        void *zone = NULL;
        int page_count;
        int count = 0;
        int i;

        page_count = desc->bd_iov_count;

        CDEBUG(D_PAGE, "%s %d unstable pages\n",
               factor == 1 ? "adding" : "removing", page_count);

        for (i = 0; i < page_count; i++) {
                void *pz = page_zone(desc->bd_vec[i].bv_page);

                if (likely(pz == zone)) {
                        ++count;
                        continue;
                }

                if (count > 0) {
                        mod_zone_page_state(zone, NR_ZONE_WRITE_PENDING,
                                            count * factor);
                        count = 0;
                }
                zone = pz;
                ++count;
        }
        if (count > 0)
                mod_zone_page_state(zone, NR_ZONE_WRITE_PENDING,
                                    count * factor);
}

static inline void add_unstable_pages(struct ptlrpc_bulk_desc *desc)
{
        unstable_page_accounting(desc, 1);
}

static inline void dec_unstable_pages(struct ptlrpc_bulk_desc *desc)
{
        unstable_page_accounting(desc, -1);
}
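/*
 * The batching above means a bulk of, say, 256 pages that all belong to the
 * same zone results in a single mod_zone_page_state() call rather than 256
 * separate ones; only a zone change in the middle of the vector forces an
 * extra flush of the accumulated count.
 */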
/**
 * Performs "unstable" page accounting. This function balances the
 * increment operations performed in osc_inc_unstable_pages. It is
 * registered as the RPC request callback, and is executed when the
 * bulk RPC is committed on the server. Thus at this point, the pages
 * involved in the bulk transfer are no longer considered unstable.
 *
 * If this function is called, the request should have been committed
 * or req::rq_unstable must have been set; it implies that the unstable
 * statistics have been added.
 */
void osc_dec_unstable_pages(struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        int page_count;
        long unstable_count;

        /* no desc means short io, which doesn't have separate unstable pages,
         * it's just using space inside the RPC itself
         */
        if (!desc)
                return;

        page_count = desc->bd_iov_count;

        LASSERT(page_count >= 0);

        dec_unstable_pages(desc);

        unstable_count = atomic_long_sub_return(page_count,
                                                &cli->cl_unstable_count);
        LASSERT(unstable_count >= 0);

        unstable_count = atomic_long_sub_return(page_count,
                                                &cli->cl_cache->ccc_unstable_nr);
        LASSERT(unstable_count >= 0);

        if (waitqueue_active(&osc_lru_waitq))
                (void)ptlrpcd_queue_work(cli->cl_lru_work);
}
988 * "unstable" page accounting. See: osc_dec_unstable_pages.
990 void osc_inc_unstable_pages(struct ptlrpc_request *req)
992 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
993 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
996 /* No unstable page tracking */
997 if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
1000 /* no desc means short io, which doesn't have separate unstable pages,
1001 * it's just using space inside the RPC itself
1006 page_count = desc->bd_iov_count;
1008 add_unstable_pages(desc);
1009 atomic_long_add(page_count, &cli->cl_unstable_count);
1010 atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
1012 /* If the request has already been committed (i.e. brw_commit
1013 * called via rq_commit_cb), we need to undo the unstable page
1014 * increments we just performed because rq_commit_cb wont be
1016 spin_lock(&req->rq_lock);
1017 if (unlikely(req->rq_committed)) {
1018 spin_unlock(&req->rq_lock);
1020 osc_dec_unstable_pages(req);
1022 req->rq_unstable = 1;
1023 spin_unlock(&req->rq_lock);
/**
 * Check whether to piggyback the SOFT_SYNC flag to the OST from this OSC.
 * This function is called for every BRW RPC, so it's critical
 * to keep it fast.
 */
bool osc_over_unstable_soft_limit(struct client_obd *cli)
{
        long unstable_nr, osc_unstable_count;

        /* Can't check cli->cl_unstable_count, therefore, no soft limit */
        if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
                return false;

        osc_unstable_count = atomic_long_read(&cli->cl_unstable_count);
        unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr);

        CDEBUG(D_CACHE,
               "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
               cli_name(cli), cli, unstable_nr, osc_unstable_count);

        /* If the LRU slots are in shortage - 25% remaining AND this OSC
         * has one full RPC window of unstable pages, it's a good chance
         * to piggyback a SOFT_SYNC flag.
         * Note that the OST won't take immediate response for the
         * SOFT_SYNC request, so active OSCs will have more chances to
         * carry the flag; this is reasonable. */
        return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 &&
               osc_unstable_count > cli->cl_max_pages_per_rpc *
                                    cli->cl_max_rpcs_in_flight;
}
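/*
 * Numeric example of the soft-limit check above (tunables assumed for
 * illustration): with ccc_lru_max == 1000000, cl_max_pages_per_rpc == 256
 * and cl_max_rpcs_in_flight == 8, SOFT_SYNC starts being piggybacked once
 * the file system as a whole has more than 250000 unstable pages and this
 * OSC alone has more than 2048.
 */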
/**
 * Return how many LRU pages are in the cache of all OSC devices.
 *
 * \retval	number of cached LRU pages times reclamation tendency
 * \retval	SHRINK_STOP if it cannot do any scanning at this time
 */
unsigned long osc_cache_shrink_count(struct shrinker *sk,
                                     struct shrink_control *sc)
{
        struct client_obd *cli;
        unsigned long cached = 0;

        if (!osc_page_cache_shrink_enabled)
                return 0;

        spin_lock(&osc_shrink_lock);
        list_for_each_entry(cli, &osc_shrink_list, cl_shrink_list)
                cached += atomic_long_read(&cli->cl_lru_in_list);
        spin_unlock(&osc_shrink_lock);

        return (cached * sysctl_vfs_cache_pressure) / 100;
}
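/*
 * The scaling above follows the usual shrinker convention: with the default
 * vm.vfs_cache_pressure of 100 the cached page count is reported unchanged,
 * while e.g. vfs_cache_pressure == 200 doubles the reported count and makes
 * the VM reclaim OSC LRU pages more aggressively.
 */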
/**
 * Scan and try to reclaim sc->nr_to_scan cached LRU pages.
 *
 * \retval	number of cached LRU pages reclaimed
 * \retval	SHRINK_STOP if it cannot do any scanning at this time
 *
 * The Linux kernel will loop calling this shrinker scan routine with
 * sc->nr_to_scan = SHRINK_BATCH (128 for now) until the kernel has reclaimed
 * enough memory.
 *
 * If sc->nr_to_scan is 0, the VM is querying the cache size; we don't need
 * to scan and try to reclaim LRU pages, just return 0, and
 * osc_cache_shrink_count() will report the LRU page number.
 */
unsigned long osc_cache_shrink_scan(struct shrinker *sk,
                                    struct shrink_control *sc)
{
        struct client_obd *cli;
        struct client_obd *stop_anchor = NULL;
        struct lu_env *env;
        long shrank = 0;
        int rc;
        __u16 refcheck;

        if (sc->nr_to_scan == 0)
                return 0;

        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return SHRINK_STOP;

        spin_lock(&osc_shrink_lock);
        while ((cli = list_first_entry_or_null(&osc_shrink_list,
                                               struct client_obd,
                                               cl_shrink_list)) != NULL) {
                if (stop_anchor == NULL)
                        stop_anchor = cli;
                else if (cli == stop_anchor)
                        break;

                list_move_tail(&cli->cl_shrink_list, &osc_shrink_list);
                spin_unlock(&osc_shrink_lock);

                /* shrink no more than max_pages_per_rpc for an OSC */
                rc = osc_lru_shrink(env, cli, (sc->nr_to_scan - shrank) >
                                    cli->cl_max_pages_per_rpc ?
                                    cli->cl_max_pages_per_rpc :
                                    sc->nr_to_scan - shrank, true);
                if (rc > 0)
                        shrank += rc;

                if (shrank >= sc->nr_to_scan)
                        goto out;

                spin_lock(&osc_shrink_lock);
        }
        spin_unlock(&osc_shrink_lock);

out:
        cl_env_put(env, &refcheck);
        return shrank;
}