4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2010, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/ldlm/ldlm_pool.c
33 * Author: Yury Umanets <umka@clusterfs.com>
* The idea of this code is rather simple. Each second, for each server
* namespace, we have the SLV - server lock volume - which is calculated from
* the current number of granted locks, the grant speed for the past period,
* etc. - that is, the locking load. For simplicity, this SLV number may be
* thought of as a flow definition. It is sent to clients at every opportunity
* to let them know what the current load situation on the server is. By
* default, at startup, the SLV on the server is set to a maximum value,
* calculated as follows: allow one client to hold all ->pl_limit locks for
* 10h.
* Next, on clients, the number of cached locks is no longer limited
* artificially as it was before. Instead, the client calculates the CLV -
* the client lock volume - for each lock and compares it with the last SLV
* received from the server. The CLV is calculated as the number of locks in
* the LRU * lock live time in seconds. If CLV > SLV, the lock is canceled.
* The client has an LVF - lock volume factor - which regulates how sensitive
* the client should be to the last SLV from the server. The higher the LVF,
* the more locks will be canceled on the client. Its default value is 1.
* Setting the LVF to 2 means the client will cancel locks 2 times faster.
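*
* To illustrate with numbers not taken from the code: a client with 1000
* locks in its LRU, each about 100s old, has CLV ~ 1000 * 100 = 100000.
* If the last SLV received from the server is 90000, then CLV > SLV and the
* client starts canceling LRU locks. With LVF = 2 the client reacts as if
* the volume were twice as large, so cancellation starts correspondingly
* earlier.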
* Locks on a client will be canceled more intensively in these cases:
* (1) if the SLV is smaller, that is, the load is higher on the server;
* (2) the client holds a lot of locks (the more locks a client holds, the
* bigger the chances that some of them should be canceled);
* (3) the client has old locks (taken some time ago).
* Thus, according to the flow paradigm that we use to better understand the
* SLV, the CLV is the volume of a particle in the flow described by the SLV.
* Accordingly, if the flow is getting thinner, more and more particles fall
* outside of it, and as the particles are locks, they should be canceled.
* The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
* Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such as
* using the LVF, and many cleanups. The flow definition, which allows easier
* understanding of the logic, belongs to Nikita Danilov
* (nikita@clusterfs.com), as well as many cleanups and fixes. The design and
* implementation are by Yury Umanets (umka@clusterfs.com).
75 * Glossary for terms used:
* pl_limit - Number of allowed locks in the pool. Applies to server and
* client side;
80 * pl_granted - Number of granted locks (calculated);
81 * pl_grant_rate - Number of granted locks for last T (calculated);
82 * pl_cancel_rate - Number of canceled locks for last T (calculated);
83 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
84 * pl_grant_plan - Planned number of granted locks for next T (calculated);
85 * pl_server_lock_volume - Current server lock volume (calculated);
* As may be seen from the list above, there are several tunables which can
* significantly affect behavior. They may all be modified via sysfs. They
* also make it possible to construct several pre-defined behavior policies.
* If none of the predefined policies suits the working pattern in use, a new
* one may be "constructed" via the sysfs tunables.
94 #define DEBUG_SUBSYSTEM S_LDLM
96 #include <linux/workqueue.h>
97 #include <libcfs/linux/linux-mem.h>
98 #include <lustre_dlm.h>
99 #include <cl_object.h>
100 #include <obd_class.h>
101 #include <obd_support.h>
102 #include "ldlm_internal.h"
104 #ifdef HAVE_LRU_RESIZE_SUPPORT
107 * 50 ldlm locks for 1MB of RAM.
109 #define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
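* Illustrative arithmetic (assumed typical parameters, not part of the
* original code): with 4 KiB pages, PAGE_SHIFT = 12, so
* NUM_CACHEPAGES >> (20 - 12) is the cacheable RAM in MB; e.g. about 16 GiB
* of RAM gives 16384 MB * 50 = 819200 locks for LDLM_POOL_HOST_L.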
112 * Maximal possible grant step plan in %.
114 #define LDLM_POOL_MAX_GSP (30)
117 * Minimal possible grant step plan in %.
119 #define LDLM_POOL_MIN_GSP (1)
122 * This controls the speed of reaching LDLM_POOL_MAX_GSP
123 * with increasing thread period.
125 #define LDLM_POOL_GSP_STEP_SHIFT (2)
* LDLM_POOL_MAX_GSP% of all locks is the default GP.
130 #define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
133 * Max age for locks on clients.
135 #define LDLM_POOL_MAX_AGE (36000)
138 * The granularity of SLV calculation.
140 #define LDLM_POOL_SLV_SHIFT (10)
142 static inline __u64 dru(__u64 val, __u32 shift, int round_up)
144 return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
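* Illustrative only (not part of the original code): dru() divides \a val by
* 2^shift, rounding up when \a round_up is set. E.g. dru(1000, 10, 1) =
* (1000 + 1023) >> 10 = 1, while dru(1000, 10, 0) = 1000 >> 10 = 0.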
147 static inline __u64 ldlm_pool_slv_max(__u32 L)
* Allow one client to hold all the locks for 10 hrs.
* The formula is the following: limit * 10h / 1 client.
153 __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
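* Illustrative only (not part of the original code): with the hypothetical
* LDLM_POOL_HOST_L of 819200 locks from the example above, the maximum SLV
* is 819200 * 36000 = 29491200000 lock*seconds.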
157 static inline __u64 ldlm_pool_slv_min(__u32 L)
163 LDLM_POOL_FIRST_STAT = 0,
164 LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
165 LDLM_POOL_GRANT_STAT,
166 LDLM_POOL_CANCEL_STAT,
167 LDLM_POOL_GRANT_RATE_STAT,
168 LDLM_POOL_CANCEL_RATE_STAT,
169 LDLM_POOL_GRANT_PLAN_STAT,
171 LDLM_POOL_SHRINK_REQTD_STAT,
172 LDLM_POOL_SHRINK_FREED_STAT,
173 LDLM_POOL_RECALC_STAT,
174 LDLM_POOL_TIMING_STAT,
178 static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
180 return container_of(pl, struct ldlm_namespace, ns_pool);
* Calculates the suggested grant_step in % of available locks for the passed
* period \a t. This is later used in grant_plan calculations.
187 static inline int ldlm_pool_t2gsp(unsigned int t)
* This yields a 1% grant step for anything below LDLM_POOL_GSP_STEP
* and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
*
* How this will affect execution is the following:
*
* - for a thread period of 1s we will have a grant_step of 1%, which is good
* from the POV of taking some load off the server and pushing it out to
* clients. This is so because a grant_step of 1% means that the server will
* not allow clients to get lots of locks in a short period of time while
* keeping all their old locks in their caches. Clients will always have to
* cancel some locks if they want to take new ones;
*
* - for a thread period of 10s (which is the default) we will have 23%, which
* means that clients will have enough room to take some new locks without
* giving any back. All locks from this 23% which were not taken by clients
* in the current period will contribute to SLV growth. SLV growth means more
* locks cached on clients until the limit or grant plan is reached.
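*
* Worked values from the formula below (illustrative): for t = 1, t >> 2 = 0
* and gsp = 30 - (29 >> 0) = 1%; for t = 10, t >> 2 = 2 and
* gsp = 30 - (29 >> 2) = 23%; for t >= 20, 29 >> (t >> 2) = 0 and gsp
* saturates at 30%.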
209 return LDLM_POOL_MAX_GSP -
210 ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
211 (t >> LDLM_POOL_GSP_STEP_SHIFT));
214 static inline int ldlm_pool_granted(struct ldlm_pool *pl)
216 return atomic_read(&pl->pl_granted);
* Recalculates next grant plan on passed \a pl.
222 * \pre ->pl_lock is locked.
224 static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
226 int granted, grant_step, limit;
228 limit = ldlm_pool_get_limit(pl);
229 granted = ldlm_pool_granted(pl);
231 grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
232 grant_step = ((limit - granted) * grant_step) / 100;
233 pl->pl_grant_plan = granted + grant_step;
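* Illustrative note (not an original comment): the clamp below,
* (limit * 5) >> 2, caps the grant plan at 125% of the limit; e.g. a limit
* of 1000 allows a grant plan of at most 1250.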
234 limit = (limit * 5) >> 2;
235 if (pl->pl_grant_plan > limit)
236 pl->pl_grant_plan = limit;
240 * Recalculates next SLV on passed \a pl.
242 * \pre ->pl_lock is locked.
244 static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
254 slv = pl->pl_server_lock_volume;
255 grant_plan = pl->pl_grant_plan;
256 limit = ldlm_pool_get_limit(pl);
257 granted = ldlm_pool_granted(pl);
258 round_up = granted < limit;
260 grant_usage = max_t(int, limit - (granted - grant_plan), 1);
* Find out the SLV change factor, which is the ratio of grant usage
* to the limit. The SLV changes as fast as the ratio of grant plan
* consumption. The more locks from the grant plan are left unconsumed
* by clients in the last interval (idle time), the faster the SLV grows.
* Conversely, the more the grant plan is over-consumed (load time), the
* faster the SLV drops.
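*
* Illustrative numbers (not from the original comments): with limit = 1000,
* granted = 800 and grant_plan = 900, grant_usage = 1000 - (800 - 900) =
* 1100, so slv_factor = (1100 << 10) / 1000 = 1126 and the SLV grows by
* ~10%; with granted = 1000 instead, grant_usage = 900, slv_factor = 921
* and the SLV drops by ~10%.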
270 slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
271 do_div(slv_factor, limit);
272 slv = slv * slv_factor;
273 slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
275 if (slv > ldlm_pool_slv_max(limit))
276 slv = ldlm_pool_slv_max(limit);
277 else if (slv < ldlm_pool_slv_min(limit))
278 slv = ldlm_pool_slv_min(limit);
280 pl->pl_server_lock_volume = slv;
* Updates statistics on passed \a pl for the last \a period.
286 * \pre ->pl_lock is locked.
288 static void ldlm_pool_recalc_stats(struct ldlm_pool *pl, timeout_t period)
290 int grant_plan = pl->pl_grant_plan;
291 __u64 slv = pl->pl_server_lock_volume;
292 int granted = ldlm_pool_granted(pl);
293 int grant_rate = atomic_read(&pl->pl_grant_rate) / period;
294 int cancel_rate = atomic_read(&pl->pl_cancel_rate) / period;
296 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
298 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
300 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
302 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
304 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
309 * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
311 static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
313 struct obd_device *obd;
* Set the new SLV in the obd field for later use without accessing the
* pool. This is required to avoid a race between sending a reply to a
* client with the new SLV and cleanup of the server stack, during which we
* can't guarantee that the namespace is still alive. We only know that the
* obd is alive as long as a valid export is alive.
322 obd = ldlm_pl2ns(pl)->ns_obd;
323 LASSERT(obd != NULL);
324 write_lock(&obd->obd_pool_lock);
325 obd->obd_pool_slv = pl->pl_server_lock_volume;
326 write_unlock(&obd->obd_pool_lock);
330 * Recalculates all pool fields on passed \a pl.
332 * \pre ->pl_lock is not locked.
334 static int ldlm_srv_pool_recalc(struct ldlm_pool *pl, bool force)
336 timeout_t recalc_interval_sec;
340 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
341 if (!force && recalc_interval_sec < pl->pl_recalc_period)
344 spin_lock(&pl->pl_lock);
345 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
346 if (!force && recalc_interval_sec < pl->pl_recalc_period) {
347 spin_unlock(&pl->pl_lock);
351 * Recalc SLV after last period. This should be done
352 * _before_ recalculating new grant plan.
354 ldlm_pool_recalc_slv(pl);
357 * Make sure that pool informed obd of last SLV changes.
359 ldlm_srv_pool_push_slv(pl);
362 * Update grant_plan for new period.
364 ldlm_pool_recalc_grant_plan(pl);
366 pl->pl_recalc_time = ktime_get_seconds();
367 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
368 recalc_interval_sec);
369 spin_unlock(&pl->pl_lock);
* This function is used on the server side as the main entry point for
* memory pressure handling. It decreases the SLV on \a pl according to the
* passed \a nr and \a gfp_mask.
*
* Our goal here is to decrease the SLV in such a way that clients hold
* \a nr fewer locks over the next 10h.
381 static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
382 int nr, gfp_t gfp_mask)
* The VM is asking how many entries may potentially be freed.
390 return ldlm_pool_granted(pl);
* Clients have already canceled the locks, while the server is already in
* the shrinker and can't cancel anything itself. Let's catch this race.
396 if (ldlm_pool_granted(pl) == 0)
399 spin_lock(&pl->pl_lock);
* We want the shrinker to possibly cause the cancellation of @nr locks on
* clients, or to grant approximately @nr fewer locks in the next intervals.
*
* This is why we decrease the SLV by @nr. The effect will only last about
* one recalc interval (1s these days), which should be enough to pass the
* decreased SLV to all clients. On the next recalc interval the pool will
* either increase the SLV if the locking load is not high, keep it at the
* same level, or decrease it again; thus, the shrinker-decreased SLV will
* affect the following recalc intervals and in this way lower the locking
* load.
413 if (nr < pl->pl_server_lock_volume) {
414 pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
416 limit = ldlm_pool_get_limit(pl);
417 pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
421 * Make sure that pool informed obd of last SLV changes.
423 ldlm_srv_pool_push_slv(pl);
424 spin_unlock(&pl->pl_lock);
* We have not actually freed any memory here so far; it may only be freed
* later. Return 0 so as not to confuse the VM.
434 * Setup server side pool \a pl with passed \a limit.
436 static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
438 struct obd_device *obd;
440 obd = ldlm_pl2ns(pl)->ns_obd;
441 LASSERT(obd != NULL && obd != LP_POISON);
442 LASSERT(obd->obd_type != LP_POISON);
443 write_lock(&obd->obd_pool_lock);
444 obd->obd_pool_limit = limit;
445 write_unlock(&obd->obd_pool_lock);
447 ldlm_pool_set_limit(pl, limit);
* Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to passed \a pl.
454 static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
456 struct obd_device *obd;
* Get the new SLV and Limit from the obd, which is updated with each
* incoming RPC.
462 obd = ldlm_pl2ns(pl)->ns_obd;
463 LASSERT(obd != NULL);
464 read_lock(&obd->obd_pool_lock);
465 pl->pl_server_lock_volume = obd->obd_pool_slv;
466 ldlm_pool_set_limit(pl, obd->obd_pool_limit);
467 read_unlock(&obd->obd_pool_lock);
* Recalculates client side pool \a pl according to current SLV and Limit.
473 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl, bool force)
475 timeout_t recalc_interval_sec;
480 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
481 if (!force && recalc_interval_sec < pl->pl_recalc_period)
484 spin_lock(&pl->pl_lock);
486 * Check if we need to recalc lists now.
488 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
489 if (!force && recalc_interval_sec < pl->pl_recalc_period) {
490 spin_unlock(&pl->pl_lock);
495 * Make sure that pool knows last SLV and Limit from obd.
497 ldlm_cli_pool_pop_slv(pl);
498 spin_unlock(&pl->pl_lock);
* When canceling locks on the client we do not need to maintain sharp
* timing; we only want to cancel locks as soon as possible according to
* the new SLV. This may be called when the SLV has changed a lot, which is
* why we do not take pl->pl_recalc_time into account here.
506 ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, 0);
508 spin_lock(&pl->pl_lock);
510 * Time of LRU resizing might be longer than period,
511 * so update after LRU resizing rather than before it.
513 pl->pl_recalc_time = ktime_get_seconds();
514 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
515 recalc_interval_sec);
516 spin_unlock(&pl->pl_lock);
* This function is the main entry point for memory pressure handling on
* the client side. Its main goal is to cancel some number of locks on the
* passed \a pl according to \a nr and \a gfp_mask.
525 static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
526 int nr, gfp_t gfp_mask)
528 struct ldlm_namespace *ns;
* Do not cancel locks if LRU resize is disabled for this ns.
536 if (!ns_connect_lru_resize(ns))
540 * Make sure that pool knows last SLV and Limit from obd.
542 spin_lock(&pl->pl_lock);
543 ldlm_cli_pool_pop_slv(pl);
544 spin_unlock(&pl->pl_lock);
546 spin_lock(&ns->ns_lock);
547 unused = ns->ns_nr_unused;
548 spin_unlock(&ns->ns_lock);
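* Illustrative note (not an original comment): the kernel default for
* sysctl_vfs_cache_pressure is 100, in which case the line below reports
* the full unused count, e.g. unused = 5000 yields 5000 potentially
* freeable entries.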
551 return (unused / 100) * sysctl_vfs_cache_pressure;
553 return ldlm_cancel_lru(ns, nr, LCF_ASYNC, 0);
556 static struct ldlm_pool_ops ldlm_srv_pool_ops = {
557 .po_recalc = ldlm_srv_pool_recalc,
558 .po_shrink = ldlm_srv_pool_shrink,
559 .po_setup = ldlm_srv_pool_setup
562 static struct ldlm_pool_ops ldlm_cli_pool_ops = {
563 .po_recalc = ldlm_cli_pool_recalc,
564 .po_shrink = ldlm_cli_pool_shrink
* Pool recalc wrapper. Will call either the client or server pool recalc
* callback depending on what kind of pool \a pl is.
571 * \retval time in seconds for the next recalc of this pool
573 time64_t ldlm_pool_recalc(struct ldlm_pool *pl, bool force)
575 timeout_t recalc_interval_sec;
578 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
579 if (recalc_interval_sec > 0) {
580 spin_lock(&pl->pl_lock);
581 recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
583 if (recalc_interval_sec > 0) {
585 * Update pool statistics every recalc interval.
587 ldlm_pool_recalc_stats(pl, recalc_interval_sec);
590 * Zero out all rates and speed for the last period.
592 atomic_set(&pl->pl_grant_rate, 0);
593 atomic_set(&pl->pl_cancel_rate, 0);
595 spin_unlock(&pl->pl_lock);
598 if (pl->pl_ops->po_recalc != NULL) {
599 count = pl->pl_ops->po_recalc(pl, force);
600 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
604 return pl->pl_recalc_time + pl->pl_recalc_period;
* Pool shrink wrapper. Will call either the client or server pool shrink
* callback depending on what kind of pool \a pl is.
611 int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
615 if (pl->pl_ops->po_shrink != NULL) {
616 cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
618 lprocfs_counter_add(pl->pl_stats,
619 LDLM_POOL_SHRINK_REQTD_STAT,
621 lprocfs_counter_add(pl->pl_stats,
622 LDLM_POOL_SHRINK_FREED_STAT,
625 "%s: request to shrink %d locks, shrunk %d\n",
626 pl->pl_name, nr, cancel);
* Pool setup wrapper. Will call either the client or server pool setup
* callback depending on what kind of pool \a pl is.
*
* Sets the passed \a limit into the pool \a pl.
638 int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
640 if (pl->pl_ops->po_setup != NULL)
641 return pl->pl_ops->po_setup(pl, limit);
645 static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
647 int granted, grant_rate, cancel_rate, grant_step;
648 int grant_speed, grant_plan, lvf;
649 struct ldlm_pool *pl = m->private;
654 spin_lock(&pl->pl_lock);
655 slv = pl->pl_server_lock_volume;
656 clv = pl->pl_client_lock_volume;
657 limit = ldlm_pool_get_limit(pl);
658 grant_plan = pl->pl_grant_plan;
659 granted = ldlm_pool_granted(pl);
660 period = ktime_get_seconds() - pl->pl_recalc_time;
663 grant_rate = atomic_read(&pl->pl_grant_rate) / period;
664 cancel_rate = atomic_read(&pl->pl_cancel_rate) / period;
665 grant_speed = grant_rate - cancel_rate;
666 lvf = atomic_read(&pl->pl_lock_volume_factor);
667 grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
668 spin_unlock(&pl->pl_lock);
670 seq_printf(m, "LDLM pool state (%s):\n"
674 pl->pl_name, slv, clv, (lvf * 100) >> 8);
676 if (ns_is_server(ldlm_pl2ns(pl))) {
677 seq_printf(m, " GSP: %d%%\n", grant_step);
678 seq_printf(m, " GP: %d\n", grant_plan);
681 seq_printf(m, " GR: %d\n CR: %d\n GS: %d\n G: %d\n L: %d\n",
682 grant_rate, cancel_rate, grant_speed,
687 LDEBUGFS_SEQ_FOPS_RO(lprocfs_pool_state);
689 static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
692 struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
697 spin_lock(&pl->pl_lock);
698 /* serialize with ldlm_pool_recalc */
699 period = ktime_get_seconds() - pl->pl_recalc_time;
702 grant_speed = (atomic_read(&pl->pl_grant_rate) -
703 atomic_read(&pl->pl_cancel_rate)) / period;
704 spin_unlock(&pl->pl_lock);
705 return sprintf(buf, "%d\n", grant_speed);
707 LUSTRE_RO_ATTR(grant_speed);
709 LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
710 LUSTRE_RO_ATTR(grant_plan);
712 LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
713 LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
714 LUSTRE_RW_ATTR(recalc_period);
716 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
717 LUSTRE_RO_ATTR(server_lock_volume);
719 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(client_lock_volume, u64);
720 LUSTRE_RO_ATTR(client_lock_volume);
722 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
723 LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
724 LUSTRE_RW_ATTR(limit);
726 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
727 LUSTRE_RO_ATTR(granted);
729 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
730 LUSTRE_RO_ATTR(cancel_rate);
732 LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
733 LUSTRE_RO_ATTR(grant_rate);
735 static ssize_t lock_volume_factor_show(struct kobject *kobj,
736 struct attribute *attr,
739 struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
742 tmp = (atomic_read(&pl->pl_lock_volume_factor) * 100) >> 8;
743 return sprintf(buf, "%lu\n", tmp);
746 static ssize_t lock_volume_factor_store(struct kobject *kobj,
747 struct attribute *attr,
751 struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
755 rc = kstrtoul(buffer, 10, &tmp);
760 tmp = (tmp << 8) / 100;
761 atomic_set(&pl->pl_lock_volume_factor, tmp);
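* Illustrative note (not an original comment): the LVF is kept as an 8-bit
* fixed-point value. Writing 200 (a factor of 2.00) stores
* (200 << 8) / 100 = 512; reading it back shows (512 * 100) >> 8 = 200.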
766 LUSTRE_RW_ATTR(lock_volume_factor);
768 static ssize_t recalc_time_show(struct kobject *kobj,
769 struct attribute *attr,
772 struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
774 return snprintf(buf, PAGE_SIZE, "%llu\n",
775 ktime_get_seconds() - pl->pl_recalc_time);
777 LUSTRE_RO_ATTR(recalc_time);
779 /* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
780 static struct attribute *ldlm_pl_attrs[] = {
781 &lustre_attr_grant_speed.attr,
782 &lustre_attr_grant_plan.attr,
783 &lustre_attr_recalc_period.attr,
784 &lustre_attr_server_lock_volume.attr,
785 &lustre_attr_client_lock_volume.attr,
786 &lustre_attr_recalc_time.attr,
787 &lustre_attr_limit.attr,
788 &lustre_attr_granted.attr,
789 &lustre_attr_cancel_rate.attr,
790 &lustre_attr_grant_rate.attr,
791 &lustre_attr_lock_volume_factor.attr,
795 KOBJ_ATTRIBUTE_GROUPS(ldlm_pl);
797 static void ldlm_pl_release(struct kobject *kobj)
799 struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
801 complete(&pl->pl_kobj_unregister);
804 static struct kobj_type ldlm_pl_ktype = {
805 .default_groups = KOBJ_ATTR_GROUPS(ldlm_pl),
806 .sysfs_ops = &lustre_sysfs_ops,
807 .release = ldlm_pl_release,
810 static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
812 struct ldlm_namespace *ns = ldlm_pl2ns(pl);
815 init_completion(&pl->pl_kobj_unregister);
816 err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
822 static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
824 struct ldlm_namespace *ns = ldlm_pl2ns(pl);
825 struct dentry *debugfs_ns_parent;
826 struct ldebugfs_vars pool_vars[2];
831 debugfs_ns_parent = ns->ns_debugfs_entry;
832 if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
833 CERROR("%s: debugfs entry is not initialized\n",
835 GOTO(out, rc = -EINVAL);
837 pl->pl_debugfs_entry = debugfs_create_dir("pool", debugfs_ns_parent);
839 memset(pool_vars, 0, sizeof(pool_vars));
841 ldlm_add_var(&pool_vars[0], pl->pl_debugfs_entry, "state", pl,
842 &lprocfs_pool_state_fops);
844 pl->pl_stats = lprocfs_stats_alloc(LDLM_POOL_LAST_STAT -
845 LDLM_POOL_FIRST_STAT, 0);
847 GOTO(out, rc = -ENOMEM);
849 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
850 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
852 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
853 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
855 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
856 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
858 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
859 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKSPS,
861 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
862 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKSPS,
864 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
865 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKSPS,
867 lprocfs_counter_init_units(pl->pl_stats, LDLM_POOL_SLV_STAT,
868 LPROCFS_CNTR_AVGMINMAX, "slv", "lock.secs");
869 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
870 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
872 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
873 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
875 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
876 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
878 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
879 LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_SECS,
881 debugfs_create_file("stats", 0644, pl->pl_debugfs_entry,
882 pl->pl_stats, &ldebugfs_stats_seq_fops);
889 static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
891 kobject_put(&pl->pl_kobj);
892 wait_for_completion(&pl->pl_kobj_unregister);
895 static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
897 if (pl->pl_stats != NULL) {
898 lprocfs_stats_free(&pl->pl_stats);
901 debugfs_remove_recursive(pl->pl_debugfs_entry);
902 pl->pl_debugfs_entry = NULL;
905 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
906 int idx, enum ldlm_side client)
912 spin_lock_init(&pl->pl_lock);
913 atomic_set(&pl->pl_granted, 0);
914 pl->pl_recalc_time = ktime_get_seconds();
915 atomic_set(&pl->pl_lock_volume_factor, 1 << 8);
917 atomic_set(&pl->pl_grant_rate, 0);
918 atomic_set(&pl->pl_cancel_rate, 0);
919 pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
921 snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
922 ldlm_ns_name(ns), idx);
924 if (client == LDLM_NAMESPACE_SERVER) {
925 pl->pl_ops = &ldlm_srv_pool_ops;
926 ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
927 pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
928 pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
930 ldlm_pool_set_limit(pl, 1);
931 pl->pl_server_lock_volume = 0;
932 pl->pl_ops = &ldlm_cli_pool_ops;
933 pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
935 pl->pl_client_lock_volume = 0;
936 rc = ldlm_pool_debugfs_init(pl);
940 rc = ldlm_pool_sysfs_init(pl);
944 CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
949 void ldlm_pool_fini(struct ldlm_pool *pl)
952 ldlm_pool_sysfs_fini(pl);
953 ldlm_pool_debugfs_fini(pl);
* The pool should not be used after this point. We can't free it here, as
* it lives in struct ldlm_namespace, but we are still interested in
* catching any abnormal use.
960 POISON(pl, 0x5a, sizeof(*pl));
* Adds a newly taken ldlm lock \a lock into pool \a pl accounting.
967 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
* FLOCK locks are special in the sense that they are almost never
* cancelled; instead, a special kind of lock is used to drop them.
* Also, there is no LRU for flock locks, so there is no point in
* tracking them.
*
* PLAIN locks are used by config and quota; their quantity is small
* and usually they are not in the LRU.
978 if (lock->l_resource->lr_type == LDLM_FLOCK ||
979 lock->l_resource->lr_type == LDLM_PLAIN)
982 ldlm_reclaim_add(lock);
984 atomic_inc(&pl->pl_granted);
985 atomic_inc(&pl->pl_grant_rate);
986 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
* Do not do pool recalc for the client side, as all locks which may
* potentially be canceled have already been packed into the enqueue/cancel
* RPC. Also, we do not want to run out of stack with too long call paths.
993 if (ns_is_server(ldlm_pl2ns(pl)))
994 ldlm_pool_recalc(pl, false);
998 * Remove ldlm lock \a lock from pool \a pl accounting.
1000 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
* Filter out FLOCK & PLAIN locks. Read the comment in ldlm_pool_add()
* above.
1006 if (lock->l_resource->lr_type == LDLM_FLOCK ||
1007 lock->l_resource->lr_type == LDLM_PLAIN)
1010 ldlm_reclaim_del(lock);
1012 LASSERT(atomic_read(&pl->pl_granted) > 0);
1013 atomic_dec(&pl->pl_granted);
1014 atomic_inc(&pl->pl_cancel_rate);
1016 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
1018 if (ns_is_server(ldlm_pl2ns(pl)))
1019 ldlm_pool_recalc(pl, false);
1023 * Returns current \a pl SLV.
1025 * \pre ->pl_lock is not locked.
1027 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
1031 spin_lock(&pl->pl_lock);
1032 slv = pl->pl_server_lock_volume;
1033 spin_unlock(&pl->pl_lock);
1038 * Sets passed \a slv to \a pl.
1040 * \pre ->pl_lock is not locked.
1042 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
1044 spin_lock(&pl->pl_lock);
1045 pl->pl_server_lock_volume = slv;
1046 spin_unlock(&pl->pl_lock);
1050 * Returns current \a pl CLV.
1052 * \pre ->pl_lock is not locked.
1054 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
1058 spin_lock(&pl->pl_lock);
1059 slv = pl->pl_client_lock_volume;
1060 spin_unlock(&pl->pl_lock);
1065 * Sets passed \a clv to \a pl.
1067 * \pre ->pl_lock is not locked.
1069 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
1071 spin_lock(&pl->pl_lock);
1072 pl->pl_client_lock_volume = clv;
1073 spin_unlock(&pl->pl_lock);
1077 * Returns current \a pl limit.
1079 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
1081 return atomic_read(&pl->pl_limit);
1085 * Sets passed \a limit to \a pl.
1087 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
1089 atomic_set(&pl->pl_limit, limit);
1093 * Returns current LVF from \a pl.
1095 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
1097 return atomic_read(&pl->pl_lock_volume_factor);
* Counts locks from all namespaces (if possible). Returns the number of
* cached locks.
1104 static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
1106 unsigned long total = 0;
1108 struct ldlm_namespace *ns;
1109 struct ldlm_namespace *ns_old = NULL; /* loop detection */
1111 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
1115 * Find out how many resources we may release.
1117 for (nr_ns = ldlm_namespace_nr_read(client);
1118 nr_ns > 0; nr_ns--) {
1119 mutex_lock(ldlm_namespace_lock(client));
1120 if (list_empty(ldlm_namespace_list(client))) {
1121 mutex_unlock(ldlm_namespace_lock(client));
1124 ns = ldlm_namespace_first_locked(client);
1127 mutex_unlock(ldlm_namespace_lock(client));
1131 if (ldlm_ns_empty(ns)) {
1132 ldlm_namespace_move_to_inactive_locked(ns, client);
1133 mutex_unlock(ldlm_namespace_lock(client));
1140 ldlm_namespace_get(ns);
1141 ldlm_namespace_move_to_active_locked(ns, client);
1142 mutex_unlock(ldlm_namespace_lock(client));
1143 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
1144 ldlm_namespace_put(ns);
1150 static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
1153 unsigned long freed = 0;
1155 struct ldlm_namespace *ns;
1157 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
1161 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
1163 for (tmp = nr_ns = ldlm_namespace_nr_read(client);
1165 int cancel, nr_locks;
1168 * Do not call shrink under ldlm_namespace_lock(client)
1170 mutex_lock(ldlm_namespace_lock(client));
1171 if (list_empty(ldlm_namespace_list(client))) {
1172 mutex_unlock(ldlm_namespace_lock(client));
1175 ns = ldlm_namespace_first_locked(client);
1176 ldlm_namespace_get(ns);
1177 ldlm_namespace_move_to_active_locked(ns, client);
1178 mutex_unlock(ldlm_namespace_lock(client));
1180 nr_locks = ldlm_pool_granted(&ns->ns_pool);
* We used to shrink proportionally, but with the new shrinker API
* we lost the total number of freeable locks.
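*
* Illustrative numbers (not from the original comments): with nr = 128
* requested across nr_ns = 4 namespaces, each namespace is asked to cancel
* 1 + min(nr_locks, 128 / 4) locks, i.e. at most 33.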
1185 cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
1186 freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
1187 ldlm_namespace_put(ns);
* We only decrease the SLV in the server pools shrinker; return
* SHRINK_STOP to the kernel to avoid a needless loop. LU-1128.
1193 return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
1196 #ifdef HAVE_SHRINKER_COUNT
1197 static unsigned long ldlm_pools_srv_count(struct shrinker *s,
1198 struct shrink_control *sc)
1200 return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
1203 static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
1204 struct shrink_control *sc)
1206 return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
1210 static unsigned long ldlm_pools_cli_count(struct shrinker *s,
1211 struct shrink_control *sc)
1213 return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
1216 static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
1217 struct shrink_control *sc)
1219 return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
1223 static struct shrinker ldlm_pools_srv_shrinker = {
1224 .count_objects = ldlm_pools_srv_count,
1225 .scan_objects = ldlm_pools_srv_scan,
1226 .seeks = DEFAULT_SEEKS,
1229 static struct shrinker ldlm_pools_cli_shrinker = {
1230 .count_objects = ldlm_pools_cli_count,
1231 .scan_objects = ldlm_pools_cli_scan,
1232 .seeks = DEFAULT_SEEKS,
* Cancel \a nr locks from all namespaces (if possible). Returns the number
* of cached locks after the shrink is finished. All namespaces are asked to
* cancel an approximately equal number of locks to keep things balanced.
1240 static int ldlm_pools_shrink(enum ldlm_side client, int nr, gfp_t gfp_mask)
1242 unsigned long total = 0;
1244 if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
1245 !(gfp_mask & __GFP_FS))
1248 total = ldlm_pools_count(client, gfp_mask);
1250 if (nr == 0 || total == 0)
1253 return ldlm_pools_scan(client, nr, gfp_mask);
1256 static int ldlm_pools_srv_shrink(struct shrinker *shrinker,
1257 struct shrink_control *sc)
1259 return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
1260 sc->nr_to_scan, sc->gfp_mask);
1263 static int ldlm_pools_cli_shrink(struct shrinker *shrinker,
1264 struct shrink_control *sc)
1266 return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
1267 sc->nr_to_scan, sc->gfp_mask);
1270 static struct shrinker ldlm_pools_srv_shrinker = {
1271 .shrink = ldlm_pools_srv_shrink,
1272 .seeks = DEFAULT_SEEKS,
1275 static struct shrinker ldlm_pools_cli_shrinker = {
1276 .shrink = ldlm_pools_cli_shrink,
1277 .seeks = DEFAULT_SEEKS,
1279 #endif /* HAVE_SHRINKER_COUNT */
1281 static time64_t ldlm_pools_recalc_delay(enum ldlm_side side)
1283 struct ldlm_namespace *ns;
1284 struct ldlm_namespace *ns_old = NULL;
1285 /* seconds of sleep if no active namespaces */
1286 time64_t delay = ktime_get_seconds() +
1287 (side == LDLM_NAMESPACE_SERVER ?
1288 LDLM_POOL_SRV_DEF_RECALC_PERIOD :
1289 LDLM_POOL_CLI_DEF_RECALC_PERIOD);
1292 /* Recalc at least ldlm_namespace_nr(side) namespaces. */
1293 for (nr = ldlm_namespace_nr_read(side); nr > 0; nr--) {
* Lock the list, get the first @ns in the list, take a reference, move it
* to the tail, unlock and call pool recalc. This way we avoid calling
* recalc under the @ns lock, which is really good as we get rid of a
* potential deadlock on side nodes when canceling locks synchronously.
1302 mutex_lock(ldlm_namespace_lock(side));
1303 if (list_empty(ldlm_namespace_list(side))) {
1304 mutex_unlock(ldlm_namespace_lock(side));
1307 ns = ldlm_namespace_first_locked(side);
1309 if (ns_old == ns) { /* Full pass complete */
1310 mutex_unlock(ldlm_namespace_lock(side));
/* We got an empty namespace, so we need to move it back to the inactive
* list.
* The race with parallel resource creation is fine:
* - If they do namespace_get before our check, we fail the check and they
* move this item to the end of the list anyway;
* - If we do the check and then they do namespace_get, then we move the
* namespace to inactive and they will move it back to active
* (synchronised by the lock, so no clash there).
1324 if (ldlm_ns_empty(ns)) {
1325 ldlm_namespace_move_to_inactive_locked(ns, side);
1326 mutex_unlock(ldlm_namespace_lock(side));
1333 spin_lock(&ns->ns_lock);
* Skip an ns which is being freed; we don't want to increase its
* refcount again, not even temporarily. bz21519 & LU-499.
1338 if (ns->ns_stopping) {
1342 ldlm_namespace_get(ns);
1344 spin_unlock(&ns->ns_lock);
1346 ldlm_namespace_move_to_active_locked(ns, side);
1347 mutex_unlock(ldlm_namespace_lock(side));
1350 * After setup is done - recalc the pool.
1354 ldlm_pool_recalc(&ns->ns_pool, false));
1355 ldlm_namespace_put(ns);
1362 static void ldlm_pools_recalc_task(struct work_struct *ws);
1363 static DECLARE_DELAYED_WORK(ldlm_pools_recalc_work, ldlm_pools_recalc_task);
1365 static void ldlm_pools_recalc_task(struct work_struct *ws)
1367 /* seconds of sleep if no active namespaces */
1369 #ifdef HAVE_SERVER_SUPPORT
1370 struct ldlm_namespace *ns;
1371 unsigned long nr_l = 0, nr_p = 0, l;
1374 /* Check all modest namespaces first. */
1375 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
1376 list_for_each_entry(ns, ldlm_namespace_list(LDLM_NAMESPACE_SERVER),
1378 if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
1381 l = ldlm_pool_granted(&ns->ns_pool);
* Set the modest pools' limit equal to their average number of granted
* locks plus a small margin (l >> LDLM_POOLS_MODEST_MARGIN_SHIFT).
1389 l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
1390 ldlm_pool_setup(&ns->ns_pool, l);
* Make sure that the modest namespaces did not eat more than 2/3
* of the locks limit (LDLM_POOL_HOST_L).
1399 if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
1400 CWARN("'Modest' pools eat out 2/3 of server locks limit (%lu of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
1401 nr_l, LDLM_POOL_HOST_L);
1405 /* The rest is given to greedy namespaces. */
1406 list_for_each_entry(ns, ldlm_namespace_list(LDLM_NAMESPACE_SERVER),
1408 if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
* In the case where 2/3 of the locks are eaten by the modest pools,
* we re-setup an equal limit for all pools.
1417 l = LDLM_POOL_HOST_L /
1418 ldlm_namespace_nr_read(LDLM_NAMESPACE_SERVER);
* All the remaining greedy pools will share
* the rest of the locks in equal parts.
1424 l = (LDLM_POOL_HOST_L - nr_l) /
1425 (ldlm_namespace_nr_read(LDLM_NAMESPACE_SERVER) -
1428 ldlm_pool_setup(&ns->ns_pool, l);
1430 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
1432 delay = min(ldlm_pools_recalc_delay(LDLM_NAMESPACE_SERVER),
1433 ldlm_pools_recalc_delay(LDLM_NAMESPACE_CLIENT));
1434 #else /* !HAVE_SERVER_SUPPORT */
1435 delay = ldlm_pools_recalc_delay(LDLM_NAMESPACE_CLIENT);
1436 #endif /* HAVE_SERVER_SUPPORT */
1438 /* Wake up the blocking threads from time to time. */
1439 ldlm_bl_thread_wakeup();
1441 delay -= ktime_get_seconds();
1443 /* Prevent too frequent recalculation. */
1444 CDEBUG(D_DLMTRACE, "Negative interval(%lld)\n", delay);
1448 schedule_delayed_work(&ldlm_pools_recalc_work, cfs_time_seconds(delay));
1451 static bool ldlm_pools_init_done;
1453 int ldlm_pools_init(void)
1458 #ifdef HAVE_SERVER_SUPPORT
1459 delay = min(LDLM_POOL_SRV_DEF_RECALC_PERIOD,
1460 LDLM_POOL_CLI_DEF_RECALC_PERIOD);
1462 delay = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
1465 rc = register_shrinker(&ldlm_pools_srv_shrinker);
1469 rc = register_shrinker(&ldlm_pools_cli_shrinker);
1473 schedule_delayed_work(&ldlm_pools_recalc_work, delay);
1474 ldlm_pools_init_done = true;
1478 unregister_shrinker(&ldlm_pools_cli_shrinker);
1483 void ldlm_pools_fini(void)
1485 if (ldlm_pools_init_done) {
1486 unregister_shrinker(&ldlm_pools_srv_shrinker);
1487 unregister_shrinker(&ldlm_pools_cli_shrinker);
1489 cancel_delayed_work_sync(&ldlm_pools_recalc_work);
1492 ldlm_pools_init_done = false;
1495 #else /* !HAVE_LRU_RESIZE_SUPPORT */
1496 int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
1501 time64_t ldlm_pool_recalc(struct ldlm_pool *pl, bool force)
1506 int ldlm_pool_shrink(struct ldlm_pool *pl,
1507 int nr, gfp_t gfp_mask)
1512 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
1513 int idx, enum ldlm_side client)
1518 void ldlm_pool_fini(struct ldlm_pool *pl)
1523 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
1528 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
1533 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
1538 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
1543 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
1548 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
1553 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
1558 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
1563 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
1568 int ldlm_pools_init(void)
1573 void ldlm_pools_fini(void)
1578 #endif /* HAVE_LRU_RESIZE_SUPPORT */