/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */
/*
 * The idea of this code is rather simple. Every second, for each server
 * namespace we compute the SLV (server lock volume) from the current number
 * of granted locks, the grant speed over the past period, and so on; that
 * is, from the locking load. For simplicity, this SLV number may be thought
 * of as a flow definition. It is sent to clients at every opportunity to
 * let them know the current load situation on the server. By default, the
 * initial SLV on the server is set to the maximal value, calculated to
 * allow one client to hold all ->pl_limit locks for 10h.
 *
 * On clients, the number of cached locks is no longer limited artificially
 * in any way, as it was before. Instead, the client calculates a CLV
 * (client lock volume) for each lock and compares it with the last SLV
 * received from the server. The CLV is calculated as the number of locks
 * in the LRU times the lock's live time in seconds. If CLV > SLV, the lock
 * is canceled.
 *
 * The client also has an LVF (lock volume factor), which regulates how
 * sensitive the client should be to the last SLV from the server: the
 * higher the LVF, the more locks are canceled on the client. Its default
 * value is 1. Setting LVF to 2 makes the client cancel locks twice as fast.
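 *
 * For illustration (the numbers are hypothetical): a client holding 1000
 * locks in its LRU sees a lock that has lived 60 seconds, so that lock's
 * CLV = 1000 * 60 = 60000. If the last SLV received from the server is
 * 50000, the lock is canceled; with LVF = 2 the effective volume doubles
 * to 120000, so the lock would be canceled even against an SLV of up to
 * 119999.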
 *
 * Locks on a client are canceled more aggressively in these cases:
 * (1) if SLV is smaller, that is, the load is higher on the server;
 * (2) the client holds a lot of locks (the more locks it holds, the bigger
 *     the chance that some of them should be canceled);
 * (3) the client has old locks (taken some time ago).
 *
 * Thus, in terms of the flow paradigm that we use to better understand the
 * SLV, the CLV is the volume of a particle in the flow described by the
 * SLV. If the flow gets thinner, more and more particles fall outside of
 * it, and as the particles are locks, they should be canceled.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed several nice ideas, such
 * as using the LVF, and many cleanups. The flow definition, which makes the
 * logic easier to understand, belongs to Nikita Danilov
 * (nikita@clusterfs.com), as do many cleanups and fixes. The design and
 * implementation are by Yury Umanets (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As can be seen from the list above, there are several tunables that can
 * noticeably affect behavior. They can all be modified via sysfs, and they
 * also make it possible to construct several pre-defined behavior policies.
 * If none of the predefined policies suits the workload pattern in use, a
 * new one may be "constructed" via the sysfs tunables.
 */
#define DEBUG_SUBSYSTEM S_LDLM

#include <linux/workqueue.h>
#include <libcfs/linux/linux-mem.h>
#include <lustre_dlm.h>
#include <cl_object.h>
#include <obd_class.h>
#include <obd_support.h>
#include "ldlm_internal.h"
#ifdef HAVE_LRU_RESIZE_SUPPORT

/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)
/*
 * Shift-based divide: returns val / 2^shift, rounding up when round_up is
 * set ("divide, round up").
 */
static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
	return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
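/*
 * For illustration: dru(1001, 3, 0) = 1001 >> 3 = 125, while
 * dru(1001, 3, 1) = (1001 + 7) >> 3 = 126.
 */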
static inline __u64 ldlm_pool_slv_max(__u32 L)
{
	/*
	 * Allow one client to hold all locks for 10 hrs.
	 * The formula is: limit * 10h / 1 client.
	 */
	__u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;

	return lim;
}
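/*
 * For example, with a limit L of 100000 locks the maximal SLV is
 * 100000 * 36000 = 3.6e9 lock-seconds (the stats file reports SLV in
 * "lock.secs"), i.e. enough volume for one client to keep every lock for
 * 10 hours.
 */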
static inline __u64 ldlm_pool_slv_min(__u32 L)
{
	return 1;
}
enum {
	LDLM_POOL_FIRST_STAT = 0,
	LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
	LDLM_POOL_GRANT_STAT,
	LDLM_POOL_CANCEL_STAT,
	LDLM_POOL_GRANT_RATE_STAT,
	LDLM_POOL_CANCEL_RATE_STAT,
	LDLM_POOL_GRANT_PLAN_STAT,
	LDLM_POOL_SLV_STAT,
	LDLM_POOL_SHRINK_REQTD_STAT,
	LDLM_POOL_SHRINK_FREED_STAT,
	LDLM_POOL_RECALC_STAT,
	LDLM_POOL_TIMING_STAT,
	LDLM_POOL_LAST_STAT
};
static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
	return container_of(pl, struct ldlm_namespace, ns_pool);
}
/**
 * Calculates the suggested grant_step in % of available locks for the
 * passed period \a t. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
	/*
	 * This yields a 1% grant step for any period shorter than
	 * 2^LDLM_POOL_GSP_STEP_SHIFT seconds, growing up to 30% for longer
	 * periods.
	 *
	 * How this affects execution:
	 *
	 * - for a thread period of 1s the grant_step is 1%, which is good
	 *   from the POV of taking some load off the server and pushing it
	 *   out to clients. A 1% grant_step means that the server will not
	 *   allow clients to obtain lots of locks in a short period of time
	 *   while keeping all their old locks cached. Clients will always
	 *   have to give some locks back if they want to take new ones;
	 *
	 * - for a thread period of 10s (the default) the grant_step is 23%,
	 *   which means clients have enough room to take some new locks
	 *   without giving any back. All locks from this 23% that were not
	 *   taken by clients in the current period contribute to SLV growth,
	 *   and SLV growth means more locks cached on clients until the
	 *   limit or grant plan is reached.
	 */
	return LDLM_POOL_MAX_GSP -
	       ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
		(t >> LDLM_POOL_GSP_STEP_SHIFT));
}
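/*
 * Worked example with the defaults above (MAX_GSP = 30, MIN_GSP = 1,
 * STEP_SHIFT = 2):
 *
 *   t = 1s  -> 30 - (29 >> 0) = 1%
 *   t = 10s -> 30 - (29 >> 2) = 23%  (the default period)
 *   t = 20s -> 30 - (29 >> 5) = 30%
 */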
static inline int ldlm_pool_granted(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_granted);
}
/**
 * Recalculates the next grant limit on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
	int granted, grant_step, limit;

	limit = ldlm_pool_get_limit(pl);
	granted = ldlm_pool_granted(pl);

	grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
	grant_step = ((limit - granted) * grant_step) / 100;
	pl->pl_grant_plan = granted + grant_step;

	/* Cap the grant plan at 125% of the limit. */
	limit = (limit * 5) >> 2;
	if (pl->pl_grant_plan > limit)
		pl->pl_grant_plan = limit;
}
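/*
 * Example with hypothetical numbers: limit = 1000, granted = 800 and a 10s
 * recalc period give grant_step = 23% of (1000 - 800) = 46, so
 * pl_grant_plan = 846, well under the cap of (1000 * 5) >> 2 = 1250.
 */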
/**
 * Recalculates the next SLV on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
	int granted;
	int grant_plan;
	int round_up;
	__u64 slv;
	__u64 slv_factor;
	__u64 grant_usage;
	__u32 limit;

	slv = pl->pl_server_lock_volume;
	grant_plan = pl->pl_grant_plan;
	limit = ldlm_pool_get_limit(pl);
	granted = ldlm_pool_granted(pl);
	round_up = granted < limit;

	grant_usage = max_t(int, limit - (granted - grant_plan), 1);

	/*
	 * Find out the SLV change factor, which is the ratio of grant usage
	 * to the limit. SLV changes as fast as the ratio of grant plan
	 * consumption: the more locks from the grant plan are left
	 * unconsumed by clients in the last interval (idle time), the
	 * faster SLV grows. Conversely, the more the grant plan is
	 * over-consumed (load time), the faster SLV drops.
	 */
	slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
	do_div(slv_factor, limit);
	slv = slv * slv_factor;
	slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);

	if (slv > ldlm_pool_slv_max(limit))
		slv = ldlm_pool_slv_max(limit);
	else if (slv < ldlm_pool_slv_min(limit))
		slv = ldlm_pool_slv_min(limit);

	pl->pl_server_lock_volume = slv;
}
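/*
 * Example with hypothetical numbers: limit = 1000, granted = 900 and
 * grant_plan = 950 give grant_usage = 1000 - (900 - 950) = 1050, so
 * slv_factor = (1050 << 10) / 1000 = 1075 and SLV is multiplied by
 * 1075/1024, i.e. it grows by ~5% because the grant plan was not fully
 * consumed.
 */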
/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl, timeout_t period)
{
	int grant_plan = pl->pl_grant_plan;
	__u64 slv = pl->pl_server_lock_volume;
	int granted = ldlm_pool_granted(pl);
	int grant_rate = atomic_read(&pl->pl_grant_rate) / period;
	int cancel_rate = atomic_read(&pl->pl_cancel_rate) / period;

	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
			    slv);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			    granted);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			    grant_rate);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			    grant_plan);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			    cancel_rate);
}
/**
 * Sets the current SLV into the obd accessible via ldlm_pl2ns(pl)->ns_obd.
 */
static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Set the new SLV in an obd field so that it can be used later
	 * without accessing the pool. This is required to avoid a race
	 * between sending a reply to a client with the new SLV and the
	 * cleanup of the server stack, during which we cannot guarantee
	 * that the namespace is still alive. We only know that the obd is
	 * alive as long as a valid export is alive.
	 */
	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL);
	write_lock(&obd->obd_pool_lock);
	obd->obd_pool_slv = pl->pl_server_lock_volume;
	write_unlock(&obd->obd_pool_lock);
}
/**
 * Recalculates all pool fields on passed \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl, bool force)
{
	timeout_t recalc_interval_sec;

	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
	if (!force && recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
	if (!force && recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}

	/*
	 * Recalc SLV after the last period. This should be done
	 * _before_ recalculating the new grant plan.
	 */
	ldlm_pool_recalc_slv(pl);

	/*
	 * Make sure that the pool informed obd of the last SLV changes.
	 */
	ldlm_srv_pool_push_slv(pl);

	/*
	 * Update grant_plan for the new period.
	 */
	ldlm_pool_recalc_grant_plan(pl);

	pl->pl_recalc_time = ktime_get_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);
	return 0;
}
/**
 * This function is used on the server side as the main entry point for
 * memory pressure handling. It decreases SLV on \a pl according to the
 * passed \a nr and \a gfp_mask.
 *
 * Our goal here is to decrease SLV in such a way that clients hold \a nr
 * fewer locks over the next 10h.
 */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	__u32 limit;

	/*
	 * VM is asking how many entries may be potentially freed.
	 */
	if (nr == 0)
		return ldlm_pool_granted(pl);

	/*
	 * The client already canceled locks but the server is already in
	 * the shrinker and can't cancel anything. Let's catch this race.
	 */
	if (ldlm_pool_granted(pl) == 0)
		return 0;

	spin_lock(&pl->pl_lock);

	/*
	 * We want the shrinker to cause cancellation of @nr locks on
	 * clients, or to grant approximately @nr fewer locks in the next
	 * intervals.
	 *
	 * This is why we decrease SLV by @nr. The effect lasts only for one
	 * recalc interval (1s these days), which should be enough to pass
	 * the decreased SLV to all clients. On the next recalc interval the
	 * pool will either increase SLV if the locking load is not high, or
	 * keep it at the same level, or even decrease it again. Thus the
	 * shrinker-decreased SLV propagates into subsequent recalc
	 * intervals and lowers the locking load.
	 */
	if (nr < pl->pl_server_lock_volume) {
		pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
	} else {
		limit = ldlm_pool_get_limit(pl);
		pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
	}

	/*
	 * Make sure that the pool informed obd of the last SLV changes.
	 */
	ldlm_srv_pool_push_slv(pl);
	spin_unlock(&pl->pl_lock);

	/*
	 * We have not really freed any memory here so far; it may only be
	 * freed later. Return 0 so as not to confuse the VM.
	 */
	return 0;
}
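/*
 * For illustration: a shrink request of nr = 512 lowers SLV by 512 for one
 * recalc interval; if nr reaches or exceeds the current SLV, the SLV is
 * clamped to ldlm_pool_slv_min(limit) rather than dropping to zero.
 */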
/**
 * Sets up the server side pool \a pl with the passed \a limit.
 */
static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
{
	struct obd_device *obd;

	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL && obd != LP_POISON);
	LASSERT(obd->obd_type != LP_POISON);
	write_lock(&obd->obd_pool_lock);
	obd->obd_pool_limit = limit;
	write_unlock(&obd->obd_pool_lock);

	ldlm_pool_set_limit(pl, limit);
	return 0;
}
/**
 * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd into passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Get the new SLV and Limit from obd, which is updated by incoming
	 * RPCs.
	 */
	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL);
	read_lock(&obd->obd_pool_lock);
	pl->pl_server_lock_volume = obd->obd_pool_slv;
	ldlm_pool_set_limit(pl, obd->obd_pool_limit);
	read_unlock(&obd->obd_pool_lock);
}
/**
 * Recalculates the client side pool \a pl according to the current SLV and
 * Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl, bool force)
{
	timeout_t recalc_interval_sec;
	int ret;

	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
	if (!force && recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	/*
	 * Check if we need to recalc lists now.
	 */
	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
	if (!force && recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}

	/*
	 * Make sure that the pool knows the last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);
	spin_unlock(&pl->pl_lock);

	/*
	 * While canceling locks on the client we do not need to maintain
	 * sharp timing; we only want to cancel locks as soon as possible
	 * according to the new SLV. This may be called when the SLV has
	 * changed a lot, which is why we do not take pl->pl_recalc_time
	 * into account here.
	 */
	ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, 0);

	spin_lock(&pl->pl_lock);
	/*
	 * The LRU resizing might take longer than the period, so update the
	 * recalc time after the LRU resizing rather than before it.
	 */
	pl->pl_recalc_time = ktime_get_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);
	return ret;
}
/**
 * This function is the main entry point for memory pressure handling on
 * the client side. Its main goal is to cancel some number of locks on the
 * passed \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	struct ldlm_namespace *ns;
	int unused;

	ns = ldlm_pl2ns(pl);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(ns))
		return 0;

	/*
	 * Make sure that the pool knows the last SLV and Limit from obd.
	 */
	spin_lock(&pl->pl_lock);
	ldlm_cli_pool_pop_slv(pl);
	spin_unlock(&pl->pl_lock);

	spin_lock(&ns->ns_lock);
	unused = ns->ns_nr_unused;
	spin_unlock(&ns->ns_lock);

	if (nr == 0)
		return (unused / 100) * sysctl_vfs_cache_pressure;

	return ldlm_cancel_lru(ns, nr, LCF_ASYNC, 0);
}
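/*
 * Example: when queried (nr == 0) with 20000 unused locks and the default
 * sysctl_vfs_cache_pressure of 100, the shrinker reports
 * (20000 / 100) * 100 = 20000 freeable entries; a pressure of 200 would
 * double that, making the LRU look twice as attractive to reclaim.
 */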
static struct ldlm_pool_ops ldlm_srv_pool_ops = {
	.po_recalc = ldlm_srv_pool_recalc,
	.po_shrink = ldlm_srv_pool_shrink,
	.po_setup  = ldlm_srv_pool_setup
};

static struct ldlm_pool_ops ldlm_cli_pool_ops = {
	.po_recalc = ldlm_cli_pool_recalc,
	.po_shrink = ldlm_cli_pool_shrink
};
/**
 * Pool recalc wrapper. Will call either the client or the server pool
 * recalc callback depending on what kind of pool \a pl is.
 *
 * \retval time in seconds for the next recalc of this pool
 */
time64_t ldlm_pool_recalc(struct ldlm_pool *pl, bool force)
{
	timeout_t recalc_interval_sec;
	int count;

	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec > 0) {
		spin_lock(&pl->pl_lock);
		recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;

		if (recalc_interval_sec > 0) {
			/*
			 * Update pool statistics every recalc interval.
			 */
			ldlm_pool_recalc_stats(pl, recalc_interval_sec);

			/*
			 * Zero out all rates and speed for the last period.
			 */
			atomic_set(&pl->pl_grant_rate, 0);
			atomic_set(&pl->pl_cancel_rate, 0);
		}
		spin_unlock(&pl->pl_lock);
	}

	if (pl->pl_ops->po_recalc != NULL) {
		count = pl->pl_ops->po_recalc(pl, force);
		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
				    count);
	}

	return pl->pl_recalc_time + pl->pl_recalc_period;
}
/**
 * Pool shrink wrapper. Will call either the client or the server pool
 * shrink callback depending on what kind of pool \a pl is.
 */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
	int cancel = 0;

	if (pl->pl_ops->po_shrink != NULL) {
		cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
		if (nr > 0) {
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_REQTD_STAT,
					    nr);
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_FREED_STAT,
					    cancel);
			CDEBUG(D_DLMTRACE,
			       "%s: request to shrink %d locks, shrunk %d\n",
			       pl->pl_name, nr, cancel);
		}
	}
	return cancel;
}
/**
 * Pool setup wrapper. Will call either the client or the server pool setup
 * callback depending on what kind of pool \a pl is.
 *
 * Sets the passed \a limit into the pool \a pl.
 */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
	if (pl->pl_ops->po_setup != NULL)
		return pl->pl_ops->po_setup(pl, limit);
	return 0;
}
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
	int granted, grant_rate, cancel_rate, grant_step;
	int grant_speed, grant_plan, lvf;
	struct ldlm_pool *pl = m->private;
	timeout_t period;
	__u64 slv, clv;
	__u32 limit;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	clv = pl->pl_client_lock_volume;
	limit = ldlm_pool_get_limit(pl);
	grant_plan = pl->pl_grant_plan;
	granted = ldlm_pool_granted(pl);
	period = ktime_get_seconds() - pl->pl_recalc_time;
	if (period <= 0)
		period = 1;
	grant_rate = atomic_read(&pl->pl_grant_rate) / period;
	cancel_rate = atomic_read(&pl->pl_cancel_rate) / period;
	grant_speed = grant_rate - cancel_rate;
	lvf = atomic_read(&pl->pl_lock_volume_factor);
	grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
	spin_unlock(&pl->pl_lock);

	seq_printf(m, "LDLM pool state (%s):\n"
		      "  SLV: %llu\n"
		      "  CLV: %llu\n"
		      "  LVF: %d\n",
		   pl->pl_name, slv, clv, (lvf * 100) >> 8);

	if (ns_is_server(ldlm_pl2ns(pl))) {
		seq_printf(m, "  GSP: %d%%\n", grant_step);
		seq_printf(m, "  GP:  %d\n", grant_plan);
	}

	seq_printf(m, "  GR:  %d\n  CR:  %d\n  GS:  %d\n  G:   %d\n  L:   %d\n",
		   grant_rate, cancel_rate, grant_speed,
		   granted, limit);
	return 0;
}

LDEBUGFS_SEQ_FOPS_RO(lprocfs_pool_state);
static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	int grant_speed;
	timeout_t period;

	spin_lock(&pl->pl_lock);
	/* serialize with ldlm_pool_recalc */
	period = ktime_get_seconds() - pl->pl_recalc_time;
	if (period <= 0)
		period = 1;
	grant_speed = (atomic_read(&pl->pl_grant_rate) -
		       atomic_read(&pl->pl_cancel_rate)) / period;
	spin_unlock(&pl->pl_lock);
	return sprintf(buf, "%d\n", grant_speed);
}
LUSTRE_RO_ATTR(grant_speed);
LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
LUSTRE_RO_ATTR(grant_plan);

LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
LUSTRE_RW_ATTR(recalc_period);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
LUSTRE_RO_ATTR(server_lock_volume);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(client_lock_volume, u64);
LUSTRE_RO_ATTR(client_lock_volume);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
LUSTRE_RW_ATTR(limit);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
LUSTRE_RO_ATTR(granted);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
LUSTRE_RO_ATTR(cancel_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
LUSTRE_RO_ATTR(grant_rate);
/*
 * pl_lock_volume_factor is a fixed-point value with an 8-bit fractional
 * part (1.0 is stored as 256); the sysfs file exposes it as a percentage.
 */
static ssize_t lock_volume_factor_show(struct kobject *kobj,
				       struct attribute *attr,
				       char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
	unsigned long tmp;

	tmp = (atomic_read(&pl->pl_lock_volume_factor) * 100) >> 8;
	return sprintf(buf, "%lu\n", tmp);
}

static ssize_t lock_volume_factor_store(struct kobject *kobj,
					struct attribute *attr,
					const char *buffer,
					size_t count)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
	unsigned long tmp;
	int rc;

	rc = kstrtoul(buffer, 10, &tmp);
	if (rc < 0)
		return rc;

	tmp = (tmp << 8) / 100;
	atomic_set(&pl->pl_lock_volume_factor, tmp);

	return count;
}
LUSTRE_RW_ATTR(lock_volume_factor);
static ssize_t recalc_time_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			ktime_get_seconds() - pl->pl_recalc_time);
}
LUSTRE_RO_ATTR(recalc_time);
/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
static struct attribute *ldlm_pl_attrs[] = {
	&lustre_attr_grant_speed.attr,
	&lustre_attr_grant_plan.attr,
	&lustre_attr_recalc_period.attr,
	&lustre_attr_server_lock_volume.attr,
	&lustre_attr_client_lock_volume.attr,
	&lustre_attr_recalc_time.attr,
	&lustre_attr_limit.attr,
	&lustre_attr_granted.attr,
	&lustre_attr_cancel_rate.attr,
	&lustre_attr_grant_rate.attr,
	&lustre_attr_lock_volume_factor.attr,
	NULL,
};

KOBJ_ATTRIBUTE_GROUPS(ldlm_pl);
static void ldlm_pl_release(struct kobject *kobj)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	complete(&pl->pl_kobj_unregister);
}

static struct kobj_type ldlm_pl_ktype = {
	.default_groups	= KOBJ_ATTR_GROUPS(ldlm_pl),
	.sysfs_ops	= &lustre_sysfs_ops,
	.release	= ldlm_pl_release,
};
static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = ldlm_pl2ns(pl);
	int err;

	init_completion(&pl->pl_kobj_unregister);
	err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
				   "pool");

	return err;
}
static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = ldlm_pl2ns(pl);
	struct dentry *debugfs_ns_parent;
	struct ldebugfs_vars pool_vars[2];
	int rc = 0;

	debugfs_ns_parent = ns->ns_debugfs_entry;
	if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
		CERROR("%s: debugfs entry is not initialized\n",
		       ldlm_ns_name(ns));
		GOTO(out, rc = -EINVAL);
	}
	pl->pl_debugfs_entry = debugfs_create_dir("pool", debugfs_ns_parent);

	memset(pool_vars, 0, sizeof(pool_vars));

	ldlm_add_var(&pool_vars[0], pl->pl_debugfs_entry, "state", pl,
		     &lprocfs_pool_state_fops);

	pl->pl_stats = lprocfs_stats_alloc(LDLM_POOL_LAST_STAT -
					   LDLM_POOL_FIRST_STAT, 0);
	if (pl->pl_stats == NULL)
		GOTO(out, rc = -ENOMEM);

	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
			     "granted");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
			     "grant");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
			     "cancel");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKSPS,
			     "grant_rate");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKSPS,
			     "cancel_rate");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKSPS,
			     "grant_plan");
	lprocfs_counter_init_units(pl->pl_stats, LDLM_POOL_SLV_STAT,
				   LPROCFS_CNTR_AVGMINMAX, "slv", "lock.secs");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
			     "shrink_request");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
			     "shrink_freed");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_LOCKS,
			     "recalc_freed");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_SECS,
			     "recalc_timing");
	debugfs_create_file("stats", 0644, pl->pl_debugfs_entry,
			    pl->pl_stats, &ldebugfs_stats_seq_fops);

out:
	return rc;
}
static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
{
	kobject_put(&pl->pl_kobj);
	wait_for_completion(&pl->pl_kobj_unregister);
}
static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
{
	if (pl->pl_stats != NULL) {
		lprocfs_stats_free(&pl->pl_stats);
		pl->pl_stats = NULL;
	}
	debugfs_remove_recursive(pl->pl_debugfs_entry);
	pl->pl_debugfs_entry = NULL;
}
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
		   int idx, enum ldlm_side client)
{
	int rc;

	spin_lock_init(&pl->pl_lock);
	atomic_set(&pl->pl_granted, 0);
	pl->pl_recalc_time = ktime_get_seconds();
	atomic_set(&pl->pl_lock_volume_factor, 1 << 8);

	atomic_set(&pl->pl_grant_rate, 0);
	atomic_set(&pl->pl_cancel_rate, 0);
	pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

	snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
		 ldlm_ns_name(ns), idx);

	if (client == LDLM_NAMESPACE_SERVER) {
		pl->pl_ops = &ldlm_srv_pool_ops;
		ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
		pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
		pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
	} else {
		ldlm_pool_set_limit(pl, 1);
		pl->pl_server_lock_volume = 0;
		pl->pl_ops = &ldlm_cli_pool_ops;
		pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
	}
	pl->pl_client_lock_volume = 0;

	rc = ldlm_pool_debugfs_init(pl);
	if (rc)
		return rc;

	rc = ldlm_pool_sysfs_init(pl);
	if (rc)
		return rc;

	CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

	return 0;
}
void ldlm_pool_fini(struct ldlm_pool *pl)
{
	ldlm_pool_sysfs_fini(pl);
	ldlm_pool_debugfs_fini(pl);

	/*
	 * The pool should not be used after this point. We can't free it
	 * here, as it lives in struct ldlm_namespace, but we are still
	 * interested in catching any abnormal use.
	 */
	POISON(pl, 0x5a, sizeof(*pl));
}
/**
 * Add the newly taken ldlm lock \a lock into the accounting of pool \a pl.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * FLOCK locks are special in the sense that they are almost never
	 * cancelled; instead a special kind of lock is used to drop them.
	 * Also, there is no LRU for flock locks, so there is no point in
	 * tracking them anyway.
	 *
	 * PLAIN locks are used by config and quota; their quantity is small
	 * and usually they are not in the LRU.
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK ||
	    lock->l_resource->lr_type == LDLM_PLAIN)
		return;

	ldlm_reclaim_add(lock);

	atomic_inc(&pl->pl_granted);
	atomic_inc(&pl->pl_grant_rate);
	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);

	/*
	 * Do not do pool recalc for the client side, as all locks that may
	 * potentially be canceled have already been packed into the
	 * enqueue/cancel RPC. Also, we do not want to run out of stack
	 * with too long call paths.
	 */
	if (ns_is_server(ldlm_pl2ns(pl)))
		ldlm_pool_recalc(pl, false);
}
/**
 * Remove ldlm lock \a lock from the accounting of pool \a pl.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * Filter out FLOCK & PLAIN locks. See the comment in
	 * ldlm_pool_add() above.
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK ||
	    lock->l_resource->lr_type == LDLM_PLAIN)
		return;

	ldlm_reclaim_del(lock);

	LASSERT(atomic_read(&pl->pl_granted) > 0);
	atomic_dec(&pl->pl_granted);
	atomic_inc(&pl->pl_cancel_rate);

	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);

	if (ns_is_server(ldlm_pl2ns(pl)))
		ldlm_pool_recalc(pl, false);
}
/**
 * Returns the current SLV of \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
	__u64 slv;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	spin_unlock(&pl->pl_lock);
	return slv;
}

/**
 * Sets the passed \a slv into \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_server_lock_volume = slv;
	spin_unlock(&pl->pl_lock);
}

/**
 * Returns the current CLV of \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
	__u64 clv;

	spin_lock(&pl->pl_lock);
	clv = pl->pl_client_lock_volume;
	spin_unlock(&pl->pl_lock);
	return clv;
}

/**
 * Sets the passed \a clv into \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_client_lock_volume = clv;
	spin_unlock(&pl->pl_lock);
}

/**
 * Returns the current limit of \a pl.
 */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_limit);
}

/**
 * Sets the passed \a limit into \a pl.
 */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
	atomic_set(&pl->pl_limit, limit);
}

/**
 * Returns the current LVF of \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_lock_volume_factor);
}
/*
 * Counts locks from all namespaces (if possible). Returns the number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
{
	unsigned long total = 0;
	int nr_ns;
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL; /* loop detection */

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return 0;

	/*
	 * Find out how many resources we may release.
	 */
	for (nr_ns = ldlm_namespace_nr_read(client);
	     nr_ns > 0; nr_ns--) {
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			return 0;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns == ns_old) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (ns_old == NULL)
			ns_old = ns;

		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));
		total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
		ldlm_namespace_put(ns);
	}

	return total;
}
static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
				     gfp_t gfp_mask)
{
	unsigned long freed = 0;
	int tmp, nr_ns;
	struct ldlm_namespace *ns;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return -1;

	/*
	 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (tmp = nr_ns = ldlm_namespace_nr_read(client);
	     tmp > 0; tmp--) {
		int cancel, nr_locks;

		/*
		 * Do not call shrink under ldlm_namespace_lock(client).
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);
		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		nr_locks = ldlm_pool_granted(&ns->ns_pool);
		/*
		 * We used to shrink proportionally, but with the new
		 * shrinker API we lost the total number of freeable locks.
		 */
		cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
		freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
		ldlm_namespace_put(ns);
	}
	/*
	 * We only decrease the SLV in the server pools shrinker; return
	 * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128.
	 */
	return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
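/*
 * Example: a request to scan nr = 128 locks over nr_ns = 4 namespaces asks
 * each visited namespace to cancel 1 + min(nr_locks, 128 / 4), i.e. up to
 * 33 locks, so the pressure is spread approximately evenly.
 */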
#ifdef HAVE_SHRINKER_COUNT
static unsigned long ldlm_pools_srv_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}

static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
			       sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
			       sc->gfp_mask);
}

static struct ll_shrinker_ops ldlm_pools_srv_sh_ops = {
	.count_objects	= ldlm_pools_srv_count,
	.scan_objects	= ldlm_pools_srv_scan,
	.seeks		= DEFAULT_SEEKS,
};

static struct ll_shrinker_ops ldlm_pools_cli_sh_ops = {
	.count_objects	= ldlm_pools_cli_count,
	.scan_objects	= ldlm_pools_cli_scan,
	.seeks		= DEFAULT_SEEKS,
};
#else
/*
 * Cancel \a nr locks from all namespaces (if possible). Returns the number
 * of cached locks after the shrink is finished. All namespaces are asked
 * to cancel an approximately equal number of locks to keep the balance.
 */
static int ldlm_pools_shrink(enum ldlm_side client, int nr, gfp_t gfp_mask)
{
	unsigned long total = 0;

	if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
	    !(gfp_mask & __GFP_FS))
		return -1;

	total = ldlm_pools_count(client, gfp_mask);

	if (nr == 0 || total == 0)
		return total;

	return ldlm_pools_scan(client, nr, gfp_mask);
}

static int ldlm_pools_srv_shrink(struct shrinker *shrinker,
				 struct shrink_control *sc)
{
	return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
				 sc->nr_to_scan, sc->gfp_mask);
}

static int ldlm_pools_cli_shrink(struct shrinker *shrinker,
				 struct shrink_control *sc)
{
	return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
				 sc->nr_to_scan, sc->gfp_mask);
}

static struct ll_shrinker_ops ldlm_pools_srv_sh_ops = {
	.shrink	= ldlm_pools_srv_shrink,
	.seeks	= DEFAULT_SEEKS,
};

static struct ll_shrinker_ops ldlm_pools_cli_sh_ops = {
	.shrink	= ldlm_pools_cli_shrink,
	.seeks	= DEFAULT_SEEKS,
};
#endif /* HAVE_SHRINKER_COUNT */
static time64_t ldlm_pools_recalc_delay(enum ldlm_side side)
{
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL;
	/* seconds of sleep if no active namespaces */
	time64_t delay = ktime_get_seconds() +
			 (side == LDLM_NAMESPACE_SERVER ?
			  LDLM_POOL_SRV_DEF_RECALC_PERIOD :
			  LDLM_POOL_CLI_DEF_RECALC_PERIOD);
	int nr;

	/* Recalc at least ldlm_namespace_nr(side) namespaces. */
	for (nr = ldlm_namespace_nr_read(side); nr > 0; nr--) {
		int skip;
		/*
		 * Lock the list, get the first @ns in the list, getref, move
		 * it to the tail, unlock and call pool recalc. This way we
		 * avoid calling recalc under the @ns lock, which is really
		 * good, as we get rid of a potential deadlock on side nodes
		 * when canceling locks synchronously.
		 */
		mutex_lock(ldlm_namespace_lock(side));
		if (list_empty(ldlm_namespace_list(side))) {
			mutex_unlock(ldlm_namespace_lock(side));
			break;
		}
		ns = ldlm_namespace_first_locked(side);

		if (ns_old == ns) { /* Full pass complete */
			mutex_unlock(ldlm_namespace_lock(side));
			break;
		}

		/* We got an empty namespace, need to move it back to the
		 * inactive list.
		 * The race with parallel resource creation is fine:
		 * - If they do namespace_get before our check, we fail the
		 *   check and they move this item to the end of the list
		 *   anyway
		 * - If we do the check and then they do namespace_get, then
		 *   we move the namespace to inactive and they will move
		 *   it back to active (synchronised by the lock, so no clash
		 *   there).
		 */
		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, side);
			mutex_unlock(ldlm_namespace_lock(side));
			continue;
		}

		if (ns_old == NULL)
			ns_old = ns;

		spin_lock(&ns->ns_lock);
		/*
		 * Skip an ns which is being freed; we don't want to increase
		 * its refcount again, not even temporarily. bz21519 & LU-499.
		 */
		if (ns->ns_stopping) {
			skip = 1;
		} else {
			skip = 0;
			ldlm_namespace_get(ns);
		}
		spin_unlock(&ns->ns_lock);

		ldlm_namespace_move_to_active_locked(ns, side);
		mutex_unlock(ldlm_namespace_lock(side));

		/*
		 * After the setup is done, recalc the pool.
		 */
		if (!skip) {
			delay = min(delay,
				    ldlm_pool_recalc(&ns->ns_pool, false));
			ldlm_namespace_put(ns);
		}
	}

	return delay;
}
static void ldlm_pools_recalc_task(struct work_struct *ws);
static DECLARE_DELAYED_WORK(ldlm_pools_recalc_work, ldlm_pools_recalc_task);

static void ldlm_pools_recalc_task(struct work_struct *ws)
{
	/* seconds of sleep if no active namespaces */
	time64_t delay;
#ifdef HAVE_SERVER_SUPPORT
	struct ldlm_namespace *ns;
	unsigned long nr_l = 0, nr_p = 0, l;
	int equal = 0;

	/* Check all modest namespaces first. */
	mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
	list_for_each_entry(ns, ldlm_namespace_list(LDLM_NAMESPACE_SERVER),
			    ns_list_chain) {
		if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
			continue;

		l = ldlm_pool_granted(&ns->ns_pool);
		if (l == 0)
			l = 1;

		/*
		 * Set the modest pools limit equal to their avg granted
		 * locks + ~6%.
		 */
		l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
		ldlm_pool_setup(&ns->ns_pool, l);
		nr_l += l;
		nr_p++;
	}

	/*
	 * Make sure that modest namespaces did not eat more than 2/3
	 * of the limit.
	 */
	if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
		CWARN("'Modest' pools eat out 2/3 of server locks limit (%lu of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
		      nr_l, LDLM_POOL_HOST_L);
		equal = 1;
	}

	/* The rest is given to greedy namespaces. */
	list_for_each_entry(ns, ldlm_namespace_list(LDLM_NAMESPACE_SERVER),
			    ns_list_chain) {
		if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
			continue;

		if (equal) {
			/*
			 * In the case where 2/3 of the locks are eaten out
			 * by modest pools, we re-setup an equal limit
			 * for _all_ pools.
			 */
			l = LDLM_POOL_HOST_L /
			    ldlm_namespace_nr_read(LDLM_NAMESPACE_SERVER);
		} else {
			/*
			 * All the remaining greedy pools share the
			 * remaining locks in equal parts.
			 */
			l = (LDLM_POOL_HOST_L - nr_l) /
			    (ldlm_namespace_nr_read(LDLM_NAMESPACE_SERVER) -
			     nr_p);
		}
		ldlm_pool_setup(&ns->ns_pool, l);
	}
	mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));

	delay = min(ldlm_pools_recalc_delay(LDLM_NAMESPACE_SERVER),
		    ldlm_pools_recalc_delay(LDLM_NAMESPACE_CLIENT));
#else  /* !HAVE_SERVER_SUPPORT */
	delay = ldlm_pools_recalc_delay(LDLM_NAMESPACE_CLIENT);
#endif /* HAVE_SERVER_SUPPORT */

	/* Wake up the blocking threads from time to time. */
	ldlm_bl_thread_wakeup();

	delay -= ktime_get_seconds();
	if (delay <= 0) {
		/* Prevent too frequent recalculation. */
		CDEBUG(D_DLMTRACE, "Negative interval(%lld)\n", delay);
		delay = 1;
	}

	schedule_delayed_work(&ldlm_pools_recalc_work, cfs_time_seconds(delay));
}
static bool ldlm_pools_init_done;

static struct shrinker *ldlm_pools_srv_shrinker;
static struct shrinker *ldlm_pools_cli_shrinker;
int ldlm_pools_init(void)
{
	time64_t delay;
	int rc = 0;

#ifdef HAVE_SERVER_SUPPORT
	delay = min(LDLM_POOL_SRV_DEF_RECALC_PERIOD,
		    LDLM_POOL_CLI_DEF_RECALC_PERIOD);
#else
	delay = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
#endif

	ldlm_pools_srv_shrinker = ll_shrinker_create(&ldlm_pools_srv_sh_ops, 0,
						     "ldlm_pools_server");
	if (IS_ERR(ldlm_pools_srv_shrinker))
		GOTO(out, rc = PTR_ERR(ldlm_pools_srv_shrinker));

	ldlm_pools_cli_shrinker = ll_shrinker_create(&ldlm_pools_cli_sh_ops, 0,
						     "ldlm_pools_client");
	if (IS_ERR(ldlm_pools_cli_shrinker))
		GOTO(out_shrinker, rc = PTR_ERR(ldlm_pools_cli_shrinker));

	schedule_delayed_work(&ldlm_pools_recalc_work, delay);
	ldlm_pools_init_done = true;

	return 0;

out_shrinker:
	shrinker_free(ldlm_pools_srv_shrinker);
out:
	return rc;
}
void ldlm_pools_fini(void)
{
	if (ldlm_pools_init_done) {
		cancel_delayed_work_sync(&ldlm_pools_recalc_work);

		shrinker_free(ldlm_pools_srv_shrinker);
		shrinker_free(ldlm_pools_cli_shrinker);
	}

	ldlm_pools_init_done = false;
}
#else /* !HAVE_LRU_RESIZE_SUPPORT */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
	return 0;
}

time64_t ldlm_pool_recalc(struct ldlm_pool *pl, bool force)
{
	return 0;
}

int ldlm_pool_shrink(struct ldlm_pool *pl,
		     int nr, gfp_t gfp_mask)
{
	return 0;
}

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
		   int idx, enum ldlm_side client)
{
	return 0;
}

void ldlm_pool_fini(struct ldlm_pool *pl)
{
}

void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
}

void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
}

__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
	return 1;
}

void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
}

__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
	return 1;
}

void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
}

__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
	return 1;
}

void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
}

__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
	return 1;
}

int ldlm_pools_init(void)
{
	return 0;
}

void ldlm_pools_fini(void)
{
}
#endif /* HAVE_LRU_RESIZE_SUPPORT */