 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 *
 * The idea of this code is rather simple. Each second, for each server
 * namespace, we calculate SLV - the server lock volume - based on the
 * current number of granted locks, the grant speed for the past period,
 * etc. - that is, on the locking load. For simplicity, this SLV number may
 * be thought of as a flow definition. It is sent to clients at every
 * opportunity to let them know the current load situation on the server.
 * By default, at the beginning, the SLV on the server is set to a maximum
 * value, calculated to allow one client to hold all ->pl_limit locks
 * for 10h.
 *
 * Next, on clients, the number of cached locks is no longer limited
 * artificially. Instead, the client calculates the CLV, that is, the
 * client lock volume, for each lock and compares it with the last SLV
 * from the server. The CLV is calculated as the number of locks in the
 * LRU * the lock's live time in seconds. If CLV > SLV, the lock is
 * canceled.
 *
 * The client has an LVF, that is, a lock volume factor which regulates how
 * sensitive the client should be to the last SLV from the server. The
 * higher the LVF, the more locks will be canceled on the client. Its
 * default value is 1. Setting the LVF to 2 means that the client will
 * cancel locks twice as fast.
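 *
 * For illustration only (all numbers are made up): suppose a client holds
 * 100 locks in its LRU and a given lock has lived for 120 seconds, so its
 * CLV = 100 * 120 = 12000. The lock is canceled once the advertised SLV
 * drops below 12000; with LVF = 2 the effective lock volume doubles to
 * 24000, so the same lock is canceled while the server still advertises a
 * larger SLV, i.e. roughly twice as early.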
 *
 * Locks on a client will be canceled more aggressively in these cases:
 * (1) if the SLV is smaller, that is, the load is higher on the server;
 * (2) the client holds a lot of locks (the more locks a client holds, the
 * higher the chance that some of them should be canceled);
 * (3) the client has old locks (taken some time ago);
 *
 * Thus, in the flow paradigm that we use to better understand the SLV, the
 * CLV is the volume of a particle in the flow described by the SLV. If the
 * flow is getting thinner, more and more particles fall outside of it, and
 * as particles are locks, they should be canceled.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such as
 * using the LVF, and many cleanups. The flow definition that makes the logic
 * easier to understand belongs to Nikita Danilov (nikita@clusterfs.com), as
 * do many cleanups and fixes. The design and implementation are by
 * Yury Umanets (umka@clusterfs.com).
 *
 * Glossary of terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side;
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As can be seen from the list above, there are a few tunables which may
 * significantly affect behavior. They may all be modified via sysfs. They
 * also make it possible to construct several pre-defined behavior policies.
 * If none of the predefined policies suits the workload being used, a new
 * one may be "constructed" via the sysfs tunables.
 */
#define DEBUG_SUBSYSTEM S_LDLM

#include <linux/workqueue.h>
#include <libcfs/linux/linux-mem.h>
#include <lustre_dlm.h>
#include <cl_object.h>
#include <obd_class.h>
#include <obd_support.h>
#include "ldlm_internal.h"

#ifdef HAVE_LRU_RESIZE_SUPPORT
/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
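
/*
 * For illustration only: with 4KB pages (PAGE_SHIFT == 12), the shift above
 * converts cache pages into megabytes of RAM, so on a host where
 * NUM_CACHEPAGES covers 8GB of RAM this works out to 8192 * 50 = 409600
 * locks.
 */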
/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is the default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients (10 hours, in seconds).
 */
#define LDLM_POOL_MAX_AGE (36000)
/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)

extern struct proc_dir_entry *ldlm_ns_proc_dir;
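
/*
 * For illustration only: dru() below divides \a val by 2^shift with
 * optional rounding up, e.g. dru(5000, 10, 0) = 5000 >> 10 = 4, while
 * dru(5000, 10, 1) = (5000 + 1023) >> 10 = 5.
 */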
static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
	return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
static inline __u64 ldlm_pool_slv_max(__u32 L)
{
	/*
	 * Allow one client to hold all locks for 10 hrs.
	 * The formula is the following: limit * 10h / 1 client.
	 */
	__u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;

	return lim;
}
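
/*
 * For illustration only: with the 409600-lock host limit from the example
 * above, ldlm_pool_slv_max() yields 409600 * 36000 = 14745600000.
 */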
static inline __u64 ldlm_pool_slv_min(__u32 L)
{
	return 1;
}
enum {
	LDLM_POOL_FIRST_STAT = 0,
	LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
	LDLM_POOL_GRANT_STAT,
	LDLM_POOL_CANCEL_STAT,
	LDLM_POOL_GRANT_RATE_STAT,
	LDLM_POOL_CANCEL_RATE_STAT,
	LDLM_POOL_GRANT_PLAN_STAT,
	LDLM_POOL_SLV_STAT,
	LDLM_POOL_SHRINK_REQTD_STAT,
	LDLM_POOL_SHRINK_FREED_STAT,
	LDLM_POOL_RECALC_STAT,
	LDLM_POOL_TIMING_STAT,
	LDLM_POOL_LAST_STAT
};
static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
	return container_of(pl, struct ldlm_namespace, ns_pool);
}
/**
 * Calculates the suggested grant_step in % of available locks for the
 * passed period \a t. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
	/*
	 * This yields a 1% grant step for anything below LDLM_POOL_GSP_STEP
	 * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
	 *
	 * How this affects execution is the following:
	 *
	 * - for a thread period of 1s we will have a grant_step of 1%, which
	 * is good from the POV of taking some load off the server and
	 * pushing it out to clients. This is because a 1% grant_step means
	 * that the server will not allow clients to get lots of locks in a
	 * short period of time while keeping all their old locks cached.
	 * Clients will always have to cancel some locks if they want to
	 * take new ones;
	 *
	 * - for the default thread period of 10s we will have 23%, which
	 * means that clients will have enough room to take new locks
	 * without giving any back. All locks from this 23% which were not
	 * taken by clients in the current period will contribute to SLV
	 * growth. SLV growth means more locks cached on clients until the
	 * limit or grant plan is reached.
	 */
	return LDLM_POOL_MAX_GSP -
	       ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
		(t >> LDLM_POOL_GSP_STEP_SHIFT));
}
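
/*
 * For illustration only, evaluating the formula above:
 *
 *	t = 1  -> 30 - (29 >> 0) = 1%
 *	t = 4  -> 30 - (29 >> 1) = 16%
 *	t = 10 -> 30 - (29 >> 2) = 23%
 *	t = 20 -> 30 - (29 >> 5) = 30%
 */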
static inline int ldlm_pool_granted(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_granted);
}
/**
 * Recalculates the next grant plan on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
	int granted, grant_step, limit;

	limit = ldlm_pool_get_limit(pl);
	granted = ldlm_pool_granted(pl);

	grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
	grant_step = ((limit - granted) * grant_step) / 100;
	pl->pl_grant_plan = granted + grant_step;

	/* Do not let the grant plan exceed the limit by more than 25%. */
	limit = (limit * 5) >> 2;
	if (pl->pl_grant_plan > limit)
		pl->pl_grant_plan = limit;
}
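
/*
 * For illustration only: with limit = 1000, granted = 800 and the default
 * 10s recalc period (grant_step = 23%), the plan above becomes
 * 800 + (200 * 23) / 100 = 846, well below the 1250 cap.
 */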
/**
 * Recalculates the next SLV on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
	int granted;
	int grant_plan;
	int round_up;
	__u64 slv;
	__u64 slv_factor;
	__u64 grant_usage;
	__u32 limit;

	slv = pl->pl_server_lock_volume;
	grant_plan = pl->pl_grant_plan;
	limit = ldlm_pool_get_limit(pl);
	granted = ldlm_pool_granted(pl);
	round_up = granted < limit;

	grant_usage = max_t(int, limit - (granted - grant_plan), 1);

	/*
	 * Find out the SLV change factor, which is the ratio of grant usage
	 * to the limit. The SLV changes as fast as the ratio of grant plan
	 * consumption. The more locks from the grant plan are left
	 * unconsumed by clients in the last interval (idle time), the faster
	 * the SLV grows. Conversely, the more the grant plan is
	 * over-consumed (load time), the faster the SLV drops.
	 */
	slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
	do_div(slv_factor, limit);
	slv = slv * slv_factor;
	slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);

	if (slv > ldlm_pool_slv_max(limit)) {
		slv = ldlm_pool_slv_max(limit);
	} else if (slv < ldlm_pool_slv_min(limit)) {
		slv = ldlm_pool_slv_min(limit);
	}

	pl->pl_server_lock_volume = slv;
}
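
/*
 * For illustration only: continuing the example above with limit = 1000,
 * granted = 800 and grant_plan = 846, grant_usage = 1000 - (800 - 846) =
 * 1046, slv_factor = (1046 << 10) / 1000 = 1071, and the new SLV is
 * roughly old SLV * 1071 >> 10, i.e. about 4.6% growth per interval.
 */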
/**
 * Recalculates the next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
	int grant_plan = pl->pl_grant_plan;
	__u64 slv = pl->pl_server_lock_volume;
	int granted = ldlm_pool_granted(pl);
	int grant_rate = atomic_read(&pl->pl_grant_rate);
	int cancel_rate = atomic_read(&pl->pl_cancel_rate);

	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
			    slv);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			    granted);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			    grant_rate);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			    grant_plan);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			    cancel_rate);
}
/**
 * Sets the current SLV into the obd accessible via ldlm_pl2ns(pl)->ns_obd.
 */
static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Set the new SLV in the obd field for use later without accessing
	 * the pool. This is required to avoid a race between sending a reply
	 * to a client with the new SLV and cleanup of the server stack, in
	 * which we can't guarantee that the namespace is still alive. We
	 * only know that the obd is alive as long as a valid export is
	 * alive.
	 */
	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL);
	write_lock(&obd->obd_pool_lock);
	obd->obd_pool_slv = pl->pl_server_lock_volume;
	write_unlock(&obd->obd_pool_lock);
}
/**
 * Recalculates all pool fields on passed \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
	time64_t recalc_interval_sec;

	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}

	/*
	 * Recalc SLV after the last period. This should be done
	 * _before_ recalculating the new grant plan.
	 */
	ldlm_pool_recalc_slv(pl);

	/*
	 * Make sure that the pool informed the obd of the last SLV changes.
	 */
	ldlm_srv_pool_push_slv(pl);

	/*
	 * Update grant_plan for the new period.
	 */
	ldlm_pool_recalc_grant_plan(pl);

	pl->pl_recalc_time = ktime_get_real_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);

	return 0;
}
/**
 * This function is used on the server side as the main entry point for
 * memory pressure handling. It decreases the SLV on \a pl according to the
 * passed \a nr and \a gfp_mask.
 *
 * Our goal here is to decrease the SLV in such a way that clients hold
 * \a nr fewer locks over the next 10h.
 */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	__u32 limit;

	/*
	 * The VM is asking how many entries may potentially be freed.
	 */
	if (nr == 0)
		return ldlm_pool_granted(pl);

	/*
	 * The client has already canceled locks but the server is already in
	 * the shrinker and can't cancel anything. Catch this race.
	 */
	if (ldlm_pool_granted(pl) == 0)
		return 0;

	spin_lock(&pl->pl_lock);

	/*
	 * We want the shrinker to possibly cause cancellation of @nr locks
	 * from clients, or to grant approximately @nr fewer locks in the
	 * next intervals.
	 *
	 * This is why we decrease the SLV by @nr. This effect only lasts for
	 * one re-calc interval (1s these days), which should be enough to
	 * pass the decreased SLV to all clients. On the next recalc interval
	 * the pool will either increase the SLV if the locking load is not
	 * high, or keep it on the same level, or even decrease it again.
	 * Thus, the shrinker-decreased SLV affects the next recalc intervals
	 * and thereby lowers the locking load.
	 */
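	/*
	 * For illustration only (made-up numbers): if the current SLV is
	 * 1000000 and the VM asks to shrink nr = 1000 locks, the SLV pushed
	 * to the obd below drops to 999000. Clients see the lower value on
	 * their next replies, so locks whose CLV now exceeds it get
	 * canceled.
	 */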
	if (nr < pl->pl_server_lock_volume) {
		pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
	} else {
		limit = ldlm_pool_get_limit(pl);
		pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
	}

	/*
	 * Make sure that the pool informed the obd of the last SLV changes.
	 */
	ldlm_srv_pool_push_slv(pl);
	spin_unlock(&pl->pl_lock);

	/*
	 * We have not really freed any memory here so far; it may only be
	 * freed later, so return 0 to avoid confusing the VM.
	 */
	return 0;
}
/**
 * Sets up the server side pool \a pl with the passed \a limit.
 */
static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
{
	struct obd_device *obd;

	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL && obd != LP_POISON);
	LASSERT(obd->obd_type != LP_POISON);
	write_lock(&obd->obd_pool_lock);
	obd->obd_pool_limit = limit;
	write_unlock(&obd->obd_pool_lock);

	ldlm_pool_set_limit(pl, limit);

	return 0;
}
/**
 * Sets the SLV and Limit from ldlm_pl2ns(pl)->ns_obd into the passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Get the new SLV and Limit from the obd, which is updated with
	 * incoming RPCs.
	 */
	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL);
	read_lock(&obd->obd_pool_lock);
	pl->pl_server_lock_volume = obd->obd_pool_slv;
	ldlm_pool_set_limit(pl, obd->obd_pool_limit);
	read_unlock(&obd->obd_pool_lock);
}
/**
 * Recalculates the client side pool \a pl according to the current SLV and
 * Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
	time64_t recalc_interval_sec;
	int ret;

	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	/*
	 * Check if we need to recalc lists now.
	 */
	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}

	/*
	 * Make sure that the pool knows the last SLV and Limit from the obd.
	 */
	ldlm_cli_pool_pop_slv(pl);
	spin_unlock(&pl->pl_lock);

	/*
	 * While canceling locks on the client we do not need to maintain
	 * sharp timing; we only want to cancel locks as soon as possible
	 * according to the new SLV. This may be called when the SLV has
	 * changed much, which is why we do not take pl->pl_recalc_time into
	 * account here.
	 */
	ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, 0);

	spin_lock(&pl->pl_lock);
	/*
	 * The time of LRU resizing might be longer than the period, so
	 * update after LRU resizing rather than before it.
	 */
	pl->pl_recalc_time = ktime_get_real_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);

	return ret;
}
/**
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the
 * passed \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	struct ldlm_namespace *ns;
	int unused;

	ns = ldlm_pl2ns(pl);

	/*
	 * Do not cancel locks in case LRU resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(ns))
		return 0;

	/*
	 * Make sure that the pool knows the last SLV and Limit from the obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	spin_lock(&ns->ns_lock);
	unused = ns->ns_nr_unused;
	spin_unlock(&ns->ns_lock);
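
	/*
	 * For illustration only: when only counting (nr == 0), with 5000
	 * unused locks and the kernel default sysctl_vfs_cache_pressure of
	 * 100, the branch below reports (5000 / 100) * 100 = 5000 freeable
	 * entries; a vfs_cache_pressure of 200 would double that to 10000.
	 */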
	if (nr == 0)
		return (unused / 100) * sysctl_vfs_cache_pressure;
	else
		return ldlm_cancel_lru(ns, nr, LCF_ASYNC, 0);
}
static struct ldlm_pool_ops ldlm_srv_pool_ops = {
	.po_recalc = ldlm_srv_pool_recalc,
	.po_shrink = ldlm_srv_pool_shrink,
	.po_setup  = ldlm_srv_pool_setup
};

static struct ldlm_pool_ops ldlm_cli_pool_ops = {
	.po_recalc = ldlm_cli_pool_recalc,
	.po_shrink = ldlm_cli_pool_shrink
};
/**
 * Pool recalc wrapper. Will call either the client or server pool recalc
 * callback depending on which kind of pool \a pl is. Returns the number of
 * seconds to wait until the next recalc is due.
 */
time64_t ldlm_pool_recalc(struct ldlm_pool *pl)
{
	time64_t recalc_interval_sec;
	int count;

	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec > 0) {
		spin_lock(&pl->pl_lock);
		recalc_interval_sec = ktime_get_real_seconds() -
				      pl->pl_recalc_time;

		if (recalc_interval_sec > 0) {
			/*
			 * Update pool statistics every 1s.
			 */
			ldlm_pool_recalc_stats(pl);

			/*
			 * Zero out all rates and speed for the last period.
			 */
			atomic_set(&pl->pl_grant_rate, 0);
			atomic_set(&pl->pl_cancel_rate, 0);
		}
		spin_unlock(&pl->pl_lock);
	}

	if (pl->pl_ops->po_recalc != NULL) {
		count = pl->pl_ops->po_recalc(pl);
		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
				    count);
	}

	recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() +
			      pl->pl_recalc_period;
	if (recalc_interval_sec <= 0) {
		/* DEBUG: should be removed once LU-4536 is fixed */
		CDEBUG(D_DLMTRACE,
		       "%s: Negative interval(%lld), too short period(%lld)\n",
		       pl->pl_name, recalc_interval_sec,
		       (s64)pl->pl_recalc_period);

		/* Prevent too frequent recalculation. */
		recalc_interval_sec = 1;
	}

	return recalc_interval_sec;
}
/**
 * Pool shrink wrapper. Will call either the client or server pool shrink
 * callback depending on which kind of pool \a pl is.
 */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
	int cancel = 0;

	if (pl->pl_ops->po_shrink != NULL) {
		cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
		if (nr > 0) {
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_REQTD_STAT,
					    nr);
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_FREED_STAT,
					    cancel);
			CDEBUG(D_DLMTRACE,
			       "%s: request to shrink %d locks, shrunk %d\n",
			       pl->pl_name, nr, cancel);
		}
	}
	return cancel;
}
/**
 * Pool setup wrapper. Will call either the client or server pool setup
 * callback depending on which kind of pool \a pl is.
 *
 * Sets the passed \a limit into the pool \a pl.
 */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
	if (pl->pl_ops->po_setup != NULL)
		return pl->pl_ops->po_setup(pl, limit);
	return 0;
}
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
	int granted, grant_rate, cancel_rate, grant_step;
	int grant_speed, grant_plan, lvf;
	struct ldlm_pool *pl = m->private;
	__u64 slv, clv;
	__u32 limit;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	clv = pl->pl_client_lock_volume;
	limit = ldlm_pool_get_limit(pl);
	grant_plan = pl->pl_grant_plan;
	granted = ldlm_pool_granted(pl);
	grant_rate = atomic_read(&pl->pl_grant_rate);
	cancel_rate = atomic_read(&pl->pl_cancel_rate);
	grant_speed = grant_rate - cancel_rate;
	lvf = atomic_read(&pl->pl_lock_volume_factor);
	grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
	spin_unlock(&pl->pl_lock);

	seq_printf(m, "LDLM pool state (%s):\n"
		   "  SLV: %llu\n"
		   "  CLV: %llu\n"
		   "  LVF: %d\n",
		   pl->pl_name, slv, clv, lvf);

	if (ns_is_server(ldlm_pl2ns(pl))) {
		seq_printf(m, "  GSP: %d%%\n", grant_step);
		seq_printf(m, "  GP:  %d\n", grant_plan);
	}

	seq_printf(m, "  GR: %d\n CR: %d\n GS: %d\n G: %d\n L: %d\n",
		   grant_rate, cancel_rate, grant_speed,
		   granted, limit);
	return 0;
}
LDEBUGFS_SEQ_FOPS_RO(lprocfs_pool_state);

static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	int grant_speed;

	spin_lock(&pl->pl_lock);
	/* serialize with ldlm_pool_recalc */
	grant_speed = atomic_read(&pl->pl_grant_rate) -
		      atomic_read(&pl->pl_cancel_rate);
	spin_unlock(&pl->pl_lock);
	return sprintf(buf, "%d\n", grant_speed);
}
LUSTRE_RO_ATTR(grant_speed);
LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
LUSTRE_RO_ATTR(grant_plan);

LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
LUSTRE_RW_ATTR(recalc_period);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
LUSTRE_RO_ATTR(server_lock_volume);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
LUSTRE_RW_ATTR(limit);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
LUSTRE_RO_ATTR(granted);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
LUSTRE_RO_ATTR(cancel_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
LUSTRE_RO_ATTR(grant_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
LUSTRE_RW_ATTR(lock_volume_factor);
/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
static struct attribute *ldlm_pl_attrs[] = {
	&lustre_attr_grant_speed.attr,
	&lustre_attr_grant_plan.attr,
	&lustre_attr_recalc_period.attr,
	&lustre_attr_server_lock_volume.attr,
	&lustre_attr_limit.attr,
	&lustre_attr_granted.attr,
	&lustre_attr_cancel_rate.attr,
	&lustre_attr_grant_rate.attr,
	&lustre_attr_lock_volume_factor.attr,
	NULL,
};
static void ldlm_pl_release(struct kobject *kobj)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
					    pl_kobj);
	complete(&pl->pl_kobj_unregister);
}

static struct kobj_type ldlm_pl_ktype = {
	.default_attrs	= ldlm_pl_attrs,
	.sysfs_ops	= &lustre_sysfs_ops,
	.release	= ldlm_pl_release,
};
static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = ldlm_pl2ns(pl);
	int err;

	init_completion(&pl->pl_kobj_unregister);
	err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
				   "%s", "pool");

	return err;
}
static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = ldlm_pl2ns(pl);
	struct dentry *debugfs_ns_parent;
	struct lprocfs_vars pool_vars[2];
	char *var_name = NULL;
	int rc = 0;

	OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
	if (!var_name)
		return -ENOMEM;

	debugfs_ns_parent = ns->ns_debugfs_entry;
	if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
		CERROR("%s: debugfs entry is not initialized\n",
		       ldlm_ns_name(ns));
		GOTO(out_free_name, rc = -EINVAL);
	}

	pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
						 NULL, NULL);
	if (IS_ERR(pl->pl_debugfs_entry)) {
		rc = PTR_ERR(pl->pl_debugfs_entry);
		pl->pl_debugfs_entry = NULL;
		CERROR("%s: cannot create 'pool' debugfs entry: rc = %d\n",
		       ldlm_ns_name(ns), rc);
		GOTO(out_free_name, rc);
	}

	var_name[MAX_STRING_SIZE] = '\0';
	memset(pool_vars, 0, sizeof(pool_vars));
	pool_vars[0].name = var_name;

	ldlm_add_var(&pool_vars[0], pl->pl_debugfs_entry, "state", pl,
		     &lprocfs_pool_state_fops);

	pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
					   LDLM_POOL_FIRST_STAT, 0);
	if (pl->pl_stats == NULL)
		GOTO(out_free_name, rc = -ENOMEM);
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "granted", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_plan", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "slv", "slv");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_request", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_timing", "sec");
	rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
				     pl->pl_stats);

out_free_name:
	OBD_FREE(var_name, MAX_STRING_SIZE + 1);
	return rc;
}
static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
{
	kobject_put(&pl->pl_kobj);
	wait_for_completion(&pl->pl_kobj_unregister);
}

static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
{
	if (pl->pl_stats != NULL) {
		lprocfs_free_stats(&pl->pl_stats);
		pl->pl_stats = NULL;
	}
	if (pl->pl_debugfs_entry != NULL) {
		ldebugfs_remove(&pl->pl_debugfs_entry);
		pl->pl_debugfs_entry = NULL;
	}
}
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
		   int idx, enum ldlm_side client)
{
	int rc;

	spin_lock_init(&pl->pl_lock);
	atomic_set(&pl->pl_granted, 0);
	pl->pl_recalc_time = ktime_get_real_seconds();
	atomic_set(&pl->pl_lock_volume_factor, 1);

	atomic_set(&pl->pl_grant_rate, 0);
	atomic_set(&pl->pl_cancel_rate, 0);
	pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

	snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
		 ldlm_ns_name(ns), idx);

	if (client == LDLM_NAMESPACE_SERVER) {
		pl->pl_ops = &ldlm_srv_pool_ops;
		ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
		pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
		pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
	} else {
		ldlm_pool_set_limit(pl, 1);
		pl->pl_server_lock_volume = 0;
		pl->pl_ops = &ldlm_cli_pool_ops;
		pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
	}
	pl->pl_client_lock_volume = 0;
	rc = ldlm_pool_debugfs_init(pl);
	if (rc)
		return rc;

	rc = ldlm_pool_sysfs_init(pl);
	if (rc)
		return rc;

	CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

	return 0;
}
void ldlm_pool_fini(struct ldlm_pool *pl)
{
	ldlm_pool_sysfs_fini(pl);
	ldlm_pool_debugfs_fini(pl);

	/*
	 * The pool should not be used after this point. We can't free it
	 * here, as it lives in struct ldlm_namespace, but we are still
	 * interested in catching any abnormal use.
	 */
	POISON(pl, 0x5a, sizeof(*pl));
}
/**
 * Adds the newly taken ldlm lock \a lock into the pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * FLOCK locks are special in the sense that they are almost never
	 * cancelled; instead, a special kind of lock is used to drop them.
	 * Also, there is no LRU for flock locks, so no point in tracking
	 * them anyway.
	 *
	 * PLAIN locks are used by config and quota; their quantity is small
	 * and usually they are not in the LRU.
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK ||
	    lock->l_resource->lr_type == LDLM_PLAIN)
		return;

	ldlm_reclaim_add(lock);

	atomic_inc(&pl->pl_granted);
	atomic_inc(&pl->pl_grant_rate);
	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);

	/*
	 * Do not do pool recalc for the client side, as all locks which may
	 * potentially be canceled have already been packed into the
	 * enqueue/cancel RPC. Also, we do not want to run out of stack with
	 * too long call paths.
	 */
	if (ns_is_server(ldlm_pl2ns(pl)))
		ldlm_pool_recalc(pl);
}
/**
 * Removes the ldlm lock \a lock from the pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * Filter out FLOCK & PLAIN locks. Read the comment above in
	 * ldlm_pool_add().
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK ||
	    lock->l_resource->lr_type == LDLM_PLAIN)
		return;

	ldlm_reclaim_del(lock);

	LASSERT(atomic_read(&pl->pl_granted) > 0);
	atomic_dec(&pl->pl_granted);
	atomic_inc(&pl->pl_cancel_rate);

	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);

	if (ns_is_server(ldlm_pl2ns(pl)))
		ldlm_pool_recalc(pl);
}
/**
 * Returns the current SLV of \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
	__u64 slv;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	spin_unlock(&pl->pl_lock);
	return slv;
}

/**
 * Sets the passed \a slv into \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_server_lock_volume = slv;
	spin_unlock(&pl->pl_lock);
}

/**
 * Returns the current CLV of \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
	__u64 slv;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_client_lock_volume;
	spin_unlock(&pl->pl_lock);
	return slv;
}

/**
 * Sets the passed \a clv into \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_client_lock_volume = clv;
	spin_unlock(&pl->pl_lock);
}

/**
 * Returns the current limit of \a pl.
 */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_limit);
}

/**
 * Sets the passed \a limit into \a pl.
 */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
	atomic_set(&pl->pl_limit, limit);
}

/**
 * Returns the current LVF of \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_lock_volume_factor);
}
static struct shrinker *ldlm_pools_srv_shrinker;
static struct shrinker *ldlm_pools_cli_shrinker;
/*
 * Count locks from all namespaces (if possible). Returns the number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
{
	unsigned long total = 0;
	int nr_ns;
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL; /* loop detection */

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return 0;

	CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
	       client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

	/*
	 * Find out how many resources we may release.
	 */
	for (nr_ns = ldlm_namespace_nr_read(client);
	     nr_ns > 0; nr_ns--) {
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			return 0;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns == ns_old) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (ns_old == NULL)
			ns_old = ns;

		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));
		total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
		ldlm_namespace_put(ns);
	}

	return total;
}
static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
				     gfp_t gfp_mask)
{
	unsigned long freed = 0;
	int tmp, nr_ns;
	struct ldlm_namespace *ns;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return -1;

	/*
	 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (tmp = nr_ns = ldlm_namespace_nr_read(client);
	     tmp > 0; tmp--) {
		int cancel, nr_locks;

		/*
		 * Do not call shrink under ldlm_namespace_lock(client).
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);
		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		nr_locks = ldlm_pool_granted(&ns->ns_pool);
		/*
		 * We used to shrink proportionally, but with the new shrinker
		 * API we lost the total number of freeable locks.
		 */
		cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
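		/*
		 * For illustration only: with a scan request of nr = 128
		 * across nr_ns = 4 namespaces, a namespace holding
		 * nr_locks >= 32 granted locks is asked above to cancel
		 * 1 + min(nr_locks, 128 / 4) = 33 of them.
		 */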
		freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
		ldlm_namespace_put(ns);
	}
	/*
	 * We only decrease the SLV in the server pools shrinker; return
	 * SHRINK_STOP to the kernel to avoid a needless loop. See LU-1128.
	 */
	return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
#ifdef HAVE_SHRINKER_COUNT
static unsigned long ldlm_pools_srv_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}

static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
			       sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
			       sc->gfp_mask);
}

#else /* !HAVE_SHRINKER_COUNT */
/*
 * Cancel \a nr locks from all namespaces (if possible). Returns the number
 * of cached locks after the shrink is finished. All namespaces are asked to
 * cancel an approximately equal number of locks to keep them balanced.
 */
static int ldlm_pools_shrink(enum ldlm_side client, int nr, gfp_t gfp_mask)
{
	unsigned long total = 0;

	if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
	    !(gfp_mask & __GFP_FS))
		return -1;

	CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
	       nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

	total = ldlm_pools_count(client, gfp_mask);

	if (nr == 0 || total == 0)
		return total;

	return ldlm_pools_scan(client, nr, gfp_mask);
}
static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
	return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
				 shrink_param(sc, nr_to_scan),
				 shrink_param(sc, gfp_mask));
}

static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
	return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
				 shrink_param(sc, nr_to_scan),
				 shrink_param(sc, gfp_mask));
}

#endif /* HAVE_SHRINKER_COUNT */
static time64_t ldlm_pools_recalc_delay(enum ldlm_side side)
{
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL;
	/* seconds of sleep if no active namespaces */
	time64_t delay = side == LDLM_NAMESPACE_SERVER ?
				 LDLM_POOL_SRV_DEF_RECALC_PERIOD :
				 LDLM_POOL_CLI_DEF_RECALC_PERIOD;
	int nr;

	/* Recalc at least ldlm_namespace_nr(side) namespaces. */
	for (nr = ldlm_namespace_nr_read(side); nr > 0; nr--) {
		int skip;
		/*
		 * Lock the list, get the first @ns in the list, getref, move
		 * it to the tail, unlock and call pool recalc. This way we
		 * avoid calling recalc under the @ns lock, which is really
		 * good as we get rid of a potential deadlock on side nodes
		 * when canceling locks synchronously.
		 */
		mutex_lock(ldlm_namespace_lock(side));
		if (list_empty(ldlm_namespace_list(side))) {
			mutex_unlock(ldlm_namespace_lock(side));
			break;
		}
		ns = ldlm_namespace_first_locked(side);

		if (ns_old == ns) { /* Full pass complete */
			mutex_unlock(ldlm_namespace_lock(side));
			break;
		}

		/* We got an empty namespace, need to move it back to the
		 * inactive list.
		 * The race with parallel resource creation is fine:
		 * - If they do namespace_get before our check, we fail the
		 *   check and they move this item to the end of the list
		 *   anyway
		 * - If we do the check and then they do namespace_get, then
		 *   we move the namespace to inactive and they will move
		 *   it back to active (synchronised by the lock, so no clash
		 *   there).
		 */
		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, side);
			mutex_unlock(ldlm_namespace_lock(side));
			continue;
		}

		if (ns_old == NULL)
			ns_old = ns;

		spin_lock(&ns->ns_lock);
		/*
		 * Skip an ns which is being freed; we don't want to increase
		 * its refcount again, not even temporarily. bz21519 & LU-499.
		 */
		if (ns->ns_stopping) {
			skip = 1;
		} else {
			skip = 0;
			ldlm_namespace_get(ns);
		}
		spin_unlock(&ns->ns_lock);

		ldlm_namespace_move_to_active_locked(ns, side);
		mutex_unlock(ldlm_namespace_lock(side));

		/*
		 * After setup is done - recalc the pool.
		 */
		if (!skip) {
			delay = min(delay, ldlm_pool_recalc(&ns->ns_pool));
			ldlm_namespace_put(ns);
		}
	}

	return delay;
}
static void ldlm_pools_recalc_task(struct work_struct *ws);
static DECLARE_DELAYED_WORK(ldlm_pools_recalc_work, ldlm_pools_recalc_task);

static void ldlm_pools_recalc_task(struct work_struct *ws)
{
	/* seconds of sleep if no active namespaces */
	time64_t delay;
#ifdef HAVE_SERVER_SUPPORT
	struct ldlm_namespace *ns;
	unsigned long nr_l = 0, nr_p = 0, l;
	int equal = 0;

	/* Check all modest namespaces first. */
	mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
	list_for_each_entry(ns, ldlm_namespace_list(LDLM_NAMESPACE_SERVER),
			    ns_list_chain) {
		if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
			continue;

		l = ldlm_pool_granted(&ns->ns_pool);
		if (l == 0)
			l = 1;

		/*
		 * Set the modest pools limit equal to their avg granted
		 * locks + ~6%.
		 */
		l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
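		/*
		 * For illustration only: LDLM_POOLS_MODEST_MARGIN_SHIFT is
		 * defined elsewhere in this file (4 in current sources), so
		 * the margin added above is l >> 4, i.e. about the "~6%"
		 * from the comment: 3200 granted locks become a limit of
		 * 3200 + 200 = 3400.
		 */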
		ldlm_pool_setup(&ns->ns_pool, l);
		nr_l += l;
		nr_p++;
	}

	/*
	 * Make sure that the modest namespaces did not eat more than 2/3
	 * of the limit.
	 */
	if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
		CWARN("'Modest' pools eat out 2/3 of server locks "
		      "limit (%lu of %lu). This means that you have too "
		      "many clients for this amount of server RAM. "
		      "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
		equal = 1;
	}

	/* The rest is given to greedy namespaces. */
	list_for_each_entry(ns, ldlm_namespace_list(LDLM_NAMESPACE_SERVER),
			    ns_list_chain) {
		if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
			continue;

		if (equal) {
			/*
			 * In the case where 2/3 of the locks are eaten by
			 * modest pools, we re-set an equal limit for _all_
			 * pools.
			 */
			l = LDLM_POOL_HOST_L /
			    ldlm_namespace_nr_read(LDLM_NAMESPACE_SERVER);
		} else {
			/*
			 * All the rest of the greedy pools get the remaining
			 * locks in equal parts.
			 */
			l = (LDLM_POOL_HOST_L - nr_l) /
			    (ldlm_namespace_nr_read(LDLM_NAMESPACE_SERVER) -
			     nr_p);
		}
		ldlm_pool_setup(&ns->ns_pool, l);
	}
	mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
	delay = min(ldlm_pools_recalc_delay(LDLM_NAMESPACE_SERVER),
		    ldlm_pools_recalc_delay(LDLM_NAMESPACE_CLIENT));
#else  /* !HAVE_SERVER_SUPPORT */
	delay = ldlm_pools_recalc_delay(LDLM_NAMESPACE_CLIENT);
#endif /* HAVE_SERVER_SUPPORT */

	/* Wake up the blocking threads from time to time. */
	ldlm_bl_thread_wakeup();

	schedule_delayed_work(&ldlm_pools_recalc_work, cfs_time_seconds(delay));
}
int ldlm_pools_init(void)
{
	DEF_SHRINKER_VAR(shsvar, ldlm_pools_srv_shrink,
			 ldlm_pools_srv_count, ldlm_pools_srv_scan);
	DEF_SHRINKER_VAR(shcvar, ldlm_pools_cli_shrink,
			 ldlm_pools_cli_count, ldlm_pools_cli_scan);

	schedule_delayed_work(&ldlm_pools_recalc_work,
			      LDLM_POOL_CLI_DEF_RECALC_PERIOD);
	ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS, &shsvar);
	ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS, &shcvar);

	return 0;
}
void ldlm_pools_fini(void)
{
	if (ldlm_pools_srv_shrinker != NULL) {
		remove_shrinker(ldlm_pools_srv_shrinker);
		ldlm_pools_srv_shrinker = NULL;
	}
	if (ldlm_pools_cli_shrinker != NULL) {
		remove_shrinker(ldlm_pools_cli_shrinker);
		ldlm_pools_cli_shrinker = NULL;
	}
	cancel_delayed_work_sync(&ldlm_pools_recalc_work);
}
#else /* !HAVE_LRU_RESIZE_SUPPORT */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
	return 0;
}

time64_t ldlm_pool_recalc(struct ldlm_pool *pl)
{
	return 0;
}

int ldlm_pool_shrink(struct ldlm_pool *pl,
		     int nr, gfp_t gfp_mask)
{
	return 0;
}

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
		   int idx, enum ldlm_side client)
{
	return 0;
}

void ldlm_pool_fini(struct ldlm_pool *pl)
{
	return;
}

void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	return;
}

void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	return;
}

__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
	return 1;
}

void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
	return;
}

__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
	return 1;
}

void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
	return;
}

__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
	return 0;
}

void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
	return;
}

__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
	return 1;
}

int ldlm_pools_init(void)
{
	return 0;
}

void ldlm_pools_fini(void)
{
	return;
}

#endif /* HAVE_LRU_RESIZE_SUPPORT */