/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */

/**
 * The idea of this code is rather simple. Each second, for each server
 * namespace we have an SLV - a server lock volume - which is calculated from
 * the current number of granted locks, the grant speed for the past period,
 * etc. - that is, the locking load. For simplicity, this SLV number may be
 * thought of as a flow definition. It is sent to clients at every opportunity
 * to let them know the current load situation on the server. By default, at
 * the beginning, the SLV on the server is set to the maximum value, which is
 * calculated as follows: allow one client to hold all locks of the limit
 * ->pl_limit for 10h.
 *
 * Next, on clients, the number of cached locks is no longer limited
 * artificially in any way, as it was before. Instead, the client calculates
 * a CLV - a client lock volume - for each lock and compares it with the last
 * SLV from the server. The CLV is calculated as the number of locks in the
 * LRU * the lock's live time in seconds. If CLV > SLV, the lock is canceled.
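 *
 * For illustration (numbers are made up, not from the source): with 100
 * locks in the LRU and a lock live time of 3600 seconds, CLV = 100 * 3600 =
 * 360000. If the last SLV received from the server is below 360000, the
 * client starts canceling locks; if the server is lightly loaded and its
 * SLV is larger, all locks stay cached.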
 *
 * The client has an LVF - a lock volume factor - which regulates how
 * sensitive the client should be to the last SLV from the server. The higher
 * the LVF, the more locks will be canceled on the client. Its default value
 * is 1. Setting LVF to 2 means that the client will cancel locks twice as
 * fast.
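 *
 * For example (illustrative reading of the rule above): with LVF = 2, a
 * lock's volume is effectively doubled before the CLV > SLV comparison, so
 * the lock crosses the cancellation threshold in half the time it would
 * with LVF = 1.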
 *
 * Locks on a client will be canceled more aggressively in these cases:
 * (1) if SLV is smaller, that is, load is higher on the server;
 * (2) the client holds a lot of locks (the more locks it holds, the bigger
 *     the chance that some of them should be canceled);
 * (3) the client has old locks (taken some time ago).
 *
 * Thus, according to the flow paradigm that we use to better understand SLV,
 * the CLV is the volume of a particle in the flow described by SLV. If the
 * flow is getting thinner, more and more particles fall outside of it, and
 * as the particles are locks, they should be canceled.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such as
 * using LVF, and many cleanups. The flow definition, which allows the logic
 * to be understood more easily, belongs to Nikita Danilov
 * (nikita@clusterfs.com), as do many cleanups and fixes. The design and
 * implementation are by Yury Umanets (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As may be seen from the list above, we have a few possible tunables which
 * may strongly affect behavior. They may all be modified via proc. However,
 * they also make it possible to construct several pre-defined behavior
 * policies. If none of the predefined policies suits the workload pattern
 * in use, a new one may be "constructed" via the proc tunables.
 */
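
/*
 * Illustrative only: with the proc layout registered by
 * ldlm_pool_proc_init() below, the tunables can typically be inspected and
 * set from userspace, e.g.:
 *
 *   lctl get_param ldlm.namespaces.*.pool.state
 *   lctl set_param ldlm.namespaces.*.pool.lock_volume_factor=2
 *
 * (Exact parameter paths depend on the Lustre release.)
 */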

#define DEBUG_SUBSYSTEM S_LDLM

#include <linux/kthread.h>
#include <lustre_dlm.h>
#include <cl_object.h>
#include <obd_class.h>
#include <obd_support.h>
#include "ldlm_internal.h"

#ifdef HAVE_LRU_RESIZE_SUPPORT

/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
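
/*
 * Worked example (illustrative): with 4 KiB pages (PAGE_SHIFT = 12),
 * NUM_CACHEPAGES >> 8 is the cache size in MiB (256 pages per MiB), so a
 * host with 8 GiB of cacheable RAM gets 8192 * 50 = 409600 locks.
 */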

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)

extern struct proc_dir_entry *ldlm_ns_proc_dir;
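
/*
 * For illustration: dru() divides \a val by 2^shift, optionally rounding
 * up; e.g. dru(9, 2, 1) = (9 + 3) >> 2 = 3, while dru(9, 2, 0) = 2.
 */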
static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
        return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}

static inline __u64 ldlm_pool_slv_max(__u32 L)
{
        /*
         * Allow to have all locks for 1 client for 10 hrs.
         * Formula is the following: limit * 10h / 1 client.
         */
        __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
        return lim;
}

static inline __u64 ldlm_pool_slv_min(__u32 L)
{
        return 1;
}

enum {
        LDLM_POOL_FIRST_STAT = 0,
        LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
        LDLM_POOL_GRANT_STAT,
        LDLM_POOL_CANCEL_STAT,
        LDLM_POOL_GRANT_RATE_STAT,
        LDLM_POOL_CANCEL_RATE_STAT,
        LDLM_POOL_GRANT_PLAN_STAT,
        LDLM_POOL_SLV_STAT,
        LDLM_POOL_SHRINK_REQTD_STAT,
        LDLM_POOL_SHRINK_FREED_STAT,
        LDLM_POOL_RECALC_STAT,
        LDLM_POOL_TIMING_STAT,
        LDLM_POOL_LAST_STAT
};

static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
        return container_of(pl, struct ldlm_namespace, ns_pool);
}

/**
 * Calculates suggested grant_step in % of available locks for passed
 * \a period. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
        /*
         * This yields a 1% grant step for anything below
         * LDLM_POOL_GSP_STEP and up to 30% for anything higher than
         * LDLM_POOL_GSP_STEP.
         *
         * How this affects execution:
         *
         * - for a thread period of 1s we will have a grant_step of 1%,
         *   which is good from the pov of taking some load off the server
         *   and pushing it out to clients. This is so because a 1%
         *   grant_step means the server will not allow clients to grab
         *   lots of locks in a short period of time while keeping all
         *   their old locks cached. Clients will always have to give some
         *   locks back if they want to take some new ones;
         *
         * - for the default thread period of 10s we will have 23%, which
         *   means that clients will have enough room to take new locks
         *   without giving any back. All locks from this 23% which were
         *   not taken by clients in the current period will contribute to
         *   SLV growth. SLV growth means more locks cached on clients
         *   until the limit or grant plan is reached.
         */
        return LDLM_POOL_MAX_GSP -
                ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
                 (t >> LDLM_POOL_GSP_STEP_SHIFT));
}
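
/*
 * Worked arithmetic for the formula above (illustrative): with
 * LDLM_POOL_MAX_GSP = 30, LDLM_POOL_MIN_GSP = 1 and
 * LDLM_POOL_GSP_STEP_SHIFT = 2:
 *
 *   t = 1s:  30 - (29 >> (1 >> 2))  = 30 - (29 >> 0) = 1%
 *   t = 10s: 30 - (29 >> (10 >> 2)) = 30 - (29 >> 2) = 23%
 */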

static inline int ldlm_pool_granted(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_granted);
}

/**
 * Recalculates next grant limit on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
        int granted, grant_step, limit;

        limit = ldlm_pool_get_limit(pl);
        granted = ldlm_pool_granted(pl);

        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        grant_step = ((limit - granted) * grant_step) / 100;
        pl->pl_grant_plan = granted + grant_step;
        limit = (limit * 5) >> 2;
        if (pl->pl_grant_plan > limit)
                pl->pl_grant_plan = limit;
}

/**
 * Recalculates next SLV on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
        int granted;
        int grant_plan;
        int round_up;
        __u64 slv;
        __u64 slv_factor;
        __u64 grant_usage;
        __u32 limit;

        slv = pl->pl_server_lock_volume;
        grant_plan = pl->pl_grant_plan;
        limit = ldlm_pool_get_limit(pl);
        granted = ldlm_pool_granted(pl);
        round_up = granted < limit;

        grant_usage = max_t(int, limit - (granted - grant_plan), 1);

        /*
         * Find out the SLV change factor, which is the ratio of grant
         * usage to the limit. SLV changes as fast as the ratio of grant
         * plan consumption. The more locks from the grant plan are not
         * consumed by clients in the last interval (idle time), the faster
         * SLV grows. Conversely, the more the grant plan is over-consumed
         * (load time), the faster SLV drops.
         */
        slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
        do_div(slv_factor, limit);
        slv = slv * slv_factor;
        slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);

        if (slv > ldlm_pool_slv_max(limit)) {
                slv = ldlm_pool_slv_max(limit);
        } else if (slv < ldlm_pool_slv_min(limit)) {
                slv = ldlm_pool_slv_min(limit);
        }

        pl->pl_server_lock_volume = slv;
}
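
/*
 * Numeric sketch of the update above (illustrative values): limit = 10000,
 * granted = 6000, grant_plan = 7000 gives grant_usage =
 * max(10000 - (6000 - 7000), 1) = 11000, so slv_factor =
 * (11000 << 10) / 10000 = 1126 and the new SLV is old_slv * 1126 >> 10,
 * i.e. roughly a 10% increase because the grant plan went unconsumed.
 */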

/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
        int grant_plan = pl->pl_grant_plan;
        __u64 slv = pl->pl_server_lock_volume;
        int granted = ldlm_pool_granted(pl);
        int grant_rate = atomic_read(&pl->pl_grant_rate);
        int cancel_rate = atomic_read(&pl->pl_cancel_rate);

        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
                            slv);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                            granted);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                            grant_rate);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                            grant_plan);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                            cancel_rate);
}

/**
 * Sets the current SLV into the obd accessible via ldlm_pl2ns(pl)->ns_obd.
 */
static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Set the new SLV in an obd field so it can be used later without
         * accessing the pool. This is required to avoid a race between
         * sending a reply to a client with the new SLV and cleaning up the
         * server stack, during which we cannot guarantee that the
         * namespace is still alive. We only know that the obd is alive as
         * long as a valid export is alive.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_slv = pl->pl_server_lock_volume;
        write_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates all pool fields on passed \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }

        /*
         * Recalc SLV after last period. This should be done
         * _before_ recalculating new grant plan.
         */
        ldlm_pool_recalc_slv(pl);

        /*
         * Make sure that pool informed obd of last SLV changes.
         */
        ldlm_srv_pool_push_slv(pl);

        /*
         * Update grant_plan for new period.
         */
        ldlm_pool_recalc_grant_plan(pl);

        pl->pl_recalc_time = cfs_time_current_sec();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
        return 0;
}

/**
 * This function is used on the server side as the main entry point for
 * memory pressure handling. It decreases SLV on \a pl according to the
 * passed \a nr and \a gfp_mask.
 *
 * Our goal here is to decrease SLV in such a way that clients hold \a nr
 * fewer locks over the next 10h.
 */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
                                int nr, gfp_t gfp_mask)
{
        __u64 limit;

        /*
         * VM is asking how many entries may be potentially freed.
         */
        if (nr == 0)
                return ldlm_pool_granted(pl);

        /*
         * Client already canceled locks but server is already in shrinker
         * and can't cancel anything. Let's catch this race.
         */
        if (ldlm_pool_granted(pl) == 0)
                return 0;

        spin_lock(&pl->pl_lock);

        /*
         * We want the shrinker to cause cancellation of @nr locks from
         * clients or grant approximately @nr fewer locks in the next
         * intervals.
         *
         * This is why we decrease SLV by @nr. The effect will only last
         * for one recalc interval (1s these days), which should be enough
         * to pass the decreased SLV to all clients. On the next recalc
         * interval the pool will either increase SLV if the locking load
         * is not high, keep it on the same level, or even decrease it
         * again. Thus, the shrinker-decreased SLV will affect the next
         * recalc intervals and thereby lower the locking load.
         */
        if (nr < pl->pl_server_lock_volume) {
                pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
        } else {
                limit = ldlm_pool_get_limit(pl);
                pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
        }

        /*
         * Make sure that pool informed obd of last SLV changes.
         */
        ldlm_srv_pool_push_slv(pl);
        spin_unlock(&pl->pl_lock);

        /*
         * We did not really free any memory here so far; it may only be
         * freed later, so return 0 to avoid confusing VM.
         */
        return 0;
}
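
/*
 * Example of the effect (illustrative numbers): if the current SLV is
 * 1000000 and the VM asks to shrink nr = 5000, the SLV sent to clients
 * drops to 995000 for the next recalc interval; if nr exceeded the SLV, it
 * would be clamped to ldlm_pool_slv_min(limit) instead.
 */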

/**
 * Setup server side pool \a pl with passed \a limit.
 */
static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
{
        struct obd_device *obd;

        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL && obd != LP_POISON);
        LASSERT(obd->obd_type != LP_POISON);
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_limit = limit;
        write_unlock(&obd->obd_pool_lock);

        ldlm_pool_set_limit(pl, limit);
        return 0;
}

/**
 * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd into passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Get new SLV and Limit from obd, which is updated with incoming
         * RPCs.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        read_lock(&obd->obd_pool_lock);
        pl->pl_server_lock_volume = obd->obd_pool_slv;
        ldlm_pool_set_limit(pl, obd->obd_pool_limit);
        read_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates client side pool \a pl according to current SLV and Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;
        int ret;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        /*
         * Check if we need to recalc lists now.
         */
        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }

        /*
         * Make sure that pool knows last SLV and Limit from obd.
         */
        ldlm_cli_pool_pop_slv(pl);
        spin_unlock(&pl->pl_lock);

        /*
         * Do not cancel locks in case lru resize is disabled for this ns.
         */
        if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
                GOTO(out, ret = 0);

        /*
         * When canceling locks on a client we do not need to maintain
         * sharp timing; we only want to cancel locks asap according to the
         * new SLV. This may be called when SLV has changed much, which is
         * why we do not take pl->pl_recalc_time into account here.
         */
        ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC,
                              LDLM_LRU_FLAG_LRUR);

out:
        spin_lock(&pl->pl_lock);
        /*
         * Time of LRU resizing might be longer than period,
         * so update after LRU resizing rather than before it.
         */
        pl->pl_recalc_time = cfs_time_current_sec();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
        return ret;
}

/**
 * This function is the main entry point for memory pressure handling on
 * the client side. Its main goal is to cancel some number of locks on the
 * passed \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
                                int nr, gfp_t gfp_mask)
{
        struct ldlm_namespace *ns;
        int unused;

        ns = ldlm_pl2ns(pl);

        /*
         * Do not cancel locks in case lru resize is disabled for this ns.
         */
        if (!ns_connect_lru_resize(ns))
                return 0;

        /*
         * Make sure that pool knows last SLV and Limit from obd.
         */
        ldlm_cli_pool_pop_slv(pl);

        spin_lock(&ns->ns_lock);
        unused = ns->ns_nr_unused;
        spin_unlock(&ns->ns_lock);

        if (nr == 0)
                return (unused / 100) * sysctl_vfs_cache_pressure;
        return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK);
}
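
/*
 * Note on the count returned above (illustrative reading of the formula):
 * sysctl_vfs_cache_pressure defaults to 100, so (unused / 100) * 100
 * reports roughly `unused` freeable entries; an administrator raising
 * vfs_cache_pressure makes the cache look proportionally larger to the
 * shrinker.
 */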

static struct ldlm_pool_ops ldlm_srv_pool_ops = {
        .po_recalc = ldlm_srv_pool_recalc,
        .po_shrink = ldlm_srv_pool_shrink,
        .po_setup  = ldlm_srv_pool_setup
};

static struct ldlm_pool_ops ldlm_cli_pool_ops = {
        .po_recalc = ldlm_cli_pool_recalc,
        .po_shrink = ldlm_cli_pool_shrink
};

/**
 * Pool recalc wrapper. Will call either client or server pool recalc
 * callback depending on what pool \a pl is used for.
 */
int ldlm_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;
        int count;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec > 0) {
                spin_lock(&pl->pl_lock);
                recalc_interval_sec = cfs_time_current_sec() -
                        pl->pl_recalc_time;

                if (recalc_interval_sec > 0) {
                        /*
                         * Update pool statistics every 1s.
                         */
                        ldlm_pool_recalc_stats(pl);

                        /*
                         * Zero out all rates and speed for the last period.
                         */
                        atomic_set(&pl->pl_grant_rate, 0);
                        atomic_set(&pl->pl_cancel_rate, 0);
                }
                spin_unlock(&pl->pl_lock);
        }

        if (pl->pl_ops->po_recalc != NULL) {
                count = pl->pl_ops->po_recalc(pl);
                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                    count);
        }

        recalc_interval_sec = pl->pl_recalc_time - cfs_time_current_sec() +
                              pl->pl_recalc_period;
        if (recalc_interval_sec <= 0) {
                /* DEBUG: should be removed once LU-4536 is fixed */
                CDEBUG(D_DLMTRACE, "%s: Negative interval(%ld), "
                       "too short period(%ld)\n",
                       pl->pl_name, recalc_interval_sec,
                       pl->pl_recalc_period);

                /* Prevent too frequent recalculation. */
                recalc_interval_sec = 1;
        }

        return recalc_interval_sec;
}

/**
 * Pool shrink wrapper. Will call either client or server pool shrink
 * callback depending on what pool \a pl is used for.
 */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
        int cancel = 0;

        if (pl->pl_ops->po_shrink != NULL) {
                cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
                if (nr > 0) {
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_REQTD_STAT,
                                            nr);
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_FREED_STAT,
                                            cancel);
                        CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
                               "shrunk %d\n", pl->pl_name, nr, cancel);
                }
        }
        return cancel;
}

/**
 * Pool setup wrapper. Will call either client or server pool setup
 * callback depending on what pool \a pl is used for.
 *
 * Sets passed \a limit into pool \a pl.
 */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
        if (pl->pl_ops->po_setup != NULL)
                return pl->pl_ops->po_setup(pl, limit);
        return 0;
}

static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
        int granted, grant_rate, cancel_rate, grant_step;
        int grant_speed, grant_plan, lvf;
        struct ldlm_pool *pl = m->private;
        __u64 slv, clv;
        __u32 limit;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        clv = pl->pl_client_lock_volume;
        limit = ldlm_pool_get_limit(pl);
        grant_plan = pl->pl_grant_plan;
        granted = ldlm_pool_granted(pl);
        grant_rate = atomic_read(&pl->pl_grant_rate);
        cancel_rate = atomic_read(&pl->pl_cancel_rate);
        grant_speed = grant_rate - cancel_rate;
        lvf = atomic_read(&pl->pl_lock_volume_factor);
        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        spin_unlock(&pl->pl_lock);

        seq_printf(m, "LDLM pool state (%s):\n"
                   "  SLV: %llu\n"
                   "  CLV: %llu\n"
                   "  LVF: %d\n",
                   pl->pl_name, slv, clv, lvf);

        if (ns_is_server(ldlm_pl2ns(pl))) {
                seq_printf(m, "  GSP: %d%%\n", grant_step);
                seq_printf(m, "  GP:  %d\n", grant_plan);
        }

        seq_printf(m, "  GR:  %d\n  CR:  %d\n  GS:  %d\n  G:   %d\n"
                   "  L:   %d\n",
                   grant_rate, cancel_rate, grant_speed,
                   granted, limit);
        return 0;
}
LPROC_SEQ_FOPS_RO(lprocfs_pool_state);

static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
{
        struct ldlm_pool *pl = m->private;
        int grant_speed;

        spin_lock(&pl->pl_lock);
        /* serialize with ldlm_pool_recalc */
        grant_speed = atomic_read(&pl->pl_grant_rate) -
                      atomic_read(&pl->pl_cancel_rate);
        spin_unlock(&pl->pl_lock);
        return lprocfs_uint_seq_show(m, &grant_speed);
}

LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);

LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
                                               const char __user *buf,
                                               size_t len, loff_t *off)
{
        struct seq_file *seq = file->private_data;

        return lprocfs_wr_recalc_period(file, buf, len, seq->private);
}
LPROC_SEQ_FOPS(lprocfs_recalc_period);

LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);

LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);

static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
        struct ldlm_namespace *ns = ldlm_pl2ns(pl);
        struct proc_dir_entry *parent_ns_proc;
        struct lprocfs_vars pool_vars[2];
        char *var_name = NULL;
        int rc = 0;
        ENTRY;

        OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
        if (!var_name)
                RETURN(-ENOMEM);

        parent_ns_proc = ns->ns_proc_dir_entry;
        if (parent_ns_proc == NULL) {
                CERROR("%s: proc entry is not initialized\n",
                       ldlm_ns_name(ns));
                GOTO(out_free_name, rc = -EINVAL);
        }
        pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
                                           NULL, NULL);
        if (IS_ERR(pl->pl_proc_dir)) {
                rc = PTR_ERR(pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
                CERROR("%s: cannot create 'pool' proc entry: rc = %d\n",
                       ldlm_ns_name(ns), rc);
                GOTO(out_free_name, rc);
        }

        var_name[MAX_STRING_SIZE] = '\0';
        memset(pool_vars, 0, sizeof(pool_vars));
        pool_vars[0].name = var_name;

        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "server_lock_volume",
                     &pl->pl_server_lock_volume, &ldlm_pool_u64_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "limit", &pl->pl_limit,
                     &ldlm_pool_rw_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "granted",
                     &pl->pl_granted, &ldlm_pool_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_speed", pl,
                     &lprocfs_grant_speed_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "cancel_rate",
                     &pl->pl_cancel_rate, &ldlm_pool_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_rate",
                     &pl->pl_grant_rate, &ldlm_pool_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_plan", pl,
                     &lprocfs_grant_plan_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "recalc_period",
                     pl, &lprocfs_recalc_period_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "lock_volume_factor",
                     &pl->pl_lock_volume_factor, &ldlm_pool_rw_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "state", pl,
                     &lprocfs_pool_state_fops);

        pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
                                           LDLM_POOL_FIRST_STAT, 0);
        if (pl->pl_stats == NULL)
                GOTO(out_free_name, rc = -ENOMEM);

        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "granted", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_plan", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "slv", "slv");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_request", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_timing", "sec");
        rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);

        EXIT;
out_free_name:
        OBD_FREE(var_name, MAX_STRING_SIZE + 1);
        return rc;
}

static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
{
        if (pl->pl_stats != NULL) {
                lprocfs_free_stats(&pl->pl_stats);
                pl->pl_stats = NULL;
        }
        if (pl->pl_proc_dir != NULL) {
                lprocfs_remove(&pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
        }
}

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, enum ldlm_side client)
{
        int rc;
        ENTRY;

        spin_lock_init(&pl->pl_lock);
        atomic_set(&pl->pl_granted, 0);
        pl->pl_recalc_time = cfs_time_current_sec();
        atomic_set(&pl->pl_lock_volume_factor, 1);

        atomic_set(&pl->pl_grant_rate, 0);
        atomic_set(&pl->pl_cancel_rate, 0);
        pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

        snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
                 ldlm_ns_name(ns), idx);

        if (client == LDLM_NAMESPACE_SERVER) {
                pl->pl_ops = &ldlm_srv_pool_ops;
                ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
                pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
                pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
        } else {
                ldlm_pool_set_limit(pl, 1);
                pl->pl_server_lock_volume = 0;
                pl->pl_ops = &ldlm_cli_pool_ops;
                pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
        }
        pl->pl_client_lock_volume = 0;
        rc = ldlm_pool_proc_init(pl);
        if (rc)
                RETURN(rc);

        CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

        RETURN(0);
}

void ldlm_pool_fini(struct ldlm_pool *pl)
{
        ENTRY;
        ldlm_pool_proc_fini(pl);

        /*
         * Pool should not be used after this point. We can't free it here,
         * as it lives in struct ldlm_namespace, but we are still interested
         * in catching any abnormal use.
         */
        POISON(pl, 0x5a, sizeof(*pl));
        EXIT;
}

/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * FLOCK locks are special in the sense that they are almost never
         * cancelled; instead a special kind of lock is used to drop them.
         * Also there is no LRU for flock locks, so there is no point in
         * tracking them anyway.
         *
         * PLAIN locks are used by config and quota; their quantity is
         * small and usually they are not in the LRU.
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK ||
            lock->l_resource->lr_type == LDLM_PLAIN)
                return;

        ldlm_reclaim_add(lock);

        atomic_inc(&pl->pl_granted);
        atomic_inc(&pl->pl_grant_rate);
        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);

        /*
         * Do not do pool recalc for the client side, as all locks which
         * potentially may be canceled have already been packed into
         * enqueue/cancel rpcs. Also we do not want to run out of stack
         * with too long call paths.
         */
        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
}

/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * Filter out FLOCK & PLAIN locks. Read the comment in
         * ldlm_pool_add().
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK ||
            lock->l_resource->lr_type == LDLM_PLAIN)
                return;

        ldlm_reclaim_del(lock);

        LASSERT(atomic_read(&pl->pl_granted) > 0);
        atomic_dec(&pl->pl_granted);
        atomic_inc(&pl->pl_cancel_rate);

        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);

        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
}

/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
        __u64 slv;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        spin_unlock(&pl->pl_lock);
        return slv;
}

/**
 * Sets passed \a slv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_server_lock_volume = slv;
        spin_unlock(&pl->pl_lock);
}

/**
 * Returns current \a pl CLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
        __u64 clv;

        spin_lock(&pl->pl_lock);
        clv = pl->pl_client_lock_volume;
        spin_unlock(&pl->pl_lock);
        return clv;
}

/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_client_lock_volume = clv;
        spin_unlock(&pl->pl_lock);
}

/**
 * Returns current \a pl limit.
 */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_limit);
}

/**
 * Sets passed \a limit to \a pl.
 */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
        atomic_set(&pl->pl_limit, limit);
}

/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_lock_volume_factor);
}

static struct ptlrpc_thread *ldlm_pools_thread;
static struct shrinker *ldlm_pools_srv_shrinker;
static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;

/*
 * Count locks from all namespaces (if possible). Returns the number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
{
        unsigned long total = 0;
        int nr_ns;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL; /* loop detection */

        if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
                return 0;

        CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
               client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

        /*
         * Find out how many resources we may release.
         */
        for (nr_ns = ldlm_namespace_nr_read(client);
             nr_ns > 0; nr_ns--) {
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        return 0;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns == ns_old) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));
                total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                ldlm_namespace_put(ns);
        }

        return total;
}

static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
                                     gfp_t gfp_mask)
{
        unsigned long freed = 0;
        int tmp, nr_ns;
        struct ldlm_namespace *ns;

        if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
                return -1;

        /*
         * Shrink at least ldlm_namespace_nr_read(client) namespaces.
         */
        for (tmp = nr_ns = ldlm_namespace_nr_read(client);
             tmp > 0; tmp--) {
                int cancel, nr_locks;

                /*
                 * Do not call shrink under ldlm_namespace_lock(client)
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);
                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                nr_locks = ldlm_pool_granted(&ns->ns_pool);
                /*
                 * We used to shrink proportionally, but with the new
                 * shrinker API we lost the total number of freeable locks.
                 */
                cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
                freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
                ldlm_namespace_put(ns);
        }

        /*
         * We only decrease the SLV in the server pools shrinker; return
         * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128.
         */
        return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
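
/*
 * Sketch of the proportional split above (illustrative): if the VM asks to
 * scan nr = 128 locks and nr_ns = 4 namespaces exist, each namespace is
 * asked to cancel 1 + min(its granted locks, 128 / 4 = 32) locks.
 */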

#ifdef HAVE_SHRINKER_COUNT
static unsigned long ldlm_pools_srv_count(struct shrinker *s,
                                          struct shrink_control *sc)
{
        return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}

static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
                                         struct shrink_control *sc)
{
        return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
                               sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
                                          struct shrink_control *sc)
{
        return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
                                         struct shrink_control *sc)
{
        return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
                               sc->gfp_mask);
}

#else /* !HAVE_SHRINKER_COUNT */

/*
 * Cancel \a nr locks from all namespaces (if possible). Returns the number
 * of cached locks after the shrink is finished. All namespaces are asked
 * to cancel an approximately equal number of locks to keep them balanced.
 */
static int ldlm_pools_shrink(enum ldlm_side client, int nr, gfp_t gfp_mask)
{
        unsigned long total = 0;

        if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
            !(gfp_mask & __GFP_FS))
                return -1;

        CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
               nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

        total = ldlm_pools_count(client, gfp_mask);

        if (nr == 0 || total == 0)
                return total;

        return ldlm_pools_scan(client, nr, gfp_mask);
}

static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
                                 shrink_param(sc, nr_to_scan),
                                 shrink_param(sc, gfp_mask));
}

static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
                                 shrink_param(sc, nr_to_scan),
                                 shrink_param(sc, gfp_mask));
}

#endif /* HAVE_SHRINKER_COUNT */

int ldlm_pools_recalc(enum ldlm_side client)
{
        unsigned long nr_l = 0, nr_p = 0, l;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL;
        int nr, equal = 0;
        /* seconds of sleep if no active namespaces */
        int time = client ? LDLM_POOL_CLI_DEF_RECALC_PERIOD :
                            LDLM_POOL_SRV_DEF_RECALC_PERIOD;

        /*
         * No need to setup pool limit for client pools.
         */
        if (client == LDLM_NAMESPACE_SERVER) {
                /*
                 * Check all modest namespaces first.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                list_for_each_entry(ns, ldlm_namespace_list(client),
                                    ns_list_chain) {
                        if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
                                continue;

                        l = ldlm_pool_granted(&ns->ns_pool);
                        if (l == 0)
                                l = 1;

                        /*
                         * Set the modest pools limit equal to their avg
                         * granted locks + ~6%.
                         */
                        l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
                        ldlm_pool_setup(&ns->ns_pool, l);
                        nr_l += l;
                        nr_p++;
                }
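
                /*
                 * Margin arithmetic (illustrative, assuming
                 * LDLM_POOLS_MODEST_MARGIN_SHIFT is 4): dru(l, 4, 0) adds
                 * l/16, i.e. the ~6% headroom mentioned above, so a modest
                 * pool with 1600 granted locks gets a limit of 1700.
                 */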

                /*
                 * Make sure that modest namespaces did not eat more than
                 * 2/3 of the limit.
                 */
                if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
                        CWARN("\"Modest\" pools eat out 2/3 of server locks "
                              "limit (%lu of %lu). This means that you have "
                              "too many clients for this amount of server "
                              "RAM. Upgrade server!\n", nr_l,
                              LDLM_POOL_HOST_L);
                        equal = 1;
                }

                /*
                 * The rest is given to greedy namespaces.
                 */
                list_for_each_entry(ns, ldlm_namespace_list(client),
                                    ns_list_chain) {
                        if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
                                continue;

                        if (equal) {
                                /*
                                 * In the case when 2/3 of the locks are
                                 * eaten by modest pools, we re-setup an
                                 * equal limit for all pools.
                                 */
                                l = LDLM_POOL_HOST_L /
                                        ldlm_namespace_nr_read(client);
                        } else {
                                /*
                                 * All the remaining greedy pools get the
                                 * rest of the locks in equal parts.
                                 */
                                l = (LDLM_POOL_HOST_L - nr_l) /
                                        (ldlm_namespace_nr_read(client) -
                                         nr_p);
                        }
                        ldlm_pool_setup(&ns->ns_pool, l);
                }
                mutex_unlock(ldlm_namespace_lock(client));
        }
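
        /*
         * Worked split (illustrative): with LDLM_POOL_HOST_L = 100000, two
         * modest pools consuming nr_l = 40000 and five namespaces in
         * total, each of the remaining greedy pools is set up with
         * (100000 - 40000) / (5 - 2) = 20000 locks.
         */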

        /*
         * Recalc at least ldlm_namespace_nr(client) namespaces.
         */
        for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
                int skip;
                /*
                 * Lock the list, get the first @ns in the list, getref,
                 * move it to the tail, unlock and call pool recalc. This
                 * way we avoid calling recalc under @ns lock, which is
                 * really good as we get rid of a potential deadlock on
                 * client nodes when canceling locks synchronously.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns_old == ns) { /* Full pass complete */
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                /* We got an empty namespace, need to move it back to the
                 * inactive list.
                 * The race with parallel resource creation is fine:
                 * - If they do namespace_get before our check, we fail the
                 *   check and they move this item to the end of the list
                 *   anyway
                 * - If we do the check and then they do namespace_get,
                 *   then we move the namespace to inactive and they will
                 *   move it back to active (synchronised by the lock, so
                 *   no clash there).
                 */
                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                spin_lock(&ns->ns_lock);
                /*
                 * skip ns which is being freed, and we don't want to
                 * increase its refcount again, not even temporarily.
                 * bz21519 & LU-499.
                 */
                if (ns->ns_stopping) {
                        skip = 1;
                } else {
                        skip = 0;
                        ldlm_namespace_get(ns);
                }
                spin_unlock(&ns->ns_lock);

                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                /*
                 * After setup is done - recalc the pool.
                 */
                if (!skip) {
                        int ttime = ldlm_pool_recalc(&ns->ns_pool);

                        if (ttime < time)
                                time = ttime;

                        ldlm_namespace_put(ns);
                }
        }

        /* Wake up the blocking threads from time to time. */
        ldlm_bl_thread_wakeup();

        return time;
}

static int ldlm_pools_thread_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
        int s_time, c_time;
        ENTRY;

        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
               "ldlm_poold", current_pid());

        while (1) {
                struct l_wait_info lwi;

                /*
                 * Recalc all pools on this tick.
                 */
                s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
                c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

                /*
                 * Wait until the next check time, or until we're
                 * stopped.
                 */
                lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
                                  NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_event(thread),
                             &lwi);

                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
                thread_test_and_clear_flags(thread, SVC_EVENT);
        }

        thread_set_flags(thread, SVC_STOPPED);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
               "ldlm_poold", current_pid());

        complete_and_exit(&ldlm_pools_comp, 0);
}

static int ldlm_pools_thread_start(void)
{
        struct l_wait_info lwi = { 0 };
        struct task_struct *task;
        ENTRY;

        if (ldlm_pools_thread != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC_PTR(ldlm_pools_thread);
        if (ldlm_pools_thread == NULL)
                RETURN(-ENOMEM);

        init_completion(&ldlm_pools_comp);
        init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

        task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
                           "ldlm_poold");
        if (IS_ERR(task)) {
                CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
                OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
                ldlm_pools_thread = NULL;
                RETURN(PTR_ERR(task));
        }
        l_wait_event(ldlm_pools_thread->t_ctl_waitq,
                     thread_is_running(ldlm_pools_thread), &lwi);
        RETURN(0);
}

static void ldlm_pools_thread_stop(void)
{
        ENTRY;

        if (ldlm_pools_thread == NULL) {
                EXIT;
                return;
        }

        thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
        wake_up(&ldlm_pools_thread->t_ctl_waitq);

        /*
         * Make sure that the pools thread is finished before freeing
         * @thread. This fixes a possible race and oops caused by accessing
         * freed memory in the pools thread.
         */
        wait_for_completion(&ldlm_pools_comp);
        OBD_FREE_PTR(ldlm_pools_thread);
        ldlm_pools_thread = NULL;
        EXIT;
}

int ldlm_pools_init(void)
{
        int rc;
        ENTRY;

        DEF_SHRINKER_VAR(shsvar, ldlm_pools_srv_shrink,
                         ldlm_pools_srv_count, ldlm_pools_srv_scan);
        DEF_SHRINKER_VAR(shcvar, ldlm_pools_cli_shrink,
                         ldlm_pools_cli_count, ldlm_pools_cli_scan);

        rc = ldlm_pools_thread_start();
        if (rc == 0) {
                ldlm_pools_srv_shrinker =
                        set_shrinker(DEFAULT_SEEKS, &shsvar);
                ldlm_pools_cli_shrinker =
                        set_shrinker(DEFAULT_SEEKS, &shcvar);
        }
        RETURN(rc);
}

void ldlm_pools_fini(void)
{
        if (ldlm_pools_srv_shrinker != NULL) {
                remove_shrinker(ldlm_pools_srv_shrinker);
                ldlm_pools_srv_shrinker = NULL;
        }
        if (ldlm_pools_cli_shrinker != NULL) {
                remove_shrinker(ldlm_pools_cli_shrinker);
                ldlm_pools_cli_shrinker = NULL;
        }
        ldlm_pools_thread_stop();
}

#else /* !HAVE_LRU_RESIZE_SUPPORT */

int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
        return 0;
}

int ldlm_pool_recalc(struct ldlm_pool *pl)
{
        return 0;
}

int ldlm_pool_shrink(struct ldlm_pool *pl,
                     int nr, gfp_t gfp_mask)
{
        return 0;
}

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, enum ldlm_side client)
{
        return 0;
}

void ldlm_pool_fini(struct ldlm_pool *pl)
{
        return;
}

void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        return;
}

void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        return;
}

__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
        return 1;
}

void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
        return;
}

__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
        return 1;
}

void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
        return;
}

__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
        return 1;
}

void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
        return;
}

__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
        return 1;
}

int ldlm_pools_init(void)
{
        return 0;
}

void ldlm_pools_fini(void)
{
        return;
}

int ldlm_pools_recalc(enum ldlm_side client)
{
        return 0;
}
#endif /* HAVE_LRU_RESIZE_SUPPORT */