1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011 Whamcloud, Inc.
36 * This file is part of Lustre, http://www.lustre.org/
37 * Lustre is a trademark of Sun Microsystems, Inc.
39 * lustre/ldlm/ldlm_pool.c
41 * Author: Yury Umanets <umka@clusterfs.com>
 * The idea of this code is rather simple. Each second, for each server
 * namespace, we compute the SLV - server lock volume - from the current
 * number of granted locks, the grant speed over the past period, and so
 * on; that is, from the locking load. For simplicity, this SLV number
 * may be thought of as a flow definition. It is sent to clients at every
 * opportunity to let them know the current load situation on the server.
 * Initially, the SLV on the server is set to a maximum value computed as
 * follows: allow one client to hold all ->pl_limit locks for 10h.
 * Next, on clients, the number of cached locks is no longer limited
 * artificially as it was before. Instead, the client calculates a CLV -
 * client lock volume - for each lock and compares it with the last SLV
 * received from the server. CLV is calculated as the number of locks in
 * the LRU * the lock's live time in seconds. If CLV > SLV, the lock is
 * canceled (a small illustrative sketch of this rule follows this header
 * comment).
 * The client also has an LVF - lock volume factor - which regulates how
 * sensitive the client should be to the last SLV received from the
 * server. The higher the LVF, the more locks will be canceled on the
 * client. Its default value is 1; setting LVF to 2 means the client will
 * cancel locks twice as fast.
 * Locks on a client will be canceled more aggressively in these cases:
 * (1) the SLV is smaller, that is, the load is higher on the server;
 * (2) the client holds a lot of locks (the more locks a client holds,
 * the higher the chance that some of them should be canceled);
 * (3) the client has old locks (taken some time ago).
 * Thus, in the flow paradigm that we use to make SLV easier to
 * understand, CLV is the volume of a particle in the flow described by
 * SLV. If the flow gets thinner, more and more particles fall outside of
 * it, and as the particles are locks, they should be canceled.
 * The general idea here belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such
 * as using LVF, and many cleanups. The flow definition that makes the
 * logic easier to understand belongs to Nikita Danilov
 * (nikita@clusterfs.com), as do many cleanups and fixes. The design and
 * implementation are by Yury Umanets (umka@clusterfs.com).
82 * Glossary for terms used:
 * pl_limit - Number of allowed locks in pool. Applies to server and
 * client side pools;
87 * pl_granted - Number of granted locks (calculated);
88 * pl_grant_rate - Number of granted locks for last T (calculated);
89 * pl_cancel_rate - Number of canceled locks for last T (calculated);
90 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
91 * pl_grant_plan - Planned number of granted locks for next T (calculated);
92 * pl_server_lock_volume - Current server lock volume (calculated);
 * As can be seen from the list above, we have a few tunables which can
 * substantially affect behavior. All of them may be modified via proc.
 * They also make it possible to construct several pre-defined behavior
 * policies. If none of the predefined policies suits the workload in
 * use, a new one may be "constructed" via the proc tunables.
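/*
 * Illustrative sketch (not built, and not part of the original code): a
 * minimal rendering of the client-side rule described above, using
 * hypothetical names. CLV is approximated as the number of locks in the
 * LRU times the lock's age in seconds, amplified by LVF, and a lock is
 * canceled once CLV exceeds the last SLV received from the server.
 */
#if 0
static int example_lock_should_cancel(unsigned long lru_count,
                                      unsigned long lock_age_sec,
                                      unsigned long lvf,
                                      unsigned long long slv)
{
        /* CLV = locks in LRU * lock live time, amplified by LVF. */
        unsigned long long clv = (unsigned long long)lru_count *
                                 lock_age_sec * lvf;

        return clv > slv;
}
#endif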
101 #define DEBUG_SUBSYSTEM S_LDLM
104 # include <lustre_dlm.h>
106 # include <liblustre.h>
109 #include <cl_object.h>
111 #include <obd_class.h>
112 #include <obd_support.h>
113 #include "ldlm_internal.h"
115 #ifdef HAVE_LRU_RESIZE_SUPPORT
118 * 50 ldlm locks for 1MB of RAM.
120 #define LDLM_POOL_HOST_L ((CFS_NUM_CACHEPAGES >> (20 - CFS_PAGE_SHIFT)) * 50)
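/*
 * Worked example (illustrative, assuming 4KB pages, i.e.
 * CFS_PAGE_SHIFT == 12): with 1GB of RAM there are 262144 cache pages;
 * 262144 >> (20 - 12) == 1024 (the RAM size in MB), and 1024 * 50 ==
 * 51200 allowed ldlm locks.
 */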
123 * Maximal possible grant step plan in %.
125 #define LDLM_POOL_MAX_GSP (30)
128 * Minimal possible grant step plan in %.
130 #define LDLM_POOL_MIN_GSP (1)
133 * This controls the speed of reaching LDLM_POOL_MAX_GSP
134 * with increasing thread period.
136 #define LDLM_POOL_GSP_STEP_SHIFT (2)
 * LDLM_POOL_MAX_GSP% of all locks is the default GP.
141 #define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
144 * Max age for locks on clients.
146 #define LDLM_POOL_MAX_AGE (36000)
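/* 36000 seconds == 10 hours, matching the "10h" figure quoted above. */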
149 * The granularity of SLV calculation.
151 #define LDLM_POOL_SLV_SHIFT (10)
154 extern cfs_proc_dir_entry_t *ldlm_ns_proc_dir;
157 static inline __u64 dru(__u64 val, __u32 shift, int round_up)
159 return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
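/*
 * For example (plain arithmetic): dru(1001, 3, 0) == 1001 >> 3 == 125,
 * while dru(1001, 3, 1) == (1001 + 7) >> 3 == 126, i.e. division by
 * 2^shift rounded down or up respectively.
 */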
162 static inline __u64 ldlm_pool_slv_max(__u32 L)
 * Allow one client to hold all the locks for 10 hrs.
 * The formula is: limit * 10h / 1 client.
168 __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
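/*
 * E.g. (illustrative numbers): with L == 100000 the maximum SLV is
 * 100000 * 36000 == 3600000000, i.e. the volume of all L locks living
 * for the full 10h.
 */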
172 static inline __u64 ldlm_pool_slv_min(__u32 L)
178 LDLM_POOL_FIRST_STAT = 0,
179 LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
180 LDLM_POOL_GRANT_STAT,
181 LDLM_POOL_CANCEL_STAT,
182 LDLM_POOL_GRANT_RATE_STAT,
183 LDLM_POOL_CANCEL_RATE_STAT,
LDLM_POOL_GRANT_PLAN_STAT,
LDLM_POOL_SLV_STAT,
186 LDLM_POOL_SHRINK_REQTD_STAT,
187 LDLM_POOL_SHRINK_FREED_STAT,
188 LDLM_POOL_RECALC_STAT,
LDLM_POOL_TIMING_STAT,
LDLM_POOL_LAST_STAT
193 static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
195 return container_of(pl, struct ldlm_namespace, ns_pool);
 * Calculates the suggested grant_step, in % of available locks, for the
 * passed period \a t. This is later used in grant_plan calculations.
202 static inline int ldlm_pool_t2gsp(unsigned int t)
 * This yields a 1% grant step for anything below LDLM_POOL_GSP_STEP
 * and up to 30% for anything above it.
 *
 * How this affects execution:
 *
 * - for a thread period of 1s the grant_step is 1%, which is good from
 * the point of view of taking some load off the server and pushing it
 * out to clients. A 1% grant_step means the server will not allow
 * clients to acquire lots of locks in a short period of time while
 * keeping all their old locks cached; clients will always have to give
 * some locks back if they want to take new ones;
 *
 * - for a thread period of 10s (the default) the grant_step is 23%,
 * which means clients have enough room to take new locks without giving
 * any back. Any part of this 23% not consumed by clients in the current
 * period contributes to SLV growth, and a growing SLV means more locks
 * cached on clients, until the limit or the grant plan is reached.
224 return LDLM_POOL_MAX_GSP -
225 ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
226 (t >> LDLM_POOL_GSP_STEP_SHIFT));
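/*
 * Worked example of the formula above (plain arithmetic), with
 * LDLM_POOL_MAX_GSP == 30, LDLM_POOL_MIN_GSP == 1 and
 * LDLM_POOL_GSP_STEP_SHIFT == 2:
 *
 *   t == 1:  30 - (29 >> (1 >> 2))  == 30 - (29 >> 0) == 1%
 *   t == 10: 30 - (29 >> (10 >> 2)) == 30 - (29 >> 2) == 23%
 *
 * matching the 1% and 23% figures quoted in the comment above.
 */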
 * Recalculates the next grant plan for the passed \a pl.
232 * \pre ->pl_lock is locked.
234 static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
236 int granted, grant_step, limit;
238 limit = ldlm_pool_get_limit(pl);
239 granted = cfs_atomic_read(&pl->pl_granted);
241 grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
242 grant_step = ((limit - granted) * grant_step) / 100;
243 pl->pl_grant_plan = granted + grant_step;
244 limit = (limit * 5) >> 2;
245 if (pl->pl_grant_plan > limit)
246 pl->pl_grant_plan = limit;
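/*
 * Illustrative numbers for the above: with limit == 1000,
 * granted == 600 and a 10s period (grant_step == 23%), grant_step
 * becomes (1000 - 600) * 23 / 100 == 92 and pl_grant_plan == 692;
 * the plan is additionally capped at limit * 5 / 4 == 1250.
 */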
250 * Recalculates next SLV on passed \a pl.
252 * \pre ->pl_lock is locked.
254 static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
264 slv = pl->pl_server_lock_volume;
265 grant_plan = pl->pl_grant_plan;
266 limit = ldlm_pool_get_limit(pl);
267 granted = cfs_atomic_read(&pl->pl_granted);
268 round_up = granted < limit;
270 grant_usage = max_t(int, limit - (granted - grant_plan), 1);
 * Find out the SLV change factor, which is the ratio of grant usage
 * to the limit. SLV changes as fast as the rate of grant plan
 * consumption: the more of the grant plan was left unconsumed by
 * clients in the last interval (idle time), the faster SLV grows;
 * conversely, the more the grant plan was over-consumed (load time),
 * the faster SLV drops.
280 slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
281 do_div(slv_factor, limit);
282 slv = slv * slv_factor;
283 slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
285 if (slv > ldlm_pool_slv_max(limit)) {
286 slv = ldlm_pool_slv_max(limit);
287 } else if (slv < ldlm_pool_slv_min(limit)) {
288 slv = ldlm_pool_slv_min(limit);
291 pl->pl_server_lock_volume = slv;
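/*
 * Illustrative arithmetic for the above: with limit == 1000,
 * granted == 900 and grant_plan == 950, grant_usage == 1000 -
 * (900 - 950) == 1050, so slv_factor == (1050 << 10) / 1000 == 1075
 * and the new SLV is roughly old SLV * 1075 >> 10, i.e. SLV grows by
 * about 5% because the grant plan was under-consumed.
 */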
295 * Recalculates next stats on passed \a pl.
297 * \pre ->pl_lock is locked.
299 static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
301 int grant_plan = pl->pl_grant_plan;
302 __u64 slv = pl->pl_server_lock_volume;
303 int granted = cfs_atomic_read(&pl->pl_granted);
304 int grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
305 int cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
307 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
309 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
311 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
313 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
315 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
320 * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
322 static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
324 struct obd_device *obd;
 * Set the new SLV in an obd field so it can be used later without
 * accessing the pool. This is required to avoid a race between sending
 * a reply to a client with the new SLV and cleanup of the server stack,
 * during which we cannot guarantee that the namespace is still alive.
 * We only know that the obd is alive for as long as a valid export is
 * alive.
333 obd = ldlm_pl2ns(pl)->ns_obd;
334 LASSERT(obd != NULL);
335 cfs_write_lock(&obd->obd_pool_lock);
336 obd->obd_pool_slv = pl->pl_server_lock_volume;
337 cfs_write_unlock(&obd->obd_pool_lock);
341 * Recalculates all pool fields on passed \a pl.
343 * \pre ->pl_lock is not locked.
345 static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
347 time_t recalc_interval_sec;
350 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
351 if (recalc_interval_sec < pl->pl_recalc_period)
354 cfs_spin_lock(&pl->pl_lock);
355 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
356 if (recalc_interval_sec < pl->pl_recalc_period) {
357 cfs_spin_unlock(&pl->pl_lock);
361 * Recalc SLV after last period. This should be done
362 * _before_ recalculating new grant plan.
364 ldlm_pool_recalc_slv(pl);
 * Make sure the pool has informed the obd of the last SLV changes.
369 ldlm_srv_pool_push_slv(pl);
372 * Update grant_plan for new period.
374 ldlm_pool_recalc_grant_plan(pl);
376 pl->pl_recalc_time = cfs_time_current_sec();
377 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
378 recalc_interval_sec);
379 cfs_spin_unlock(&pl->pl_lock);
 * This function is used on the server side as the main entry point for
 * memory pressure handling. It decreases the SLV on \a pl according to
 * the passed \a nr and \a gfp_mask.
 *
 * Our goal here is to decrease SLV in such a way that clients hold
 * approximately \a nr fewer locks over the next 10h.
391 static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
392 int nr, unsigned int gfp_mask)
397 * VM is asking how many entries may be potentially freed.
400 return cfs_atomic_read(&pl->pl_granted);
 * The clients have already canceled their locks, but the server is
 * already in the shrinker and can't cancel anything more. Catch this
 * race.
406 if (cfs_atomic_read(&pl->pl_granted) == 0)
409 cfs_spin_lock(&pl->pl_lock);
 * We want the shrinker to cause the cancellation of @nr locks on
 * clients, or to grant approximately @nr fewer locks in the next
 * intervals.
 *
 * This is why we decrease SLV by @nr. The effect only lasts for one
 * recalc interval (1s these days), which should be enough to pass the
 * decreased SLV on to all clients. On the next recalc interval the pool
 * will either increase SLV if the locking load is not high, keep it at
 * the same level, or decrease it again; thus the shrinker-decreased SLV
 * affects the following recalc intervals and thereby lowers the locking
 * load.
if (nr < pl->pl_server_lock_volume) {
        pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
} else {
        limit = ldlm_pool_get_limit(pl);
        pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
}
 * Make sure the pool has informed the obd of the last SLV changes.
433 ldlm_srv_pool_push_slv(pl);
434 cfs_spin_unlock(&pl->pl_lock);
 * We have not actually freed any memory here so far; it may only be
 * freed later, so return 0 to avoid confusing the VM.
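/*
 * Illustrative effect of the SLV decrease above: if
 * pl_server_lock_volume is 1000000 and the VM requests nr == 1000, SLV
 * drops to 999000 for the next recalc interval, nudging clients to
 * cancel roughly that much lock volume.
 */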
 * Set up the server side pool \a pl with the passed \a limit.
446 static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
448 struct obd_device *obd;
450 obd = ldlm_pl2ns(pl)->ns_obd;
451 LASSERT(obd != NULL && obd != LP_POISON);
452 LASSERT(obd->obd_type != LP_POISON);
453 cfs_write_lock(&obd->obd_pool_lock);
454 obd->obd_pool_limit = limit;
455 cfs_write_unlock(&obd->obd_pool_lock);
457 ldlm_pool_set_limit(pl, limit);
 * Sets the SLV and limit from ldlm_pl2ns(pl)->ns_obd on the passed \a pl.
464 static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
466 struct obd_device *obd;
 * Get the new SLV and limit from the obd, which is updated with
 * incoming RPCs.
472 obd = ldlm_pl2ns(pl)->ns_obd;
473 LASSERT(obd != NULL);
474 cfs_read_lock(&obd->obd_pool_lock);
475 pl->pl_server_lock_volume = obd->obd_pool_slv;
476 ldlm_pool_set_limit(pl, obd->obd_pool_limit);
477 cfs_read_unlock(&obd->obd_pool_lock);
 * Recalculates the client side pool \a pl according to the current SLV
 * and limit.
483 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
485 time_t recalc_interval_sec;
488 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
489 if (recalc_interval_sec < pl->pl_recalc_period)
492 cfs_spin_lock(&pl->pl_lock);
494 * Check if we need to recalc lists now.
496 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
497 if (recalc_interval_sec < pl->pl_recalc_period) {
498 cfs_spin_unlock(&pl->pl_lock);
 * Make sure the pool knows the last SLV and limit from the obd.
505 ldlm_cli_pool_pop_slv(pl);
507 pl->pl_recalc_time = cfs_time_current_sec();
508 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
509 recalc_interval_sec);
510 cfs_spin_unlock(&pl->pl_lock);
 * Do not cancel locks if LRU resize is disabled for this ns.
515 if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
 * When canceling locks on the client we do not need to maintain sharp
 * timing; we only want to cancel locks ASAP according to the new SLV.
 * This may be called when the SLV has changed a lot, which is why we do
 * not take pl->pl_recalc_time into account here.
524 RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_SYNC,
 * This function is the main entry point for memory pressure handling on
 * the client side. Its main goal is to cancel some number of locks on
 * the passed \a pl according to \a nr and \a gfp_mask.
533 static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
534 int nr, unsigned int gfp_mask)
536 struct ldlm_namespace *ns;
537 int canceled = 0, unused;
 * Do not cancel locks if LRU resize is disabled for this ns.
544 if (!ns_connect_lru_resize(ns))
 * Make sure the pool knows the last SLV and limit from the obd.
550 ldlm_cli_pool_pop_slv(pl);
552 cfs_spin_lock(&ns->ns_lock);
553 unused = ns->ns_nr_unused;
554 cfs_spin_unlock(&ns->ns_lock);
557 canceled = ldlm_cancel_lru(ns, nr, LDLM_ASYNC,
562 * Return the number of potentially reclaimable locks.
#ifdef __KERNEL__
return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
#else
return unused - canceled;
#endif
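/*
 * For instance (illustrative numbers): with unused == 1000,
 * canceled == 100 and the default sysctl_vfs_cache_pressure == 100,
 * the kernel branch reports (900 / 100) * 100 == 900 potentially
 * reclaimable locks.
 */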
570 struct ldlm_pool_ops ldlm_srv_pool_ops = {
571 .po_recalc = ldlm_srv_pool_recalc,
572 .po_shrink = ldlm_srv_pool_shrink,
573 .po_setup = ldlm_srv_pool_setup
576 struct ldlm_pool_ops ldlm_cli_pool_ops = {
577 .po_recalc = ldlm_cli_pool_recalc,
578 .po_shrink = ldlm_cli_pool_shrink
 * Pool recalc wrapper. Calls either the client or the server pool
 * recalc callback depending on what kind of pool \a pl is.
585 int ldlm_pool_recalc(struct ldlm_pool *pl)
587 time_t recalc_interval_sec;
590 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
591 if (recalc_interval_sec <= 0)
594 cfs_spin_lock(&pl->pl_lock);
595 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
596 if (recalc_interval_sec > 0) {
598 * Update pool statistics every 1s.
600 ldlm_pool_recalc_stats(pl);
603 * Zero out all rates and speed for the last period.
605 cfs_atomic_set(&pl->pl_grant_rate, 0);
606 cfs_atomic_set(&pl->pl_cancel_rate, 0);
608 cfs_spin_unlock(&pl->pl_lock);
611 if (pl->pl_ops->po_recalc != NULL) {
612 count = pl->pl_ops->po_recalc(pl);
613 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
620 EXPORT_SYMBOL(ldlm_pool_recalc);
 * Pool shrink wrapper. Calls either the client or the server pool
 * shrink callback depending on what kind of pool \a pl is.
626 int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
627 unsigned int gfp_mask)
631 if (pl->pl_ops->po_shrink != NULL) {
632 cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
634 lprocfs_counter_add(pl->pl_stats,
635 LDLM_POOL_SHRINK_REQTD_STAT,
637 lprocfs_counter_add(pl->pl_stats,
638 LDLM_POOL_SHRINK_FREED_STAT,
640 CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
641 "shrunk %d\n", pl->pl_name, nr, cancel);
646 EXPORT_SYMBOL(ldlm_pool_shrink);
 * Pool setup wrapper. Calls either the client or the server pool setup
 * callback depending on what kind of pool \a pl is.
 *
 * Sets the passed \a limit on pool \a pl.
654 int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
656 if (pl->pl_ops->po_setup != NULL)
657 return(pl->pl_ops->po_setup(pl, limit));
660 EXPORT_SYMBOL(ldlm_pool_setup);
663 static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
664 int count, int *eof, void *data)
666 int granted, grant_rate, cancel_rate, grant_step;
667 int nr = 0, grant_speed, grant_plan, lvf;
668 struct ldlm_pool *pl = data;
672 cfs_spin_lock(&pl->pl_lock);
673 slv = pl->pl_server_lock_volume;
674 clv = pl->pl_client_lock_volume;
675 limit = ldlm_pool_get_limit(pl);
676 grant_plan = pl->pl_grant_plan;
677 granted = cfs_atomic_read(&pl->pl_granted);
678 grant_rate = cfs_atomic_read(&pl->pl_grant_rate);
679 cancel_rate = cfs_atomic_read(&pl->pl_cancel_rate);
680 grant_speed = grant_rate - cancel_rate;
681 lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
682 grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
683 cfs_spin_unlock(&pl->pl_lock);
685 nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
687 nr += snprintf(page + nr, count - nr, " SLV: "LPU64"\n", slv);
688 nr += snprintf(page + nr, count - nr, " CLV: "LPU64"\n", clv);
689 nr += snprintf(page + nr, count - nr, " LVF: %d\n", lvf);
691 if (ns_is_server(ldlm_pl2ns(pl))) {
692 nr += snprintf(page + nr, count - nr, " GSP: %d%%\n",
694 nr += snprintf(page + nr, count - nr, " GP: %d\n",
697 nr += snprintf(page + nr, count - nr, " GR: %d\n",
699 nr += snprintf(page + nr, count - nr, " CR: %d\n",
701 nr += snprintf(page + nr, count - nr, " GS: %d\n",
703 nr += snprintf(page + nr, count - nr, " G: %d\n",
705 nr += snprintf(page + nr, count - nr, " L: %d\n",
710 static int lprocfs_rd_grant_speed(char *page, char **start, off_t off,
711 int count, int *eof, void *data)
713 struct ldlm_pool *pl = data;
716 cfs_spin_lock(&pl->pl_lock);
717 /* serialize with ldlm_pool_recalc */
718 grant_speed = cfs_atomic_read(&pl->pl_grant_rate) -
719 cfs_atomic_read(&pl->pl_cancel_rate);
720 cfs_spin_unlock(&pl->pl_lock);
721 return lprocfs_rd_uint(page, start, off, count, eof, &grant_speed);
724 LDLM_POOL_PROC_READER(grant_plan, int);
725 LDLM_POOL_PROC_READER(recalc_period, int);
726 LDLM_POOL_PROC_WRITER(recalc_period, int);
728 static int ldlm_pool_proc_init(struct ldlm_pool *pl)
730 struct ldlm_namespace *ns = ldlm_pl2ns(pl);
731 struct proc_dir_entry *parent_ns_proc;
732 struct lprocfs_vars pool_vars[2];
733 char *var_name = NULL;
737 OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
741 parent_ns_proc = lprocfs_srch(ldlm_ns_proc_dir,
743 if (parent_ns_proc == NULL) {
744 CERROR("%s: proc entry is not initialized\n",
746 GOTO(out_free_name, rc = -EINVAL);
748 pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
750 if (IS_ERR(pl->pl_proc_dir)) {
751 CERROR("LProcFS failed in ldlm-pool-init\n");
752 rc = PTR_ERR(pl->pl_proc_dir);
753 GOTO(out_free_name, rc);
756 var_name[MAX_STRING_SIZE] = '\0';
757 memset(pool_vars, 0, sizeof(pool_vars));
758 pool_vars[0].name = var_name;
760 snprintf(var_name, MAX_STRING_SIZE, "server_lock_volume");
761 pool_vars[0].data = &pl->pl_server_lock_volume;
762 pool_vars[0].read_fptr = lprocfs_rd_u64;
763 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
765 snprintf(var_name, MAX_STRING_SIZE, "limit");
766 pool_vars[0].data = &pl->pl_limit;
767 pool_vars[0].read_fptr = lprocfs_rd_atomic;
768 pool_vars[0].write_fptr = lprocfs_wr_atomic;
769 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
771 snprintf(var_name, MAX_STRING_SIZE, "granted");
772 pool_vars[0].data = &pl->pl_granted;
773 pool_vars[0].read_fptr = lprocfs_rd_atomic;
774 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
776 snprintf(var_name, MAX_STRING_SIZE, "grant_speed");
777 pool_vars[0].data = pl;
778 pool_vars[0].read_fptr = lprocfs_rd_grant_speed;
779 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
781 snprintf(var_name, MAX_STRING_SIZE, "cancel_rate");
782 pool_vars[0].data = &pl->pl_cancel_rate;
783 pool_vars[0].read_fptr = lprocfs_rd_atomic;
784 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
786 snprintf(var_name, MAX_STRING_SIZE, "grant_rate");
787 pool_vars[0].data = &pl->pl_grant_rate;
788 pool_vars[0].read_fptr = lprocfs_rd_atomic;
789 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
791 snprintf(var_name, MAX_STRING_SIZE, "grant_plan");
792 pool_vars[0].data = pl;
793 pool_vars[0].read_fptr = lprocfs_rd_grant_plan;
794 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
796 snprintf(var_name, MAX_STRING_SIZE, "recalc_period");
797 pool_vars[0].data = pl;
798 pool_vars[0].read_fptr = lprocfs_rd_recalc_period;
799 pool_vars[0].write_fptr = lprocfs_wr_recalc_period;
800 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
802 snprintf(var_name, MAX_STRING_SIZE, "lock_volume_factor");
803 pool_vars[0].data = &pl->pl_lock_volume_factor;
804 pool_vars[0].read_fptr = lprocfs_rd_atomic;
805 pool_vars[0].write_fptr = lprocfs_wr_atomic;
806 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
808 snprintf(var_name, MAX_STRING_SIZE, "state");
809 pool_vars[0].data = pl;
810 pool_vars[0].read_fptr = lprocfs_rd_pool_state;
811 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
813 pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
814 LDLM_POOL_FIRST_STAT, 0);
816 GOTO(out_free_name, rc = -ENOMEM);
818 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
819 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
821 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
822 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
824 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
825 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
827 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
828 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
829 "grant_rate", "locks/s");
830 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
831 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
832 "cancel_rate", "locks/s");
833 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
834 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
835 "grant_plan", "locks/s");
836 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
837 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
839 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
840 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
841 "shrink_request", "locks");
842 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
843 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
844 "shrink_freed", "locks");
845 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
846 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
847 "recalc_freed", "locks");
848 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
849 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
850 "recalc_timing", "sec");
851 lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);
855 OBD_FREE(var_name, MAX_STRING_SIZE + 1);
859 static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
861 if (pl->pl_stats != NULL) {
862 lprocfs_free_stats(&pl->pl_stats);
865 if (pl->pl_proc_dir != NULL) {
866 lprocfs_remove(&pl->pl_proc_dir);
867 pl->pl_proc_dir = NULL;
870 #else /* !__KERNEL__*/
871 #define ldlm_pool_proc_init(pl) (0)
872 #define ldlm_pool_proc_fini(pl) while (0) {}
875 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
876 int idx, ldlm_side_t client)
881 cfs_spin_lock_init(&pl->pl_lock);
882 cfs_atomic_set(&pl->pl_granted, 0);
883 pl->pl_recalc_time = cfs_time_current_sec();
884 cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
886 cfs_atomic_set(&pl->pl_grant_rate, 0);
887 cfs_atomic_set(&pl->pl_cancel_rate, 0);
888 pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
890 snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
891 ldlm_ns_name(ns), idx);
893 if (client == LDLM_NAMESPACE_SERVER) {
894 pl->pl_ops = &ldlm_srv_pool_ops;
895 ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
896 pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
897 pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
899 ldlm_pool_set_limit(pl, 1);
900 pl->pl_server_lock_volume = 0;
901 pl->pl_ops = &ldlm_cli_pool_ops;
902 pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
904 pl->pl_client_lock_volume = 0;
905 rc = ldlm_pool_proc_init(pl);
909 CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
913 EXPORT_SYMBOL(ldlm_pool_init);
915 void ldlm_pool_fini(struct ldlm_pool *pl)
918 ldlm_pool_proc_fini(pl);
 * The pool must not be used after this point. We can't free it here, as
 * it lives in struct ldlm_namespace, but we are still interested in
 * catching any abnormal use.
925 POISON(pl, 0x5a, sizeof(*pl));
928 EXPORT_SYMBOL(ldlm_pool_fini);
 * Add a newly taken ldlm lock \a lock to pool \a pl accounting.
933 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
 * FLOCK locks are special in that they are almost never canceled;
 * instead, a special kind of lock is used to drop them. Also, there is
 * no LRU for flock locks, so there is no point in tracking them anyway.
941 if (lock->l_resource->lr_type == LDLM_FLOCK)
944 cfs_atomic_inc(&pl->pl_granted);
945 cfs_atomic_inc(&pl->pl_grant_rate);
946 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
 * Do not do pool recalc on the client side, as all locks that could
 * potentially be canceled have already been packed into the
 * enqueue/cancel RPC. Also, we do not want to run out of stack with
 * overly long call paths.
953 if (ns_is_server(ldlm_pl2ns(pl)))
954 ldlm_pool_recalc(pl);
956 EXPORT_SYMBOL(ldlm_pool_add);
959 * Remove ldlm lock \a lock from pool \a pl accounting.
961 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
 * Filter out FLOCK locks. See the comment in ldlm_pool_add() above.
966 if (lock->l_resource->lr_type == LDLM_FLOCK)
969 LASSERT(cfs_atomic_read(&pl->pl_granted) > 0);
970 cfs_atomic_dec(&pl->pl_granted);
971 cfs_atomic_inc(&pl->pl_cancel_rate);
973 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
975 if (ns_is_server(ldlm_pl2ns(pl)))
976 ldlm_pool_recalc(pl);
978 EXPORT_SYMBOL(ldlm_pool_del);
981 * Returns current \a pl SLV.
983 * \pre ->pl_lock is not locked.
985 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
988 cfs_spin_lock(&pl->pl_lock);
989 slv = pl->pl_server_lock_volume;
990 cfs_spin_unlock(&pl->pl_lock);
993 EXPORT_SYMBOL(ldlm_pool_get_slv);
996 * Sets passed \a slv to \a pl.
998 * \pre ->pl_lock is not locked.
1000 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
1002 cfs_spin_lock(&pl->pl_lock);
1003 pl->pl_server_lock_volume = slv;
1004 cfs_spin_unlock(&pl->pl_lock);
1006 EXPORT_SYMBOL(ldlm_pool_set_slv);
1009 * Returns current \a pl CLV.
1011 * \pre ->pl_lock is not locked.
1013 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
1016 cfs_spin_lock(&pl->pl_lock);
1017 slv = pl->pl_client_lock_volume;
1018 cfs_spin_unlock(&pl->pl_lock);
1021 EXPORT_SYMBOL(ldlm_pool_get_clv);
1024 * Sets passed \a clv to \a pl.
1026 * \pre ->pl_lock is not locked.
1028 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
1030 cfs_spin_lock(&pl->pl_lock);
1031 pl->pl_client_lock_volume = clv;
1032 cfs_spin_unlock(&pl->pl_lock);
1034 EXPORT_SYMBOL(ldlm_pool_set_clv);
1037 * Returns current \a pl limit.
1039 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
1041 return cfs_atomic_read(&pl->pl_limit);
1043 EXPORT_SYMBOL(ldlm_pool_get_limit);
1046 * Sets passed \a limit to \a pl.
1048 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
1050 cfs_atomic_set(&pl->pl_limit, limit);
1052 EXPORT_SYMBOL(ldlm_pool_set_limit);
1055 * Returns current LVF from \a pl.
1057 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
1059 return cfs_atomic_read(&pl->pl_lock_volume_factor);
1061 EXPORT_SYMBOL(ldlm_pool_get_lvf);
1064 static int ldlm_pool_granted(struct ldlm_pool *pl)
1066 return cfs_atomic_read(&pl->pl_granted);
1069 static struct ptlrpc_thread *ldlm_pools_thread;
1070 static struct cfs_shrinker *ldlm_pools_srv_shrinker;
1071 static struct cfs_shrinker *ldlm_pools_cli_shrinker;
1072 static cfs_completion_t ldlm_pools_comp;
 * Cancel \a nr locks from all namespaces (if possible). Returns the
 * number of cached locks after the shrink is finished. All namespaces
 * are asked to cancel an approximately equal number of locks to keep
 * the balance.
1079 static int ldlm_pools_shrink(ldlm_side_t client, int nr,
1080 unsigned int gfp_mask)
1082 int total = 0, cached = 0, nr_ns;
1083 struct ldlm_namespace *ns;
1086 if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
1087 !(gfp_mask & __GFP_FS))
1090 CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
1091 nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
1093 cookie = cl_env_reenter();
1096 * Find out how many resources we may release.
1098 for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
1101 cfs_mutex_down(ldlm_namespace_lock(client));
1102 if (cfs_list_empty(ldlm_namespace_list(client))) {
1103 cfs_mutex_up(ldlm_namespace_lock(client));
1104 cl_env_reexit(cookie);
1107 ns = ldlm_namespace_first_locked(client);
1108 ldlm_namespace_get(ns);
1109 ldlm_namespace_move_locked(ns, client);
1110 cfs_mutex_up(ldlm_namespace_lock(client));
1111 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
1112 ldlm_namespace_put(ns);
1115 if (nr == 0 || total == 0) {
1116 cl_env_reexit(cookie);
1121 * Shrink at least ldlm_namespace_nr(client) namespaces.
1123 for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
1126 int cancel, nr_locks;
1129 * Do not call shrink under ldlm_namespace_lock(client)
1131 cfs_mutex_down(ldlm_namespace_lock(client));
1132 if (cfs_list_empty(ldlm_namespace_list(client))) {
1133 cfs_mutex_up(ldlm_namespace_lock(client));
 * If the list is empty, we can't return any @cached > 0;
 * that would probably cause a needless shrinker call.
1142 ns = ldlm_namespace_first_locked(client);
1143 ldlm_namespace_get(ns);
1144 ldlm_namespace_move_locked(ns, client);
1145 cfs_mutex_up(ldlm_namespace_lock(client));
1147 nr_locks = ldlm_pool_granted(&ns->ns_pool);
1148 cancel = 1 + nr_locks * nr / total;
1149 ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
1150 cached += ldlm_pool_granted(&ns->ns_pool);
1151 ldlm_namespace_put(ns);
1153 cl_env_reexit(cookie);
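/*
 * Sketch of the balancing math above (illustrative numbers): if the VM
 * asks for nr == 100 and two namespaces hold 600 and 400 granted locks
 * (total == 1000), they are asked to cancel 1 + 600 * 100 / 1000 == 61
 * and 1 + 400 * 100 / 1000 == 41 locks respectively, keeping the
 * cancellations roughly proportional to pool size.
 */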
1157 static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
1159 return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
1160 shrink_param(sc, nr_to_scan),
1161 shrink_param(sc, gfp_mask));
1164 static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
1166 return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
1167 shrink_param(sc, nr_to_scan),
1168 shrink_param(sc, gfp_mask));
1171 void ldlm_pools_recalc(ldlm_side_t client)
1173 __u32 nr_l = 0, nr_p = 0, l;
1174 struct ldlm_namespace *ns;
 * No need to set up a pool limit for client pools.
1180 if (client == LDLM_NAMESPACE_SERVER) {
1182 * Check all modest namespaces first.
1184 cfs_mutex_down(ldlm_namespace_lock(client));
1185 cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
1188 if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
1191 l = ldlm_pool_granted(&ns->ns_pool);
 * Set the modest pools' limit equal to their avg granted
 * locks + ~6%.
1199 l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
1200 ldlm_pool_setup(&ns->ns_pool, l);
 * Make sure that the modest namespaces did not eat more than 2/3
 * of the limit.
1209 if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
CWARN("\"Modest\" pools consume 2/3 of the server lock "
      "limit (%d of %lu). This means you have too many "
      "clients for this amount of server RAM. "
      "Upgrade the server!\n", nr_l, LDLM_POOL_HOST_L);
1218 * The rest is given to greedy namespaces.
1220 cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
1223 if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
 * If 2/3 of the locks were consumed by
 * modest pools, we re-set an equal limit
 * for all pools.
1234 ldlm_namespace_nr(client));
 * All the remaining greedy pools split the
 * remaining locks in equal parts.
1240 l = (LDLM_POOL_HOST_L - nr_l) /
1242 ldlm_namespace_nr(client)) -
1245 ldlm_pool_setup(&ns->ns_pool, l);
1247 cfs_mutex_up(ldlm_namespace_lock(client));
1251 * Recalc at least ldlm_namespace_nr(client) namespaces.
1253 for (nr = cfs_atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
 * Lock the list, get the first @ns in the list, take a reference on
 * it, move it to the tail, unlock and call pool recalc. This way we
 * avoid calling recalc under the @ns lock, which is really good
 * because we get rid of a potential deadlock on client nodes when
 * canceling locks synchronously.
1262 cfs_mutex_down(ldlm_namespace_lock(client));
1263 if (cfs_list_empty(ldlm_namespace_list(client))) {
1264 cfs_mutex_up(ldlm_namespace_lock(client));
1267 ns = ldlm_namespace_first_locked(client);
1269 cfs_spin_lock(&ns->ns_lock);
 * Skip an ns which is being freed; we don't want to increase its
 * refcount again, not even temporarily. bz21519 & LU-499.
1274 if (ns->ns_stopping) {
1278 ldlm_namespace_get(ns);
1280 cfs_spin_unlock(&ns->ns_lock);
1282 ldlm_namespace_move_locked(ns, client);
1283 cfs_mutex_up(ldlm_namespace_lock(client));
1286 * After setup is done - recalc the pool.
1289 ldlm_pool_recalc(&ns->ns_pool);
1290 ldlm_namespace_put(ns);
1294 EXPORT_SYMBOL(ldlm_pools_recalc);
1296 static int ldlm_pools_thread_main(void *arg)
1298 struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
1299 char *t_name = "ldlm_poold";
1302 cfs_daemonize(t_name);
1303 thread->t_flags = SVC_RUNNING;
1304 cfs_waitq_signal(&thread->t_ctl_waitq);
1306 CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
1307 t_name, cfs_curproc_pid());
1310 struct l_wait_info lwi;
 * Recalc all pools on this tick.
1315 ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
1316 ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
 * Wait until the next check time, or until we're stopped.
1322 lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
1324 l_wait_event(thread->t_ctl_waitq, (thread->t_flags &
1325 (SVC_STOPPING|SVC_EVENT)),
1328 if (thread->t_flags & SVC_STOPPING) {
1329 thread->t_flags &= ~SVC_STOPPING;
1331 } else if (thread->t_flags & SVC_EVENT) {
1332 thread->t_flags &= ~SVC_EVENT;
1336 thread->t_flags = SVC_STOPPED;
1337 cfs_waitq_signal(&thread->t_ctl_waitq);
1339 CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
1340 t_name, cfs_curproc_pid());
1342 cfs_complete_and_exit(&ldlm_pools_comp, 0);
1345 static int ldlm_pools_thread_start(void)
1347 struct l_wait_info lwi = { 0 };
1351 if (ldlm_pools_thread != NULL)
1354 OBD_ALLOC_PTR(ldlm_pools_thread);
1355 if (ldlm_pools_thread == NULL)
1358 cfs_init_completion(&ldlm_pools_comp);
1359 cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
 * CLONE_VM and CLONE_FILES just avoid a needless copy, because we drop
 * the VM and FILES in cfs_daemonize() right away anyway.
1365 rc = cfs_create_thread(ldlm_pools_thread_main, ldlm_pools_thread,
1368 CERROR("Can't start pool thread, error %d\n",
1370 OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
1371 ldlm_pools_thread = NULL;
1374 l_wait_event(ldlm_pools_thread->t_ctl_waitq,
1375 (ldlm_pools_thread->t_flags & SVC_RUNNING), &lwi);
1379 static void ldlm_pools_thread_stop(void)
1383 if (ldlm_pools_thread == NULL) {
1388 ldlm_pools_thread->t_flags = SVC_STOPPING;
1389 cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
 * Make sure the pools thread is finished before freeing @thread. This
 * fixes a possible race and oops caused by accessing freed memory in
 * the pools thread.
1396 cfs_wait_for_completion(&ldlm_pools_comp);
1397 OBD_FREE_PTR(ldlm_pools_thread);
1398 ldlm_pools_thread = NULL;
1402 int ldlm_pools_init(void)
1407 rc = ldlm_pools_thread_start();
1409 ldlm_pools_srv_shrinker =
1410 cfs_set_shrinker(CFS_DEFAULT_SEEKS,
1411 ldlm_pools_srv_shrink);
1412 ldlm_pools_cli_shrinker =
1413 cfs_set_shrinker(CFS_DEFAULT_SEEKS,
1414 ldlm_pools_cli_shrink);
1418 EXPORT_SYMBOL(ldlm_pools_init);
1420 void ldlm_pools_fini(void)
1422 if (ldlm_pools_srv_shrinker != NULL) {
1423 cfs_remove_shrinker(ldlm_pools_srv_shrinker);
1424 ldlm_pools_srv_shrinker = NULL;
1426 if (ldlm_pools_cli_shrinker != NULL) {
1427 cfs_remove_shrinker(ldlm_pools_cli_shrinker);
1428 ldlm_pools_cli_shrinker = NULL;
1430 ldlm_pools_thread_stop();
1432 EXPORT_SYMBOL(ldlm_pools_fini);
1433 #endif /* __KERNEL__ */
1435 #else /* !HAVE_LRU_RESIZE_SUPPORT */
1436 int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
1440 EXPORT_SYMBOL(ldlm_pool_setup);
1442 int ldlm_pool_recalc(struct ldlm_pool *pl)
1446 EXPORT_SYMBOL(ldlm_pool_recalc);
1448 int ldlm_pool_shrink(struct ldlm_pool *pl,
1449 int nr, unsigned int gfp_mask)
1453 EXPORT_SYMBOL(ldlm_pool_shrink);
1455 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
1456 int idx, ldlm_side_t client)
1460 EXPORT_SYMBOL(ldlm_pool_init);
1462 void ldlm_pool_fini(struct ldlm_pool *pl)
1466 EXPORT_SYMBOL(ldlm_pool_fini);
1468 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
1472 EXPORT_SYMBOL(ldlm_pool_add);
1474 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
1478 EXPORT_SYMBOL(ldlm_pool_del);
1480 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
1484 EXPORT_SYMBOL(ldlm_pool_get_slv);
1486 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
1490 EXPORT_SYMBOL(ldlm_pool_set_slv);
1492 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
1496 EXPORT_SYMBOL(ldlm_pool_get_clv);
1498 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
1502 EXPORT_SYMBOL(ldlm_pool_set_clv);
1504 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
1508 EXPORT_SYMBOL(ldlm_pool_get_limit);
1510 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
1514 EXPORT_SYMBOL(ldlm_pool_set_limit);
1516 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
1520 EXPORT_SYMBOL(ldlm_pool_get_lvf);
1522 int ldlm_pools_init(void)
1526 EXPORT_SYMBOL(ldlm_pools_init);
1528 void ldlm_pools_fini(void)
1532 EXPORT_SYMBOL(ldlm_pools_fini);
1534 void ldlm_pools_recalc(ldlm_side_t client)
1538 EXPORT_SYMBOL(ldlm_pools_recalc);
1539 #endif /* HAVE_LRU_RESIZE_SUPPORT */