1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/ldlm/ldlm_pool.c
38 * Author: Yury Umanets <umka@clusterfs.com>
 * The idea of this code is rather simple. Each second, for each server
 * namespace, we calculate SLV - the server lock volume - based on the
 * current number of granted locks, the grant speed for the past period,
 * etc - that is, the locking load. For simplicity, this SLV number may be
 * thought of as a flow definition. It is sent to clients at every
 * opportunity to let them know the current load situation on the server.
 * Initially, SLV on the server is set to a maximum value computed as
 * follows: allow one client to hold all locks of limit ->pl_limit for 10h.
 * Next, on clients, the number of cached locks is no longer limited
 * artificially as it was before. Instead, the client calculates CLV - the
 * client lock volume - for each lock and compares it with the last SLV
 * received from the server. CLV is calculated as the number of locks in the
 * LRU * lock live time in seconds. If CLV > SLV, the lock is canceled.
 * The client also has LVF - the lock volume factor - which regulates how
 * sensitive the client should be to the last SLV from the server. The higher
 * the LVF, the more locks will be canceled on the client. Its default value
 * is 1. Setting LVF to 2 means that the client will cancel locks twice as
 * fast.
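 *
 * A rough illustration of the comparison described above (hypothetical
 * numbers, not taken from the code): a client holds 1000 unused locks in its
 * LRU and considers a lock that has been idle for 30 seconds, so its CLV is
 * about 1000 * 30 = 30000. If the last SLV received from the server is below
 * 30000, the lock is canceled. With LVF = 2 the lock volume is weighted
 * twice as heavily, so the same lock is canceled while SLV is still twice as
 * large, i.e. sooner.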
 * Locks on a client will be canceled more intensively in these cases:
 * (1) if SLV is smaller, that is, the load is higher on the server;
 * (2) the client holds many locks (the more locks a client holds, the
 *     greater the chance that some of them should be canceled);
 * (3) the client has old locks (taken some time ago).
 * Thus, in the flow paradigm that we use to better understand SLV, CLV is
 * the volume of a particle in the flow described by SLV. Accordingly, if the
 * flow is getting thinner, more and more particles fall outside of it, and
 * since particles are locks, they should be canceled.
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such as
 * using LVF, and many cleanups. The flow definition that makes the logic
 * easier to understand belongs to Nikita Danilov (nikita@clusterfs.com), as
 * do many cleanups and fixes. The design and implementation are by Yury
 * Umanets (umka@clusterfs.com).
79 * Glossary for terms used:
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side namespaces;
84 * pl_granted - Number of granted locks (calculated);
85 * pl_grant_rate - Number of granted locks for last T (calculated);
86 * pl_cancel_rate - Number of canceled locks for last T (calculated);
87 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
88 * pl_grant_plan - Planned number of granted locks for next T (calculated);
89 * pl_server_lock_volume - Current server lock volume (calculated);
 * As may be seen from the list above, we have a few tunables which can
 * affect behavior significantly. They may all be modified via proc. They
 * also make it possible to construct a few pre-defined behavior policies. If
 * none of the pre-defined policies suits the working pattern in use, a new
 * one may be "constructed" via the proc tunables.
98 #define DEBUG_SUBSYSTEM S_LDLM
101 # include <lustre_dlm.h>
103 # include <liblustre.h>
104 # include <libcfs/kp30.h>
107 #include <obd_class.h>
108 #include <obd_support.h>
109 #include "ldlm_internal.h"
111 #ifdef HAVE_LRU_RESIZE_SUPPORT
114 * 50 ldlm locks for 1MB of RAM.
116 #define LDLM_POOL_HOST_L ((CFS_NUM_CACHEPAGES >> (20 - CFS_PAGE_SHIFT)) * 50)
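/*
 * A quick sanity check of the formula above (a sketch with assumed values;
 * CFS_PAGE_SHIFT and CFS_NUM_CACHEPAGES depend on the platform): with 4KB
 * pages CFS_PAGE_SHIFT is 12, so CFS_NUM_CACHEPAGES >> (20 - 12) converts
 * the cache-page count into megabytes of RAM. A host with 1GB of cache pages
 * (262144 pages) yields 262144 >> 8 = 1024 MB, and thus
 * LDLM_POOL_HOST_L = 1024 * 50 = 51200 locks.
 */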
119 * Maximal possible grant step plan in %.
121 #define LDLM_POOL_MAX_GSP (30)
124 * Minimal possible grant step plan in %.
126 #define LDLM_POOL_MIN_GSP (1)
 * This controls the speed of reaching LDLM_POOL_MAX_GSP as the thread period
 * grows. A shift of 2 yields a 4s step, which means that for a 10s thread
 * period we take 2 steps of 4s each.
134 #define LDLM_POOL_GSP_STEP_SHIFT (2)
137 * LDLM_POOL_GSP% of all locks is default GP.
139 #define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
 * Max age for locks on clients, in seconds (36000s = 10 hours).
144 #define LDLM_POOL_MAX_AGE (36000)
147 * The granularity of SLV calculation.
149 #define LDLM_POOL_SLV_SHIFT (10)
152 extern cfs_proc_dir_entry_t *ldlm_ns_proc_dir;
155 static inline __u64 dru(__u64 val, __u32 shift, int round_up)
157 return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
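/*
 * Note on dru() above: it divides @val by 2^@shift, rounding the result up
 * when @round_up is set. For example (illustrative values only):
 *
 *   dru(1000, 4, 0) == 1000 >> 4 == 62
 *   dru(1000, 4, 1) == (1000 + 15) >> 4 == 63
 */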
160 static inline __u64 ldlm_pool_slv_max(__u32 L)
         * Allow one client to hold all locks for 10 hrs.
         * The formula is the following: limit * 10h / 1 client.
166 __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
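        /*
         * Illustrative sketch with an assumed limit: for L = 50000 locks the
         * maximum SLV is 50000 * LDLM_POOL_MAX_AGE = 50000 * 36000 = 1.8e9.
         * The explicit "/ 1" keeps the "per client" term of the formula
         * visible.
         */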
170 static inline __u64 ldlm_pool_slv_min(__u32 L)
176 LDLM_POOL_FIRST_STAT = 0,
177 LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
178 LDLM_POOL_GRANT_STAT,
179 LDLM_POOL_CANCEL_STAT,
180 LDLM_POOL_GRANT_RATE_STAT,
181 LDLM_POOL_CANCEL_RATE_STAT,
182 LDLM_POOL_GRANT_PLAN_STAT,
184 LDLM_POOL_SHRINK_REQTD_STAT,
185 LDLM_POOL_SHRINK_FREED_STAT,
186 LDLM_POOL_RECALC_STAT,
187 LDLM_POOL_TIMING_STAT,
191 static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
193 return container_of(pl, struct ldlm_namespace, ns_pool);
 * Calculates the suggested grant_step in % of available locks for the passed
 * period \a t. This is later used in grant_plan calculations.
200 static inline int ldlm_pool_t2gsp(unsigned int t)
         * This yields a 1% grant step for short periods, rising in steps to
         * 30% (LDLM_POOL_MAX_GSP) as the period exceeds a few
         * LDLM_POOL_GSP_STEP_SHIFT intervals.
         *
         * How this affects execution:
         *
         * - for a 1s thread period we get a 1% grant step, which is good
         *   from the pov of taking some load off the server and pushing it
         *   out to clients. A 1% grant step means that the server will not
         *   allow clients to acquire lots of locks in a short period of time
         *   while keeping all their old locks cached. Clients will always
         *   have to give some locks back if they want to take new ones;
         *
         * - for a 10s thread period (the default) we get 23%, which means
         *   that clients have enough room to take some new locks without
         *   giving any back. All locks from this 23% that were not taken by
         *   clients in the current period contribute to SLV growth. SLV
         *   growth means more locks cached on clients, until the limit or
         *   grant plan is reached.
222 return LDLM_POOL_MAX_GSP -
223 ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
224 (t >> LDLM_POOL_GSP_STEP_SHIFT));
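/*
 * A few sample values of the formula above (straightforward arithmetic, for
 * orientation only):
 *
 *   t = 1..3   -> 30 - (29 >> 0) =  1%
 *   t = 4..7   -> 30 - (29 >> 1) = 16%
 *   t = 8..11  -> 30 - (29 >> 2) = 23%
 *   t = 12..15 -> 30 - (29 >> 3) = 27%
 *   t = 16..19 -> 30 - (29 >> 4) = 29%
 *   t >= 20    -> 30 - (29 >> 5) = 30%
 */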
 * Recalculates the grant plan for the next period on the passed \a pl.
230 * \pre ->pl_lock is locked.
232 static inline void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
234 int granted, grant_step, limit;
236 limit = ldlm_pool_get_limit(pl);
237 granted = atomic_read(&pl->pl_granted);
239 grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
240 grant_step = ((limit - granted) * grant_step) / 100;
241 pl->pl_grant_plan = granted + grant_step;
242 limit = (limit * 5) >> 2;
243 if (pl->pl_grant_plan > limit)
244 pl->pl_grant_plan = limit;
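/*
 * Worked example for ldlm_pool_recalc_grant_plan() (hypothetical numbers):
 * with limit = 100, granted = 40 and the default 10s recalc period,
 * grant_step starts as ldlm_pool_t2gsp(10) = 23, then becomes
 * ((100 - 40) * 23) / 100 = 13, so pl_grant_plan = 40 + 13 = 53. The cap is
 * (100 * 5) >> 2 = 125, so no clamping occurs here.
 */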
248 * Recalculates next SLV on passed \a pl.
250 * \pre ->pl_lock is locked.
252 static inline void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
262 slv = pl->pl_server_lock_volume;
263 grant_plan = pl->pl_grant_plan;
264 limit = ldlm_pool_get_limit(pl);
265 granted = atomic_read(&pl->pl_granted);
266 round_up = granted < limit;
268 grant_usage = max_t(int, limit - (granted - grant_plan), 1);
         * Find out the SLV change factor, which is the ratio of grant usage
         * to the limit. SLV changes as fast as the rate of grant plan
         * consumption. The more of the grant plan was left unconsumed by
         * clients in the last interval (idle time), the faster SLV grows.
         * Conversely, the more the grant plan is over-consumed (load time),
         * the faster SLV drops.
278 slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
279 do_div(slv_factor, limit);
280 slv = slv * slv_factor;
281 slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
283 if (slv > ldlm_pool_slv_max(limit)) {
284 slv = ldlm_pool_slv_max(limit);
285 } else if (slv < ldlm_pool_slv_min(limit)) {
286 slv = ldlm_pool_slv_min(limit);
289 pl->pl_server_lock_volume = slv;
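/*
 * Worked example for ldlm_pool_recalc_slv() (hypothetical numbers,
 * continuing the grant-plan example above): with limit = 100, granted = 40
 * and grant_plan = 53, grant_usage = 100 - (40 - 53) = 113. Then
 * slv_factor = (113 << 10) / 100 = 1157, and for a previous slv of 1000 the
 * new slv is dru(1000 * 1157, 10, 1) = 1130 - under-consumption of the grant
 * plan makes SLV grow by about 13%.
 */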
 * Recalculates the current statistics of the passed \a pl.
295 * \pre ->pl_lock is locked.
297 static inline void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
299 int grant_plan = pl->pl_grant_plan;
300 __u64 slv = pl->pl_server_lock_volume;
301 int granted = atomic_read(&pl->pl_granted);
302 int grant_rate = atomic_read(&pl->pl_grant_rate);
303 int cancel_rate = atomic_read(&pl->pl_cancel_rate);
305 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
307 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
309 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
311 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
313 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
318 * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
320 static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
322 struct obd_device *obd;
         * Set the new SLV in the obd field for later use without accessing
         * the pool. This is required to avoid a race between sending a reply
         * to a client with the new SLV and cleanup of the server stack, in
         * which we can't guarantee that the namespace is still alive. We
         * only know that the obd is alive as long as a valid export is
         * alive.
331 obd = ldlm_pl2ns(pl)->ns_obd;
332 LASSERT(obd != NULL);
333 write_lock(&obd->obd_pool_lock);
334 obd->obd_pool_slv = pl->pl_server_lock_volume;
335 write_unlock(&obd->obd_pool_lock);
339 * Recalculates all pool fields on passed \a pl.
341 * \pre ->pl_lock is not locked.
343 static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
345 time_t recalc_interval_sec;
348 spin_lock(&pl->pl_lock);
349 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
350 if (recalc_interval_sec >= pl->pl_recalc_period) {
352 * Recalc SLV after last period. This should be done
353 * _before_ recalculating new grant plan.
355 ldlm_pool_recalc_slv(pl);
         * Make sure that the pool informed the obd of the latest SLV changes.
360 ldlm_srv_pool_push_slv(pl);
363 * Update grant_plan for new period.
365 ldlm_pool_recalc_grant_plan(pl);
367 pl->pl_recalc_time = cfs_time_current_sec();
368 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
369 recalc_interval_sec);
371 spin_unlock(&pl->pl_lock);
 * This function is the main entry point for memory pressure handling on the
 * server side. It decreases SLV on \a pl according to the passed \a nr and
 * \a gfp_mask.
 *
 * Our goal here is to decrease SLV in such a way that clients hold
 * approximately \a nr fewer locks over the next 10h.
383 static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
384 int nr, unsigned int gfp_mask)
         * The VM is asking how many entries may potentially be freed.
392 return atomic_read(&pl->pl_granted);
         * The clients have already canceled their locks, but the server is
         * already in the shrinker and can't cancel anything itself. Let's
         * catch this race.
398 if (atomic_read(&pl->pl_granted) == 0)
401 spin_lock(&pl->pl_lock);
         * We want the shrinker to cause the cancellation of about @nr locks
         * on clients, or the granting of approximately @nr fewer locks in
         * the next intervals.
         *
         * This is why we decrease SLV by @nr. The effect only lasts for one
         * recalc interval (1s these days), which should be enough to pass
         * the decreased SLV on to all clients. On the next recalc interval
         * the pool will either increase SLV if the locking load is not high,
         * keep it at the same level, or decrease it again; thus the
         * shrinker-decreased SLV affects the following recalc intervals and
         * in this way lowers the locking load.
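        /*
         * For example (hypothetical numbers): if the VM asks for nr = 1000
         * and the current SLV is 50000, SLV becomes 49000; if nr >= SLV, the
         * code below clamps SLV to ldlm_pool_slv_min(limit) instead.
         */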
415 if (nr < pl->pl_server_lock_volume) {
416 pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
418 limit = ldlm_pool_get_limit(pl);
419 pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
         * Make sure that the pool informed the obd of the latest SLV changes.
425 ldlm_srv_pool_push_slv(pl);
426 spin_unlock(&pl->pl_lock);
         * We did not really free any memory here so far; it may only get
         * freed later. Return 0 so as not to confuse the VM.
 * Sets up the server side pool \a pl with the passed \a limit.
438 static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
440 struct obd_device *obd;
443 obd = ldlm_pl2ns(pl)->ns_obd;
444 LASSERT(obd != NULL && obd != LP_POISON);
445 LASSERT(obd->obd_type != LP_POISON);
446 write_lock(&obd->obd_pool_lock);
447 obd->obd_pool_limit = limit;
448 write_unlock(&obd->obd_pool_lock);
450 ldlm_pool_set_limit(pl, limit);
 * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd into the passed \a pl.
457 static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
459 struct obd_device *obd;
         * Get the new SLV and Limit from the obd, which is updated by
         * incoming RPCs.
465 obd = ldlm_pl2ns(pl)->ns_obd;
466 LASSERT(obd != NULL);
467 read_lock(&obd->obd_pool_lock);
468 pl->pl_server_lock_volume = obd->obd_pool_slv;
469 ldlm_pool_set_limit(pl, obd->obd_pool_limit);
470 read_unlock(&obd->obd_pool_lock);
 * Recalculates the client side pool \a pl according to the current SLV and
 * Limit.
476 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
478 time_t recalc_interval_sec;
481 spin_lock(&pl->pl_lock);
483 * Check if we need to recalc lists now.
485 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
486 if (recalc_interval_sec < pl->pl_recalc_period) {
487 spin_unlock(&pl->pl_lock);
         * Make sure that the pool knows the latest SLV and Limit from the obd.
494 ldlm_cli_pool_pop_slv(pl);
496 pl->pl_recalc_time = cfs_time_current_sec();
497 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
498 recalc_interval_sec);
499 spin_unlock(&pl->pl_lock);
502 * Do not cancel locks in case lru resize is disabled for this ns.
504 if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
         * When canceling locks on the client we do not need to maintain
         * sharp timing; we only want to cancel the locks asap according to
         * the new SLV. This may be called when the SLV has changed a lot,
         * which is why we do not take pl->pl_recalc_time into account here.
513 RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_SYNC,
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the passed
 * \a pl according to \a nr and \a gfp_mask.
522 static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
523 int nr, unsigned int gfp_mask)
525 struct ldlm_namespace *ns;
526 int canceled = 0, unused;
531 * Do not cancel locks in case lru resize is disabled for this ns.
533 if (!ns_connect_lru_resize(ns))
         * Make sure that the pool knows the latest SLV and Limit from the obd.
539 ldlm_cli_pool_pop_slv(pl);
541 spin_lock(&ns->ns_unused_lock);
542 unused = ns->ns_nr_unused;
543 spin_unlock(&ns->ns_unused_lock);
546 canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC,
551 * Return the number of potentially reclaimable locks.
553 return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
555 return unused - canceled;
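/*
 * A quick sketch of the scaling above (assumed values): with unused = 1200,
 * canceled = 200 and the Linux default sysctl_vfs_cache_pressure of 100, the
 * cache-pressure branch reports ((1200 - 200) / 100) * 100 = 1000
 * potentially reclaimable locks.
 */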
559 struct ldlm_pool_ops ldlm_srv_pool_ops = {
560 .po_recalc = ldlm_srv_pool_recalc,
561 .po_shrink = ldlm_srv_pool_shrink,
562 .po_setup = ldlm_srv_pool_setup
565 struct ldlm_pool_ops ldlm_cli_pool_ops = {
566 .po_recalc = ldlm_cli_pool_recalc,
567 .po_shrink = ldlm_cli_pool_shrink
 * Pool recalc wrapper. Will call either the client or server pool recalc
 * callback, depending on what kind of pool \a pl is.
574 int ldlm_pool_recalc(struct ldlm_pool *pl)
576 time_t recalc_interval_sec;
579 spin_lock(&pl->pl_lock);
580 recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
581 if (recalc_interval_sec > 0) {
583 * Update pool statistics every 1s.
585 ldlm_pool_recalc_stats(pl);
588 * Zero out all rates and speed for the last period.
590 atomic_set(&pl->pl_grant_rate, 0);
591 atomic_set(&pl->pl_cancel_rate, 0);
592 atomic_set(&pl->pl_grant_speed, 0);
594 spin_unlock(&pl->pl_lock);
596 if (pl->pl_ops->po_recalc != NULL) {
597 count = pl->pl_ops->po_recalc(pl);
598 lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
605 EXPORT_SYMBOL(ldlm_pool_recalc);
 * Pool shrink wrapper. Will call either the client or server pool shrink
 * callback, depending on what kind of pool \a pl is.
611 int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
612 unsigned int gfp_mask)
616 if (pl->pl_ops->po_shrink != NULL) {
617 cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
619 lprocfs_counter_add(pl->pl_stats,
620 LDLM_POOL_SHRINK_REQTD_STAT,
622 lprocfs_counter_add(pl->pl_stats,
623 LDLM_POOL_SHRINK_FREED_STAT,
625 CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
626 "shrunk %d\n", pl->pl_name, nr, cancel);
631 EXPORT_SYMBOL(ldlm_pool_shrink);
 * Pool setup wrapper. Will call either the client or server pool setup
 * callback, depending on what kind of pool \a pl is.
 *
 * Sets the passed \a limit into the pool \a pl.
639 int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
642 if (pl->pl_ops->po_setup != NULL)
643 RETURN(pl->pl_ops->po_setup(pl, limit));
646 EXPORT_SYMBOL(ldlm_pool_setup);
649 static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
650 int count, int *eof, void *data)
652 int granted, grant_rate, cancel_rate, grant_step;
653 int nr = 0, grant_speed, grant_plan, lvf;
654 struct ldlm_pool *pl = data;
658 spin_lock(&pl->pl_lock);
659 slv = pl->pl_server_lock_volume;
660 clv = pl->pl_client_lock_volume;
661 limit = ldlm_pool_get_limit(pl);
662 grant_plan = pl->pl_grant_plan;
663 granted = atomic_read(&pl->pl_granted);
664 grant_rate = atomic_read(&pl->pl_grant_rate);
665 lvf = atomic_read(&pl->pl_lock_volume_factor);
666 grant_speed = atomic_read(&pl->pl_grant_speed);
667 cancel_rate = atomic_read(&pl->pl_cancel_rate);
668 grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
669 spin_unlock(&pl->pl_lock);
671 nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
673 nr += snprintf(page + nr, count - nr, " SLV: "LPU64"\n", slv);
674 nr += snprintf(page + nr, count - nr, " CLV: "LPU64"\n", clv);
675 nr += snprintf(page + nr, count - nr, " LVF: %d\n", lvf);
677 if (ns_is_server(ldlm_pl2ns(pl))) {
678 nr += snprintf(page + nr, count - nr, " GSP: %d%%\n",
680 nr += snprintf(page + nr, count - nr, " GP: %d\n",
683 nr += snprintf(page + nr, count - nr, " GR: %d\n",
685 nr += snprintf(page + nr, count - nr, " CR: %d\n",
687 nr += snprintf(page + nr, count - nr, " GS: %d\n",
689 nr += snprintf(page + nr, count - nr, " G: %d\n",
691 nr += snprintf(page + nr, count - nr, " L: %d\n",
696 LDLM_POOL_PROC_READER(grant_plan, int);
697 LDLM_POOL_PROC_READER(recalc_period, int);
698 LDLM_POOL_PROC_WRITER(recalc_period, int);
700 static int ldlm_pool_proc_init(struct ldlm_pool *pl)
702 struct ldlm_namespace *ns = ldlm_pl2ns(pl);
703 struct proc_dir_entry *parent_ns_proc;
704 struct lprocfs_vars pool_vars[2];
705 char *var_name = NULL;
709 OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
713 parent_ns_proc = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
714 if (parent_ns_proc == NULL) {
715 CERROR("%s: proc entry is not initialized\n",
717 GOTO(out_free_name, rc = -EINVAL);
719 pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
721 if (IS_ERR(pl->pl_proc_dir)) {
722 CERROR("LProcFS failed in ldlm-pool-init\n");
723 rc = PTR_ERR(pl->pl_proc_dir);
724 GOTO(out_free_name, rc);
727 var_name[MAX_STRING_SIZE] = '\0';
728 memset(pool_vars, 0, sizeof(pool_vars));
729 pool_vars[0].name = var_name;
731 snprintf(var_name, MAX_STRING_SIZE, "server_lock_volume");
732 pool_vars[0].data = &pl->pl_server_lock_volume;
733 pool_vars[0].read_fptr = lprocfs_rd_u64;
734 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
736 snprintf(var_name, MAX_STRING_SIZE, "limit");
737 pool_vars[0].data = &pl->pl_limit;
738 pool_vars[0].read_fptr = lprocfs_rd_atomic;
739 pool_vars[0].write_fptr = lprocfs_wr_atomic;
740 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
742 snprintf(var_name, MAX_STRING_SIZE, "granted");
743 pool_vars[0].data = &pl->pl_granted;
744 pool_vars[0].read_fptr = lprocfs_rd_atomic;
745 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
747 snprintf(var_name, MAX_STRING_SIZE, "grant_speed");
748 pool_vars[0].data = &pl->pl_grant_speed;
749 pool_vars[0].read_fptr = lprocfs_rd_atomic;
750 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
752 snprintf(var_name, MAX_STRING_SIZE, "cancel_rate");
753 pool_vars[0].data = &pl->pl_cancel_rate;
754 pool_vars[0].read_fptr = lprocfs_rd_atomic;
755 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
757 snprintf(var_name, MAX_STRING_SIZE, "grant_rate");
758 pool_vars[0].data = &pl->pl_grant_rate;
759 pool_vars[0].read_fptr = lprocfs_rd_atomic;
760 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
762 snprintf(var_name, MAX_STRING_SIZE, "grant_plan");
763 pool_vars[0].data = pl;
764 pool_vars[0].read_fptr = lprocfs_rd_grant_plan;
765 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
767 snprintf(var_name, MAX_STRING_SIZE, "recalc_period");
768 pool_vars[0].data = pl;
769 pool_vars[0].read_fptr = lprocfs_rd_recalc_period;
770 pool_vars[0].write_fptr = lprocfs_wr_recalc_period;
771 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
773 snprintf(var_name, MAX_STRING_SIZE, "lock_volume_factor");
774 pool_vars[0].data = &pl->pl_lock_volume_factor;
775 pool_vars[0].read_fptr = lprocfs_rd_atomic;
776 pool_vars[0].write_fptr = lprocfs_wr_atomic;
777 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
779 snprintf(var_name, MAX_STRING_SIZE, "state");
780 pool_vars[0].data = pl;
781 pool_vars[0].read_fptr = lprocfs_rd_pool_state;
782 lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);
784 pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
785 LDLM_POOL_FIRST_STAT, 0);
787 GOTO(out_free_name, rc = -ENOMEM);
789 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
790 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
792 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
793 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
795 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
796 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
798 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
799 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
800 "grant_rate", "locks/s");
801 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
802 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
803 "cancel_rate", "locks/s");
804 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
805 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
806 "grant_plan", "locks/s");
807 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
808 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
810 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
811 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
812 "shrink_request", "locks");
813 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
814 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
815 "shrink_freed", "locks");
816 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
817 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
818 "recalc_freed", "locks");
819 lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
820 LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
821 "recalc_timing", "sec");
822 lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);
826 OBD_FREE(var_name, MAX_STRING_SIZE + 1);
830 static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
832 if (pl->pl_stats != NULL) {
833 lprocfs_free_stats(&pl->pl_stats);
836 if (pl->pl_proc_dir != NULL) {
837 lprocfs_remove(&pl->pl_proc_dir);
838 pl->pl_proc_dir = NULL;
841 #else /* !__KERNEL__*/
842 #define ldlm_pool_proc_init(pl) (0)
#define ldlm_pool_proc_fini(pl)         do {} while (0)
846 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
847 int idx, ldlm_side_t client)
852 spin_lock_init(&pl->pl_lock);
853 atomic_set(&pl->pl_granted, 0);
854 pl->pl_recalc_time = cfs_time_current_sec();
855 atomic_set(&pl->pl_lock_volume_factor, 1);
857 atomic_set(&pl->pl_grant_rate, 0);
858 atomic_set(&pl->pl_cancel_rate, 0);
859 atomic_set(&pl->pl_grant_speed, 0);
860 pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
862 snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
865 if (client == LDLM_NAMESPACE_SERVER) {
866 pl->pl_ops = &ldlm_srv_pool_ops;
867 ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
868 pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
869 pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
871 ldlm_pool_set_limit(pl, 1);
872 pl->pl_server_lock_volume = 0;
873 pl->pl_ops = &ldlm_cli_pool_ops;
874 pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
876 pl->pl_client_lock_volume = 0;
877 rc = ldlm_pool_proc_init(pl);
881 CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
885 EXPORT_SYMBOL(ldlm_pool_init);
887 void ldlm_pool_fini(struct ldlm_pool *pl)
890 ldlm_pool_proc_fini(pl);
         * The pool should not be used after this point. We can't free it
         * here as it lives in struct ldlm_namespace, but we are still
         * interested in catching any abnormal use.
897 POISON(pl, 0x5a, sizeof(*pl));
900 EXPORT_SYMBOL(ldlm_pool_fini);
 * Adds the newly taken ldlm lock \a lock into the accounting of pool \a pl.
905 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
         * FLOCK locks are special in the sense that they are almost never
         * canceled; instead, a special kind of lock is used to drop them.
         * Also, there is no LRU for flock locks, so there is no point in
         * tracking them.
913 if (lock->l_resource->lr_type == LDLM_FLOCK)
917 atomic_inc(&pl->pl_granted);
918 atomic_inc(&pl->pl_grant_rate);
919 atomic_inc(&pl->pl_grant_speed);
921 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
         * Do not do pool recalc on the client side, as all locks which may
         * potentially be canceled have already been packed into the
         * enqueue/cancel RPC. Also, we do not want to run out of stack with
         * overly long call paths.
929 if (ns_is_server(ldlm_pl2ns(pl)))
930 ldlm_pool_recalc(pl);
933 EXPORT_SYMBOL(ldlm_pool_add);
 * Removes ldlm lock \a lock from the accounting of pool \a pl.
938 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
         * Filter out FLOCK locks. See the comment in ldlm_pool_add() above.
943 if (lock->l_resource->lr_type == LDLM_FLOCK)
947 LASSERT(atomic_read(&pl->pl_granted) > 0);
948 atomic_dec(&pl->pl_granted);
949 atomic_inc(&pl->pl_cancel_rate);
950 atomic_dec(&pl->pl_grant_speed);
952 lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
954 if (ns_is_server(ldlm_pl2ns(pl)))
955 ldlm_pool_recalc(pl);
958 EXPORT_SYMBOL(ldlm_pool_del);
961 * Returns current \a pl SLV.
963 * \pre ->pl_lock is not locked.
965 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
968 spin_lock(&pl->pl_lock);
969 slv = pl->pl_server_lock_volume;
970 spin_unlock(&pl->pl_lock);
973 EXPORT_SYMBOL(ldlm_pool_get_slv);
976 * Sets passed \a slv to \a pl.
978 * \pre ->pl_lock is not locked.
980 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
982 spin_lock(&pl->pl_lock);
983 pl->pl_server_lock_volume = slv;
984 spin_unlock(&pl->pl_lock);
986 EXPORT_SYMBOL(ldlm_pool_set_slv);
989 * Returns current \a pl CLV.
991 * \pre ->pl_lock is not locked.
993 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
996 spin_lock(&pl->pl_lock);
997 slv = pl->pl_client_lock_volume;
998 spin_unlock(&pl->pl_lock);
1001 EXPORT_SYMBOL(ldlm_pool_get_clv);
1004 * Sets passed \a clv to \a pl.
1006 * \pre ->pl_lock is not locked.
1008 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
1010 spin_lock(&pl->pl_lock);
1011 pl->pl_client_lock_volume = clv;
1012 spin_unlock(&pl->pl_lock);
1014 EXPORT_SYMBOL(ldlm_pool_set_clv);
1017 * Returns current \a pl limit.
1019 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
1021 return atomic_read(&pl->pl_limit);
1023 EXPORT_SYMBOL(ldlm_pool_get_limit);
1026 * Sets passed \a limit to \a pl.
1028 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
1030 atomic_set(&pl->pl_limit, limit);
1032 EXPORT_SYMBOL(ldlm_pool_set_limit);
1035 * Returns current LVF from \a pl.
1037 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
1039 return atomic_read(&pl->pl_lock_volume_factor);
1041 EXPORT_SYMBOL(ldlm_pool_get_lvf);
1044 static int ldlm_pool_granted(struct ldlm_pool *pl)
1046 return atomic_read(&pl->pl_granted);
1049 static struct ptlrpc_thread *ldlm_pools_thread;
1050 static struct shrinker *ldlm_pools_srv_shrinker;
1051 static struct shrinker *ldlm_pools_cli_shrinker;
1052 static struct completion ldlm_pools_comp;
 * Cancels \a nr locks from all namespaces (if possible). Returns the number
 * of cached locks remaining after the shrink is finished. All namespaces are
 * asked to cancel an approximately equal number of locks to keep things
 * balanced.
1059 static int ldlm_pools_shrink(ldlm_side_t client, int nr,
1060 unsigned int gfp_mask)
1062 int total = 0, cached = 0, nr_ns;
1063 struct ldlm_namespace *ns;
1065 if (nr != 0 && !(gfp_mask & __GFP_FS))
1069 CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks\n",
1070 nr, client == LDLM_NAMESPACE_CLIENT ? "client":"server");
1073 * Find out how many resources we may release.
1075 for (nr_ns = atomic_read(ldlm_namespace_nr(client));
1078 mutex_down(ldlm_namespace_lock(client));
1079 if (list_empty(ldlm_namespace_list(client))) {
1080 mutex_up(ldlm_namespace_lock(client));
1083 ns = ldlm_namespace_first_locked(client);
1084 ldlm_namespace_get(ns);
1085 ldlm_namespace_move_locked(ns, client);
1086 mutex_up(ldlm_namespace_lock(client));
1087 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
1088 ldlm_namespace_put(ns, 1);
1091 if (nr == 0 || total == 0)
1095 * Shrink at least ldlm_namespace_nr(client) namespaces.
1097 for (nr_ns = atomic_read(ldlm_namespace_nr(client));
1100 int cancel, nr_locks;
1103 * Do not call shrink under ldlm_namespace_lock(client)
1105 mutex_down(ldlm_namespace_lock(client));
1106 if (list_empty(ldlm_namespace_list(client))) {
1107 mutex_up(ldlm_namespace_lock(client));
                         * If the list is empty, we can't return any
                         * @cached > 0; that would probably cause needless
                         * shrinker calls.
1116 ns = ldlm_namespace_first_locked(client);
1117 ldlm_namespace_get(ns);
1118 ldlm_namespace_move_locked(ns, client);
1119 mutex_up(ldlm_namespace_lock(client));
1121 nr_locks = ldlm_pool_granted(&ns->ns_pool);
1122 cancel = 1 + nr_locks * nr / total;
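                /*
                 * Illustrative numbers: if total = 10000 granted locks were
                 * found in the first pass, the VM asked for nr = 128, and
                 * this namespace holds nr_locks = 2500, then it is asked to
                 * cancel 1 + 2500 * 128 / 10000 = 33 locks, i.e. roughly its
                 * proportional share.
                 */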
1123 ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
1124 cached += ldlm_pool_granted(&ns->ns_pool);
1125 ldlm_namespace_put(ns, 1);
1130 static int ldlm_pools_srv_shrink(int nr, unsigned int gfp_mask)
1132 return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER, nr, gfp_mask);
1135 static int ldlm_pools_cli_shrink(int nr, unsigned int gfp_mask)
1137 return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT, nr, gfp_mask);
1140 void ldlm_pools_recalc(ldlm_side_t client)
1142 __u32 nr_l = 0, nr_p = 0, l;
1143 struct ldlm_namespace *ns;
1147 * No need to setup pool limit for client pools.
1149 if (client == LDLM_NAMESPACE_SERVER) {
1151 * Check all modest namespaces first.
1153 mutex_down(ldlm_namespace_lock(client));
1154 list_for_each_entry(ns, ldlm_namespace_list(client),
1157 if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
1160 l = ldlm_pool_granted(&ns->ns_pool);
                         * Set the modest pools' limit equal to their average
                         * number of granted locks plus a small margin.
1168 l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
1169 ldlm_pool_setup(&ns->ns_pool, l);
                 * Make sure that the modest namespaces did not eat more
                 * than 2/3 of the limit.
1178 if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
                        CWARN("\"Modest\" pools consume more than 2/3 of "
                              "the server lock limit (%d of %lu). This "
                              "means that you have too many clients for "
                              "this amount of server RAM. Upgrade the "
                              "server!\n", nr_l, LDLM_POOL_HOST_L);
1187 * The rest is given to greedy namespaces.
1189 list_for_each_entry(ns, ldlm_namespace_list(client),
1192 if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
                                 * In the case where 2/3 of the locks have
                                 * been eaten by modest pools, we re-setup an
                                 * equal limit for all pools.
1201 l = LDLM_POOL_HOST_L /
1202 atomic_read(ldlm_namespace_nr(client));
                                 * All remaining greedy pools share the rest
                                 * of the locks in equal parts.
1208 l = (LDLM_POOL_HOST_L - nr_l) /
1209 (atomic_read(ldlm_namespace_nr(client)) -
1212 ldlm_pool_setup(&ns->ns_pool, l);
1214 mutex_up(ldlm_namespace_lock(client));
1218 * Recalc at least ldlm_namespace_nr(client) namespaces.
1220 for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
                 * Lock the list, get the first @ns in the list, take a
                 * reference, move it to the tail, unlock, and call pool
                 * recalc. This way we avoid calling recalc under the @ns
                 * lock, which is really good as we get rid of a potential
                 * deadlock on client nodes when canceling locks
                 * synchronously.
1228 mutex_down(ldlm_namespace_lock(client));
1229 if (list_empty(ldlm_namespace_list(client))) {
1230 mutex_up(ldlm_namespace_lock(client));
1233 ns = ldlm_namespace_first_locked(client);
1234 ldlm_namespace_get(ns);
1235 ldlm_namespace_move_locked(ns, client);
1236 mutex_up(ldlm_namespace_lock(client));
1239 * After setup is done - recalc the pool.
1241 ldlm_pool_recalc(&ns->ns_pool);
1242 ldlm_namespace_put(ns, 1);
1245 EXPORT_SYMBOL(ldlm_pools_recalc);
1247 static int ldlm_pools_thread_main(void *arg)
1249 struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
1250 char *t_name = "ldlm_poold";
1253 cfs_daemonize(t_name);
1254 thread->t_flags = SVC_RUNNING;
1255 cfs_waitq_signal(&thread->t_ctl_waitq);
1257 CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
1258 t_name, cfs_curproc_pid());
1261 struct l_wait_info lwi;
                 * Recalc all pools on this tick.
1266 ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
1267 ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
                 * Wait until the next check time, or until we're stopped.
1273 lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
1275 l_wait_event(thread->t_ctl_waitq, (thread->t_flags &
1276 (SVC_STOPPING|SVC_EVENT)),
1279 if (thread->t_flags & SVC_STOPPING) {
1280 thread->t_flags &= ~SVC_STOPPING;
1282 } else if (thread->t_flags & SVC_EVENT) {
1283 thread->t_flags &= ~SVC_EVENT;
1287 thread->t_flags = SVC_STOPPED;
1288 cfs_waitq_signal(&thread->t_ctl_waitq);
1290 CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
1291 t_name, cfs_curproc_pid());
1293 complete_and_exit(&ldlm_pools_comp, 0);
1296 static int ldlm_pools_thread_start(void)
1298 struct l_wait_info lwi = { 0 };
1302 if (ldlm_pools_thread != NULL)
1305 OBD_ALLOC_PTR(ldlm_pools_thread);
1306 if (ldlm_pools_thread == NULL)
1309 init_completion(&ldlm_pools_comp);
1310 cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
1313 * CLONE_VM and CLONE_FILES just avoid a needless copy, because we
1314 * just drop the VM and FILES in cfs_daemonize() right away.
1316 rc = cfs_kernel_thread(ldlm_pools_thread_main, ldlm_pools_thread,
1317 CLONE_VM | CLONE_FILES);
1319 CERROR("Can't start pool thread, error %d\n",
1321 OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
1322 ldlm_pools_thread = NULL;
1325 l_wait_event(ldlm_pools_thread->t_ctl_waitq,
1326 (ldlm_pools_thread->t_flags & SVC_RUNNING), &lwi);
1330 static void ldlm_pools_thread_stop(void)
1334 if (ldlm_pools_thread == NULL) {
1339 ldlm_pools_thread->t_flags = SVC_STOPPING;
1340 cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
         * Make sure that the pools thread is finished before freeing
         * @thread. This fixes a possible race and oops caused by accessing
         * freed memory in the pools thread.
1347 wait_for_completion(&ldlm_pools_comp);
1348 OBD_FREE_PTR(ldlm_pools_thread);
1349 ldlm_pools_thread = NULL;
1353 int ldlm_pools_init(void)
1358 rc = ldlm_pools_thread_start();
1360 ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
1361 ldlm_pools_srv_shrink);
1362 ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS,
1363 ldlm_pools_cli_shrink);
1367 EXPORT_SYMBOL(ldlm_pools_init);
1369 void ldlm_pools_fini(void)
1371 if (ldlm_pools_srv_shrinker != NULL) {
1372 remove_shrinker(ldlm_pools_srv_shrinker);
1373 ldlm_pools_srv_shrinker = NULL;
1375 if (ldlm_pools_cli_shrinker != NULL) {
1376 remove_shrinker(ldlm_pools_cli_shrinker);
1377 ldlm_pools_cli_shrinker = NULL;
1379 ldlm_pools_thread_stop();
1381 EXPORT_SYMBOL(ldlm_pools_fini);
1382 #endif /* __KERNEL__ */
1384 #else /* !HAVE_LRU_RESIZE_SUPPORT */
1385 int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
1389 EXPORT_SYMBOL(ldlm_pool_setup);
1391 int ldlm_pool_recalc(struct ldlm_pool *pl)
1395 EXPORT_SYMBOL(ldlm_pool_recalc);
1397 int ldlm_pool_shrink(struct ldlm_pool *pl,
1398 int nr, unsigned int gfp_mask)
1402 EXPORT_SYMBOL(ldlm_pool_shrink);
1404 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
1405 int idx, ldlm_side_t client)
1409 EXPORT_SYMBOL(ldlm_pool_init);
1411 void ldlm_pool_fini(struct ldlm_pool *pl)
1415 EXPORT_SYMBOL(ldlm_pool_fini);
1417 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
1421 EXPORT_SYMBOL(ldlm_pool_add);
1423 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
1427 EXPORT_SYMBOL(ldlm_pool_del);
1429 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
1433 EXPORT_SYMBOL(ldlm_pool_get_slv);
1435 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
1439 EXPORT_SYMBOL(ldlm_pool_set_slv);
1441 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
1445 EXPORT_SYMBOL(ldlm_pool_get_clv);
1447 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
1451 EXPORT_SYMBOL(ldlm_pool_set_clv);
1453 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
1457 EXPORT_SYMBOL(ldlm_pool_get_limit);
1459 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
1463 EXPORT_SYMBOL(ldlm_pool_set_limit);
1465 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
1469 EXPORT_SYMBOL(ldlm_pool_get_lvf);
1471 int ldlm_pools_init(void)
1475 EXPORT_SYMBOL(ldlm_pools_init);
1477 void ldlm_pools_fini(void)
1481 EXPORT_SYMBOL(ldlm_pools_fini);
1483 void ldlm_pools_recalc(ldlm_side_t client)
1487 EXPORT_SYMBOL(ldlm_pools_recalc);
1488 #endif /* HAVE_LRU_RESIZE_SUPPORT */