/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */
/*
 * The idea of this code is rather simple. Each second, for each server
 * namespace we have SLV - server lock volume - which is calculated from the
 * current number of granted locks, the grant speed for the past period, etc -
 * that is, the locking load. This SLV number may be thought of as a flow
 * definition for simplicity. It is sent to clients with each occasion to let
 * them know what the current load situation on the server is. By default, at
 * the beginning, the SLV on the server is set to a max value calculated as
 * follows: allow one client to hold all locks of limit ->pl_limit for 10h.
 *
 * Next, on clients, the number of cached locks is not limited artificially
 * in any way as it was before. Instead, the client calculates CLV, that is,
 * the client lock volume, for each lock and compares it with the last SLV
 * from the server. CLV is calculated as the number of locks in LRU * lock
 * live time in seconds. If CLV > SLV, the lock is canceled.
 *
 * The client has LVF, that is, a lock volume factor which regulates how
 * sensitive the client should be about the last SLV from the server. The
 * higher LVF is, the more locks will be canceled on the client. The default
 * value is 1. Setting LVF to 2 means that the client will cancel locks
 * twice as fast.
 *
 * Locks on a client will be canceled more intensively in these cases:
 * (1) if SLV is smaller, that is, load is higher on the server;
 * (2) client has a lot of locks (the more locks are held by the client, the
 *     bigger the chances that some of them should be canceled);
 * (3) client has old locks (taken some time ago).
 *
 * Thus, according to the flow paradigm that we use for better understanding
 * of SLV, CLV is the volume of a particle in the flow described by SLV.
 * According to this, if the flow is getting thinner, more and more particles
 * fall outside of it, and as particles are locks, they should be canceled.
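 *
 * An illustrative example (numbers are hypothetical, not defaults): say the
 * server reports SLV = 1000000 and a client holds 500 unused locks in its
 * LRU with LVF = 1. A lock that has lived 3000s has
 * CLV = 500 * 3000 * 1 = 1500000, so CLV > SLV and it is canceled, while a
 * lock only 1000s old has CLV = 500000 and survives this pass.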
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas like using
 * LVF and many cleanups. The flow definition, which allows easier
 * understanding of the logic, belongs to Nikita Danilov (nikita@clusterfs.com)
 * as well as many cleanups and fixes. And design and implementation are done
 * by Yury Umanets (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As may be seen from the list above, we have a few possible tunables which
 * may affect behavior much. They all may be modified via proc. However, they
 * also give the possibility of constructing a few pre-defined behavior
 * policies. If none of the predefined policies suits the workload pattern
 * being used, a new one may be "constructed" via proc tunables.
 */
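/*
 * For example, the pool tunables registered below are exposed under /proc
 * and reachable via lctl (a sketch of typical usage; verify the parameter
 * paths against your tree):
 *
 *   lctl get_param ldlm.namespaces.*.pool.state
 *   lctl set_param ldlm.namespaces.*.pool.lock_volume_factor=2
 *   lctl set_param ldlm.namespaces.*.pool.recalc_period=20
 */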
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <lustre_dlm.h>
#else
# include <liblustre.h>
#endif

#include <cl_object.h>

#include <obd_class.h>
#include <obd_support.h>
#include "ldlm_internal.h"
#ifdef HAVE_LRU_RESIZE_SUPPORT

/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)

#ifdef __KERNEL__
extern struct proc_dir_entry *ldlm_ns_proc_dir;
#endif
static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
        return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
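/*
 * Illustrative usage of dru() ("divide, rounding up or down"): it divides
 * by 2^shift, e.g. dru(1023, 10, 0) == 1023 >> 10 == 0 (round down), while
 * dru(1023, 10, 1) == (1023 + 1023) >> 10 == 1 (round up).
 */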
static inline __u64 ldlm_pool_slv_max(__u32 L)
{
        /*
         * Allow to have all locks for 1 client for 10 hrs.
         * Formula is the following: limit * 10h / 1 client.
         */
        __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
        return lim;
}
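/*
 * For example (with LDLM_POOL_MAX_AGE == 36000): a pool limit of L = 50000
 * locks gives ldlm_pool_slv_max() == 50000 * 36000 == 1.8e9, the ceiling
 * the SLV is clamped to in ldlm_pool_recalc_slv() below.
 */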
static inline __u64 ldlm_pool_slv_min(__u32 L)
{
        return 1;
}
enum {
        LDLM_POOL_FIRST_STAT = 0,
        LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
        LDLM_POOL_GRANT_STAT,
        LDLM_POOL_CANCEL_STAT,
        LDLM_POOL_GRANT_RATE_STAT,
        LDLM_POOL_CANCEL_RATE_STAT,
        LDLM_POOL_GRANT_PLAN_STAT,
        LDLM_POOL_SLV_STAT,
        LDLM_POOL_SHRINK_REQTD_STAT,
        LDLM_POOL_SHRINK_FREED_STAT,
        LDLM_POOL_RECALC_STAT,
        LDLM_POOL_TIMING_STAT,
        LDLM_POOL_LAST_STAT
};
static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
        return container_of(pl, struct ldlm_namespace, ns_pool);
}
/**
 * Calculates suggested grant_step in % of available locks for passed
 * \a period. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
        /*
         * This yields a 1% grant step for anything below
         * LDLM_POOL_GSP_STEP and up to 30% for anything higher than
         * LDLM_POOL_GSP_STEP.
         *
         * How this will affect execution is the following:
         *
         * - for a thread period of 1s we will have a grant_step of 1%,
         *   which is good from the pov of taking some load off the server
         *   and pushing it out to clients. This is so because a 1%
         *   grant_step means that the server will not allow clients to get
         *   lots of locks in a short period of time and keep all old locks
         *   in their caches. Clients will always have to get some locks
         *   back if they want to take new ones;
         *
         * - for a thread period of 10s (which is the default) we will have
         *   23%, which means that clients will have enough room to take
         *   new locks without giving some back. All locks from this 23%
         *   which were not taken by clients in the current period will
         *   contribute to SLV growing. SLV growing means more locks cached
         *   on clients until the limit or grant plan is reached.
         */
        return LDLM_POOL_MAX_GSP -
                ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
                 (t >> LDLM_POOL_GSP_STEP_SHIFT));
}
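/*
 * Sample values of the formula above (MAX_GSP = 30, MIN_GSP = 1,
 * STEP_SHIFT = 2):
 *
 *   t = 1s  -> 30 - (29 >> 0) = 1%
 *   t = 4s  -> 30 - (29 >> 1) = 16%
 *   t = 10s -> 30 - (29 >> 2) = 23%
 *   t = 20s -> 30 - (29 >> 5) = 30%
 */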
/**
 * Recalculates next grant limit on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
        int granted, grant_step, limit;

        limit = ldlm_pool_get_limit(pl);
        granted = atomic_read(&pl->pl_granted);

        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        grant_step = ((limit - granted) * grant_step) / 100;
        pl->pl_grant_plan = granted + grant_step;
        limit = (limit * 5) >> 2;
        if (pl->pl_grant_plan > limit)
                pl->pl_grant_plan = limit;
}
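/*
 * Worked example (hypothetical numbers): with limit = 1000, granted = 800
 * and the default 10s period (grant step 23%), grant_step becomes
 * (1000 - 800) * 23 / 100 = 46, so pl_grant_plan = 846. The plan is then
 * capped at limit * 5/4 = 1250, which does not kick in here.
 */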
/**
 * Recalculates next SLV on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
        int granted;
        int grant_plan;
        int round_up;
        __u64 slv;
        __u64 slv_factor;
        __u64 grant_usage;
        __u32 limit;

        slv = pl->pl_server_lock_volume;
        grant_plan = pl->pl_grant_plan;
        limit = ldlm_pool_get_limit(pl);
        granted = atomic_read(&pl->pl_granted);
        round_up = granted < limit;

        grant_usage = max_t(int, limit - (granted - grant_plan), 1);

        /*
         * Find out the SLV change factor, which is the ratio of grant
         * usage to limit. SLV changes as fast as the ratio of grant plan
         * consumption. The more locks from the grant plan are not consumed
         * by clients in the last interval (idle time), the faster SLV
         * grows. Conversely, the more the grant plan is over-consumed
         * (load time), the faster SLV drops.
         */
        slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
        do_div(slv_factor, limit);
        slv = slv * slv_factor;
        slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);

        if (slv > ldlm_pool_slv_max(limit)) {
                slv = ldlm_pool_slv_max(limit);
        } else if (slv < ldlm_pool_slv_min(limit)) {
                slv = ldlm_pool_slv_min(limit);
        }

        pl->pl_server_lock_volume = slv;
}
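/*
 * Continuing the worked example above: limit = 1000, granted = 800 and
 * grant_plan = 846 give grant_usage = 1000 - (800 - 846) = 1046, so
 * slv_factor = (1046 << 10) / 1000 = 1071 and the new SLV is
 * old_slv * 1071 >> 10, i.e. the SLV grows by roughly 4.6% this period.
 */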
/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
        int grant_plan = pl->pl_grant_plan;
        __u64 slv = pl->pl_server_lock_volume;
        int granted = atomic_read(&pl->pl_granted);
        int grant_rate = atomic_read(&pl->pl_grant_rate);
        int cancel_rate = atomic_read(&pl->pl_cancel_rate);

        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT, slv);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT, granted);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                            grant_rate);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                            grant_plan);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                            cancel_rate);
}
/**
 * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
 */
static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Set the new SLV in an obd field for using it later without
         * accessing the pool. This is required to avoid a race between
         * sending a reply to a client with the new SLV and cleanup of the
         * server stack, during which we can't guarantee that the namespace
         * is still alive. We only know that the obd is alive as long as a
         * valid export is alive.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_slv = pl->pl_server_lock_volume;
        write_unlock(&obd->obd_pool_lock);
}
/**
 * Recalculates all pool fields on passed \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }
        /*
         * Recalc SLV after last period. This should be done
         * _before_ recalculating new grant plan.
         */
        ldlm_pool_recalc_slv(pl);

        /*
         * Make sure that pool informed obd of last SLV changes.
         */
        ldlm_srv_pool_push_slv(pl);

        /*
         * Update grant_plan for new period.
         */
        ldlm_pool_recalc_grant_plan(pl);

        pl->pl_recalc_time = cfs_time_current_sec();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
        return 0;
}
/**
 * This function is used on the server side as the main entry point for
 * memory pressure handling. It decreases SLV on \a pl according to passed
 * \a nr and \a gfp_mask.
 *
 * Our goal here is to decrease SLV in such a way that clients hold \a nr
 * fewer locks over the next 10h.
 */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
                                int nr, unsigned int gfp_mask)
{
        __u32 limit;

        /*
         * VM is asking how many entries may be potentially freed.
         */
        if (nr == 0)
                return atomic_read(&pl->pl_granted);

        /*
         * Client already canceled locks but server is already in shrinker
         * and can't cancel anything. Let's catch this race.
         */
        if (atomic_read(&pl->pl_granted) == 0)
                return 0;

        spin_lock(&pl->pl_lock);

        /*
         * We want the shrinker to possibly cause cancellation of @nr locks
         * from clients or grant approximately @nr fewer locks in the next
         * intervals.
         *
         * This is why we decrease SLV by @nr. This effect will only last
         * for one re-calc interval (1s these days), which should be enough
         * to pass this decreased SLV to all clients. On the next recalc
         * interval the pool will either increase SLV if the locking load
         * is not high, or keep it on the same level or even decrease it
         * again; thus, the shrinker-decreased SLV will affect the next
         * recalc intervals and in this way will lower the locking load.
         */
        if (nr < pl->pl_server_lock_volume) {
                pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
        } else {
                limit = ldlm_pool_get_limit(pl);
                pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
        }

        /*
         * Make sure that pool informed obd of last SLV changes.
         */
        ldlm_srv_pool_push_slv(pl);
        spin_unlock(&pl->pl_lock);

        /*
         * We did not really free any memory here so far; it will only be
         * freed later, maybe, so we return 0 to not confuse VM.
         */
        return 0;
}
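/*
 * Example of the SLV adjustment above (hypothetical numbers): with
 * SLV = 1500 and a request to shrink nr = 100 locks, the new SLV is 1400;
 * had nr been >= 1500, the SLV would have been clamped to
 * ldlm_pool_slv_min(limit) == 1 instead of going negative.
 */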
/**
 * Setup server side pool \a pl with passed \a limit.
 */
static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
{
        struct obd_device *obd;

        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL && obd != LP_POISON);
        LASSERT(obd->obd_type != LP_POISON);
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_limit = limit;
        write_unlock(&obd->obd_pool_lock);

        ldlm_pool_set_limit(pl, limit);
        return 0;
}
/**
 * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd into passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Get new SLV and Limit from obd which is updated with incoming
         * RPCs.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        read_lock(&obd->obd_pool_lock);
        pl->pl_server_lock_volume = obd->obd_pool_slv;
        ldlm_pool_set_limit(pl, obd->obd_pool_limit);
        read_unlock(&obd->obd_pool_lock);
}
/**
 * Recalculates client side pool \a pl according to current SLV and Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;
        int ret;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        /*
         * Check if we need to recalc lists now.
         */
        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }

        /*
         * Make sure that pool knows last SLV and Limit from obd.
         */
        ldlm_cli_pool_pop_slv(pl);
        spin_unlock(&pl->pl_lock);

        /*
         * Do not cancel locks in case lru resize is disabled for this ns.
         */
        if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
                GOTO(out, ret = 0);

        /*
         * When canceling locks on a client we do not need to maintain
         * sharp timing; we only want to cancel locks asap according to the
         * new SLV. This may be called when SLV has changed much, which is
         * why we do not take pl->pl_recalc_time into account here.
         */
        ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC,
                              LDLM_CANCEL_LRUR);

out:
        spin_lock(&pl->pl_lock);
        /*
         * Time of LRU resizing might be longer than period,
         * so update after LRU resizing rather than before it.
         */
        pl->pl_recalc_time = cfs_time_current_sec();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
        return ret;
}
/**
 * This function is the main entry point for memory pressure handling on
 * the client side. Its main goal is to cancel some number of locks on the
 * passed \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
                                int nr, unsigned int gfp_mask)
{
        struct ldlm_namespace *ns;
        int unused;

        ns = ldlm_pl2ns(pl);

        /*
         * Do not cancel locks in case lru resize is disabled for this ns.
         */
        if (!ns_connect_lru_resize(ns))
                return 0;

        /*
         * Make sure that pool knows last SLV and Limit from obd.
         */
        ldlm_cli_pool_pop_slv(pl);

        spin_lock(&ns->ns_lock);
        unused = ns->ns_nr_unused;
        spin_unlock(&ns->ns_lock);

#ifdef __KERNEL__
        if (nr == 0)
                return (unused / 100) * sysctl_vfs_cache_pressure;
        else
                return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
#else
        return unused - (nr ? ldlm_cancel_lru(ns, nr, LCF_ASYNC,
                                              LDLM_CANCEL_SHRINK) : 0);
#endif
}
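/*
 * For example (illustrative): with unused = 5000 locks in the LRU and the
 * default sysctl_vfs_cache_pressure = 100, a count request (nr == 0)
 * reports (5000 / 100) * 100 = 5000 potentially freeable entries, while
 * nr > 0 asynchronously cancels up to nr LRU locks.
 */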
struct ldlm_pool_ops ldlm_srv_pool_ops = {
        .po_recalc = ldlm_srv_pool_recalc,
        .po_shrink = ldlm_srv_pool_shrink,
        .po_setup  = ldlm_srv_pool_setup
};

struct ldlm_pool_ops ldlm_cli_pool_ops = {
        .po_recalc = ldlm_cli_pool_recalc,
        .po_shrink = ldlm_cli_pool_shrink
};
/**
 * Pool recalc wrapper. Will call either the client or server pool recalc
 * callback depending on what pool \a pl is used.
 */
int ldlm_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;
        int count;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec <= 0)
                goto recalc;

        spin_lock(&pl->pl_lock);
        if (recalc_interval_sec > 0) {
                /*
                 * Update pool statistics every 1s.
                 */
                ldlm_pool_recalc_stats(pl);

                /*
                 * Zero out all rates and speed for the last period.
                 */
                atomic_set(&pl->pl_grant_rate, 0);
                atomic_set(&pl->pl_cancel_rate, 0);
        }
        spin_unlock(&pl->pl_lock);

recalc:
        if (pl->pl_ops->po_recalc != NULL) {
                count = pl->pl_ops->po_recalc(pl);
                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                    count);
        }
        recalc_interval_sec = pl->pl_recalc_time - cfs_time_current_sec() +
                              pl->pl_recalc_period;
        if (recalc_interval_sec <= 0) {
                /* Prevent too frequent recalculation. */
                CDEBUG(D_DLMTRACE, "Negative interval(%ld), "
                       "too short period(%ld)",
                       (long)recalc_interval_sec,
                       (long)pl->pl_recalc_period);
                recalc_interval_sec = 1;
        }

        return recalc_interval_sec;
}
/**
 * Pool shrink wrapper. Will call either the client or server pool shrink
 * callback depending on what pool \a pl is used.
 */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     unsigned int gfp_mask)
{
        int cancel = 0;

        if (pl->pl_ops->po_shrink != NULL) {
                cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
                if (nr > 0) {
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_REQTD_STAT,
                                            nr);
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_FREED_STAT,
                                            cancel);
                        CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
                               "shrunk %d\n", pl->pl_name, nr, cancel);
                }
        }
        return cancel;
}
EXPORT_SYMBOL(ldlm_pool_shrink);
/**
 * Pool setup wrapper. Will call either the client or server pool setup
 * callback depending on what pool \a pl is used.
 *
 * Sets passed \a limit into pool \a pl.
 */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
        if (pl->pl_ops->po_setup != NULL)
                return pl->pl_ops->po_setup(pl, limit);
        return 0;
}
EXPORT_SYMBOL(ldlm_pool_setup);
#ifdef __KERNEL__
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
        int granted, grant_rate, cancel_rate, grant_step;
        int grant_speed, grant_plan, lvf;
        struct ldlm_pool *pl = m->private;
        __u64 slv, clv;
        __u32 limit;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        clv = pl->pl_client_lock_volume;
        limit = ldlm_pool_get_limit(pl);
        grant_plan = pl->pl_grant_plan;
        granted = atomic_read(&pl->pl_granted);
        grant_rate = atomic_read(&pl->pl_grant_rate);
        cancel_rate = atomic_read(&pl->pl_cancel_rate);
        grant_speed = grant_rate - cancel_rate;
        lvf = atomic_read(&pl->pl_lock_volume_factor);
        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        spin_unlock(&pl->pl_lock);

        seq_printf(m, "LDLM pool state (%s):\n"
                   "  SLV: "LPU64"\n"
                   "  CLV: "LPU64"\n"
                   "  LVF: %d\n",
                   pl->pl_name, slv, clv, lvf);

        if (ns_is_server(ldlm_pl2ns(pl))) {
                seq_printf(m, "  GSP: %d%%\n"
                           "  GP:  %d\n",
                           grant_step, grant_plan);
        }
        seq_printf(m, "  GR:  %d\n" "  CR:  %d\n" "  GS:  %d\n"
                   "  G:   %d\n" "  L:   %d\n",
                   grant_rate, cancel_rate, grant_speed,
                   granted, limit);
        return 0;
}
LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
{
        struct ldlm_pool *pl = m->private;
        int grant_speed;

        spin_lock(&pl->pl_lock);
        /* serialize with ldlm_pool_recalc */
        grant_speed = atomic_read(&pl->pl_grant_rate) -
                      atomic_read(&pl->pl_cancel_rate);
        spin_unlock(&pl->pl_lock);
        return lprocfs_uint_seq_show(m, &grant_speed);
}
LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);

LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
                                               const char *buf, size_t len,
                                               loff_t *off)
{
        struct seq_file *seq = file->private_data;

        return lprocfs_wr_recalc_period(file, buf, len, seq->private);
}
LPROC_SEQ_FOPS(lprocfs_recalc_period);

LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);

LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);
static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
        struct ldlm_namespace *ns = ldlm_pl2ns(pl);
        struct proc_dir_entry *parent_ns_proc;
        struct lprocfs_seq_vars pool_vars[2];
        char *var_name = NULL;
        int rc = 0;

        OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
        if (!var_name)
                return -ENOMEM;

        parent_ns_proc = ns->ns_proc_dir_entry;
        if (parent_ns_proc == NULL) {
                CERROR("%s: proc entry is not initialized\n",
                       ldlm_ns_name(ns));
                GOTO(out_free_name, rc = -EINVAL);
        }
        pl->pl_proc_dir = lprocfs_seq_register("pool", parent_ns_proc,
                                               NULL, NULL);
        if (IS_ERR(pl->pl_proc_dir)) {
                rc = PTR_ERR(pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
                CERROR("%s: cannot create 'pool' proc entry: rc = %d\n",
                       ldlm_ns_name(ns), rc);
                GOTO(out_free_name, rc);
        }

        var_name[MAX_STRING_SIZE] = '\0';
        memset(pool_vars, 0, sizeof(pool_vars));
        pool_vars[0].name = var_name;

        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "server_lock_volume",
                     &pl->pl_server_lock_volume, &ldlm_pool_u64_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "limit", &pl->pl_limit,
                     &ldlm_pool_rw_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "granted",
                     &pl->pl_granted, &ldlm_pool_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_speed", pl,
                     &lprocfs_grant_speed_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "cancel_rate",
                     &pl->pl_cancel_rate, &ldlm_pool_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_rate",
                     &pl->pl_grant_rate, &ldlm_pool_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "grant_plan", pl,
                     &lprocfs_grant_plan_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "recalc_period",
                     pl, &lprocfs_recalc_period_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "lock_volume_factor",
                     &pl->pl_lock_volume_factor, &ldlm_pool_rw_atomic_fops);
        ldlm_add_var(&pool_vars[0], pl->pl_proc_dir, "state", pl,
                     &lprocfs_pool_state_fops);

        pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
                                           LDLM_POOL_FIRST_STAT, 0);
        if (pl->pl_stats == NULL)
                GOTO(out_free_name, rc = -ENOMEM);

        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "granted", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_plan", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "slv", "slv");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_request", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_timing", "sec");
        rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);

out_free_name:
        OBD_FREE(var_name, MAX_STRING_SIZE + 1);
        return rc;
}
static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
{
        if (pl->pl_stats != NULL) {
                lprocfs_free_stats(&pl->pl_stats);
                pl->pl_stats = NULL;
        }
        if (pl->pl_proc_dir != NULL) {
                lprocfs_remove(&pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
        }
}
#else /* !__KERNEL__ */
#define ldlm_pool_proc_init(pl) (0)
#define ldlm_pool_proc_fini(pl) do {} while (0)
#endif /* __KERNEL__ */
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client)
{
        int rc;

        spin_lock_init(&pl->pl_lock);
        atomic_set(&pl->pl_granted, 0);
        pl->pl_recalc_time = cfs_time_current_sec();
        atomic_set(&pl->pl_lock_volume_factor, 1);

        atomic_set(&pl->pl_grant_rate, 0);
        atomic_set(&pl->pl_cancel_rate, 0);
        pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

        snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
                 ldlm_ns_name(ns), idx);

        if (client == LDLM_NAMESPACE_SERVER) {
                pl->pl_ops = &ldlm_srv_pool_ops;
                ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
                pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
                pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
        } else {
                ldlm_pool_set_limit(pl, 1);
                pl->pl_server_lock_volume = 0;
                pl->pl_ops = &ldlm_cli_pool_ops;
                pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
        }
        pl->pl_client_lock_volume = 0;
        rc = ldlm_pool_proc_init(pl);
        if (rc)
                return rc;

        CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

        return 0;
}
EXPORT_SYMBOL(ldlm_pool_init);
void ldlm_pool_fini(struct ldlm_pool *pl)
{
        ldlm_pool_proc_fini(pl);

        /*
         * Pool should not be used after this point. We can't free it here
         * as it lives in struct ldlm_namespace, but we are still interested
         * in catching any abnormal use.
         */
        POISON(pl, 0x5a, sizeof(*pl));
}
EXPORT_SYMBOL(ldlm_pool_fini);
/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * FLOCK locks are special in the sense that they are almost never
         * cancelled; instead a special kind of lock is used to drop them.
         * Also, there is no LRU for flock locks, so no point in tracking
         * them anyway.
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;

        atomic_inc(&pl->pl_granted);
        atomic_inc(&pl->pl_grant_rate);
        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);

        /*
         * Do not do pool recalc for the client side as all locks which
         * potentially may be canceled have already been packed into
         * enqueue/cancel rpc. Also we do not want to run out of stack
         * with too long call paths.
         */
        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_add);
/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;

        LASSERT(atomic_read(&pl->pl_granted) > 0);
        atomic_dec(&pl->pl_granted);
        atomic_inc(&pl->pl_cancel_rate);

        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);

        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_del);
/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
        __u64 slv;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        spin_unlock(&pl->pl_lock);
        return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);

/**
 * Sets passed \a slv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_server_lock_volume = slv;
        spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);

/**
 * Returns current \a pl CLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
        __u64 slv;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_client_lock_volume;
        spin_unlock(&pl->pl_lock);
        return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_clv);

/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_client_lock_volume = clv;
        spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_clv);

/**
 * Returns current \a pl limit.
 */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);

/**
 * Sets passed \a limit to \a pl.
 */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
        atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);

/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_lock_volume_factor);
}
EXPORT_SYMBOL(ldlm_pool_get_lvf);
#ifdef __KERNEL__
static unsigned int ldlm_pool_granted(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_granted);
}

static struct ptlrpc_thread *ldlm_pools_thread;
static struct shrinker *ldlm_pools_srv_shrinker;
static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
/*
 * Count locks from all namespaces (if possible). Returns the number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(ldlm_side_t client, unsigned int gfp_mask)
{
        int total = 0, nr_ns;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL; /* loop detection */
        void *cookie;

        if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
                return 0;

        CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
               client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

        cookie = cl_env_reenter();

        /*
         * Find out how many resources we may release.
         */
        for (nr_ns = ldlm_namespace_nr_read(client);
             nr_ns > 0; nr_ns--) {
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        cl_env_reexit(cookie);
                        return 0;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns == ns_old) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));
                total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                ldlm_namespace_put(ns);
        }

        cl_env_reexit(cookie);
        return total;
}
static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr,
                                     unsigned int gfp_mask)
{
        unsigned long freed = 0;
        int tmp, nr_ns;
        struct ldlm_namespace *ns;
        void *cookie;

        if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
                return -1;

        cookie = cl_env_reenter();

        /*
         * Shrink at least ldlm_namespace_nr_read(client) namespaces.
         */
        for (tmp = nr_ns = ldlm_namespace_nr_read(client);
             tmp > 0; tmp--) {
                int cancel, nr_locks;

                /*
                 * Do not call shrink under ldlm_namespace_lock(client)
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);
                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                nr_locks = ldlm_pool_granted(&ns->ns_pool);
                /*
                 * We used to shrink proportionally, but with the new
                 * shrinker API we lost the total number of freeable locks.
                 */
                cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
                freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
                ldlm_namespace_put(ns);
        }
        cl_env_reexit(cookie);
        /*
         * We only decrease the SLV in the server pools shrinker; return
         * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128
         */
        return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
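/*
 * Example of the proportional split above (hypothetical numbers): a request
 * to scan nr = 128 locks across nr_ns = 4 namespaces asks each namespace to
 * cancel 1 + min(nr_locks, 128 / 4), i.e. up to 33 locks, so namespaces
 * with few granted locks are not over-penalized.
 */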
#ifdef HAVE_SHRINKER_COUNT
static unsigned long ldlm_pools_srv_count(struct shrinker *s,
                                          struct shrink_control *sc)
{
        return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}

static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
                                         struct shrink_control *sc)
{
        return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
                               sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
                                          struct shrink_control *sc)
{
        return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
                                         struct shrink_control *sc)
{
        return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
                               sc->gfp_mask);
}

#else
/*
 * Cancel \a nr locks from all namespaces (if possible). Returns the number
 * of cached locks after the shrink is finished. All namespaces are asked to
 * cancel an approximately equal number of locks to keep balance.
 */
static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                             unsigned int gfp_mask)
{
        unsigned int total = 0;

        if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
            !(gfp_mask & __GFP_FS))
                return -1;

        CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
               nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

        total = ldlm_pools_count(client, gfp_mask);

        if (nr == 0 || total == 0)
                return total;

        return ldlm_pools_scan(client, nr, gfp_mask);
}

static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
                                 shrink_param(sc, nr_to_scan),
                                 shrink_param(sc, gfp_mask));
}

static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
                                 shrink_param(sc, nr_to_scan),
                                 shrink_param(sc, gfp_mask));
}

#endif /* HAVE_SHRINKER_COUNT */
int ldlm_pools_recalc(ldlm_side_t client)
{
        __u32 nr_l = 0, nr_p = 0, l;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL;
        int nr, equal = 0;
        int time = 50; /* seconds of sleep if no active namespaces */

        /*
         * No need to setup pool limit for client pools.
         */
        if (client == LDLM_NAMESPACE_SERVER) {
                /*
                 * Check all modest namespaces first.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
                                        ns_list_chain)
                {
                        if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
                                continue;

                        l = ldlm_pool_granted(&ns->ns_pool);
                        if (l == 0)
                                l = 1;

                        /*
                         * Set the modest pools limit equal to their avg
                         * granted locks + ~6%.
                         */
                        l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
                        ldlm_pool_setup(&ns->ns_pool, l);
                        nr_l += l;
                        nr_p++;
                }

                /*
                 * Make sure that modest namespaces did not eat more than
                 * 2/3 of limit.
                 */
                if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
                        CWARN("\"Modest\" pools eat out 2/3 of server locks "
                              "limit (%d of %lu). This means that you have "
                              "too many clients for this amount of server "
                              "RAM. Upgrade server!\n",
                              nr_l, LDLM_POOL_HOST_L);
                        equal = 1;
                }

                /*
                 * The rest is given to greedy namespaces.
                 */
                cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
                                        ns_list_chain)
                {
                        if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
                                continue;

                        if (equal) {
                                /*
                                 * In the case 2/3 of locks are eaten out by
                                 * modest pools, we re-setup an equal limit
                                 * for _all_ pools.
                                 */
                                l = LDLM_POOL_HOST_L /
                                        ldlm_namespace_nr_read(client);
                        } else {
                                /*
                                 * All the rest of greedy pools will have
                                 * all locks in equal parts.
                                 */
                                l = (LDLM_POOL_HOST_L - nr_l) /
                                        (ldlm_namespace_nr_read(client) -
                                         nr_p);
                        }
                        ldlm_pool_setup(&ns->ns_pool, l);
                }
                mutex_unlock(ldlm_namespace_lock(client));
        }
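        /*
         * Worked example of the split above (hypothetical numbers, and
         * assuming LDLM_POOLS_MODEST_MARGIN_SHIFT == 4, i.e. ~6%): with
         * LDLM_POOL_HOST_L = 30000, three modest namespaces each holding
         * 1600 granted locks get limits of 1600 + 1600/16 = 1700, so
         * nr_l = 5100; with 5 namespaces total, each of the 2 greedy ones
         * then gets (30000 - 5100) / (5 - 3) = 12450.
         */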
        /*
         * Recalc at least ldlm_namespace_nr_read(client) namespaces.
         */
        for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
                int skip;
                /*
                 * Lock the list, get the first @ns in the list, getref,
                 * move it to the tail, unlock and call pool recalc. This
                 * way we avoid calling recalc under @ns lock, which is
                 * really good as we get rid of a potential deadlock on
                 * client nodes when canceling locks synchronously.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (cfs_list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns_old == ns) { /* Full pass complete */
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                /* We got an empty namespace, need to move it back to the
                 * inactive list.
                 * The race with parallel resource creation is fine:
                 * - If they do namespace_get before our check, we fail the
                 *   check and they move this item to the end of the list
                 *   anyway
                 * - If we do the check and then they do namespace_get, then
                 *   we move the namespace to inactive and they will move
                 *   it back to active (synchronised by the lock, so no
                 *   clash there).
                 */
                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                spin_lock(&ns->ns_lock);
                /*
                 * Skip an ns which is being freed; we don't want to
                 * increase its refcount again, not even temporarily.
                 * bz21519 & LU-499.
                 */
                if (ns->ns_stopping) {
                        skip = 1;
                } else {
                        skip = 0;
                        ldlm_namespace_get(ns);
                }
                spin_unlock(&ns->ns_lock);

                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                /*
                 * After setup is done - recalc the pool.
                 */
                if (!skip) {
                        int ttime = ldlm_pool_recalc(&ns->ns_pool);

                        if (ttime < time)
                                time = ttime;

                        ldlm_namespace_put(ns);
                }
        }
        return time;
}
EXPORT_SYMBOL(ldlm_pools_recalc);
static int ldlm_pools_thread_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
        int s_time, c_time;

        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
               "ldlm_poold", current_pid());

        while (1) {
                struct l_wait_info lwi;

                /*
                 * Recalc all pools on this tick.
                 */
                s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
                c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

                /*
                 * Wait until the next check time, or until we're
                 * stopped.
                 */
                lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
                                  NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_event(thread),
                             &lwi);

                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
                else
                        thread_test_and_clear_flags(thread, SVC_EVENT);
        }

        thread_set_flags(thread, SVC_STOPPED);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
               "ldlm_poold", current_pid());

        complete_and_exit(&ldlm_pools_comp, 0);
}
static int ldlm_pools_thread_start(void)
{
        struct l_wait_info lwi = { 0 };
        struct task_struct *task;

        if (ldlm_pools_thread != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC_PTR(ldlm_pools_thread);
        if (ldlm_pools_thread == NULL)
                RETURN(-ENOMEM);

        init_completion(&ldlm_pools_comp);
        init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

        task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
                           "ldlm_poold");
        if (IS_ERR(task)) {
                CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
                OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
                ldlm_pools_thread = NULL;
                RETURN(PTR_ERR(task));
        }
        l_wait_event(ldlm_pools_thread->t_ctl_waitq,
                     thread_is_running(ldlm_pools_thread), &lwi);
        RETURN(0);
}
static void ldlm_pools_thread_stop(void)
{
        if (ldlm_pools_thread == NULL)
                return;

        thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
        wake_up(&ldlm_pools_thread->t_ctl_waitq);

        /*
         * Make sure that pools thread is finished before freeing @thread.
         * This fixes a possible race and oops due to accessing freed memory
         * in the pools thread.
         */
        wait_for_completion(&ldlm_pools_comp);
        OBD_FREE_PTR(ldlm_pools_thread);
        ldlm_pools_thread = NULL;
}
int ldlm_pools_init(void)
{
        int rc;
        DEF_SHRINKER_VAR(shsvar, ldlm_pools_srv_shrink,
                         ldlm_pools_srv_count, ldlm_pools_srv_scan);
        DEF_SHRINKER_VAR(shcvar, ldlm_pools_cli_shrink,
                         ldlm_pools_cli_count, ldlm_pools_cli_scan);

        rc = ldlm_pools_thread_start();
        if (rc == 0) {
                ldlm_pools_srv_shrinker =
                        set_shrinker(DEFAULT_SEEKS, &shsvar);
                ldlm_pools_cli_shrinker =
                        set_shrinker(DEFAULT_SEEKS, &shcvar);
        }
        return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
        if (ldlm_pools_srv_shrinker != NULL) {
                remove_shrinker(ldlm_pools_srv_shrinker);
                ldlm_pools_srv_shrinker = NULL;
        }
        if (ldlm_pools_cli_shrinker != NULL) {
                remove_shrinker(ldlm_pools_cli_shrinker);
                ldlm_pools_cli_shrinker = NULL;
        }
        ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);
#endif /* __KERNEL__ */
#else /* !HAVE_LRU_RESIZE_SUPPORT */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
        return 0;
}
EXPORT_SYMBOL(ldlm_pool_setup);

int ldlm_pool_recalc(struct ldlm_pool *pl)
{
        return 0;
}
EXPORT_SYMBOL(ldlm_pool_recalc);

int ldlm_pool_shrink(struct ldlm_pool *pl,
                     int nr, unsigned int gfp_mask)
{
        return 0;
}
EXPORT_SYMBOL(ldlm_pool_shrink);

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client)
{
        return 0;
}
EXPORT_SYMBOL(ldlm_pool_init);

void ldlm_pool_fini(struct ldlm_pool *pl)
{
        return;
}
EXPORT_SYMBOL(ldlm_pool_fini);

void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        return;
}
EXPORT_SYMBOL(ldlm_pool_add);

void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        return;
}
EXPORT_SYMBOL(ldlm_pool_del);

__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
        return 1;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);

void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
        return;
}
EXPORT_SYMBOL(ldlm_pool_set_slv);

__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
        return 1;
}
EXPORT_SYMBOL(ldlm_pool_get_clv);

void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
        return;
}
EXPORT_SYMBOL(ldlm_pool_set_clv);

__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
        return 1;
}
EXPORT_SYMBOL(ldlm_pool_get_limit);

void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
        return;
}
EXPORT_SYMBOL(ldlm_pool_set_limit);

__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
        return 1;
}
EXPORT_SYMBOL(ldlm_pool_get_lvf);

int ldlm_pools_init(void)
{
        return 0;
}
EXPORT_SYMBOL(ldlm_pools_init);

void ldlm_pools_fini(void)
{
        return;
}
EXPORT_SYMBOL(ldlm_pools_fini);

int ldlm_pools_recalc(ldlm_side_t client)
{
        return 0;
}
EXPORT_SYMBOL(ldlm_pools_recalc);
#endif /* HAVE_LRU_RESIZE_SUPPORT */