1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * lustre/lvfs/lvfs_lib.c
5 * Lustre filesystem abstraction routines
7 * Copyright (C) 2007 Cluster File Systems, Inc.
8 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * This file is part of Lustre, http://www.lustre.org.
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
27 #include <linux/random.h>
29 #include <liblustre.h>
31 #include <lustre_lib.h>
32 #include <lprocfs_status.h>
/* High-water marks for Lustre memory accounting: peak page count and peak
 * byte count ever observed by obd_update_maxusage() below.
 * NOTE(review): this chunk appears to have lines elided (embedded original
 * line numbers are non-contiguous) — surrounding declarations may exist. */
34 __u64 obd_max_pages = 0;
35 __u64 obd_max_alloc = 0;
/* Global lprocfs stats object tracking Lustre memory allocations. */
38 struct lprocfs_stats *obd_memory = NULL;
/* Protects obd_max_pages / obd_max_alloc against concurrent update/read. */
39 spinlock_t obd_updatemax_lock = SPIN_LOCK_UNLOCKED;
40 /* refine later and change to seqlock or similar from libcfs */
/* Fault-injection knobs, settable via /proc: obd_fail_loc selects the
 * fault site, obd_fail_val parameterizes it (rate, skip count, ...),
 * obd_alloc_fail_rate controls random allocation-failure injection. */
46 unsigned int obd_fail_val = 0;
47 unsigned int obd_fail_loc = 0;
48 unsigned int obd_alloc_fail_rate = 0;
/*
 * Allocation fault-injection / failure reporting helper.
 *
 * Called from the OBD_ALLOC paths with the result pointer, a name and type
 * string for the allocation, its size, and the call site (file:line).
 * When the allocation failed (or, presumably, when a random roll against
 * obd_alloc_fail_rate forces a failure — the condition's first operand is
 * not visible here), it logs the failing allocation plus a summary of total
 * Lustre/LNET memory usage.
 *
 * NOTE(review): several body lines (opening brace, first half of the if
 * condition, the closing braces and return statements) are missing from
 * this view — do not modify without the complete source.
 */
50 int obd_alloc_fail(const void *ptr, const char *name, const char *type,
51 size_t size, const char *file, int line)
54 (ll_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
55 CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
56 ptr ? "force " :"", type, name, (__u64)size, file,
58 CERROR(LPU64" total bytes and "LPU64" total pages "
59 "("LPU64" bytes) allocated by Lustre, "
60 "%d total bytes by LNET\n",
62 obd_pages_sum() << CFS_PAGE_SHIFT,
64 atomic_read(&libcfs_kmemory));
69 EXPORT_SYMBOL(obd_alloc_fail);
/*
 * Refresh the obd_max_pages / obd_max_alloc high-water marks from the
 * current page and byte totals, under obd_updatemax_lock.
 *
 * NOTE(review): the declarations of max1/max2 and the assignment lines
 * inside the two if-bodies are missing from this view.  The `()` (rather
 * than `(void)`) prototype is old-style C — worth fixing when the full
 * file is available.
 */
72 void obd_update_maxusage()
76 max1 = obd_pages_sum();
77 max2 = obd_memory_sum();
/* Lock so a concurrent reader never sees a torn/partial update. */
79 spin_lock(&obd_updatemax_lock);
80 if (max1 > obd_max_pages)
82 if (max2 > obd_max_alloc)
84 spin_unlock(&obd_updatemax_lock);
/*
 * Return the peak Lustre byte allocation (obd_max_alloc), read under
 * obd_updatemax_lock for a consistent snapshot.
 * NOTE(review): the local declaration, the read of obd_max_alloc and the
 * return statement are missing from this view.
 */
88 __u64 obd_memory_max(void)
92 spin_lock(&obd_updatemax_lock);
94 spin_unlock(&obd_updatemax_lock);
/*
 * Return the peak Lustre page allocation (obd_max_pages), read under
 * obd_updatemax_lock for a consistent snapshot.
 * NOTE(review): the local declaration, the read of obd_max_pages and the
 * return statement are missing from this view.
 */
99 __u64 obd_pages_max(void)
103 spin_lock(&obd_updatemax_lock);
105 spin_unlock(&obd_updatemax_lock);
/* Export the memory-accounting API to other Lustre kernel modules. */
110 EXPORT_SYMBOL(obd_update_maxusage);
111 EXPORT_SYMBOL(obd_pages_max);
112 EXPORT_SYMBOL(obd_memory_max);
113 EXPORT_SYMBOL(obd_memory);
/*
 * Read one field of an lprocfs counter without taking a lock.
 *
 * Uses an optimistic retry scheme: the la_entry/la_exit atomics are
 * sampled before the read and the do/while retries until the counter was
 * not concurrently updated mid-read (both generation counters match the
 * starting sample).  The field argument selects which statistic to
 * return: config flags, sum, min, max, a crude average estimate
 * ((max-min)/2 — note this is a midpoint, not a true mean), sum of
 * squares, or event count.
 *
 * NOTE(review): the switch/do-loop opening lines, most case bodies,
 * break statements and the final return are missing from this view.
 */
118 __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
119 enum lprocfs_fields_flags field)
/* Snapshot the writer "entry" generation before reading any fields. */
127 centry = atomic_read(&lc->lc_cntl.la_entry);
130 case LPROCFS_FIELDS_FLAGS_CONFIG:
133 case LPROCFS_FIELDS_FLAGS_SUM:
136 case LPROCFS_FIELDS_FLAGS_MIN:
139 case LPROCFS_FIELDS_FLAGS_MAX:
142 case LPROCFS_FIELDS_FLAGS_AVG:
143 ret = (lc->lc_max - lc->lc_min)/2;
145 case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
146 ret = lc->lc_sumsquare;
148 case LPROCFS_FIELDS_FLAGS_COUNT:
/* Retry if a writer entered or exited the counter while we read it. */
154 } while (centry != atomic_read(&lc->lc_cntl.la_entry) &&
155 centry != atomic_read(&lc->lc_cntl.la_exit));
159 EXPORT_SYMBOL(lprocfs_read_helper);
/*
 * Add `amount` to counter `idx` of `stats` on the calling CPU's slot.
 *
 * Bumps la_entry before and la_exit after the update so lockless readers
 * (lprocfs_read_helper) can detect a concurrent modification and retry.
 * Beyond the event count, the sum/min/max (and optionally sum-of-squares
 * for stddev) are only maintained when the counter was configured with
 * LPROCFS_CNTR_AVGMINMAX / LPROCFS_CNTR_STDDEV.
 *
 * NOTE(review): the signature's trailing parameter line (presumably
 * `long amount`), local declarations, and the closing brace are missing
 * from this view.
 */
161 void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
164 struct lprocfs_counter *percpu_cntr;
170 /* With per-client stats, statistics are allocated only for
171 * single CPU area, so the smp_id should be 0 always. */
172 smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
174 percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
/* Mark "writer in progress" for the lockless reader protocol. */
175 atomic_inc(&percpu_cntr->lc_cntl.la_entry);
176 percpu_cntr->lc_count++;
178 if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
179 percpu_cntr->lc_sum += amount;
180 if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
181 percpu_cntr->lc_sumsquare += (__u64)amount * amount;
182 if (amount < percpu_cntr->lc_min)
183 percpu_cntr->lc_min = amount;
184 if (amount > percpu_cntr->lc_max)
185 percpu_cntr->lc_max = amount;
/* Mark "writer done"; readers compare la_entry/la_exit generations. */
187 atomic_inc(&percpu_cntr->lc_cntl.la_exit);
188 lprocfs_stats_unlock(stats);
190 EXPORT_SYMBOL(lprocfs_counter_add);
/*
 * Subtract `amount` from counter `idx` of `stats` on the calling CPU's
 * slot.  Mirrors lprocfs_counter_add() but only adjusts lc_sum (when
 * AVGMINMAX is configured); count/min/max are untouched.  Uses the same
 * la_entry/la_exit generation protocol for lockless readers.
 *
 * NOTE(review): the signature's trailing parameter line, local
 * declarations, and the closing brace are missing from this view.
 */
192 void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
195 struct lprocfs_counter *percpu_cntr;
201 /* With per-client stats, statistics are allocated only for
202 * single CPU area, so the smp_id should be 0 always. */
203 smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
205 percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
/* Mark "writer in progress" for the lockless reader protocol. */
206 atomic_inc(&percpu_cntr->lc_cntl.la_entry);
207 if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX)
208 percpu_cntr->lc_sum -= amount;
209 atomic_inc(&percpu_cntr->lc_cntl.la_exit);
210 lprocfs_stats_unlock(stats);
212 EXPORT_SYMBOL(lprocfs_counter_sub);
/* Export the fault-injection knobs to other Lustre kernel modules. */
215 EXPORT_SYMBOL(obd_fail_loc);
216 EXPORT_SYMBOL(obd_alloc_fail_rate);
217 EXPORT_SYMBOL(obd_fail_val);
/*
 * Core fault-injection check: decide whether the fault site `id` should
 * fire this time, based on the global obd_fail_loc/obd_fail_val settings.
 *
 * Fast path: if the configured fail location does not match `id`, bail
 * out immediately.  Otherwise the mode bits modulate triggering:
 *   OBD_FAIL_ONCE|OBD_FAILED - already fired once; do not fire again.
 *   OBD_FAIL_RAND - fire with probability ~1/obd_fail_val.
 *   OBD_FAIL_SKIP - skip the first obd_fail_val hits, then fire.
 *   OBD_FAIL_SOME - fire the first obd_fail_val hits, then stop
 *                   (by setting OBD_FAIL_ONCE).
 * On firing, OBD_FAILED is recorded, and an OBD_FAIL_ONCE bit passed in
 * `id` (legacy callers) is folded into obd_fail_loc.
 *
 * NOTE(review): several return statements, count increments and closing
 * braces are missing from this view.  The static `count` and the RMW
 * updates of obd_fail_loc are unsynchronized shared state — presumably
 * acceptable for a debug facility, but racy; verify against full source.
 */
219 int obd_fail_check(__u32 id)
221 static int count = 0;
/* Fast path: configured fail location does not match this site. */
222 if (likely((obd_fail_loc & OBD_FAIL_MASK_LOC) !=
223 (id & OBD_FAIL_MASK_LOC)))
226 if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
227 (OBD_FAILED | OBD_FAIL_ONCE)) {
228 count = 0; /* paranoia */
232 if (obd_fail_loc & OBD_FAIL_RAND) {
/* Local prototype so ll_rand() is usable without an extra header. */
233 unsigned int ll_rand(void);
234 if (obd_fail_val < 2)
236 if (ll_rand() % obd_fail_val > 0)
240 if (obd_fail_loc & OBD_FAIL_SKIP) {
242 if (count < obd_fail_val)
247 /* Overridden by FAIL_ONCE */
248 if (obd_fail_loc & OBD_FAIL_SOME) {
250 if (count >= obd_fail_val) {
252 /* Don't fail anymore */
253 obd_fail_loc |= OBD_FAIL_ONCE;
/* Record that this fail location has fired at least once. */
257 obd_fail_loc |= OBD_FAILED;
258 /* Handle old checks that OR in this */
259 if (id & OBD_FAIL_ONCE)
260 obd_fail_loc |= OBD_FAIL_ONCE;
264 EXPORT_SYMBOL(obd_fail_check);