/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/lvfs/lvfs_lib.c
 * Lustre filesystem abstraction routines
 *
 * Copyright (C) 2007 Cluster File Systems, Inc.
 * Author: Andreas Dilger <adilger@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/random.h>

#include <liblustre.h>

#include <lustre_lib.h>
#include <lprocfs_status.h>
/* Fault-injection tunables, exported at the bottom of this file for use
 * by other Lustre modules.
 *
 * obd_fail_val:         auxiliary value consumed by the fail-loc checks in
 *                       __obd_fail_check_set() (random rate, skip count,
 *                       repeat count).
 * obd_fail_loc:         currently armed fail location plus modifier flag
 *                       bits (OBD_FAIL_ONCE, OBD_FAILED, ...).
 * obd_alloc_fail_rate:  probability of forcing an allocation failure in
 *                       obd_alloc_fail(), scaled against
 *                       OBD_ALLOC_FAIL_MASK. */
unsigned int obd_fail_val = 0;
unsigned long obd_fail_loc = 0;
unsigned int obd_alloc_fail_rate = 0;
38 int obd_alloc_fail(const void *ptr, const char *name, const char *type,
39 size_t size, const char *file, int line)
42 (ll_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
43 CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
44 ptr ? "force " :"", type, name, (__u64)size, file,
46 CERROR(LPU64" total bytes and "LPU64" total pages "
47 "("LPU64" bytes) allocated by Lustre, "
48 "%d total bytes by LNET\n",
50 obd_pages_sum() << CFS_PAGE_SHIFT,
52 atomic_read(&libcfs_kmemory));
57 EXPORT_SYMBOL(obd_alloc_fail);
59 int __obd_fail_check_set(__u32 id, __u32 value, int set)
61 static atomic_t obd_fail_count = ATOMIC_INIT(0);
63 LASSERT(!(id & OBD_FAIL_ONCE));
65 if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
66 (OBD_FAILED | OBD_FAIL_ONCE)) {
67 atomic_set(&obd_fail_count, 0); /* paranoia */
71 /* Fail 1/obd_fail_val times */
72 if (obd_fail_loc & OBD_FAIL_RAND) {
73 if (obd_fail_val < 2 || ll_rand() % obd_fail_val > 0)
77 /* Skip the first obd_fail_val, then fail */
78 if (obd_fail_loc & OBD_FAIL_SKIP) {
79 if (atomic_inc_return(&obd_fail_count) <= obd_fail_val)
83 /* Fail obd_fail_val times, overridden by FAIL_ONCE */
84 if (obd_fail_loc & OBD_FAIL_SOME &&
85 (!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
86 int count = atomic_inc_return(&obd_fail_count);
88 if (count >= obd_fail_val) {
89 set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
90 atomic_set(&obd_fail_count, 0);
91 /* we are lost race to increase obd_fail_count */
92 if (count > obd_fail_val)
97 if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
98 (value & OBD_FAIL_ONCE))
99 set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
101 /* Lost race to set OBD_FAILED_BIT. */
102 if (test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
103 /* If OBD_FAIL_ONCE is valid, only one process can fail,
104 * otherwise multi-process can fail at the same time. */
105 if (obd_fail_loc & OBD_FAIL_ONCE)
110 case OBD_FAIL_LOC_NOSET:
112 case OBD_FAIL_LOC_ORSET:
113 obd_fail_loc |= value & ~(OBD_FAILED | OBD_FAIL_ONCE);
115 case OBD_FAIL_LOC_RESET:
116 obd_fail_loc = value;
119 LASSERTF(0, "called with bad set %u\n", set);
125 EXPORT_SYMBOL(__obd_fail_check_set);
128 void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
131 struct lprocfs_counter *percpu_cntr;
137 /* With per-client stats, statistics are allocated only for
138 * single CPU area, so the smp_id should be 0 always. */
139 smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
141 percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
142 atomic_inc(&percpu_cntr->lc_cntl.la_entry);
143 percpu_cntr->lc_count++;
145 if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
146 percpu_cntr->lc_sum += amount;
147 if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
148 percpu_cntr->lc_sumsquare += (__u64)amount * amount;
149 if (amount < percpu_cntr->lc_min)
150 percpu_cntr->lc_min = amount;
151 if (amount > percpu_cntr->lc_max)
152 percpu_cntr->lc_max = amount;
154 atomic_inc(&percpu_cntr->lc_cntl.la_exit);
155 lprocfs_stats_unlock(stats);
157 EXPORT_SYMBOL(lprocfs_counter_add);
159 void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
162 struct lprocfs_counter *percpu_cntr;
168 /* With per-client stats, statistics are allocated only for
169 * single CPU area, so the smp_id should be 0 always. */
170 smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
172 percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
173 atomic_inc(&percpu_cntr->lc_cntl.la_entry);
174 if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX)
175 percpu_cntr->lc_sum -= amount;
176 atomic_inc(&percpu_cntr->lc_cntl.la_exit);
177 lprocfs_stats_unlock(stats);
179 EXPORT_SYMBOL(lprocfs_counter_sub);
/* Export the fault-injection tunables for other Lustre modules. */
EXPORT_SYMBOL(obd_fail_loc);
EXPORT_SYMBOL(obd_alloc_fail_rate);
EXPORT_SYMBOL(obd_fail_val);