b=18751 Move prng.c to libcfs
[fs/lustre-release.git] lustre/lvfs/lvfs_lib.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/lvfs/lvfs_lib.c
 *
 * Lustre filesystem abstraction routines
 *
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */
#ifdef __KERNEL__
#include <linux/module.h>
#else
#include <liblustre.h>
#endif
#include <lustre_lib.h>
#include <lprocfs_status.h>

unsigned int obd_fail_val = 0;
unsigned long obd_fail_loc = 0;
unsigned int obd_alloc_fail_rate = 0;

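/*
 * Decide whether an allocation should be treated as failed.  Returns 1 if
 * the allocation really failed (ptr == NULL) or if a failure is injected
 * at the rate configured in obd_alloc_fail_rate, logging the failing call
 * site and the current Lustre/LNET memory usage in either case.
 */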
int obd_alloc_fail(const void *ptr, const char *name, const char *type,
                   size_t size, const char *file, int line)
{
        if (ptr == NULL ||
            (cfs_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
                CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
                       ptr ? "force " : "", type, name, (__u64)size, file,
                       line);
                CERROR(LPU64" total bytes and "LPU64" total pages "
                       "("LPU64" bytes) allocated by Lustre, "
                       "%d total bytes by LNET\n",
                       obd_memory_sum(),
                       obd_pages_sum() << CFS_PAGE_SHIFT,
                       obd_pages_sum(),
                       cfs_atomic_read(&libcfs_kmemory));
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(obd_alloc_fail);

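/*
 * Core of the fail-injection machinery.  obd_fail_loc selects the fail
 * point and carries modifier flags; obd_fail_val tunes them:
 *   OBD_FAIL_ONCE - trigger the failure only once, then disarm
 *   OBD_FAIL_RAND - trigger with probability 1/obd_fail_val
 *   OBD_FAIL_SKIP - skip the first obd_fail_val hits, then trigger
 *   OBD_FAIL_SOME - trigger obd_fail_val times, then disarm
 * 'set' controls how 'value' updates obd_fail_loc once the point fires
 * (OBD_FAIL_LOC_NOSET/ORSET/RESET).  Returns 1 if the caller should
 * simulate the failure, 0 otherwise.
 */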
int __obd_fail_check_set(__u32 id, __u32 value, int set)
{
        static cfs_atomic_t obd_fail_count = CFS_ATOMIC_INIT(0);

        LASSERT(!(id & OBD_FAIL_ONCE));

        if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
            (OBD_FAILED | OBD_FAIL_ONCE)) {
                cfs_atomic_set(&obd_fail_count, 0); /* paranoia */
                return 0;
        }

        /* Fail with probability 1/obd_fail_val */
        if (obd_fail_loc & OBD_FAIL_RAND) {
                if (obd_fail_val < 2 || cfs_rand() % obd_fail_val > 0)
                        return 0;
        }

        /* Skip the first obd_fail_val hits, then fail */
        if (obd_fail_loc & OBD_FAIL_SKIP) {
                if (cfs_atomic_inc_return(&obd_fail_count) <= obd_fail_val)
                        return 0;
        }

        /* Fail obd_fail_val times, overridden by FAIL_ONCE */
        if (obd_fail_loc & OBD_FAIL_SOME &&
            (!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
                int count = cfs_atomic_inc_return(&obd_fail_count);

                if (count >= obd_fail_val) {
                        cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
                        cfs_atomic_set(&obd_fail_count, 0);
                        /* we lost the race to increase obd_fail_count */
                        if (count > obd_fail_val)
                                return 0;
                }
        }

        if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
            (value & OBD_FAIL_ONCE))
                cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);

        /* Lost race to set OBD_FAILED_BIT. */
        if (cfs_test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
                /* If OBD_FAIL_ONCE is set, only one process may trigger the
                 * failure; otherwise multiple processes may fail at the
                 * same time. */
                if (obd_fail_loc & OBD_FAIL_ONCE)
                        return 0;
        }

        switch (set) {
                case OBD_FAIL_LOC_NOSET:
                        break;
                case OBD_FAIL_LOC_ORSET:
                        obd_fail_loc |= value & ~(OBD_FAILED | OBD_FAIL_ONCE);
                        break;
                case OBD_FAIL_LOC_RESET:
                        obd_fail_loc = value;
                        break;
                default:
                        LASSERTF(0, "called with bad set %u\n", set);
                        break;
        }

        return 1;
}
EXPORT_SYMBOL(__obd_fail_check_set);

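/*
 * Like __obd_fail_check_set(), but when the fail point fires the current
 * task sleeps uninterruptibly for 'ms' milliseconds, so the fail point
 * injects a delay rather than an immediate error; the check result is
 * still returned to the caller.
 */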
int __obd_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
{
        int ret = 0;

        ret = __obd_fail_check_set(id, value, set);
        if (ret) {
                CERROR("obd_fail_timeout id %x sleeping for %dms\n",
                       id, ms);
                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
                                                   cfs_time_seconds(ms) / 1000);
                cfs_set_current_state(CFS_TASK_RUNNING);
                CERROR("obd_fail_timeout id %x awake\n", id);
        }
        return ret;
}
EXPORT_SYMBOL(__obd_fail_timeout_set);

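/*
 * Typical use of the fail-injection helpers above (an illustrative sketch
 * only; real callers go through the OBD_FAIL_CHECK()/OBD_FAIL_TIMEOUT()
 * wrapper macros in the obd headers, and OBD_FAIL_SOMETHING is a
 * placeholder for a real fail code):
 *
 *      if (OBD_FAIL_CHECK(OBD_FAIL_SOMETHING))     error out when armed
 *              RETURN(-EIO);
 *
 *      OBD_FAIL_TIMEOUT(OBD_FAIL_SOMETHING, 5);    or stall for 5 seconds
 *
 * The fail point is armed from user space, e.g.:
 *
 *      lctl set_param fail_loc=0x123 fail_val=5
 */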
#ifdef LPROCFS
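/*
 * Add 'amount' to counter 'idx' of 'stats' in the calling CPU's slot.
 * The la_entry/la_exit atomics are bumped around the update so readers
 * can detect an in-flight writer; sum/min/max (and sumsquare for stddev)
 * are only maintained for counters configured with
 * LPROCFS_CNTR_AVGMINMAX / LPROCFS_CNTR_STDDEV.
 */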
void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
                         long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated in a single-CPU
         * area only, so smp_id should always be 0. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        percpu_cntr->lc_count++;

        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                /* see comment in lprocfs_counter_sub */
                LASSERT(!cfs_in_interrupt());

                percpu_cntr->lc_sum += amount;
                if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
                        percpu_cntr->lc_sumsquare += (__s64)amount * amount;
                if (amount < percpu_cntr->lc_min)
                        percpu_cntr->lc_min = amount;
                if (amount > percpu_cntr->lc_max)
                        percpu_cntr->lc_max = amount;
        }
        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_add);

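/*
 * Subtract 'amount' from counter 'idx' of 'stats'.  Unlike
 * lprocfs_counter_add() this may run in softirq context (from RCU
 * callbacks that free memory), in which case the update goes to the
 * separate lc_sum_irq field; see the bz20650 comment below.
 */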
void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
                         long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated in a single-CPU
         * area only, so smp_id should always be 0. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                /*
                 * Currently lprocfs_counter_add() can only be called in
                 * thread context, but we sometimes free memory from RCU
                 * callbacks, which call lprocfs_counter_sub(), and RCU
                 * callbacks may execute in softirq context.  That is the
                 * only way we can get here in softirq context, so use a
                 * separate counter for it.  bz20650.
                 */
                if (cfs_in_interrupt())
                        percpu_cntr->lc_sum_irq -= amount;
                else
                        percpu_cntr->lc_sum -= amount;
        }
        cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_sub);
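/*
 * Illustrative caller sketch (not part of this file; names follow the
 * usual lprocfs API, but check lprocfs_status.h for the exact signatures,
 * and LAST_IDX/READ_BYTES_IDX are placeholder counter indices):
 *
 *      struct lprocfs_stats *stats = lprocfs_alloc_stats(LAST_IDX, 0);
 *      lprocfs_counter_init(stats, READ_BYTES_IDX, LPROCFS_CNTR_AVGMINMAX,
 *                           "read_bytes", "bytes");
 *      ...
 *      lprocfs_counter_add(stats, READ_BYTES_IDX, nob);
 *      lprocfs_counter_sub(stats, READ_BYTES_IDX, nob);
 */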
#endif  /* LPROCFS */

EXPORT_SYMBOL(obd_fail_loc);
EXPORT_SYMBOL(obd_alloc_fail_rate);
EXPORT_SYMBOL(obd_fail_val);