/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/lvfs/lvfs_lib.c
 *  Lustre filesystem abstraction routines
 *
 *  Copyright (C) 2007 Cluster File Systems, Inc.
 *   Author: Andreas Dilger <adilger@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/random.h>
#else
#include <liblustre.h>
#endif
#include <lustre_lib.h>
#include <lprocfs_status.h>

unsigned int obd_fail_val = 0;
unsigned long obd_fail_loc = 0;
unsigned int obd_alloc_fail_rate = 0;

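/*
 * obd_alloc_fail() - decide whether an allocation should be treated as failed.
 *
 * Returns 1 and logs the current Lustre/LNET memory usage when @ptr is NULL
 * (a real allocation failure) or when random failure injection triggers
 * (rate controlled by obd_alloc_fail_rate); returns 0 otherwise.
 */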
int obd_alloc_fail(const void *ptr, const char *name, const char *type,
                   size_t size, const char *file, int line)
{
        if (ptr == NULL ||
            (ll_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
                CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
                       ptr ? "force " : "", type, name, (__u64)size, file,
                       line);
                CERROR(LPU64" total bytes and "LPU64" total pages "
                       "("LPU64" bytes) allocated by Lustre, "
                       "%d total bytes by LNET\n",
                       obd_memory_sum(),
                       obd_pages_sum(),
                       obd_pages_sum() << CFS_PAGE_SHIFT,
                       atomic_read(&libcfs_kmemory));
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(obd_alloc_fail);

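/*
 * __obd_fail_check_set() - core of the obd_fail_loc fault injection mechanism.
 *
 * Decides whether the matched fail point @id actually fires, honouring the
 * RAND, SKIP, SOME and ONCE modifiers set in obd_fail_loc, and optionally
 * updates obd_fail_loc according to @set (NOSET, ORSET or RESET).
 *
 * Returns 1 if the fail point fires, 0 otherwise.
 */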
int __obd_fail_check_set(__u32 id, __u32 value, int set)
{
        static atomic_t obd_fail_count = ATOMIC_INIT(0);

        LASSERT(!(id & OBD_FAIL_ONCE));

        if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
            (OBD_FAILED | OBD_FAIL_ONCE)) {
                atomic_set(&obd_fail_count, 0); /* paranoia */
                return 0;
        }

        /* Fail 1/obd_fail_val times */
        if (obd_fail_loc & OBD_FAIL_RAND) {
                if (obd_fail_val < 2 || ll_rand() % obd_fail_val > 0)
                        return 0;
        }

        /* Skip the first obd_fail_val, then fail */
        if (obd_fail_loc & OBD_FAIL_SKIP) {
                if (atomic_inc_return(&obd_fail_count) <= obd_fail_val)
                        return 0;
        }

        /* Fail obd_fail_val times, overridden by FAIL_ONCE */
        if (obd_fail_loc & OBD_FAIL_SOME &&
            (!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
                int count = atomic_inc_return(&obd_fail_count);

                if (count >= obd_fail_val) {
                        set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
                        atomic_set(&obd_fail_count, 0);
                        /* we lost the race to increase obd_fail_count */
                        if (count > obd_fail_val)
                                return 0;
                }
        }

        if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
            (value & OBD_FAIL_ONCE))
                set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);

        /* Lost the race to set OBD_FAILED_BIT. */
        if (test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
                /* If OBD_FAIL_ONCE is valid, only one process can fail,
                 * otherwise multiple processes can fail at the same time. */
                if (obd_fail_loc & OBD_FAIL_ONCE)
                        return 0;
        }

        switch (set) {
                case OBD_FAIL_LOC_NOSET:
                        break;
                case OBD_FAIL_LOC_ORSET:
                        obd_fail_loc |= value & ~(OBD_FAILED | OBD_FAIL_ONCE);
                        break;
                case OBD_FAIL_LOC_RESET:
                        obd_fail_loc = value;
                        break;
                default:
                        LASSERTF(0, "called with bad set %u\n", set);
                        break;
        }

        return 1;
}
EXPORT_SYMBOL(__obd_fail_check_set);

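/*
 * __obd_fail_timeout_set() - like __obd_fail_check_set(), but when the fail
 * point fires the current task sleeps uninterruptibly for @ms milliseconds
 * before returning.
 */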
int __obd_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
{
        int ret = 0;

        ret = __obd_fail_check_set(id, value, set);
        if (ret) {
                CERROR("obd_fail_timeout id %x sleeping for %dms\n",
                       id, ms);
                set_current_state(TASK_UNINTERRUPTIBLE);
                cfs_schedule_timeout(CFS_TASK_UNINT,
                                     cfs_time_seconds(ms) / 1000);
                set_current_state(TASK_RUNNING);
                CERROR("obd_fail_timeout id %x awake\n", id);
        }
        return ret;
}
EXPORT_SYMBOL(__obd_fail_timeout_set);

#ifdef LPROCFS
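/*
 * lprocfs_counter_add() - add @amount to counter @idx of @stats on the
 * current CPU (per-client stats only allocate a single CPU area, so they
 * always use index 0).  Counters configured with LPROCFS_CNTR_AVGMINMAX
 * also track sum, min and max, and LPROCFS_CNTR_STDDEV additionally keeps
 * a sum of squares.
 */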
void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
                         long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated only for a
         * single CPU area, so smp_id should always be 0. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        percpu_cntr->lc_count++;

        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                percpu_cntr->lc_sum += amount;
                if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
                        percpu_cntr->lc_sumsquare += (__s64)amount * amount;
                if (amount < percpu_cntr->lc_min)
                        percpu_cntr->lc_min = amount;
                if (amount > percpu_cntr->lc_max)
                        percpu_cntr->lc_max = amount;
        }
        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_add);

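/*
 * lprocfs_counter_sub() - subtract @amount from the running sum of counter
 * @idx of @stats; only counters configured with LPROCFS_CNTR_AVGMINMAX
 * maintain a sum.
 */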
void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
                         long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated only for a
         * single CPU area, so smp_id should always be 0. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX)
                percpu_cntr->lc_sum -= amount;
        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_sub);
#endif  /* LPROCFS */

EXPORT_SYMBOL(obd_fail_loc);
EXPORT_SYMBOL(obd_alloc_fail_rate);
EXPORT_SYMBOL(obd_fail_val);