/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/lvfs/lvfs_lib.c
 *  Lustre filesystem abstraction routines
 *
 *  Copyright (C) 2007 Cluster File Systems, Inc.
 *   Author: Andreas Dilger <adilger@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/random.h>
#else
#include <liblustre.h>
#endif
#include <lustre_lib.h>
#include <lprocfs_status.h>

__u64 obd_max_pages = 0;
__u64 obd_max_alloc = 0;

#ifdef __KERNEL__
struct lprocfs_stats *obd_memory = NULL;
spinlock_t obd_updatemax_lock = SPIN_LOCK_UNLOCKED;
/* refine later and change to seqlock or similar from libcfs */
#else
__u64 obd_alloc;
__u64 obd_pages;
#endif

unsigned int obd_fail_val = 0;
unsigned int obd_fail_loc = 0;
unsigned int obd_alloc_fail_rate = 0;

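/* Fault-injection hook for memory allocations: returns 1 (after logging the
 * failed request and the current Lustre/LNET memory usage) if the allocation
 * at file:line should be treated as failed, either because @ptr really is
 * NULL or because a random draw fell below obd_alloc_fail_rate; returns 0
 * otherwise. */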
int obd_alloc_fail(const void *ptr, const char *name, const char *type,
                   size_t size, const char *file, int line)
{
        if (ptr == NULL ||
            (ll_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
                CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
                       ptr ? "force " : "", type, name, (__u64)size, file,
                       line);
                CERROR(LPU64" total bytes and "LPU64" total pages "
                       "("LPU64" bytes) allocated by Lustre, "
                       "%d total bytes by LNET\n",
                       obd_memory_sum(),
                       obd_pages_sum(),
                       obd_pages_sum() << CFS_PAGE_SHIFT,
                       atomic_read(&libcfs_kmemory));
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(obd_alloc_fail);

#ifdef __KERNEL__
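/* Record high-water marks of Lustre memory usage: fold the current page and
 * byte totals into obd_max_pages/obd_max_alloc under obd_updatemax_lock. */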
void obd_update_maxusage(void)
{
        __u64 max1, max2;

        max1 = obd_pages_sum();
        max2 = obd_memory_sum();

        spin_lock(&obd_updatemax_lock);
        if (max1 > obd_max_pages)
                obd_max_pages = max1;
        if (max2 > obd_max_alloc)
                obd_max_alloc = max2;
        spin_unlock(&obd_updatemax_lock);
}

__u64 obd_memory_max(void)
{
        __u64 ret;

        spin_lock(&obd_updatemax_lock);
        ret = obd_max_alloc;
        spin_unlock(&obd_updatemax_lock);

        return ret;
}

__u64 obd_pages_max(void)
{
        __u64 ret;

        spin_lock(&obd_updatemax_lock);
        ret = obd_max_pages;
        spin_unlock(&obd_updatemax_lock);

        return ret;
}

EXPORT_SYMBOL(obd_update_maxusage);
EXPORT_SYMBOL(obd_pages_max);
EXPORT_SYMBOL(obd_memory_max);
EXPORT_SYMBOL(obd_memory);

#endif

#ifdef LPROCFS
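/* Return the requested field of counter @lc without taking the stats lock.
 * Updaters bump lc_cntl.la_entry before and la_exit after each modification,
 * so the read is retried if it appears to have raced with an update. */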
__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
                          enum lprocfs_fields_flags field)
{
        __u64 ret = 0;
        int centry;

        if (!lc)
                RETURN(0);
        do {
                centry = atomic_read(&lc->lc_cntl.la_entry);

                switch (field) {
                        case LPROCFS_FIELDS_FLAGS_CONFIG:
                                ret = lc->lc_config;
                                break;
                        case LPROCFS_FIELDS_FLAGS_SUM:
                                ret = lc->lc_sum;
                                break;
                        case LPROCFS_FIELDS_FLAGS_MIN:
                                ret = lc->lc_min;
                                break;
                        case LPROCFS_FIELDS_FLAGS_MAX:
                                ret = lc->lc_max;
                                break;
                        case LPROCFS_FIELDS_FLAGS_AVG:
                                /* half of the min-to-max range, not a true mean */
                                ret = (lc->lc_max - lc->lc_min) / 2;
                                break;
                        case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
                                ret = lc->lc_sumsquare;
                                break;
                        case LPROCFS_FIELDS_FLAGS_COUNT:
                                ret = lc->lc_count;
                                break;
                        default:
                                break;
                }
        } while (centry != atomic_read(&lc->lc_cntl.la_entry) &&
                 centry != atomic_read(&lc->lc_cntl.la_exit));

        RETURN(ret);
}
EXPORT_SYMBOL(lprocfs_read_helper);

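/* Add @amount to counter @idx of @stats on this CPU: bump the event count
 * and, if configured, update the running sum, min, max and sum of squares.
 * The la_entry/la_exit increments bracket the update so readers can detect
 * a concurrent modification. */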
void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated only for
         * single CPU area, so the smp_id should be 0 always. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        percpu_cntr->lc_count++;

        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                percpu_cntr->lc_sum += amount;
                if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
                        percpu_cntr->lc_sumsquare += (__u64)amount * amount;
                if (amount < percpu_cntr->lc_min)
                        percpu_cntr->lc_min = amount;
                if (amount > percpu_cntr->lc_max)
                        percpu_cntr->lc_max = amount;
        }
        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_add);

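/* Subtract @amount from counter @idx of @stats; only the running sum is
 * adjusted, the event count and min/max are left unchanged. */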
void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated only for
         * single CPU area, so the smp_id should be 0 always. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX)
                percpu_cntr->lc_sum -= amount;
        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_sub);
#endif  /* LPROCFS */

EXPORT_SYMBOL(obd_fail_loc);
EXPORT_SYMBOL(obd_alloc_fail_rate);
EXPORT_SYMBOL(obd_fail_val);

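/* Fault-injection check: return 1 if the fail point @id, matched against the
 * location bits of obd_fail_loc, should trigger now.  The OBD_FAIL_ONCE,
 * OBD_FAIL_RAND, OBD_FAIL_SKIP and OBD_FAIL_SOME flags in obd_fail_loc,
 * together with obd_fail_val, select whether it fires every time, once,
 * randomly, after a number of skips, or only a limited number of times. */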
int obd_fail_check(__u32 id)
{
        static int count = 0;
        if (likely((obd_fail_loc & OBD_FAIL_MASK_LOC) !=
                   (id & OBD_FAIL_MASK_LOC)))
                return 0;

        if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
            (OBD_FAILED | OBD_FAIL_ONCE)) {
                count = 0; /* paranoia */
                return 0;
        }

        if (obd_fail_loc & OBD_FAIL_RAND) {
                unsigned int ll_rand(void);
                if (obd_fail_val < 2)
                        return 0;
                if (ll_rand() % obd_fail_val > 0)
                        return 0;
        }

        if (obd_fail_loc & OBD_FAIL_SKIP) {
                count++;
                if (count < obd_fail_val)
                        return 0;
                count = 0;
        }

        /* Overridden by FAIL_ONCE */
        if (obd_fail_loc & OBD_FAIL_SOME) {
                count++;
                if (count >= obd_fail_val) {
                        count = 0;
                        /* Don't fail anymore */
                        obd_fail_loc |= OBD_FAIL_ONCE;
                }
        }

        obd_fail_loc |= OBD_FAILED;
        /* Handle old checks that OR in this */
        if (id & OBD_FAIL_ONCE)
                obd_fail_loc |= OBD_FAIL_ONCE;

        return 1;
}
EXPORT_SYMBOL(obd_fail_check);