1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/lvfs/lvfs_lib.c
38 * Lustre filesystem abstraction routines
40 * Author: Andreas Dilger <adilger@clusterfs.com>
43 #include <linux/module.h>
45 #include <liblustre.h>
47 #include <lustre_lib.h>
48 #include <lprocfs_status.h>
/* Failure-injection rate for OBD allocations.  When non-zero,
 * obd_alloc_fail() below forces a pseudo-random fraction of allocations
 * to fail (draw compared against OBD_ALLOC_FAIL_MASK).  Tunable; default
 * 0 disables injection.  Exported at the bottom of this file. */
50 unsigned int obd_alloc_fail_rate = 0;
/*
 * obd_alloc_fail() - allocation failure injector for the OBD_ALLOC path.
 *
 * When obd_alloc_fail_rate is non-zero and a pseudo-random draw
 * (cfs_rand() masked with OBD_ALLOC_FAIL_MASK) falls below the configured
 * rate, the allocation is reported as failed via CERROR together with the
 * global Lustre/LNET memory accounting, to help correlate OOM behavior.
 *
 * @ptr   allocation result; non-NULL selects the "force " message prefix
 *        (injected failure of an otherwise successful allocation).
 * @name  symbolic name of the allocated object, for the log message.
 * @type  allocation-type string, printed before "alloc".
 * @size  requested size in bytes.
 * @file  source file of the allocation site.
 * @line  source line of the allocation site.
 *
 * NOTE(review): this chunk is missing interleaved source lines (the opening
 * brace, the first half of the if-condition, and the return statements), so
 * the return-value contract cannot be confirmed here; presumably non-zero
 * means "treat the allocation as failed" -- verify against lustre_lib.h.
 */
52 int obd_alloc_fail(const void *ptr, const char *name, const char *type,
53 size_t size, const char *file, int line)
56 (cfs_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
/* Identify which allocation was (force-)failed and where. */
57 CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
58 ptr ? "force " :"", type, name, (__u64)size, file,
/* Dump global memory accounting alongside the failure. */
60 CERROR(LPU64" total bytes and "LPU64" total pages "
61 "("LPU64" bytes) allocated by Lustre, "
62 "%d total bytes by LNET\n",
/* Convert page count to bytes for the log line. */
64 obd_pages_sum() << CFS_PAGE_SHIFT,
66 cfs_atomic_read(&libcfs_kmemory));
71 EXPORT_SYMBOL(obd_alloc_fail);
/*
 * lprocfs_counter_add() - add @amount to the per-CPU /proc counter @idx
 * in @stats.
 *
 * Takes the stats lock for this CPU slot via lprocfs_stats_lock(), bumps
 * the entry/exit atomics around the update (unless the stats are
 * single-area NOPERCPU), and maintains count/sum/min/max (and sum of
 * squares when LPROCFS_CNTR_STDDEV is configured).
 *
 * NOTE(review): interleaved source lines (parameter tail with @amount, the
 * opening brace, smp_id declaration, and an early-out for unallocated
 * percpu slots) are not visible in this chunk; code is kept verbatim.
 */
74 void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
77 struct lprocfs_counter *percpu_cntr;
79 unsigned long flags = 0;
84 /* With per-client stats, statistics are allocated only for
85 * single CPU area, so the smp_id should be 0 always. */
86 smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
90 percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
/* Entry/exit atomics let readers detect a concurrent update in flight. */
91 if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
92 cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
93 percpu_cntr->lc_count++;
95 if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
96 /* see comment in lprocfs_counter_sub */
97 LASSERT(!cfs_in_interrupt());
99 percpu_cntr->lc_sum += amount;
100 if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
101 percpu_cntr->lc_sumsquare += (__s64)amount * amount;
102 if (amount < percpu_cntr->lc_min)
103 percpu_cntr->lc_min = amount;
104 if (amount > percpu_cntr->lc_max)
105 percpu_cntr->lc_max = amount;
107 if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
108 cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
109 lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
111 EXPORT_SYMBOL(lprocfs_counter_add);
/*
 * lprocfs_counter_sub() - subtract @amount from the per-CPU /proc counter
 * @idx in @stats.
 *
 * Mirror of lprocfs_counter_add() for decrements.  Unlike _add(), this may
 * run from softirq context (RCU callbacks freeing memory), so AVGMINMAX
 * subtraction in interrupt context goes to the separate lc_sum_irq field
 * rather than lc_sum.
 *
 * NOTE(review): interleaved source lines (opening brace, smp_id
 * declaration, early-out for unallocated slots, and the else keyword
 * before line 143) are not visible in this chunk; code is kept verbatim.
 */
113 void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
115 struct lprocfs_counter *percpu_cntr;
117 unsigned long flags = 0;
122 /* With per-client stats, statistics are allocated only for
123 * single CPU area, so the smp_id should be 0 always. */
124 smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
128 percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
/* Entry/exit atomics let readers detect a concurrent update in flight. */
129 if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
130 cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
131 if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
133 * currently lprocfs_count_add() can only be called in thread
134 * context; sometimes we use RCU callbacks to free memory
135 * which calls lprocfs_counter_sub(), and RCU callbacks may
136 * execute in softirq context - right now that's the only case
137 * we're in softirq context here, use separate counter for that.
/* Softirq (RCU callback) path: keep its sum apart from thread-context sum. */
140 if (cfs_in_interrupt())
141 percpu_cntr->lc_sum_irq -= amount;
143 percpu_cntr->lc_sum -= amount;
145 if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
146 cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
147 lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
149 EXPORT_SYMBOL(lprocfs_counter_sub);
/*
 * lprocfs_stats_alloc_one() - lazily allocate the per-CPU counter area for
 * CPU slot @idx of @stats.
 *
 * Slot 0 is allocated statically by lprocfs_alloc_stats(); this routine
 * fills in a later slot on first use with an atomic (non-sleeping)
 * allocation, raises ls_biggest_alloc_num under ls_lock so readers know
 * how many slots to scan, and seeds the new area from the slot-0 template.
 * Must not be called for NOPERCPU stats (asserted).
 *
 * NOTE(review): interleaved source lines (opening brace, "else" keywords
 * around lines 171-172 and 178-179, the closing of the irqrestore call,
 * the memcpy size argument, and the return statement) are not visible in
 * this chunk; the return-value contract (presumably 0 on success, negative
 * errno on allocation failure) should be verified against the header.
 */
151 int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int idx)
153 unsigned int percpusize;
155 unsigned long flags = 0;
157 /* the 1st percpu entry was statically allocated in
158 * lprocfs_alloc_stats() */
159 LASSERT(idx != 0 && stats->ls_percpu[0] != NULL);
160 LASSERT(stats->ls_percpu[idx] == NULL);
161 LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
/* Size of one per-CPU area: header plus ls_num counters, cache aligned. */
163 percpusize = CFS_L1_CACHE_ALIGN(offsetof(struct lprocfs_percpu,
164 lp_cntr[stats->ls_num]));
/* Atomic allocation: this path may run where sleeping is not allowed. */
165 OBD_ALLOC_GFP(stats->ls_percpu[idx], percpusize, CFS_ALLOC_ATOMIC);
166 if (stats->ls_percpu[idx] != NULL) {
/* Double-checked under ls_lock: unlocked test, then locked re-test. */
168 if (unlikely(stats->ls_biggest_alloc_num <= idx)) {
/* IRQ-safe stats must disable interrupts while holding ls_lock. */
169 if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
170 cfs_spin_lock_irqsave(&stats->ls_lock, flags);
172 cfs_spin_lock(&stats->ls_lock);
173 if (stats->ls_biggest_alloc_num <= idx)
174 stats->ls_biggest_alloc_num = idx + 1;
175 if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
176 cfs_spin_unlock_irqrestore(&stats->ls_lock,
179 cfs_spin_unlock(&stats->ls_lock);
183 /* initialize the ls_percpu[idx] by copying the 0th template
185 memcpy(stats->ls_percpu[idx], stats->ls_percpu[0],
191 EXPORT_SYMBOL(lprocfs_stats_alloc_one);
/* Export the failure-injection tunable (defined at the top of this file)
 * so other modules can read/set it. */
194 EXPORT_SYMBOL(obd_alloc_fail_rate);