* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
*/
#ifdef __KERNEL__
#include <linux/module.h>
-#include <linux/random.h>
#else
#include <liblustre.h>
#endif
size_t size, const char *file, int line)
{
if (ptr == NULL ||
- (ll_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
+ (cfs_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
ptr ? "force " :"", type, name, (__u64)size, file,
line);
obd_memory_sum(),
obd_pages_sum() << CFS_PAGE_SHIFT,
obd_pages_sum(),
- atomic_read(&libcfs_kmemory));
+ cfs_atomic_read(&libcfs_kmemory));
return 1;
}
return 0;
int __obd_fail_check_set(__u32 id, __u32 value, int set)
{
- static atomic_t obd_fail_count = ATOMIC_INIT(0);
+ static cfs_atomic_t obd_fail_count = CFS_ATOMIC_INIT(0);
LASSERT(!(id & OBD_FAIL_ONCE));
if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
(OBD_FAILED | OBD_FAIL_ONCE)) {
- atomic_set(&obd_fail_count, 0); /* paranoia */
+ cfs_atomic_set(&obd_fail_count, 0); /* paranoia */
return 0;
}
/* Fail 1/obd_fail_val times */
if (obd_fail_loc & OBD_FAIL_RAND) {
- if (obd_fail_val < 2 || ll_rand() % obd_fail_val > 0)
+ if (obd_fail_val < 2 || cfs_rand() % obd_fail_val > 0)
return 0;
}
/* Skip the first obd_fail_val, then fail */
if (obd_fail_loc & OBD_FAIL_SKIP) {
- if (atomic_inc_return(&obd_fail_count) <= obd_fail_val)
+ if (cfs_atomic_inc_return(&obd_fail_count) <= obd_fail_val)
return 0;
}
/* Fail obd_fail_val times, overridden by FAIL_ONCE */
if (obd_fail_loc & OBD_FAIL_SOME &&
(!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
- int count = atomic_inc_return(&obd_fail_count);
+ int count = cfs_atomic_inc_return(&obd_fail_count);
if (count >= obd_fail_val) {
- set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
- atomic_set(&obd_fail_count, 0);
+ cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
+ cfs_atomic_set(&obd_fail_count, 0);
/* we are lost race to increase obd_fail_count */
if (count > obd_fail_val)
return 0;
if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
(value & OBD_FAIL_ONCE))
- set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
+ cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
/* Lost race to set OBD_FAILED_BIT. */
- if (test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
+ if (cfs_test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
/* If OBD_FAIL_ONCE is valid, only one process can fail,
* otherwise multi-process can fail at the same time. */
if (obd_fail_loc & OBD_FAIL_ONCE)
if (ret) {
CERROR("obd_fail_timeout id %x sleeping for %dms\n",
id, ms);
- cfs_schedule_timeout(CFS_TASK_UNINT,
- cfs_time_seconds(ms) / 1000);
- set_current_state(CFS_TASK_RUNNING);
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ cfs_time_seconds(ms) / 1000);
+ cfs_set_current_state(CFS_TASK_RUNNING);
CERROR("obd_fail_timeout id %x awake\n", id);
}
return ret;
smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
- atomic_inc(&percpu_cntr->lc_cntl.la_entry);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
percpu_cntr->lc_count++;
if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+ /* see comment in lprocfs_counter_sub */
+ LASSERT(!cfs_in_interrupt());
+
percpu_cntr->lc_sum += amount;
if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
percpu_cntr->lc_sumsquare += (__s64)amount * amount;
if (amount > percpu_cntr->lc_max)
percpu_cntr->lc_max = amount;
}
- atomic_inc(&percpu_cntr->lc_cntl.la_exit);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_add);
smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
- atomic_inc(&percpu_cntr->lc_cntl.la_entry);
- if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX)
- percpu_cntr->lc_sum -= amount;
- atomic_inc(&percpu_cntr->lc_cntl.la_exit);
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
+ if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+ /*
+ * Currently lprocfs_counter_add() can only be called in thread
+ * context; however, we sometimes free memory from RCU callbacks,
+ * and those call lprocfs_counter_sub().  RCU callbacks may run in
+ * softirq context - right now that is the only case in which this
+ * function can execute in softirq context, so use a separate
+ * counter for it. bz20650.
+ */
+ if (cfs_in_interrupt())
+ percpu_cntr->lc_sum_irq -= amount;
+ else
+ percpu_cntr->lc_sum -= amount;
+ }
+ cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
lprocfs_stats_unlock(stats);
}
EXPORT_SYMBOL(lprocfs_counter_sub);