X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Flvfs%2Flvfs_lib.c;h=31c2d512fa6bca9bd5c7a1bbb7bd3d022818a533;hp=8ea2133375966d4ed0db5fb03d19744e24a19adb;hb=21ec386df7314348e9d483008648524192b51054;hpb=ca6dd9a48e7b3ebf5df8a78c8ed65f7676cebf4d;ds=sidebyside

diff --git a/lustre/lvfs/lvfs_lib.c b/lustre/lvfs/lvfs_lib.c
index 8ea2133..31c2d51 100644
--- a/lustre/lvfs/lvfs_lib.c
+++ b/lustre/lvfs/lvfs_lib.c
@@ -1,60 +1,177 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- * lustre/lvfs/lvfs_lib.c
- * Lustre filesystem abstraction routines
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * Copyright (C) 2007 Cluster File Systems, Inc.
- * Author: Andreas Dilger
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lvfs/lvfs_lib.c
+ *
+ * Lustre filesystem abstraction routines
+ *
+ * Author: Andreas Dilger
  */
 #ifdef __KERNEL__
 #include 
-#include 
 #else
 #include 
 #endif
 #include 
+#include 
+
+#ifdef LPROCFS
+void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
+{
+        struct lprocfs_counter *percpu_cntr;
+        struct lprocfs_counter_header *header;
+        int smp_id;
+        unsigned long flags = 0;
+
+        if (stats == NULL)
+                return;
+
+        /* With per-client stats, statistics are allocated only for
+         * single CPU area, so the smp_id should be 0 always. */
+        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
+        if (smp_id < 0)
+                return;
+
+        header = &stats->ls_cnt_header[idx];
+        percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
+        percpu_cntr->lc_count++;
 
-unsigned int obd_fail_val = 0;
-unsigned int obd_fail_loc = 0;
-unsigned int obd_alloc_fail_rate = 0;
+        if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+                /*
+                 * lprocfs_counter_add() can be called in interrupt context,
+                 * as memory allocation could trigger memory shrinker call
+                 * ldlm_pool_shrink(), which calls lprocfs_counter_add().
+                 * LU-1727.
+                 *
+                 * Only obd_memory uses LPROCFS_STATS_FLAG_IRQ_SAFE
+                 * flag, because it needs accurate counting lest memory leak
+                 * check reports error.
+                 */
+                if (cfs_in_interrupt() &&
+                    (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+                        percpu_cntr->lc_sum_irq += amount;
+                else
+                        percpu_cntr->lc_sum += amount;
 
-int obd_alloc_fail(const void *ptr, const char *name, const char *type,
-                   size_t size, const char *file, int line)
+                if (header->lc_config & LPROCFS_CNTR_STDDEV)
+                        percpu_cntr->lc_sumsquare += (__s64)amount * amount;
+                if (amount < percpu_cntr->lc_min)
+                        percpu_cntr->lc_min = amount;
+                if (amount > percpu_cntr->lc_max)
+                        percpu_cntr->lc_max = amount;
+        }
+        lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
+}
+EXPORT_SYMBOL(lprocfs_counter_add);
+
+void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
 {
-        if (ptr == NULL ||
-            (ll_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
-                CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
-                       ptr ? "force " :"", type, name, (__u64)size, file,
-                       line);
-                CERROR(LPU64" total bytes and "LPU64" total pages "
-                       "("LPU64" bytes) allocated by Lustre, "
-                       "%d total bytes by LNET\n",
-                       obd_memory_sum(),
-                       obd_pages_sum() << CFS_PAGE_SHIFT,
-                       obd_pages_sum(),
-                       atomic_read(&libcfs_kmemory));
-                return 1;
-        }
-        return 0;
+        struct lprocfs_counter *percpu_cntr;
+        struct lprocfs_counter_header *header;
+        int smp_id;
+        unsigned long flags = 0;
+
+        if (stats == NULL)
+                return;
+
+        /* With per-client stats, statistics are allocated only for
+         * single CPU area, so the smp_id should be 0 always. */
+        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
+        if (smp_id < 0)
+                return;
+
+        header = &stats->ls_cnt_header[idx];
+        percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
+        if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+                /*
+                 * Sometimes we use RCU callbacks to free memory which calls
+                 * lprocfs_counter_sub(), and RCU callbacks may execute in
+                 * softirq context - right now that's the only case we're in
+                 * softirq context here, use separate counter for that.
+                 * bz20650.
+                 *
+                 * Only obd_memory uses LPROCFS_STATS_FLAG_IRQ_SAFE
+                 * flag, because it needs accurate counting lest memory leak
+                 * check reports error.
+                 */
+                if (cfs_in_interrupt() &&
+                    (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+                        percpu_cntr->lc_sum_irq -= amount;
+                else
+                        percpu_cntr->lc_sum -= amount;
+        }
+        lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
 }
-EXPORT_SYMBOL(obd_alloc_fail);
+EXPORT_SYMBOL(lprocfs_counter_sub);
 
-EXPORT_SYMBOL(obd_fail_loc);
-EXPORT_SYMBOL(obd_alloc_fail_rate);
-EXPORT_SYMBOL(obd_fail_val);
+int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
+{
+        struct lprocfs_counter *cntr;
+        unsigned int percpusize;
+        int rc = -ENOMEM;
+        unsigned long flags = 0;
+        int i;
+
+        LASSERT(stats->ls_percpu[cpuid] == NULL);
+        LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
+
+        percpusize = lprocfs_stats_counter_size(stats);
+        LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
+        if (stats->ls_percpu[cpuid] != NULL) {
+                rc = 0;
+                if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
+                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+                                spin_lock_irqsave(&stats->ls_lock, flags);
+                        else
+                                spin_lock(&stats->ls_lock);
+                        if (stats->ls_biggest_alloc_num <= cpuid)
+                                stats->ls_biggest_alloc_num = cpuid + 1;
+                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+                                spin_unlock_irqrestore(&stats->ls_lock, flags);
+                        } else {
+                                spin_unlock(&stats->ls_lock);
+                        }
+                }
+                /* initialize the ls_percpu[cpuid] non-zero counter */
+                for (i = 0; i < stats->ls_num; ++i) {
+                        cntr = lprocfs_stats_counter_get(stats, cpuid, i);
+                        cntr->lc_min = LC_MIN_INIT;
+                }
+        }
+
+        return rc;
+}
+EXPORT_SYMBOL(lprocfs_stats_alloc_one);
+#endif /* LPROCFS */
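
The accumulation done by lprocfs_counter_add()/lprocfs_counter_sub() above is easier to see outside the kernel. The following is a minimal userspace sketch of that logic, assuming a single counter rather than the per-CPU lprocfs_counter slots taken under lprocfs_stats_lock(); the names model_counter and model_counter_add are invented for illustration and are not part of the Lustre API, and the in_irq flag stands in for the cfs_in_interrupt() && LPROCFS_STATS_FLAG_IRQ_SAFE test in the patch.

/*
 * Standalone model of the per-event update: count every event, and under
 * AVGMINMAX track sum, min, max and (optionally) sum of squares.  Amounts
 * arriving from interrupt-like context go to a separate sum_irq field, as
 * lc_sum_irq does in the patch above.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define CNTR_AVGMINMAX 0x01   /* analogous to LPROCFS_CNTR_AVGMINMAX */
#define CNTR_STDDEV    0x02   /* analogous to LPROCFS_CNTR_STDDEV */

struct model_counter {
        unsigned int config;  /* which statistics to maintain */
        uint64_t count;       /* number of events (lc_count) */
        int64_t sum;          /* process-context sum (lc_sum) */
        int64_t sum_irq;      /* interrupt-context sum (lc_sum_irq) */
        int64_t min, max;     /* extremes (lc_min/lc_max) */
        int64_t sumsquare;    /* for stddev (lc_sumsquare) */
};

static void model_counter_add(struct model_counter *c, long amount, bool in_irq)
{
        c->count++;
        if (!(c->config & CNTR_AVGMINMAX))
                return;
        if (in_irq)
                c->sum_irq += amount;
        else
                c->sum += amount;
        if (c->config & CNTR_STDDEV)
                c->sumsquare += (int64_t)amount * amount;
        if (amount < c->min)
                c->min = amount;
        if (amount > c->max)
                c->max = amount;
}

int main(void)
{
        /* min starts at the largest value, playing the role of LC_MIN_INIT */
        struct model_counter c = { .config = CNTR_AVGMINMAX | CNTR_STDDEV,
                                   .min = INT64_MAX, .max = INT64_MIN };
        long samples[] = { 4096, 8192, 512 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                model_counter_add(&c, samples[i], /* in_irq = */ i == 2);

        printf("count=%llu sum=%lld sum_irq=%lld min=%lld max=%lld sumsq=%lld\n",
               (unsigned long long)c.count, (long long)c.sum,
               (long long)c.sum_irq, (long long)c.min, (long long)c.max,
               (long long)c.sumsquare);
        return 0;
}

Keeping interrupt-context amounts in a separate field follows the reasoning in the patch comments: lprocfs_counter_add() can run from the memory shrinker path via ldlm_pool_shrink() (LU-1727), lprocfs_counter_sub() can run from RCU callbacks in softirq context (bz20650), and only obd_memory, which must stay exactly balanced for leak checking, sets LPROCFS_STATS_FLAG_IRQ_SAFE.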
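
lprocfs_stats_alloc_one() above combines lazy per-CPU allocation with a double-checked update of ls_biggest_alloc_num, presumably so that readers only need to visit slots below that high-water mark. The sketch below models just that pattern in userspace, assuming a plain mutex instead of the spin_lock()/spin_lock_irqsave() choice the patch makes based on LPROCFS_STATS_FLAG_IRQ_SAFE; struct model_stats and model_stats_alloc_one are invented names, not the Lustre API.

/*
 * Userspace model of the allocation path: allocate the per-CPU slot on
 * first use, raise the high-water mark under a lock with a re-check (the
 * first test is done without the lock, as in the patch), then initialize
 * the fields that must not start at zero.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <pthread.h>

#define MODEL_MAX_CPUS     8
#define MODEL_NUM_COUNTERS 4
#define MODEL_MIN_INIT     INT64_MAX    /* stands in for LC_MIN_INIT */

struct model_stats {
        int64_t *percpu[MODEL_MAX_CPUS];  /* lazily allocated counter arrays */
        unsigned int biggest_alloc_num;   /* like ls_biggest_alloc_num */
        pthread_mutex_t lock;             /* stands in for ls_lock */
};

static int model_stats_alloc_one(struct model_stats *stats, unsigned int cpuid)
{
        int i;

        stats->percpu[cpuid] = calloc(MODEL_NUM_COUNTERS, sizeof(int64_t));
        if (stats->percpu[cpuid] == NULL)
                return -1;                /* the patch returns -ENOMEM here */

        /* Double-checked raise of the high-water mark. */
        if (stats->biggest_alloc_num <= cpuid) {
                pthread_mutex_lock(&stats->lock);
                if (stats->biggest_alloc_num <= cpuid)
                        stats->biggest_alloc_num = cpuid + 1;
                pthread_mutex_unlock(&stats->lock);
        }

        /* Initialize the non-zero fields, as the patch does for lc_min. */
        for (i = 0; i < MODEL_NUM_COUNTERS; i++)
                stats->percpu[cpuid][i] = MODEL_MIN_INIT;
        return 0;
}

int main(void)
{
        struct model_stats stats;
        unsigned int c;

        memset(&stats, 0, sizeof(stats));
        pthread_mutex_init(&stats.lock, NULL);

        model_stats_alloc_one(&stats, 3);   /* first event seen on "CPU" 3 */
        model_stats_alloc_one(&stats, 1);
        printf("biggest_alloc_num = %u\n", stats.biggest_alloc_num);  /* prints 4 */

        for (c = 0; c < MODEL_MAX_CPUS; c++)
                free(stats.percpu[c]);
        pthread_mutex_destroy(&stats.lock);
        return 0;
}

In the patch itself the lock flavor depends on the stats flags (IRQ-safe stats take spin_lock_irqsave()/spin_unlock_irqrestore(), everything else takes plain spin_lock()), and the slot is allocated with LIBCFS_ALLOC_ATOMIC(), presumably because the allocation can happen from a context that cannot sleep.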