LU-2158 lvfs: move obdclass related functions to obdclass
[fs/lustre-release.git] / lustre/lvfs/lvfs_lib.c
index 95b59d4..31c2d51 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  */
 #ifdef __KERNEL__
 #include <linux/module.h>
-#include <linux/random.h>
 #else
 #include <liblustre.h>
 #endif
 #include <lustre_lib.h>
 #include <lprocfs_status.h>
 
-unsigned int obd_fail_val = 0;
-unsigned long obd_fail_loc = 0;
-unsigned int obd_alloc_fail_rate = 0;
-
-int obd_alloc_fail(const void *ptr, const char *name, const char *type,
-                   size_t size, const char *file, int line)
-{
-        if (ptr == NULL ||
-            (ll_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
-                CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
-                       ptr ? "force " :"", type, name, (__u64)size, file,
-                       line);
-                CERROR(LPU64" total bytes and "LPU64" total pages "
-                       "("LPU64" bytes) allocated by Lustre, "
-                       "%d total bytes by LNET\n",
-                       obd_memory_sum(),
-                       obd_pages_sum() << CFS_PAGE_SHIFT,
-                       obd_pages_sum(),
-                       atomic_read(&libcfs_kmemory));
-                return 1;
-        }
-        return 0;
-}
-EXPORT_SYMBOL(obd_alloc_fail);
-
-int __obd_fail_check_set(__u32 id, __u32 value, int set)
-{
-        static atomic_t obd_fail_count = ATOMIC_INIT(0);
-
-        LASSERT(!(id & OBD_FAIL_ONCE));
-
-        if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
-            (OBD_FAILED | OBD_FAIL_ONCE)) {
-                atomic_set(&obd_fail_count, 0); /* paranoia */
-                return 0;
-        }
-
-        /* Fail 1/obd_fail_val times */
-        if (obd_fail_loc & OBD_FAIL_RAND) {
-                if (obd_fail_val < 2 || ll_rand() % obd_fail_val > 0)
-                        return 0;
-        }
-
-        /* Skip the first obd_fail_val, then fail */
-        if (obd_fail_loc & OBD_FAIL_SKIP) {
-                if (atomic_inc_return(&obd_fail_count) <= obd_fail_val)
-                        return 0;
-        }
-
-        /* Fail obd_fail_val times, overridden by FAIL_ONCE */
-        if (obd_fail_loc & OBD_FAIL_SOME &&
-            (!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
-                int count = atomic_inc_return(&obd_fail_count);
-
-                if (count >= obd_fail_val) {
-                        set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
-                        atomic_set(&obd_fail_count, 0);
-                        /* we are lost race to increase obd_fail_count */
-                        if (count > obd_fail_val)
-                                return 0;
-                }
-        }
-
-        if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
-            (value & OBD_FAIL_ONCE))
-                set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
-
-        /* Lost race to set OBD_FAILED_BIT. */
-        if (test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
-                /* If OBD_FAIL_ONCE is valid, only one process can fail,
-                 * otherwise multi-process can fail at the same time. */
-                if (obd_fail_loc & OBD_FAIL_ONCE)
-                        return 0;
-        }
-
-        switch (set) {
-                case OBD_FAIL_LOC_NOSET:
-                        break;
-                case OBD_FAIL_LOC_ORSET:
-                        obd_fail_loc |= value & ~(OBD_FAILED | OBD_FAIL_ONCE);
-                        break;
-                case OBD_FAIL_LOC_RESET:
-                        obd_fail_loc = value;
-                        break;
-                default:
-                        LASSERTF(0, "called with bad set %u\n", set);
-                        break;
-        }
-
-        return 1;
-}
-EXPORT_SYMBOL(__obd_fail_check_set);
-
-int __obd_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
-{
-        int ret = 0;
-
-        ret = __obd_fail_check_set(id, value, set);
-        if (ret) {
-                CERROR("obd_fail_timeout id %x sleeping for %dms\n",
-                       id, ms);
-                cfs_schedule_timeout(CFS_TASK_UNINT,
-                                     cfs_time_seconds(ms) / 1000);
-                set_current_state(CFS_TASK_RUNNING);
-                CERROR("obd_fail_timeout id %x awake\n", id);
-        }
-        return ret;
-}
-EXPORT_SYMBOL(__obd_fail_timeout_set);
-
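
For context: the fail-loc machinery removed above (it moves to obdclass with this
patch) is normally reached through wrapper macros rather than by calling
__obd_fail_check_set() directly. A hedged usage sketch follows; OBD_FAIL_CHECK()
and the OBD_FAIL_ONCE bit exist in this era's obd_support.h, but the fail-loc id
and the handler below are invented for illustration.

#include <obd_support.h>        /* OBD_FAIL_CHECK(), obd_fail_loc */

/* hypothetical id for illustration; real ids live in obd_support.h */
#define OBD_FAIL_EXAMPLE_NET    0x123

static int example_req_handler(void)
{
        /* fires when the site is armed from userspace, e.g.
         * "lctl set_param fail_loc=0x80000123"; OR-ing in OBD_FAIL_ONCE
         * (the 0x80000000 bit) makes __obd_fail_check_set() trip once */
        if (OBD_FAIL_CHECK(OBD_FAIL_EXAMPLE_NET))
                return -EIO;    /* simulate the injected failure */

        return 0;
}
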
 #ifdef LPROCFS
-void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
-                                       long amount)
+void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
 {
-        struct lprocfs_counter *percpu_cntr;
-        int smp_id;
-
-        if (stats == NULL)
-                return;
-
-        /* With per-client stats, statistics are allocated only for
-         * single CPU area, so the smp_id should be 0 always. */
-        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
-
-        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
-        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
-        percpu_cntr->lc_count++;
-
-        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
-                /* see comment in lprocfs_counter_sub */
-                LASSERT(!cfs_in_interrupt());
-
-                percpu_cntr->lc_sum += amount;
-                if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
-                        percpu_cntr->lc_sumsquare += (__s64)amount * amount;
-                if (amount < percpu_cntr->lc_min)
-                        percpu_cntr->lc_min = amount;
-                if (amount > percpu_cntr->lc_max)
-                        percpu_cntr->lc_max = amount;
-        }
-        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
-        lprocfs_stats_unlock(stats);
+       struct lprocfs_counter          *percpu_cntr;
+       struct lprocfs_counter_header   *header;
+       int                             smp_id;
+       unsigned long                   flags = 0;
+
+       if (stats == NULL)
+               return;
+
+       /* With per-client stats, statistics are allocated only for a
+        * single CPU area, so smp_id should always be 0. */
+       smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
+       if (smp_id < 0)
+               return;
+
+       header = &stats->ls_cnt_header[idx];
+       percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
+       percpu_cntr->lc_count++;
+
+       if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+               /*
+                * lprocfs_counter_add() can be called in interrupt context,
+                * because a memory allocation can trigger the memory
+                * shrinker ldlm_pool_shrink(), which in turn calls
+                * lprocfs_counter_add().  LU-1727.
+                *
+                * Only obd_memory uses the LPROCFS_STATS_FLAG_IRQ_SAFE
+                * flag, because it needs accurate counting; otherwise the
+                * memory leak check would report errors.
+                */
+               if (cfs_in_interrupt() &&
+                   (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+                       percpu_cntr->lc_sum_irq += amount;
+               else
+                       percpu_cntr->lc_sum += amount;
+
+               if (header->lc_config & LPROCFS_CNTR_STDDEV)
+                       percpu_cntr->lc_sumsquare += (__s64)amount * amount;
+               if (amount < percpu_cntr->lc_min)
+                       percpu_cntr->lc_min = amount;
+               if (amount > percpu_cntr->lc_max)
+                       percpu_cntr->lc_max = amount;
+       }
+       lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
 }
 EXPORT_SYMBOL(lprocfs_counter_add);
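
As a reading aid for the new lprocfs_counter_add() above, here is a hedged
sketch of how a caller typically sets up and drives a stats block; the
allocation and init signatures follow lprocfs_status.h from this era of the
tree, while the counter indices and names are invented.

#include <lprocfs_status.h>

enum { EX_READ_BYTES = 0, EX_LAST };    /* invented counter indices */

static struct lprocfs_stats *ex_stats;

static int ex_stats_setup(void)
{
        /* flags == 0: plain per-CPU stats; LPROCFS_STATS_FLAG_IRQ_SAFE is
         * reserved for counters also updated from interrupt context
         * (obd_memory is the only such user mentioned in this patch) */
        ex_stats = lprocfs_alloc_stats(EX_LAST, 0);
        if (ex_stats == NULL)
                return -ENOMEM;

        lprocfs_counter_init(ex_stats, EX_READ_BYTES,
                             LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
        return 0;
}

static void ex_account_read(long nob)
{
        /* bumps lc_count and folds nob into lc_sum/lc_min/lc_max
         * for the current CPU's slot */
        lprocfs_counter_add(ex_stats, EX_READ_BYTES, nob);
}
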
 
-void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
-                                       long amount)
+void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
 {
-        struct lprocfs_counter *percpu_cntr;
-        int smp_id;
-
-        if (stats == NULL)
-                return;
-
-        /* With per-client stats, statistics are allocated only for
-         * single CPU area, so the smp_id should be 0 always. */
-        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);
-
-        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
-        atomic_inc(&percpu_cntr->lc_cntl.la_entry);
-        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
-                /*
-                 * currently lprocfs_count_add() can only be called in thread
-                 * context; sometimes we use RCU callbacks to free memory
-                 * which calls lprocfs_counter_sub(), and RCU callbacks may
-                 * execute in softirq context - right now that's the only case
-                 * we're in softirq context here, use separate counter for that.
-                 * bz20650.
-                 */
-                if (cfs_in_interrupt())
-                        percpu_cntr->lc_sum_irq -= amount;
-                else
-                        percpu_cntr->lc_sum -= amount;
-        }
-        atomic_inc(&percpu_cntr->lc_cntl.la_exit);
-        lprocfs_stats_unlock(stats);
+       struct lprocfs_counter          *percpu_cntr;
+       struct lprocfs_counter_header   *header;
+       int                             smp_id;
+       unsigned long                   flags = 0;
+
+       if (stats == NULL)
+               return;
+
+       /* With per-client stats, statistics are allocated only for a
+        * single CPU area, so smp_id should always be 0. */
+       smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
+       if (smp_id < 0)
+               return;
+
+       header = &stats->ls_cnt_header[idx];
+       percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
+       if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+               /*
+                * Sometimes we free memory from RCU callbacks, which call
+                * lprocfs_counter_sub(), and RCU callbacks may execute in
+                * softirq context.  Right now that is the only way we end
+                * up in softirq context here, so use a separate counter
+                * for that case.  bz20650.
+                *
+                * Only obd_memory uses the LPROCFS_STATS_FLAG_IRQ_SAFE
+                * flag, because it needs accurate counting; otherwise the
+                * memory leak check would report errors.
+                */
+               if (cfs_in_interrupt() &&
+                   (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+                       percpu_cntr->lc_sum_irq -= amount;
+               else
+                       percpu_cntr->lc_sum -= amount;
+       }
+       lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
 }
 EXPORT_SYMBOL(lprocfs_counter_sub);
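
When these split accumulators are read back, lc_sum_irq has to be folded into
lc_sum. The tree does this in its collection helpers (lprocfs_stats_collect()
and friends); a minimal hand-rolled sketch of the aggregation, reusing
lprocfs_stats_counter_get() from above, looks like this:

/* minimal sketch: fold the per-CPU partial sums of counter idx into one
 * total; locking is omitted here, the real collector holds the stats
 * lock via lprocfs_stats_lock() while it reads */
static __s64 ex_counter_sum(struct lprocfs_stats *stats, int idx)
{
        struct lprocfs_counter  *cntr;
        __s64                   sum = 0;
        unsigned int            cpu;

        for (cpu = 0; cpu < stats->ls_biggest_alloc_num; cpu++) {
                if (stats->ls_percpu[cpu] == NULL)
                        continue;       /* slot never faulted in */
                cntr = lprocfs_stats_counter_get(stats, cpu, idx);
                /* lc_sum is updated from thread context, lc_sum_irq
                 * from softirq context; the true total is their sum */
                sum += cntr->lc_sum + cntr->lc_sum_irq;
        }
        return sum;
}
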
-#endif  /* LPROCFS */
 
-EXPORT_SYMBOL(obd_fail_loc);
-EXPORT_SYMBOL(obd_alloc_fail_rate);
-EXPORT_SYMBOL(obd_fail_val);
+int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
+{
+       struct lprocfs_counter  *cntr;
+       unsigned int            percpusize;
+       int                     rc = -ENOMEM;
+       unsigned long           flags = 0;
+       int                     i;
+
+       LASSERT(stats->ls_percpu[cpuid] == NULL);
+       LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
+
+       percpusize = lprocfs_stats_counter_size(stats);
+       LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
+       if (stats->ls_percpu[cpuid] != NULL) {
+               rc = 0;
+               if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
+                       if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+                               spin_lock_irqsave(&stats->ls_lock, flags);
+                       else
+                               spin_lock(&stats->ls_lock);
+                       if (stats->ls_biggest_alloc_num <= cpuid)
+                               stats->ls_biggest_alloc_num = cpuid + 1;
+                       if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+                               spin_unlock_irqrestore(&stats->ls_lock, flags);
+                       else
+                               spin_unlock(&stats->ls_lock);
+               }
+               /* initialize the non-zero fields of the ls_percpu[cpuid]
+                * counters */
+               for (i = 0; i < stats->ls_num; ++i) {
+                       cntr = lprocfs_stats_counter_get(stats, cpuid, i);
+                       cntr->lc_min = LC_MIN_INIT;
+               }
+       }
+
+       return rc;
+}
+EXPORT_SYMBOL(lprocfs_stats_alloc_one);
+#endif  /* LPROCFS */
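
One detail of lprocfs_stats_alloc_one() worth calling out: ls_biggest_alloc_num
is maintained with a double-checked update, so the common path never takes
ls_lock, and the re-test under the lock keeps concurrent allocators from losing
an update. The same pattern in isolation (function name invented, logic copied
from the function above):

/* minimal sketch of the double-checked max-update pattern used in
 * lprocfs_stats_alloc_one() */
static void ex_update_biggest(struct lprocfs_stats *stats, unsigned int cpuid)
{
        unsigned long flags = 0;

        if (likely(stats->ls_biggest_alloc_num > cpuid))
                return;                 /* fast path, no lock taken */

        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
                spin_lock_irqsave(&stats->ls_lock, flags);
        else
                spin_lock(&stats->ls_lock);

        /* re-check under the lock: another CPU may have raised it */
        if (stats->ls_biggest_alloc_num <= cpuid)
                stats->ls_biggest_alloc_num = cpuid + 1;

        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
                spin_unlock_irqrestore(&stats->ls_lock, flags);
        else
                spin_unlock(&stats->ls_lock);
}
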