Whamcloud - gitweb
LU-4214 target: LWP failover should create new export
[fs/lustre-release.git] / libcfs / libcfs / tracefile.h
index 5e7e9b1..3aa8989 100644 (file)
@@ -26,6 +26,8 @@
 /*
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -37,6 +39,7 @@
 
 #include <libcfs/libcfs.h>
 
+#ifdef __KERNEL__
 #if defined(__linux__)
 #include "linux/linux-tracefile.h"
 #elif defined(__WINNT__)
@@ -44,7 +47,9 @@
 #else
 #error Unsupported operating system.
 #endif
-
+#else
+#include "posix/posix-tracefile.h"
+#endif
 /* trace file lock routines */
 
 #define TRACEFILE_NAME_SIZE 1024
@@ -90,7 +95,7 @@ extern void libcfs_unregister_panic_notifier(void);
 extern int  libcfs_panic_in_progress;
 extern int  cfs_trace_max_debug_mb(void);
 
-#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 #define CFS_TRACEFILE_SIZE (500 << 20)
 
@@ -99,7 +104,7 @@ extern int  cfs_trace_max_debug_mb(void);
 /*
  * Private declare for tracefile
  */
-#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 
 #define CFS_TRACEFILE_SIZE (500 << 20)
@@ -117,15 +122,15 @@ union cfs_trace_data_union {
                 * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
                 * tcd_for_each_type_lock
                 */
-               cfs_spinlock_t          tcd_lock;
+               spinlock_t              tcd_lock;
                unsigned long           tcd_lock_flags;
 
                /*
                 * pages with trace records not yet processed by tracefiled.
                 */
-               cfs_list_t              tcd_pages;
+               struct list_head        tcd_pages;
                /* number of pages on ->tcd_pages */
-               unsigned long           tcd_cur_pages;
+               unsigned long           tcd_cur_pages;
 
                /*
                 * pages with trace records already processed by
@@ -137,9 +142,9 @@ union cfs_trace_data_union {
                 * (put_pages_on_daemon_list()). LRU pages from this list are
                 * discarded when list grows too large.
                 */
-               cfs_list_t              tcd_daemon_pages;
+               struct list_head        tcd_daemon_pages;
                /* number of pages on ->tcd_daemon_pages */
-               unsigned long           tcd_cur_daemon_pages;
+               unsigned long           tcd_cur_daemon_pages;
 
                /*
                 * Maximal number of pages allowed on ->tcd_pages and
@@ -171,7 +176,7 @@ union cfs_trace_data_union {
                 * TCD_STOCK_PAGES pagesful are consumed by trace records all
                 * emitted in non-blocking contexts. Which is quite unlikely.
                 */
-               cfs_list_t              tcd_stock_pages;
+               struct list_head        tcd_stock_pages;
                /* number of pages on ->tcd_stock_pages */
                unsigned long           tcd_cur_stock_pages;
 
@@ -181,17 +186,17 @@ union cfs_trace_data_union {
                /* The factors to share debug memory. */
                unsigned short          tcd_pages_factor;
        } tcd;
-       char __pad[CFS_L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
+       char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
 };
 
 #define TCD_MAX_TYPES      8
-extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
+extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS];
 
-#define cfs_tcd_for_each(tcd, i, j)                                       \
-    for (i = 0; cfs_trace_data[i] != NULL; i++)                           \
-        for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);               \
-             j < cfs_num_possible_cpus();                                 \
-             j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
+#define cfs_tcd_for_each(tcd, i, j)                                      \
+    for (i = 0; cfs_trace_data[i] != NULL; i++)                         \
+       for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);               \
+            j < num_possible_cpus();                                     \
+            j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
 
 #define cfs_tcd_for_each_type_lock(tcd, i, cpu)                           \
     for (i = 0; cfs_trace_data[i] &&                                      \
@@ -201,31 +206,23 @@ extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
 struct page_collection {
-       cfs_list_t              pc_pages;
-       /*
-        * spin-lock protecting ->pc_pages. It is taken by smp_call_function()
-        * call-back functions. XXX nikita: Which is horrible: all processors
-        * receive NMI at the same time only to be serialized by this
-        * lock. Probably ->pc_pages should be replaced with an array of
-        * NR_CPUS elements accessed locklessly.
-        */
-       cfs_spinlock_t          pc_lock;
+       struct list_head        pc_pages;
        /*
         * if this flag is set, collect_pages() will spill both
         * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
         * only ->tcd_pages are spilled.
         */
-       int                     pc_want_daemon_pages;
+       int                     pc_want_daemon_pages;
 };
 
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
 struct tracefiled_ctl {
-       cfs_completion_t       tctl_start;
-       cfs_completion_t       tctl_stop;
-       cfs_waitq_t            tctl_waitq;
-       pid_t                  tctl_pid;
-       cfs_atomic_t           tctl_shutdown;
+       struct completion       tctl_start;
+       struct completion       tctl_stop;
+       wait_queue_head_t       tctl_waitq;
+       pid_t                   tctl_pid;
+       atomic_t                tctl_shutdown;
 };
 
 /*
@@ -237,24 +234,24 @@ struct cfs_trace_page {
        /*
         * page itself
         */
-       cfs_page_t          *page;
+       struct page             *page;
        /*
         * linkage into one of the lists in trace_data_union or
         * page_collection
         */
-       cfs_list_t           linkage;
+       struct list_head        linkage;
        /*
         * number of bytes used within this page
         */
-       unsigned int         used;
+       unsigned int            used;
        /*
         * cpu that owns this page
         */
-       unsigned short       cpu;
+       unsigned short          cpu;
        /*
         * type(context) of this page
         */
-       unsigned short       type;
+       unsigned short          type;
 };
 
 extern void cfs_set_ptldebug_header(struct ptldebug_header *header,
@@ -274,45 +271,41 @@ extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
  * (see, for example, linux-tracefile.h).
  */
 
-extern char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
+extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
 extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
 
-static inline char *
-cfs_trace_get_console_buffer(void)
+static inline char *cfs_trace_get_console_buffer(void)
 {
-        unsigned int i = cfs_get_cpu();
-        unsigned int j = cfs_trace_buf_idx_get();
+       unsigned int i = get_cpu();
+       unsigned int j = cfs_trace_buf_idx_get();
 
-        return cfs_trace_console_buffers[i][j];
+       return cfs_trace_console_buffers[i][j];
 }
 
 static inline void
 cfs_trace_put_console_buffer(char *buffer)
 {
-        cfs_put_cpu();
+       put_cpu();
 }
 
-static inline struct cfs_trace_cpu_data *
-cfs_trace_get_tcd(void)
+static inline struct cfs_trace_cpu_data *cfs_trace_get_tcd(void)
 {
        struct cfs_trace_cpu_data *tcd =
-                &(*cfs_trace_data[cfs_trace_buf_idx_get()])[cfs_get_cpu()].tcd;
+               &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
 
        cfs_trace_lock_tcd(tcd, 0);
 
        return tcd;
 }
 
-static inline void
-cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
+static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
 {
        cfs_trace_unlock_tcd(tcd, 0);
-
-       cfs_put_cpu();
+       put_cpu();
 }
 
 int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
-                           cfs_list_t *stock);
+                           struct list_head *stock);
 
 
 int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
@@ -335,8 +328,8 @@ do {                                                                    \
 do {                                                                    \
         __LASSERT(tage != NULL);                                        \
         __LASSERT(tage->page != NULL);                                  \
-        __LASSERT(tage->used <= CFS_PAGE_SIZE);                         \
-        __LASSERT(cfs_page_count(tage->page) > 0);                      \
+	__LASSERT(tage->used <= PAGE_CACHE_SIZE);                         \
+	__LASSERT(page_count(tage->page) > 0);                            \
 } while (0)
 
 #endif /* LUSTRE_TRACEFILE_PRIVATE */