4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LNET
34 #define LUSTRE_TRACEFILE_PRIVATE
36 #include <libcfs/libcfs.h>
37 #include "tracefile.h"
39 /* percentage shares of the total debug memory given to each tcd type */
40 static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
41 80, /* 80% pages for CFS_TCD_TYPE_PROC */
42 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
43 10 /* 10% pages for CFS_TCD_TYPE_IRQ */
/* per-CPU scratch buffers, one per tcd type, used to format console output */
46 char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
/* guards the tracefile configuration; see the lock/unlock wrappers below */
48 static DECLARE_RWSEM(cfs_tracefile_sem);
50 int cfs_tracefile_init_arch()
/*
 * Allocate the arch-specific tracing state: one cfs_trace_data array per
 * tcd type (sized for every possible CPU), then one console format buffer
 * per CPU per type.  On any allocation failure everything allocated so
 * far is released via cfs_tracefile_fini_arch() and an error is logged.
 */
54 struct cfs_trace_cpu_data *tcd;
56 /* initialize trace_data */
57 memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
58 for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
60 kmalloc(sizeof(union cfs_trace_data_union) *
61 num_possible_cpus(), GFP_KERNEL);
62 if (cfs_trace_data[i] == NULL)
67 /* arch related info initialized */
68 cfs_tcd_for_each(tcd, i, j) {
69 spin_lock_init(&tcd->tcd_lock);
70 tcd->tcd_pages_factor = pages_factor[i];
/* NOTE(review): literal 3 below presumably equals CFS_TCD_TYPE_MAX — confirm */
75 for (i = 0; i < num_possible_cpus(); i++)
76 for (j = 0; j < 3; j++) {
77 cfs_trace_console_buffers[i][j] =
78 kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
81 if (cfs_trace_console_buffers[i][j] == NULL)
/* error path: release any partial allocations before reporting failure */
88 cfs_tracefile_fini_arch();
89 printk(KERN_ERR "lnet: Not enough memory\n");
93 void cfs_tracefile_fini_arch()
/*
 * Tear down everything cfs_tracefile_init_arch() allocated: every per-CPU
 * console buffer and each per-type trace data array.  Freed pointers are
 * reset to NULL, so the function is safe against repeated calls and
 * against a partially-initialized state (the init error path calls it).
 */
98 for (i = 0; i < num_possible_cpus(); i++)
99 for (j = 0; j < 3; j++)
100 if (cfs_trace_console_buffers[i][j] != NULL) {
101 kfree(cfs_trace_console_buffers[i][j]);
102 cfs_trace_console_buffers[i][j] = NULL;
/* trace data arrays are filled densely from index 0, so stop at first NULL */
105 for (i = 0; cfs_trace_data[i] != NULL; i++) {
106 kfree(cfs_trace_data[i]);
107 cfs_trace_data[i] = NULL;
111 void cfs_tracefile_read_lock()
/* take a shared (reader) hold on the tracefile configuration semaphore */
113 down_read(&cfs_tracefile_sem);
116 void cfs_tracefile_read_unlock()
/* drop the shared (reader) hold taken by cfs_tracefile_read_lock() */
118 up_read(&cfs_tracefile_sem);
121 void cfs_tracefile_write_lock()
/* take an exclusive (writer) hold on the tracefile configuration semaphore */
123 down_write(&cfs_tracefile_sem);
126 void cfs_tracefile_write_unlock()
/* drop the exclusive (writer) hold taken by cfs_tracefile_write_lock() */
128 up_write(&cfs_tracefile_sem);
131 enum cfs_trace_buf_type cfs_trace_buf_idx_get()
/*
 * Map the current execution context to the matching trace buffer type:
 * hard-irq, soft-irq, otherwise normal process context.
 */
134 return CFS_TCD_TYPE_IRQ;
135 else if (in_softirq())
136 return CFS_TCD_TYPE_SOFTIRQ;
138 return CFS_TCD_TYPE_PROC;
142 * The walking argument indicates the locking comes from all tcd types
143 * iterator and we must lock it and disable local irqs to avoid deadlocks
144 * with other interrupt locks that might be happening. See LU-1311
147 int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
148 __acquires(&tcd->tcd_lock)
150 __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
/* pick the weakest lock form that excludes the contexts this tcd serves */
151 if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
152 spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
153 else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
154 spin_lock_bh(&tcd->tcd_lock);
155 else if (unlikely(walking))
156 spin_lock_irq(&tcd->tcd_lock);
158 spin_lock(&tcd->tcd_lock);
162 void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
163 __releases(&tcd->tcd_lock)
/* must mirror cfs_trace_lock_tcd(): release with the matching unlock form */
165 __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
166 if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
167 spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
168 else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
169 spin_unlock_bh(&tcd->tcd_lock);
170 else if (unlikely(walking))
171 spin_unlock_irq(&tcd->tcd_lock);
173 spin_unlock(&tcd->tcd_lock);
176 int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
177 struct cfs_trace_page *tage)
/* a tcd owns a trace page iff the page was filled on that tcd's CPU */
180 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
181 * from here: this will lead to infinite recursion.
183 return tcd->tcd_cpu == tage->cpu;
187 cfs_set_ptldebug_header(struct ptldebug_header *header,
188 struct libcfs_debug_msg_data *msgdata,
/*
 * Fill a ptldebug_header from the message descriptor and the current
 * context: subsystem/mask, CPU id, buffer type, wall-clock timestamp
 * (seconds + microseconds), stack depth, current pid and source line.
 */
191 struct timespec64 ts;
193 ktime_get_real_ts64(&ts);
195 header->ph_subsys = msgdata->msg_subsys;
196 header->ph_mask = msgdata->msg_mask;
197 header->ph_cpu_id = smp_processor_id();
198 header->ph_type = cfs_trace_buf_idx_get();
199 /* y2038 safe since all user space treats this as unsigned, but
200 * will overflow in 2106
202 header->ph_sec = (u32)ts.tv_sec;
/* nanoseconds converted to microseconds for the legacy header layout */
203 header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
204 header->ph_stack = stack;
205 header->ph_pid = current->pid;
206 header->ph_line_num = msgdata->msg_line;
207 header->ph_extern_pid = 0;
212 dbghdr_to_err_string(struct ptldebug_header *hdr)
/* console prefix for error-level messages, selected by subsystem */
214 switch (hdr->ph_subsys) {
220 return "LustreError";
225 dbghdr_to_info_string(struct ptldebug_header *hdr)
/* console prefix for informational messages, selected by subsystem */
227 switch (hdr->ph_subsys) {
237 void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
238 const char *buf, int len, const char *file,
/*
 * Emit one debug message to the kernel console.  The debug mask selects
 * both the message prefix ("Lustre" vs the error string) and the printk
 * severity; plain D_CONSOLE output omits the pid/file/line decoration.
 */
241 char *prefix = "Lustre", *ptype = NULL;
243 if ((mask & D_EMERG) != 0) {
244 prefix = dbghdr_to_err_string(hdr);
246 } else if ((mask & D_ERROR) != 0) {
247 prefix = dbghdr_to_err_string(hdr);
249 } else if ((mask & D_WARNING) != 0) {
250 prefix = dbghdr_to_info_string(hdr);
251 ptype = KERN_WARNING;
252 } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
253 prefix = dbghdr_to_info_string(hdr);
257 if ((mask & D_CONSOLE) != 0) {
/* console-only messages: no pid/file/line location info */
258 printk("%s%s: %.*s", ptype, prefix, len, buf);
260 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
261 hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
267 int cfs_trace_max_debug_mb(void)
/*
 * Maximum debug buffer size in MB: 80% of total RAM, but never less
 * than 512 (MAX, not MIN — small-memory systems still get 512).
 */
269 int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
271 return MAX(512, (total_mb * 80)/100);