/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE

#include "../../../include/linux/libcfs/libcfs.h"
#include "../tracefile.h"

/* percentage of the total debug memory allotted to each trace buffer type */
static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
	80,  /* 80% pages for CFS_TCD_TYPE_PROC */
	10,  /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
	10   /* 10% pages for CFS_TCD_TYPE_IRQ */
};

char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];

static DECLARE_RWSEM(cfs_tracefile_sem);

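/*
 * Allocate the trace data (one union cfs_trace_data_union per possible CPU
 * for each tcd type) and one console buffer per CPU per type, then set up
 * each tcd's lock, pages factor, type and cpu.  On allocation failure
 * everything allocated so far is freed and -ENOMEM is returned.
 */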
int cfs_tracefile_init_arch(void)
{
	int    i;
	int    j;
	struct cfs_trace_cpu_data *tcd;

	/* initialize trace_data */
	memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
	for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
		cfs_trace_data[i] =
			kmalloc(sizeof(union cfs_trace_data_union) *
				num_possible_cpus(), GFP_KERNEL);
		if (!cfs_trace_data[i])
			goto out;
	}

	/* initialize arch-related fields of each trace_cpu_data */
	cfs_tcd_for_each(tcd, i, j) {
		spin_lock_init(&tcd->tcd_lock);
		tcd->tcd_pages_factor = pages_factor[i];
		tcd->tcd_type = i;
		tcd->tcd_cpu = j;
	}

	for (i = 0; i < num_possible_cpus(); i++)
		for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
			cfs_trace_console_buffers[i][j] =
				kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
					GFP_KERNEL);

			if (!cfs_trace_console_buffers[i][j])
				goto out;
		}

	return 0;

out:
	cfs_tracefile_fini_arch();
	printk(KERN_ERR "lnet: Not enough memory\n");
	return -ENOMEM;
}

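/*
 * Free everything allocated by cfs_tracefile_init_arch() and reset the
 * pointers to NULL, so this is also safe to call on a partially initialized
 * state (it serves as the error path of cfs_tracefile_init_arch() above).
 */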
void cfs_tracefile_fini_arch(void)
{
	int    i;
	int    j;

	for (i = 0; i < num_possible_cpus(); i++)
		for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
			kfree(cfs_trace_console_buffers[i][j]);
			cfs_trace_console_buffers[i][j] = NULL;
		}

	for (i = 0; cfs_trace_data[i]; i++) {
		kfree(cfs_trace_data[i]);
		cfs_trace_data[i] = NULL;
	}
}

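/*
 * cfs_tracefile_sem is an rw_semaphore protecting shared tracefile state.
 * The four helpers below are thin wrappers around it.  An illustrative
 * (sketch-only) reader would bracket its accesses like:
 *
 *	cfs_tracefile_read_lock();
 *	... read shared tracefile state ...
 *	cfs_tracefile_read_unlock();
 */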
void cfs_tracefile_read_lock(void)
{
	down_read(&cfs_tracefile_sem);
}

void cfs_tracefile_read_unlock(void)
{
	up_read(&cfs_tracefile_sem);
}

void cfs_tracefile_write_lock(void)
{
	down_write(&cfs_tracefile_sem);
}

void cfs_tracefile_write_unlock(void)
{
	up_write(&cfs_tracefile_sem);
}

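/*
 * Map the current execution context to a trace buffer type: hard irq,
 * softirq, or process context.
 */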
enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
{
	if (in_irq())
		return CFS_TCD_TYPE_IRQ;
	if (in_softirq())
		return CFS_TCD_TYPE_SOFTIRQ;
	return CFS_TCD_TYPE_PROC;
}

/*
 * The walking argument indicates that the caller is iterating over all
 * tcd types; in that case we must take the lock with local irqs disabled
 * to avoid deadlocks with other interrupt locks that might be taken.
 * See LU-1311 for details.
 */
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
	__acquires(&tcd->tcd_lock)
{
	__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
	if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
		spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
	else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
		spin_lock_bh(&tcd->tcd_lock);
	else if (unlikely(walking))
		spin_lock_irq(&tcd->tcd_lock);
	else
		spin_lock(&tcd->tcd_lock);
	return 1;
}

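/*
 * Drop the lock taken by cfs_trace_lock_tcd(), using the unlock primitive
 * that matches the tcd type and walking mode.
 */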
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
	__releases(&tcd->tcd_lock)
{
	__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
	if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
		spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
	else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
		spin_unlock_bh(&tcd->tcd_lock);
	else if (unlikely(walking))
		spin_unlock_irq(&tcd->tcd_lock);
	else
		spin_unlock(&tcd->tcd_lock);
}

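/*
 * Fill a ptldebug_header from the static message data and the current
 * context: subsystem, debug mask, CPU, trace buffer type, wall-clock
 * timestamp, the caller-supplied stack value, pid and source line number.
 */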
void
cfs_set_ptldebug_header(struct ptldebug_header *header,
			struct libcfs_debug_msg_data *msgdata,
			unsigned long stack)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	header->ph_subsys = msgdata->msg_subsys;
	header->ph_mask = msgdata->msg_mask;
	header->ph_cpu_id = smp_processor_id();
	header->ph_type = cfs_trace_buf_idx_get();
	/* y2038 safe since all user space treats this as unsigned, but
	 * will overflow in 2106
	 */
	header->ph_sec = (u32)ts.tv_sec;
	header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
	header->ph_stack = stack;
	header->ph_pid = current->pid;
	header->ph_line_num = msgdata->msg_line;
	header->ph_extern_pid = 0;
}

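/*
 * The two helpers below choose the console message prefix from the header's
 * subsystem: LNet subsystems are tagged "LNetError"/"LNet", everything else
 * "LustreError"/"Lustre".
 */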
static char *
dbghdr_to_err_string(struct ptldebug_header *hdr)
{
	switch (hdr->ph_subsys) {
	case S_LND:
	case S_LNET:
		return "LNetError";
	default:
		return "LustreError";
	}
}

static char *
dbghdr_to_info_string(struct ptldebug_header *hdr)
{
	switch (hdr->ph_subsys) {
	case S_LND:
	case S_LNET:
		return "LNet";
	default:
		return "Lustre";
	}
}

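/*
 * Emit a debug message on the kernel console.  The printk level and prefix
 * are derived from the message mask; D_CONSOLE messages are printed bare,
 * all others also carry the pid, file, line and function of the caller.
 */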
void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
			  const char *buf, int len, const char *file,
			  const char *fn)
{
	char *prefix = "Lustre", *ptype = NULL;

	if ((mask & D_EMERG) != 0) {
		prefix = dbghdr_to_err_string(hdr);
		ptype = KERN_EMERG;
	} else if ((mask & D_ERROR) != 0) {
		prefix = dbghdr_to_err_string(hdr);
		ptype = KERN_ERR;
	} else if ((mask & D_WARNING) != 0) {
		prefix = dbghdr_to_info_string(hdr);
		ptype = KERN_WARNING;
	} else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
		prefix = dbghdr_to_info_string(hdr);
		ptype = KERN_INFO;
	}

	if ((mask & D_CONSOLE) != 0) {
		printk("%s%s: %.*s", ptype, prefix, len, buf);
	} else {
		printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
		       hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
		       fn, len, buf);
	}
}

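/*
 * Upper bound, in MiB, allowed for the debug trace buffers: 80% of total
 * RAM, but never less than 512 MiB.
 */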
int cfs_trace_max_debug_mb(void)
{
	int  total_mb = (totalram_pages >> (20 - PAGE_SHIFT));

	return max(512, (total_mb * 80) / 100);
}