/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

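/*
 * A critical section is ending: compute how long irqs/preemption were
 * disabled and, if report_latency() accepts the delta, record it as the
 * new maximum via update_max_tr_single().
 */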
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

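/*
 * Mark the start of an irqs/preempt-off critical section on this CPU:
 * record the timestamp and the address where the section began.
 */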
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

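/*
 * Mark the end of the critical section on this CPU and hand the
 * measurement to check_critical_timing().
 */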
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used around stoppage points (e.g. in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

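/*
 * With CONFIG_PROVE_LOCKING, lockdep supplies the trace_hardirqs_*()
 * entry points and forwards to time_hardirqs_on/off() here; without it,
 * the tracer_hardirqs_*() helpers below are called from the
 * trace_hardirqs_*() functions defined near the end of this file.
 */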
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * We are only interested in hardirq on/off events:
 */
static inline void tracer_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}

static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

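/*
 * Handle trace option flips while this tracer is active (function
 * tracing on/off, graph display on/off, overwrite mode).
 */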
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

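/*
 * Common init for the irqsoff, preemptoff and preemptirqsoff variants:
 * only one of them can be active at a time (irqsoff_busy).
 */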
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non-overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
	.open           = irqsoff_trace_open,
	.close          = irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */

#ifndef CONFIG_IRQSOFF_TRACER
static inline void tracer_hardirqs_on(void) { }
static inline void tracer_hardirqs_off(void) { }
static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
#endif

#ifndef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

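/*
 * Generic trace_hardirqs_*() entry points, used when lockdep does not
 * provide them: emit the preemptirq tracepoints and notify the tracer,
 * using tracing_irq_cpu to suppress redundant nested calls.
 */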
#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
void trace_hardirqs_on(void)
{
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_on();

	this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_off();
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
	tracer_hardirqs_on_caller(caller_addr);

	this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	tracer_hardirqs_off_caller(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

#if defined(CONFIG_PREEMPT_TRACER) || \
	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif