/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */

#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/ring_buffer.h>

struct task_struct;

int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

void start_cpu_work(void);
void end_cpu_work(void);

/* CPU buffer is composed of such entries (which are
 * also used for context switch notes)
 */
struct op_sample {
	unsigned long eip;
	unsigned long event;
	unsigned long data[0];
};

struct op_entry;

struct oprofile_cpu_buffer {
	unsigned long buffer_size;
	struct task_struct *last_task;
	int last_is_kernel;
	int tracing;
	unsigned long sample_received;
	unsigned long sample_lost_overflow;
	unsigned long backtrace_aborted;
	unsigned long sample_invalid_eip;
	int cpu;
	struct delayed_work work;
};

DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

/*
 * Resets the cpu buffer to a sane state.
 *
 * Reset these to invalid values; the next sample collected will
 * populate the buffer with proper values to initialize it.
 */
static inline void op_cpu_buffer_reset(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);

	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

/*
 * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
 * called only if op_cpu_buffer_write_reserve() did not return NULL
 * (i.e. entry->event != NULL); otherwise entry->size or entry->event
 * would be used uninitialized.
 */

struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
int op_cpu_buffer_write_commit(struct op_entry *entry);
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
unsigned long op_cpu_buffer_entries(int cpu);
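
/*
 * Typical write-side use (illustrative sketch only; "pc", "event" and
 * "data" stand for caller-supplied values and are not part of this API):
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 1);
 *	if (!sample)
 *		return;				// buffer full, sample lost
 *	sample->eip = pc;
 *	sample->event = event;
 *	op_cpu_buffer_add_data(&entry, data);	// optional payload word
 *	op_cpu_buffer_write_commit(&entry);
 */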

/* returns the remaining free size of data in the entry */
static inline
int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->size)
		return 0;
	*entry->data = val;
	entry->size--;
	entry->data++;
	return entry->size;
}

/* returns the size of data in the entry */
static inline
int op_cpu_buffer_get_size(struct op_entry *entry)
{
	return entry->size;
}

/* returns 0 if empty or the size of data including the current value */
static inline
int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
{
	int size = entry->size;
	if (!size)
		return 0;
	*val = *entry->data;
	entry->size--;
	entry->data++;
	return size;
}
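
/*
 * Typical read-side use (illustrative sketch only; "cpu" and the
 * consume_sample()/consume_data() helpers are hypothetical):
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *	unsigned long val, i, num = op_cpu_buffer_entries(cpu);
 *
 *	for (i = 0; i < num; i++) {
 *		sample = op_cpu_buffer_read_entry(&entry, cpu);
 *		if (!sample)
 *			break;
 *		consume_sample(sample->eip, sample->event);
 *		while (op_cpu_buffer_get_data(&entry, &val))
 *			consume_data(val);
 *	}
 */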

/* extra data flags */
#define KERNEL_CTX_SWITCH	(1UL << 0)
#define IS_KERNEL		(1UL << 1)
#define TRACE_BEGIN		(1UL << 2)
#define USER_CTX_SWITCH		(1UL << 3)
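
/*
 * Note: these flags are combined into the event field of a dedicated
 * context-change entry (for example USER_CTX_SWITCH together with a
 * data word carrying the new task pointer); see cpu_buffer.c for the
 * authoritative usage.
 */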

#endif /* OPROFILE_CPU_BUFFER_H */