#include "evlist.h"
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
#include <api/fs/fs.h>
#include "util.h"
#include "cloexec.h"

typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

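/*
 * Probe whether the kernel accepts an attribute change made by @fn: open a
 * reference event parsed from @str on @cpu, let @fn modify its attributes,
 * then try to open it again.  Returns 0 on success, -EINVAL if the kernel
 * rejects the modified attributes, and another negative errno (-EAGAIN by
 * default) if even the unmodified event cannot be opened.  On EACCES the
 * probe falls back from system-wide to per-process (pid 0) monitoring.
 */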
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}

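/*
 * Run perf_do_probe_api() with @fn against a list of simple user-space
 * events, moving on to the next event only while the baseline event itself
 * cannot be opened (-EAGAIN).  Returns true once a probe succeeds.
 */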
static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
	struct cpu_map *cpus;
	int cpu, ret, i = 0;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}

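/*
 * Probe callbacks passed to perf_probe_api(): each one sets the single
 * attribute bit whose kernel support is being tested.
 */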
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}

static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}

bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}

bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}

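/*
 * Check whether a CPU-wide (pid == -1) event can be opened at all, using a
 * user-space-only software clock event so that the test does not also
 * require permission to profile the kernel.
 */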
bool perf_can_record_cpu_wide(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	struct cpu_map *cpus;
	int cpu, fd;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (fd < 0)
		return false;
	close(fd);

	return true;
}

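/*
 * Apply the record options to every event in the evlist: set up group
 * leaders, configure per-event attributes, enable comm_exec tracking where
 * the kernel supports it, and turn on sample identifiers when AUX tracing
 * or differing sample types make them necessary to match samples back to
 * their events.
 */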
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__config(evsel, opts, callchain);
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		evlist__for_each_entry(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}

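/* Read the kernel.perf_event_max_sample_rate sysctl. */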
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

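/*
 * Resolve the user's period/frequency options into a final setting and
 * clamp the frequency to the kernel's perf_event_max_sample_rate limit: an
 * explicit request above the limit is an error, while the built-in default
 * is lowered to the limit with a warning.
 */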
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		pr_err("Maximum frequency rate (%u) reached.\n"
		   "Please use -F freq option with lower value or consider\n"
		   "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
		   max_rate);
		return -1;
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

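/*
 * Check whether the event described by @str can actually be opened on one
 * of the CPUs targeted by @evlist (or on the first online CPU when no CPU
 * map is set up), falling back to per-process monitoring if system-wide
 * access is denied.
 */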
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
	struct perf_evlist *temp_evlist;
	struct perf_evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = perf_evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	evsel = perf_evlist__last(temp_evlist);

	if (!evlist || cpu_map__empty(evlist->cpus)) {
		struct cpu_map *cpus = cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		cpu_map__put(cpus);
	} else {
		cpu = evlist->cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	perf_evlist__delete(temp_evlist);
	return ret;
}
288