/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

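/*
 * Check whether @p_event is allowed to attach to @tp_event: honor the
 * event's own ->perf_perm hook, then apply the tracepoint paranoia and
 * capability rules below.
 */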
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * The parent was already checked and allowed at creation time,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because the code below is called only via the perf_event_open()
	 * syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while tracing
		 * the page fault handler, and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

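/*
 * Register @tp_event for use by perf. The first reference on an event
 * allocates its per-CPU hlist heads; the first reference overall also
 * allocates the shared per-context trace buffers.
 */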
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

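/*
 * Drop one perf reference on the event. The last reference unregisters
 * the perf callback and, once no trace events are in use at all, frees
 * the shared per-context trace buffers.
 */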
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

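/* Let the event class set up any per-event state. */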
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

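/* Tear down the per-event state set up by perf_trace_event_open(). */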
static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

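/*
 * Full initialization: permission check, registration, then open.
 * The registration is rolled back if the open step fails.
 */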
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

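/*
 * Find the trace event matching @p_event->attr.config under event_mutex,
 * pin the module it lives in and initialize the perf side of the event.
 */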
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

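/* Counterpart of perf_trace_init(): close and unregister the event. */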
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

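/*
 * Called from the pmu ->add() path: hang the event off this CPU's
 * hlist so the tracepoint callback will see it, then let the event
 * class add it.
 */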
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

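/* Counterpart of perf_trace_add(): unlink the event from the per-CPU hlist. */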
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

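/*
 * Grab the per-context raw buffer for this CPU and recursion context
 * (returned via @rctxp), and pre-fill the generic trace entry header.
 * Returns NULL if @size exceeds the buffer or recursion is detected.
 */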
void *perf_trace_buf_prepare(int size, unsigned short type,
			     struct pt_regs **regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
			"perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[*rctxp]);
	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from alignment to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
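/*
 * ftrace callback: emit a function-trace sample to every perf event
 * queued on this CPU's hlist.
 */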
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

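/* Hook this event's ftrace_ops into the function tracer. */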
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

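/* Unhook the event's ftrace_ops and release its filter. */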
static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

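/* Enable the event's (control) ftrace_ops on the local CPU. */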
static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

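/* Disable the event's (control) ftrace_ops on the local CPU. */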
static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

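/*
 * ->reg() callback for the function trace event: map the perf
 * TRACE_REG_* requests onto the ftrace register/enable helpers above.
 */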
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */