#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	/* poll support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];	/* flexible array of data pages */
};

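/*
 * Layout note: data_pages[] is the flexible array at the tail of the
 * allocation, holding nr_pages pointers to the data pages. nr_pages is
 * a power of two (the output path below wraps with
 * "handle->page &= rb->nr_pages - 1"), so a minimal indexing sketch,
 * assuming a byte offset into the data area, looks like:
 *
 *	page = (offset >> PAGE_SHIFT) & (rb->nr_pages - 1);
 *	addr = rb->data_pages[page] + (offset & (PAGE_SIZE - 1));
 */
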
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

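/*
 * Assumed convention (implemented in ring_buffer.c, not shown here):
 * pgoff 0 resolves to the user_page control page and pgoff N (N > 0)
 * to data page N - 1.
 */
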
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

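/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12) and eight data
 * pages, perf_data_size() gives 8 << 12 == 32 KiB. In the vmalloc case
 * the allocator is assumed to fold the page count into page_order
 * instead, so the product comes out to the same total either way.
 */
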
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

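/*
 * Each memcpy_func returns the number of bytes it could NOT copy, so
 * "size - written" is what actually landed. When the current data page
 * fills, the handle wraps to the next page; the loop bails out early on
 * a short copy (e.g. a user-space fault) and returns the leftover len,
 * which is 0 on full success.
 *
 * Hypothetical caller sketch (not part of this header):
 *
 *	unsigned long left = __output_copy_user(handle, ubuf, len);
 *	if (left)
 *		;	// "left" bytes were not copied (e.g. faulting read)
 */
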
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;	/* everything copied; nothing left over */
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;	/* pretend n bytes were copied: advances the handle only */
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

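/*
 * __copy_from_user_inatomic() returns the number of bytes left
 * uncopied, matching the convention DEFINE_OUTPUT_COPY() expects.
 * Page faults are disabled because output can run in atomic context
 * (NMI, hardirq); a faulting copy simply comes back short instead of
 * sleeping.
 */
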
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

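/*
 * Usage sketch with a hypothetical per-CPU counter array (the real
 * callers live elsewhere, e.g. the callchain code). The four slots give
 * one recursion guard per context level, so an NMI-time event can still
 * log while a task-context event on the same CPU is mid-output:
 *
 *	static DEFINE_PER_CPU(int, my_recursion[4]);
 *
 *	int rctx = get_recursion_context(this_cpu_ptr(my_recursion));
 *	if (rctx < 0)
 *		return;		// already active at this level, drop
 *	...
 *	put_recursion_context(this_cpu_ptr(my_recursion), rctx);
 */
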
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */