--- prebuilts/other/perf_event.h	2023-01-16 15:50:19.155934716 +0800
+++ third_party/perf_include/linux/perf_event.h	2023-01-16 15:52:07.091731926 +0800
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  * Performance events:
  *
@@ -12,12 +11,9 @@
  *
  * For licencing details see kernel-base/COPYING
  */
-#ifndef _UAPI_LINUX_PERF_EVENT_H
-#define _UAPI_LINUX_PERF_EVENT_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <asm/byteorder.h>
+#ifndef _TS_PERF_EVENT_H
+#define _TS_PERF_EVENT_H
+#include "types.h"

 /*
  * User-space ABI bits:
@@ -38,21 +34,6 @@ enum perf_type_id {
 };

 /*
- * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
- * PERF_TYPE_HARDWARE:			0xEEEEEEEE000000AA
- *					AA: hardware event ID
- *					EEEEEEEE: PMU type ID
- * PERF_TYPE_HW_CACHE:			0xEEEEEEEE00DDCCBB
- *					BB: hardware cache ID
- *					CC: hardware cache op ID
- *					DD: hardware cache op result ID
- *					EEEEEEEE: PMU type ID
- * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
- */
-#define PERF_PMU_TYPE_SHIFT		32
-#define PERF_HW_EVENT_MASK		0xffffffff
-
-/*
  * Generalized performance event event_id types, used by the
  * attr.event_id parameter of the sys_perf_event_open()
  * syscall:
@@ -127,7 +108,6 @@ enum perf_sw_ids {
 	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
 	PERF_COUNT_SW_DUMMY			= 9,
 	PERF_COUNT_SW_BPF_OUTPUT		= 10,
-	PERF_COUNT_SW_CGROUP_SWITCHES		= 11,

 	PERF_COUNT_SW_MAX,			/* non-ABI */
 };
@@ -157,18 +137,10 @@ enum perf_event_sample_format {
 	PERF_SAMPLE_TRANSACTION			= 1U << 17,
 	PERF_SAMPLE_REGS_INTR			= 1U << 18,
 	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,
-	PERF_SAMPLE_AUX				= 1U << 20,
-	PERF_SAMPLE_CGROUP			= 1U << 21,
-	PERF_SAMPLE_DATA_PAGE_SIZE		= 1U << 22,
-	PERF_SAMPLE_CODE_PAGE_SIZE		= 1U << 23,
-	PERF_SAMPLE_WEIGHT_STRUCT		= 1U << 24,

-	PERF_SAMPLE_MAX = 1U << 25,		/* non-ABI */
-
-	__PERF_SAMPLE_CALLCHAIN_EARLY		= 1ULL << 63, /* non-ABI; internal use */
+	PERF_SAMPLE_MAX = 1U << 20,		/* non-ABI */
 };

-#define PERF_SAMPLE_WEIGHT_TYPE	(PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
 /*
  * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
  *
@@ -202,8 +174,6 @@ enum perf_branch_sample_type_shift {

 	PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT	= 16, /* save branch type */

-	PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT	= 17, /* save low level index of raw branch records */
-
 	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
 };

@@ -231,8 +201,6 @@ enum perf_branch_sample_type {
 	PERF_SAMPLE_BRANCH_TYPE_SAVE	=
 		1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

-	PERF_SAMPLE_BRANCH_HW_INDEX	= 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
-
 	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };

@@ -326,8 +294,6 @@ enum perf_event_read_format {
 					/* add: sample_stack_user */
 #define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
 #define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */
-#define PERF_ATTR_SIZE_VER6	120	/* add: aux_sample_size */
-#define PERF_ATTR_SIZE_VER7	128	/* add: sig_data */

 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -400,16 +366,7 @@ struct perf_event_attr {
 				context_switch :  1, /* context switch data */
 				write_backward :  1, /* Write ring buffer from end to beginning */
 				namespaces     :  1, /* include namespaces data */
-				ksymbol        :  1, /* include ksymbol events */
-				bpf_event      :  1, /* include bpf events */
-				aux_output     :  1, /* generate AUX records instead of events */
-				cgroup         :  1, /* include cgroup events */
-				text_poke      :  1, /* include text poke events */
-				build_id       :  1, /* use build id in mmap2 events */
-				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
-				remove_on_exec :  1, /* event is removed from task on exec */
-				sigtrap        :  1, /* send synchronous SIGTRAP on event */
-				__reserved_1   : 26;
+				__reserved_1   : 35;

 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -419,14 +376,10 @@ struct perf_event_attr {
 	__u32			bp_type;
 	union {
 		__u64		bp_addr;
-		__u64		kprobe_func; /* for perf_kprobe */
-		__u64		uprobe_path; /* for perf_uprobe */
 		__u64		config1; /* extension of config */
 	};
 	union {
 		__u64		bp_len;
-		__u64		kprobe_addr; /* when kprobe_func == NULL */
-		__u64		probe_offset; /* for perf_[k,u]probe */
 		__u64		config2; /* extension of config1 */
 	};
 	__u64	branch_sample_type; /* enum perf_branch_sample_type */
@@ -458,53 +411,24 @@ struct perf_event_attr {
 	 */
 	__u32	aux_watermark;
 	__u16	sample_max_stack;
-	__u16	__reserved_2;
-	__u32	aux_sample_size;
-	__u32	__reserved_3;
-
-	/*
-	 * User provided data if sigtrap=1, passed back to user via
-	 * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
-	 */
-	__u64	sig_data;
+	__u16	__reserved_2;	/* align to __u64 */
 };

-/*
- * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
- * to query bpf programs attached to the same perf tracepoint
- * as the given perf event.
- */
-struct perf_event_query_bpf {
-	/*
-	 * The below ids array length
-	 */
-	__u32	ids_len;
-	/*
-	 * Set by the kernel to indicate the number of
-	 * available programs
-	 */
-	__u32	prog_cnt;
-	/*
-	 * User provided buffer to store program ids
-	 */
-	__u32	ids[0];
-};
+#define perf_flags(attr)	(*(&(attr)->read_format + 1))

 /*
  * Ioctls that can be done on a perf event fd:
  */
-#define PERF_EVENT_IOC_ENABLE			_IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE			_IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH			_IO ('$', 2)
-#define PERF_EVENT_IOC_RESET			_IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD			_IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT		_IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER		_IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID			_IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF			_IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT		_IOW('$', 9, __u32)
-#define PERF_EVENT_IOC_QUERY_BPF		_IOWR('$', 10, struct perf_event_query_bpf *)
-#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES	_IOW('$', 11, struct perf_event_attr *)
+#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
+#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)

 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
@@ -564,10 +488,9 @@ struct perf_event_mmap_page {
 				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

 				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
-				cap_user_time		: 1, /* The time_{shift,mult,offset} fields are used */
+				cap_user_time		: 1, /* The time_* fields are used */
 				cap_user_time_zero	: 1, /* The time_zero field is used */
-				cap_user_time_short	: 1, /* the time_{cycle,mask} fields are used */
-				cap_____res		: 58;
+				cap_____res		: 59;
 		};
 	};

@@ -626,29 +549,13 @@ struct perf_event_mmap_page {
 	 *               ((rem * time_mult) >> time_shift);
 	 */
 	__u64	time_zero;
-
 	__u32	size;			/* Header size up to __reserved[] fields. */
-	__u32	__reserved_1;
-
-	/*
-	 * If cap_usr_time_short, the hardware clock is less than 64bit wide
-	 * and we must compute the 'cyc' value, as used by cap_usr_time, as:
-	 *
-	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
-	 *
-	 * NOTE: this form is explicitly chosen such that cap_usr_time_short
-	 *       is a correction on top of cap_usr_time, and code that doesn't
-	 *       know about cap_usr_time_short still works under the assumption
-	 *       the counter doesn't wrap.
-	 */
-	__u64	time_cycles;
-	__u64	time_mask;

 		/*
 		 * Hole for extension of the self monitor capabilities
 		 */

-	__u8	__reserved[116*8];	/* align to 1k. */
+	__u8	__reserved[118*8+4];	/* align to 1k. */

 	/*
 	 * Control data for the mmap() data buffer.
@@ -688,22 +595,6 @@ struct perf_event_mmap_page {
 	__u64	aux_size;
 };

-/*
- * The current state of perf_event_header::misc bits usage:
- * ('|' used bit, '-' unused bit)
- *
- *  012         CDEF
- *  |||---------||||
- *
- *  Where:
- *    0-2     CPUMODE_MASK
- *
- *    C       PROC_MAP_PARSE_TIMEOUT
- *    D       MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
- *    E       MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
- *    F       (reserved)
- */
-
 #define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL			(1 << 0)
@@ -717,41 +608,19 @@ struct perf_event_mmap_page {
  */
 #define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
 /*
- * Following PERF_RECORD_MISC_* are used on different
- * events, so can reuse the same bit position:
- *
- *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
- *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
- *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
- *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
+ * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
+ * different events so can reuse the same bit position.
+ * Ditto PERF_RECORD_MISC_SWITCH_OUT.
  */
 #define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
 #define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
-#define PERF_RECORD_MISC_FORK_EXEC		(1 << 13)
 #define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
 /*
- * These PERF_RECORD_MISC_* flags below are safely reused
- * for the following events:
- *
- *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
- *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
- *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
- *
- *
- * PERF_RECORD_MISC_EXACT_IP:
- *   Indicates that the content of PERF_SAMPLE_IP points to
- *   the actual instruction that triggered the event. See also
- *   perf_event_attr::precise_ip.
- *
- * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
- *   Indicates that thread was preempted in TASK_RUNNING state.
- *
- * PERF_RECORD_MISC_MMAP_BUILD_ID:
- *   Indicates that mmap2 event carries build id data.
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
  */
 #define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
-#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
-#define PERF_RECORD_MISC_MMAP_BUILD_ID		(1 << 14)
 /*
  * Reserve the last bit to indicate some extended misc field
  */
@@ -929,9 +798,7 @@ enum perf_event_type {
 	 *	  char                  data[size];}&& PERF_SAMPLE_RAW
 	 *
 	 *	{ u64                   nr;
-	 *	  { u64	hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
-	 *        { u64 from, to, flags } lbr[nr];
-	 *      } && PERF_SAMPLE_BRANCH_STACK
+	 *        { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
 	 *
 	 * 	{ u64			abi; # enum perf_sample_regs_abi
 	 * 	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
@@ -940,33 +807,12 @@ enum perf_event_type {
 	 * 	  char			data[size];
 	 * 	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
 	 *
-	 *	{ union perf_sample_weight
-	 *	 {
-	 *		u64		full; && PERF_SAMPLE_WEIGHT
-	 *	#if defined(__LITTLE_ENDIAN_BITFIELD)
-	 *		struct {
-	 *			u32	var1_dw;
-	 *			u16	var2_w;
-	 *			u16	var3_w;
-	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
-	 *	#elif defined(__BIG_ENDIAN_BITFIELD)
-	 *		struct {
-	 *			u16	var3_w;
-	 *			u16	var2_w;
-	 *			u32	var1_dw;
-	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
-	 *	#endif
-	 *	 }
-	 *	}
+	 *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
 	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
 	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
 	 *	{ u64			abi; # enum perf_sample_regs_abi
 	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
 	 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
-	 *	{ u64			size;
-	 *	  char			data[size]; } && PERF_SAMPLE_AUX
-	 *	{ u64			data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
-	 *	{ u64			code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
 	 * };
 	 */
 	PERF_RECORD_SAMPLE			= 9,
@@ -982,20 +828,10 @@ enum perf_event_type {
 	 *	u64				addr;
 	 *	u64				len;
 	 *	u64				pgoff;
-	 *	union {
-	 *		struct {
-	 *			u32		maj;
-	 *			u32		min;
-	 *			u64		ino;
-	 *			u64		ino_generation;
-	 *		};
-	 *		struct {
-	 *			u8		build_id_size;
-	 *			u8		__reserved_1;
-	 *			u16		__reserved_2;
-	 *			u8		build_id[20];
-	 *		};
-	 *	};
+	 *	u32				maj;
+	 *	u32				min;
+	 *	u64				ino;
+	 *	u64				ino_generation;
 	 *	u32				prot, flags;
 	 *	char				filename[];
 	 * 	struct sample_id		sample_id;
@@ -1024,7 +860,6 @@ enum perf_event_type {
 	 *	struct perf_event_header	header;
 	 *	u32				pid;
 	 *	u32				tid;
-	 *	struct sample_id		sample_id;
 	 * };
 	 */
 	PERF_RECORD_ITRACE_START		= 12,
@@ -1079,106 +914,9 @@ enum perf_event_type {
 	 */
 	PERF_RECORD_NAMESPACES			= 16,

-	/*
-	 * Record ksymbol register/unregister events:
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				addr;
-	 *	u32				len;
-	 *	u16				ksym_type;
-	 *	u16				flags;
-	 *	char				name[];
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_KSYMBOL			= 17,
-
-	/*
-	 * Record bpf events:
-	 *  enum perf_bpf_event_type {
-	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
-	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
-	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
-	 *  };
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u16				type;
-	 *	u16				flags;
-	 *	u32				id;
-	 *	u8				tag[BPF_TAG_SIZE];
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_BPF_EVENT			= 18,
-
-	/*
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				id;
-	 *	char				path[];
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_CGROUP			= 19,
-
-	/*
-	 * Records changes to kernel text i.e. self-modified code. 'old_len' is
-	 * the number of old bytes, 'new_len' is the number of new bytes. Either
-	 * 'old_len' or 'new_len' may be zero to indicate, for example, the
-	 * addition or removal of a trampoline. 'bytes' contains the old bytes
-	 * followed immediately by the new bytes.
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				addr;
-	 *	u16				old_len;
-	 *	u16				new_len;
-	 *	u8				bytes[];
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_TEXT_POKE			= 20,
-
-	/*
-	 * Data written to the AUX area by hardware due to aux_output, may need
-	 * to be matched to the event by an architecture-specific hardware ID.
-	 * This records the hardware ID, but requires sample_id to provide the
-	 * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
-	 * records from multiple events.
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				hw_id;
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,
-
 	PERF_RECORD_MAX,			/* non-ABI */
 };

-enum perf_record_ksymbol_type {
-	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN	= 0,
-	PERF_RECORD_KSYMBOL_TYPE_BPF		= 1,
-	/*
-	 * Out of line code such as kprobe-replaced instructions or optimized
-	 * kprobes or ftrace trampolines.
-	 */
-	PERF_RECORD_KSYMBOL_TYPE_OOL		= 2,
-	PERF_RECORD_KSYMBOL_TYPE_MAX		/* non-ABI */
-};
-
-#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER	(1 << 0)
-
-enum perf_bpf_event_type {
-	PERF_BPF_EVENT_UNKNOWN		= 0,
-	PERF_BPF_EVENT_PROG_LOAD	= 1,
-	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
-	PERF_BPF_EVENT_MAX,		/* non-ABI */
-};
-
 #define PERF_MAX_STACK_DEPTH		127
 #define PERF_MAX_CONTEXTS_PER_STACK	  8

@@ -1197,15 +935,10 @@ enum perf_callchain_context {
 /**
  * PERF_RECORD_AUX::flags bits
  */
-#define PERF_AUX_FLAG_TRUNCATED			0x01	/* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE			0x02	/* snapshot from overwrite mode */
-#define PERF_AUX_FLAG_PARTIAL			0x04	/* record contains gaps */
-#define PERF_AUX_FLAG_COLLISION			0x08	/* sample collided with another */
-#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK	0xff00	/* PMU specific trace format type */
-
-/* CoreSight PMU AUX buffer formats */
-#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT	0x0000 /* Default for backward compatibility */
-#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW		0x0100 /* Raw format of the source */
+#define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL		0x04	/* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION		0x08	/* sample collided with another */

 #define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
 #define PERF_FLAG_FD_OUTPUT		(1UL << 1)
@@ -1224,18 +957,14 @@ union perf_mem_data_src {
 			mem_lvl_num:4,	/* memory hierarchy level number */
 			mem_remote:1,   /* remote */
 			mem_snoopx:2,	/* snoop mode, ext */
-			mem_blk:3,	/* access blocked */
-			mem_hops:3,	/* hop level */
-			mem_rsvd:18;
+			mem_rsvd:24;
 	};
 };
 #elif defined(__BIG_ENDIAN_BITFIELD)
 union perf_mem_data_src {
 	__u64 val;
 	struct {
-		__u64	mem_rsvd:18,
-			mem_hops:3,	/* hop level */
-			mem_blk:3,	/* access blocked */
+		__u64	mem_rsvd:24,
 			mem_snoopx:2,	/* snoop mode, ext */
 			mem_remote:1,   /* remote */
 			mem_lvl_num:4,	/* memory hierarchy level number */
@@ -1247,7 +976,7 @@ union perf_mem_data_src {
 	};
 };
 #else
-#error "Unknown endianness"
+// #error "Unknown endianness"
 #endif

 /* type of opcode (load/store/prefetch,code) */
@@ -1258,13 +987,7 @@ union perf_mem_data_src {
 #define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
 #define PERF_MEM_OP_SHIFT	0

-/*
- * PERF_MEM_LVL_* namespace being depricated to some extent in the
- * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
- * Supporting this namespace inorder to not break defined ABIs.
- *
- * memory hierarchy (memory level, hit or miss)
- */
+/* memory hierarchy (memory level, hit or miss) */
 #define PERF_MEM_LVL_NA		0x01  /* not available */
 #define PERF_MEM_LVL_HIT	0x02  /* hit level */
 #define PERF_MEM_LVL_MISS	0x04  /* miss level  */
@@ -1324,20 +1047,6 @@ union perf_mem_data_src {
 #define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
 #define PERF_MEM_TLB_SHIFT	26

-/* Access blocked */
-#define PERF_MEM_BLK_NA		0x01 /* not available */
-#define PERF_MEM_BLK_DATA	0x02 /* data could not be forwarded */
-#define PERF_MEM_BLK_ADDR	0x04 /* address conflict */
-#define PERF_MEM_BLK_SHIFT	40
-
-/* hop level */
-#define PERF_MEM_HOPS_0		0x01 /* remote core, same node */
-#define PERF_MEM_HOPS_1		0x02 /* remote node, same socket */
-#define PERF_MEM_HOPS_2		0x03 /* remote socket, same board */
-#define PERF_MEM_HOPS_3		0x04 /* remote board */
-/* 5-7 available */
-#define PERF_MEM_HOPS_SHIFT	43
-
 #define PERF_MEM_S(a, s) \
 	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)

@@ -1369,23 +1078,4 @@ struct perf_branch_entry {
 		reserved:40;
 };

-union perf_sample_weight {
-	__u64		full;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-	struct {
-		__u32	var1_dw;
-		__u16	var2_w;
-		__u16	var3_w;
-	};
-#elif defined(__BIG_ENDIAN_BITFIELD)
-	struct {
-		__u16	var3_w;
-		__u16	var2_w;
-		__u32	var1_dw;
-	};
-#else
-#error "Unknown endianness"
-#endif
-};
-
-#endif /* _UAPI_LINUX_PERF_EVENT_H */
+#endif /* _TS_PERF_EVENT_H */
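
A minimal usage sketch, not part of the diff above: it shows that the definitions kept by the trimmed header (PERF_TYPE_SOFTWARE, the perf_event_attr fields up to sample_max_stack, PERF_ATTR_SIZE_VER5, and the retained PERF_EVENT_IOC_* macros) are enough to open and read a simple counter. It assumes the trimmed header compiles standalone via its "types.h", that <sys/ioctl.h> supplies the _IO/_IOW/_IOR macros it uses, and that perf_event_open is reached through syscall(__NR_perf_event_open); the choice of PERF_COUNT_SW_CPU_CLOCK is only an example.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include "perf_event.h"		/* the trimmed header; <linux/perf_event.h> also works */

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = PERF_ATTR_SIZE_VER5;	/* 112: the last size the trimmed header defines */
	attr.config = PERF_COUNT_SW_CPU_CLOCK;	/* example event */
	attr.disabled = 1;			/* start disabled, enable explicitly below */
	attr.exclude_kernel = 1;

	/* measure the calling thread (pid 0) on any CPU (-1), no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	/* with the default read_format, read() returns a single u64 value */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cpu-clock: %lld ns\n", count);

	close(fd);
	return 0;
}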