--- prebuilts/other/perf_event.h	2023-01-16 15:50:19.155934716 +0800
+++ third_party/perf_include/linux/perf_event.h	2023-01-16 15:52:07.091731926 +0800
@@ -12,12 +12,9 @@
  *
  * For licencing details see kernel-base/COPYING
  */
-#ifndef _UAPI_LINUX_PERF_EVENT_H
-#define _UAPI_LINUX_PERF_EVENT_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <asm/byteorder.h>
+#ifndef _TS_PERF_EVENT_H
+#define _TS_PERF_EVENT_H
+#include "types.h"

 /*
  * User-space ABI bits:
@@ -38,21 +35,6 @@
 };

 /*
- * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
- * PERF_TYPE_HARDWARE:			0xEEEEEEEE000000AA
- *					AA: hardware event ID
- *					EEEEEEEE: PMU type ID
- * PERF_TYPE_HW_CACHE:			0xEEEEEEEE00DDCCBB
- *					BB: hardware cache ID
- *					CC: hardware cache op ID
- *					DD: hardware cache op result ID
- *					EEEEEEEE: PMU type ID
- * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
- */
-#define PERF_PMU_TYPE_SHIFT		32
-#define PERF_HW_EVENT_MASK		0xffffffff
-
-/*
  * Generalized performance event event_id types, used by the
  * attr.event_id parameter of the sys_perf_event_open()
  * syscall:
@@ -127,7 +109,6 @@
 	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
 	PERF_COUNT_SW_DUMMY			= 9,
 	PERF_COUNT_SW_BPF_OUTPUT		= 10,
-	PERF_COUNT_SW_CGROUP_SWITCHES		= 11,

 	PERF_COUNT_SW_MAX,			/* non-ABI */
 };
@@ -157,18 +138,10 @@
 	PERF_SAMPLE_TRANSACTION			= 1U << 17,
 	PERF_SAMPLE_REGS_INTR			= 1U << 18,
 	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,
-	PERF_SAMPLE_AUX				= 1U << 20,
-	PERF_SAMPLE_CGROUP			= 1U << 21,
-	PERF_SAMPLE_DATA_PAGE_SIZE		= 1U << 22,
-	PERF_SAMPLE_CODE_PAGE_SIZE		= 1U << 23,
-	PERF_SAMPLE_WEIGHT_STRUCT		= 1U << 24,

-	PERF_SAMPLE_MAX = 1U << 25,		/* non-ABI */
-
-	__PERF_SAMPLE_CALLCHAIN_EARLY		= 1ULL << 63, /* non-ABI; internal use */
+	PERF_SAMPLE_MAX = 1U << 20,		/* non-ABI */
 };

-#define PERF_SAMPLE_WEIGHT_TYPE	(PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
 /*
  * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
  *
@@ -202,8 +175,6 @@

 	PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT	= 16, /* save branch type */

-	PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT	= 17, /* save low level index of raw branch records */
-
 	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
 };

@@ -231,8 +202,6 @@
 	PERF_SAMPLE_BRANCH_TYPE_SAVE	=
 		1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

-	PERF_SAMPLE_BRANCH_HW_INDEX	= 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
-
 	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };

@@ -326,8 +295,6 @@
 					/* add: sample_stack_user */
 #define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
 #define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */
-#define PERF_ATTR_SIZE_VER6	120	/* add: aux_sample_size */
-#define PERF_ATTR_SIZE_VER7	128	/* add: sig_data */

 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -400,16 +367,7 @@
 				context_switch :  1, /* context switch data */
 				write_backward :  1, /* Write ring buffer from end to beginning */
 				namespaces     :  1, /* include namespaces data */
-				ksymbol        :  1, /* include ksymbol events */
-				bpf_event      :  1, /* include bpf events */
-				aux_output     :  1, /* generate AUX records instead of events */
-				cgroup         :  1, /* include cgroup events */
-				text_poke      :  1, /* include text poke events */
-				build_id       :  1, /* use build id in mmap2 events */
-				inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
-				remove_on_exec :  1, /* event is removed from task on exec */
-				sigtrap        :  1, /* send synchronous SIGTRAP on event */
-				__reserved_1   : 26;
+				__reserved_1   : 35;

 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -419,14 +377,10 @@
 	__u32			bp_type;
 	union {
 		__u64		bp_addr;
-		__u64		kprobe_func; /* for perf_kprobe */
-		__u64		uprobe_path; /* for perf_uprobe */
 		__u64		config1; /* extension of config */
 	};
 	union {
 		__u64		bp_len;
-		__u64		kprobe_addr; /* when kprobe_func == NULL */
-		__u64		probe_offset; /* for perf_[k,u]probe */
 		__u64		config2; /* extension of config1 */
 	};
 	__u64	branch_sample_type; /* enum perf_branch_sample_type */
@@ -458,53 +412,24 @@
 	 */
 	__u32	aux_watermark;
 	__u16	sample_max_stack;
-	__u16	__reserved_2;
-	__u32	aux_sample_size;
-	__u32	__reserved_3;
-
-	/*
-	 * User provided data if sigtrap=1, passed back to user via
-	 * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
-	 */
-	__u64	sig_data;
+	__u16	__reserved_2;	/* align to __u64 */
 };

-/*
- * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
- * to query bpf programs attached to the same perf tracepoint
- * as the given perf event.
- */
-struct perf_event_query_bpf {
-	/*
-	 * The below ids array length
-	 */
-	__u32	ids_len;
-	/*
-	 * Set by the kernel to indicate the number of
-	 * available programs
-	 */
-	__u32	prog_cnt;
-	/*
-	 * User provided buffer to store program ids
-	 */
-	__u32	ids[0];
-};
+#define perf_flags(attr)	(*(&(attr)->read_format + 1))

 /*
  * Ioctls that can be done on a perf event fd:
  */
-#define PERF_EVENT_IOC_ENABLE			_IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE			_IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH			_IO ('$', 2)
-#define PERF_EVENT_IOC_RESET			_IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD			_IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT		_IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER		_IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID			_IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF			_IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT		_IOW('$', 9, __u32)
-#define PERF_EVENT_IOC_QUERY_BPF		_IOWR('$', 10, struct perf_event_query_bpf *)
-#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES	_IOW('$', 11, struct perf_event_attr *)
+#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
+#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)

 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
@@ -564,10 +489,9 @@
 				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

 				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
-				cap_user_time		: 1, /* The time_{shift,mult,offset} fields are used */
+				cap_user_time		: 1, /* The time_* fields are used */
 				cap_user_time_zero	: 1, /* The time_zero field is used */
-				cap_user_time_short	: 1, /* the time_{cycle,mask} fields are used */
-				cap_____res		: 58;
+				cap_____res		: 59;
 		};
 	};

@@ -626,29 +550,13 @@
 	 *               ((rem * time_mult) >> time_shift);
 	 */
 	__u64	time_zero;
-
 	__u32	size;			/* Header size up to __reserved[] fields. */
-	__u32	__reserved_1;
-
-	/*
-	 * If cap_usr_time_short, the hardware clock is less than 64bit wide
-	 * and we must compute the 'cyc' value, as used by cap_usr_time, as:
-	 *
-	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
-	 *
-	 * NOTE: this form is explicitly chosen such that cap_usr_time_short
-	 *       is a correction on top of cap_usr_time, and code that doesn't
-	 *       know about cap_usr_time_short still works under the assumption
-	 *       the counter doesn't wrap.
-	 */
-	__u64	time_cycles;
-	__u64	time_mask;

 		/*
 		 * Hole for extension of the self monitor capabilities
 		 */

-	__u8	__reserved[116*8];	/* align to 1k. */
+	__u8	__reserved[118*8+4];	/* align to 1k. */

 	/*
 	 * Control data for the mmap() data buffer.
@@ -688,22 +596,6 @@
 	__u64	aux_size;
 };

-/*
- * The current state of perf_event_header::misc bits usage:
- * ('|' used bit, '-' unused bit)
- *
- *  012         CDEF
- *  |||---------||||
- *
- *  Where:
- *    0-2     CPUMODE_MASK
- *
- *    C       PROC_MAP_PARSE_TIMEOUT
- *    D       MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
- *    E       MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
- *    F       (reserved)
- */
-
 #define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL			(1 << 0)
@@ -717,41 +609,19 @@
  */
 #define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
 /*
- * Following PERF_RECORD_MISC_* are used on different
- * events, so can reuse the same bit position:
- *
- *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
- *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
- *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
- *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
+ * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
+ * different events so can reuse the same bit position.
+ * Ditto PERF_RECORD_MISC_SWITCH_OUT.
  */
 #define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
 #define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
-#define PERF_RECORD_MISC_FORK_EXEC		(1 << 13)
 #define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
 /*
- * These PERF_RECORD_MISC_* flags below are safely reused
- * for the following events:
- *
- *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
- *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
- *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
- *
- *
- * PERF_RECORD_MISC_EXACT_IP:
- *   Indicates that the content of PERF_SAMPLE_IP points to
- *   the actual instruction that triggered the event. See also
- *   perf_event_attr::precise_ip.
- *
- * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
- *   Indicates that thread was preempted in TASK_RUNNING state.
- *
- * PERF_RECORD_MISC_MMAP_BUILD_ID:
- *   Indicates that mmap2 event carries build id data.
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
  */
 #define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
-#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
-#define PERF_RECORD_MISC_MMAP_BUILD_ID		(1 << 14)
 /*
  * Reserve the last bit to indicate some extended misc field
  */
@@ -929,9 +799,7 @@
 	 *	  char                  data[size];}&& PERF_SAMPLE_RAW
 	 *
 	 *	{ u64                   nr;
-	 *	  { u64	hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
-	 *        { u64 from, to, flags } lbr[nr];
-	 *      } && PERF_SAMPLE_BRANCH_STACK
+	 *        { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
 	 *
 	 * 	{ u64			abi; # enum perf_sample_regs_abi
 	 * 	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
@@ -940,33 +808,12 @@
 	 * 	  char			data[size];
 	 * 	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
 	 *
-	 *	{ union perf_sample_weight
-	 *	 {
-	 *		u64		full; && PERF_SAMPLE_WEIGHT
-	 *	#if defined(__LITTLE_ENDIAN_BITFIELD)
-	 *		struct {
-	 *			u32	var1_dw;
-	 *			u16	var2_w;
-	 *			u16	var3_w;
-	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
-	 *	#elif defined(__BIG_ENDIAN_BITFIELD)
-	 *		struct {
-	 *			u16	var3_w;
-	 *			u16	var2_w;
-	 *			u32	var1_dw;
-	 *		} && PERF_SAMPLE_WEIGHT_STRUCT
-	 *	#endif
-	 *	 }
-	 *	}
+	 *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
 	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
 	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
 	 *	{ u64			abi; # enum perf_sample_regs_abi
 	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
 	 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
-	 *	{ u64			size;
-	 *	  char			data[size]; } && PERF_SAMPLE_AUX
-	 *	{ u64			data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
-	 *	{ u64			code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
 	 * };
 	 */
 	PERF_RECORD_SAMPLE			= 9,
@@ -982,20 +829,10 @@
 	 *	u64				addr;
 	 *	u64				len;
 	 *	u64				pgoff;
-	 *	union {
-	 *		struct {
-	 *			u32		maj;
-	 *			u32		min;
-	 *			u64		ino;
-	 *			u64		ino_generation;
-	 *		};
-	 *		struct {
-	 *			u8		build_id_size;
-	 *			u8		__reserved_1;
-	 *			u16		__reserved_2;
-	 *			u8		build_id[20];
-	 *		};
-	 *	};
+	 *	u32				maj;
+	 *	u32				min;
+	 *	u64				ino;
+	 *	u64				ino_generation;
 	 *	u32				prot, flags;
 	 *	char				filename[];
 	 * 	struct sample_id		sample_id;
@@ -1024,7 +861,6 @@
 	 *	struct perf_event_header	header;
 	 *	u32				pid;
 	 *	u32				tid;
-	 *	struct sample_id		sample_id;
 	 * };
 	 */
 	PERF_RECORD_ITRACE_START		= 12,
@@ -1079,106 +915,9 @@
 	 */
 	PERF_RECORD_NAMESPACES			= 16,

-	/*
-	 * Record ksymbol register/unregister events:
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				addr;
-	 *	u32				len;
-	 *	u16				ksym_type;
-	 *	u16				flags;
-	 *	char				name[];
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_KSYMBOL			= 17,
-
-	/*
-	 * Record bpf events:
-	 *  enum perf_bpf_event_type {
-	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
-	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
-	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
-	 *  };
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u16				type;
-	 *	u16				flags;
-	 *	u32				id;
-	 *	u8				tag[BPF_TAG_SIZE];
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_BPF_EVENT			= 18,
-
-	/*
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				id;
-	 *	char				path[];
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_CGROUP			= 19,
-
-	/*
-	 * Records changes to kernel text i.e. self-modified code. 'old_len' is
-	 * the number of old bytes, 'new_len' is the number of new bytes. Either
-	 * 'old_len' or 'new_len' may be zero to indicate, for example, the
-	 * addition or removal of a trampoline. 'bytes' contains the old bytes
-	 * followed immediately by the new bytes.
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				addr;
-	 *	u16				old_len;
-	 *	u16				new_len;
-	 *	u8				bytes[];
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_TEXT_POKE			= 20,
-
-	/*
-	 * Data written to the AUX area by hardware due to aux_output, may need
-	 * to be matched to the event by an architecture-specific hardware ID.
-	 * This records the hardware ID, but requires sample_id to provide the
-	 * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
-	 * records from multiple events.
-	 *
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				hw_id;
-	 *	struct sample_id		sample_id;
-	 * };
-	 */
-	PERF_RECORD_AUX_OUTPUT_HW_ID		= 21,
-
 	PERF_RECORD_MAX,			/* non-ABI */
 };

-enum perf_record_ksymbol_type {
-	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN	= 0,
-	PERF_RECORD_KSYMBOL_TYPE_BPF		= 1,
-	/*
-	 * Out of line code such as kprobe-replaced instructions or optimized
-	 * kprobes or ftrace trampolines.
-	 */
-	PERF_RECORD_KSYMBOL_TYPE_OOL		= 2,
-	PERF_RECORD_KSYMBOL_TYPE_MAX		/* non-ABI */
-};
-
-#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER	(1 << 0)
-
-enum perf_bpf_event_type {
-	PERF_BPF_EVENT_UNKNOWN		= 0,
-	PERF_BPF_EVENT_PROG_LOAD	= 1,
-	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
-	PERF_BPF_EVENT_MAX,		/* non-ABI */
-};
-
 #define PERF_MAX_STACK_DEPTH		127
 #define PERF_MAX_CONTEXTS_PER_STACK	  8

@@ -1197,15 +936,10 @@
 /**
  * PERF_RECORD_AUX::flags bits
  */
-#define PERF_AUX_FLAG_TRUNCATED			0x01	/* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE			0x02	/* snapshot from overwrite mode */
-#define PERF_AUX_FLAG_PARTIAL			0x04	/* record contains gaps */
-#define PERF_AUX_FLAG_COLLISION			0x08	/* sample collided with another */
-#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK	0xff00	/* PMU specific trace format type */
-
-/* CoreSight PMU AUX buffer formats */
-#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT	0x0000 /* Default for backward compatibility */
-#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW		0x0100 /* Raw format of the source */
+#define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL		0x04	/* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION		0x08	/* sample collided with another */

 #define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
 #define PERF_FLAG_FD_OUTPUT		(1UL << 1)
@@ -1224,18 +958,14 @@
 			mem_lvl_num:4,	/* memory hierarchy level number */
 			mem_remote:1,   /* remote */
 			mem_snoopx:2,	/* snoop mode, ext */
-			mem_blk:3,	/* access blocked */
-			mem_hops:3,	/* hop level */
-			mem_rsvd:18;
+			mem_rsvd:24;
 	};
 };
 #elif defined(__BIG_ENDIAN_BITFIELD)
 union perf_mem_data_src {
 	__u64 val;
 	struct {
-		__u64	mem_rsvd:18,
-			mem_hops:3,	/* hop level */
-			mem_blk:3,	/* access blocked */
+		__u64	mem_rsvd:24,
 			mem_snoopx:2,	/* snoop mode, ext */
 			mem_remote:1,   /* remote */
 			mem_lvl_num:4,	/* memory hierarchy level number */
@@ -1247,7 +977,7 @@
 	};
 };
 #else
-#error "Unknown endianness"
+// #error "Unknown endianness"
 #endif

 /* type of opcode (load/store/prefetch,code) */
@@ -1258,13 +988,7 @@
 #define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
 #define PERF_MEM_OP_SHIFT	0

-/*
- * PERF_MEM_LVL_* namespace being depricated to some extent in the
- * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
- * Supporting this namespace inorder to not break defined ABIs.
- *
- * memory hierarchy (memory level, hit or miss)
- */
+/* memory hierarchy (memory level, hit or miss) */
 #define PERF_MEM_LVL_NA		0x01  /* not available */
 #define PERF_MEM_LVL_HIT	0x02  /* hit level */
 #define PERF_MEM_LVL_MISS	0x04  /* miss level  */
@@ -1324,20 +1048,6 @@
 #define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
 #define PERF_MEM_TLB_SHIFT	26

-/* Access blocked */
-#define PERF_MEM_BLK_NA		0x01 /* not available */
-#define PERF_MEM_BLK_DATA	0x02 /* data could not be forwarded */
-#define PERF_MEM_BLK_ADDR	0x04 /* address conflict */
-#define PERF_MEM_BLK_SHIFT	40
-
-/* hop level */
-#define PERF_MEM_HOPS_0		0x01 /* remote core, same node */
-#define PERF_MEM_HOPS_1		0x02 /* remote node, same socket */
-#define PERF_MEM_HOPS_2		0x03 /* remote socket, same board */
-#define PERF_MEM_HOPS_3		0x04 /* remote board */
-/* 5-7 available */
-#define PERF_MEM_HOPS_SHIFT	43
-
 #define PERF_MEM_S(a, s) \
 	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)

@@ -1369,23 +1079,4 @@
 		reserved:40;
 };

-union perf_sample_weight {
-	__u64		full;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-	struct {
-		__u32	var1_dw;
-		__u16	var2_w;
-		__u16	var3_w;
-	};
-#elif defined(__BIG_ENDIAN_BITFIELD)
-	struct {
-		__u16	var3_w;
-		__u16	var2_w;
-		__u32	var1_dw;
-	};
-#else
-#error "Unknown endianness"
-#endif
-};
-
-#endif /* _UAPI_LINUX_PERF_EVENT_H */
+#endif /* _TS_PERF_EVENT_H */
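Usage note (not part of the patch above): the trimmed header keeps the perf ABI up to PERF_ATTR_SIZE_VER5, the basic ioctls, and the sample/record definitions, so existing perf_event_open() callers continue to work unchanged. The sketch below is a minimal, hypothetical consumer; it assumes a Linux host where the perf_event_open syscall is available and uses the host's <linux/perf_event.h>. The counter choice (PERF_COUNT_SW_CPU_CLOCK) and the wrapper name are illustrative only.

/* sketch: count CPU clock for the calling thread using only fields
 * and ioctls that the trimmed header still defines. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
	/* no glibc wrapper exists; go through syscall(2) */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = PERF_ATTR_SIZE_VER5;	/* last size kept by the trimmed header */
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.disabled = 1;			/* start stopped, enable explicitly */
	attr.exclude_kernel = 1;

	int fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cpu-clock: %lld\n", count);
	close(fd);
	return 0;
}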