/*
 * Meta performance counter support.
 *  Copyright (C) 2012 Imagination Technologies Ltd
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef METAG_PERF_EVENT_H_
#define METAG_PERF_EVENT_H_

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>

/* For performance counter definitions */
#include <asm/metag_mem.h>

/*
 * The Meta core has two performance counters, with 24-bit resolution. Newer
 * cores generate an overflow interrupt on transition from 0xffffff to 0.
 *
 * Each counter consists of the counter id, hardware thread id, and the count
 * itself; each counter can be assigned to multiple hardware threads at any
 * one time, with the returned count being an aggregate of events. A small
 * number of events are thread global, i.e. they count the aggregate of all
 * threads' events, regardless of the thread selected.
 *
 * Newer cores can store an arbitrary 24-bit number in the counter, whereas
 * older cores will clear the counter bits on write.
 *
 * We also have a pseudo-counter in the form of the thread active cycles
 * counter (which, incidentally, is also bound to
 */

#define MAX_HWEVENTS		3
#define MAX_PERIOD		((1UL << 24) - 1)
#define METAG_INST_COUNTER	(MAX_HWEVENTS - 1)
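
/*
 * Illustrative sketch only (not part of the original header): with 24-bit
 * counters, the difference between two raw reads must be masked back into
 * the counter's range so that a wrap from 0xffffff to 0 is still counted
 * correctly. MAX_PERIOD above is exactly that mask; the helper name is
 * hypothetical.
 */
static inline u32 metag_perf_example_delta(u32 prev_raw, u32 new_raw)
{
	/* Unsigned subtraction, then mask to 24 bits to absorb the wrap */
	return (new_raw - prev_raw) & MAX_PERIOD;
}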

/**
 * struct cpu_hw_events - a processor core's performance events
 * @events:	an array of perf_events active for a given index.
 * @used_mask:	a bitmap of in-use counters.
 * @pmu_lock:	a perf counter lock
 *
 * This is a per-cpu/core structure that maintains a record of its
 * performance counters' state.
 */
struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	raw_spinlock_t		pmu_lock;
};
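
/*
 * Illustrative sketch only (not part of the original header): a free counter
 * slot can be claimed from @used_mask with an atomic test-and-set, failing
 * with -EAGAIN once all MAX_HWEVENTS slots are busy. The driver's actual
 * allocation policy may differ; the helper name is hypothetical.
 */
static inline int example_claim_counter(struct cpu_hw_events *cpuc)
{
	int idx;

	for (idx = 0; idx < MAX_HWEVENTS; idx++)
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;

	return -EAGAIN;
}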

/**
 * struct metag_pmu - the Meta PMU structure
 * @pmu:		core pmu structure
 * @name:		pmu name
 * @version:		core version
 * @handle_irq:		overflow interrupt handler
 * @enable:		enable a counter
 * @disable:		disable a counter
 * @read:		read the value of a counter
 * @write:		write a value to a counter
 * @event_map:		kernel event to counter event id map
 * @cache_events:	kernel cache counter to core cache counter map
 * @max_period:		maximum value of the counter before overflow
 * @max_events:		maximum number of counters available at any one time
 * @active_events:	number of active counters
 * @reserve_mutex:	counter reservation mutex
 *
 * This describes the main functionality and data used by the performance
 * event core.
 */
struct metag_pmu {
	struct pmu	pmu;
	const char	*name;
	u32		version;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	u64		(*read)(int idx);
	void		(*write)(int idx, u32 val);
	int		(*event_map)(int idx);
	const int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	u32		max_period;
	int		max_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
};
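
/*
 * Illustrative sketch only (not part of the original header): the function
 * pointers above are intended to be called through a struct metag_pmu
 * instance, keeping core-specific behaviour behind one ops table. The helper
 * name below is hypothetical.
 */
static inline u64 example_sample_counter(struct metag_pmu *pmu, int idx)
{
	/* Core-specific read, clamped to the 24-bit range in @max_period */
	return pmu->read(idx) & pmu->max_period;
}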

/* Convenience accessors for the performance counter registers */
#define PERF_COUNT(x)	(PERF_COUNT0 + (sizeof(u64) * (x)))
#define PERF_ICORE(x)	(PERF_ICORE0 + (sizeof(u64) * (x)))
#define PERF_CHAN(x)	(PERF_CHAN0 + (sizeof(u64) * (x)))
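
/*
 * Example (illustrative only): each register bank is laid out sizeof(u64)
 * bytes apart, so PERF_COUNT(x) resolves to the memory-mapped address of
 * hardware counter x. Assuming <asm/io.h> is available, the raw value of
 * counter 1 could be read as:
 *
 *	u32 raw = metag_in32(PERF_COUNT(1));
 *
 * where the low 24 bits hold the running count.
 */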

/* Cache index macros */
#define C(x) PERF_COUNT_HW_CACHE_##x
#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff
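
/*
 * Example (illustrative only): C() is shorthand for indexing the
 * three-dimensional @cache_events table in struct metag_pmu, e.g. a
 * data-cache read miss maps to:
 *
 *	cache_events[C(L1D)][C(OP_READ)][C(RESULT_MISS)]
 *
 * Entries set to CACHE_OP_UNSUPPORTED are combinations this core cannot
 * count; CACHE_OP_NONSENSE marks combinations that are not meaningful and
 * should be rejected outright.
 */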

#endif /* METAG_PERF_EVENT_H_ */