1 /*
2 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <stdbool.h>
9
10 #include <arch.h>
11 #include <arch_helpers.h>
12
13 #include <lib/el3_runtime/pubsub_events.h>
14 #include <lib/extensions/amu.h>
15 #include <lib/extensions/amu_private.h>
16
17 #include <plat/common/platform.h>
18
/* Per-core AMU counter context, indexed by plat_my_core_pos(). */
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
20
21 /* Check if AMUv1 for Armv8.4 or 8.6 is implemented */
amu_supported(void)22 bool amu_supported(void)
23 {
24 uint32_t features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
25
26 features &= ID_PFR0_AMU_MASK;
27 return ((features == 1U) || (features == 2U));
28 }
29
#if AMU_GROUP1_NR_COUNTERS
/*
 * Check whether auxiliary (group 1) counters are implemented.
 * AMCFGR.NCG == 1 indicates that counter group 1 is present.
 */
bool amu_group1_supported(void)
{
	uint32_t ncg = (read_amcfgr() >> AMCFGR_NCG_SHIFT) & AMCFGR_NCG_MASK;

	return ncg == 1U;
}
#endif
39
40 /*
41 * Enable counters. This function is meant to be invoked
42 * by the context management library before exiting from EL3.
43 */
/*
 * Enable the AMU counters. Intended to be called by the context
 * management library just before exiting EL3.
 *
 * el2_unused: when true, EL2 is not in use, so HCPTR.TAM is cleared here
 * to keep Non-secure EL0/EL1 accesses to the AMU registers from trapping.
 */
void amu_enable(bool el2_unused)
{
	/* Nothing to do if the AMU is not implemented. */
	if (!amu_supported()) {
		return;
	}

#if AMU_GROUP1_NR_COUNTERS
	/* The build asked for group 1 counters; the hardware must have them. */
	if (!amu_group1_supported()) {
		ERROR("AMU Counter Group 1 is not implemented\n");
		panic();
	}

	/*
	 * The hardware must implement at least as many group 1 counters
	 * as the platform build requested (AMCGCR.CG1NC).
	 */
	uint32_t impl_cnt = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
		AMCGCR_CG1NC_MASK;
	VERBOSE("%s%u. %s%u\n",
		"Number of AMU Group 1 Counters ", impl_cnt,
		"Requested number ", AMU_GROUP1_NR_COUNTERS);

	if (impl_cnt < AMU_GROUP1_NR_COUNTERS) {
		ERROR("%s%u is less than %s%u\n",
			"Number of AMU Group 1 Counters ", impl_cnt,
			"Requested number ", AMU_GROUP1_NR_COUNTERS);
		panic();
	}
#endif

	if (el2_unused) {
		/*
		 * Clear HCPTR.TAM so Non-secure EL0/EL1 accesses to the
		 * Activity Monitor registers do not trap to EL2.
		 */
		uint64_t hcptr = read_hcptr();

		hcptr &= ~TAM_BIT;
		write_hcptr(hcptr);
	}

	/* Turn on all architected (group 0) counters. */
	write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);

#if AMU_GROUP1_NR_COUNTERS
	/* Turn on the platform-selected auxiliary (group 1) counters. */
	write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
#endif
}
91
92 /* Read the group 0 counter identified by the given `idx`. */
amu_group0_cnt_read(unsigned int idx)93 uint64_t amu_group0_cnt_read(unsigned int idx)
94 {
95 assert(amu_supported());
96 assert(idx < AMU_GROUP0_NR_COUNTERS);
97
98 return amu_group0_cnt_read_internal(idx);
99 }
100
101 /* Write the group 0 counter identified by the given `idx` with `val` */
amu_group0_cnt_write(unsigned int idx,uint64_t val)102 void amu_group0_cnt_write(unsigned int idx, uint64_t val)
103 {
104 assert(amu_supported());
105 assert(idx < AMU_GROUP0_NR_COUNTERS);
106
107 amu_group0_cnt_write_internal(idx, val);
108 isb();
109 }
110
111 #if AMU_GROUP1_NR_COUNTERS
112 /* Read the group 1 counter identified by the given `idx` */
amu_group1_cnt_read(unsigned int idx)113 uint64_t amu_group1_cnt_read(unsigned int idx)
114 {
115 assert(amu_supported());
116 assert(amu_group1_supported());
117 assert(idx < AMU_GROUP1_NR_COUNTERS);
118
119 return amu_group1_cnt_read_internal(idx);
120 }
121
122 /* Write the group 1 counter identified by the given `idx` with `val` */
amu_group1_cnt_write(unsigned int idx,uint64_t val)123 void amu_group1_cnt_write(unsigned int idx, uint64_t val)
124 {
125 assert(amu_supported());
126 assert(amu_group1_supported());
127 assert(idx < AMU_GROUP1_NR_COUNTERS);
128
129 amu_group1_cnt_write_internal(idx, val);
130 isb();
131 }
132
133 /*
134 * Program the event type register for the given `idx` with
135 * the event number `val`
136 */
amu_group1_set_evtype(unsigned int idx,unsigned int val)137 void amu_group1_set_evtype(unsigned int idx, unsigned int val)
138 {
139 assert(amu_supported());
140 assert(amu_group1_supported());
141 assert(idx < AMU_GROUP1_NR_COUNTERS);
142
143 amu_group1_set_evtype_internal(idx, val);
144 isb();
145 }
146 #endif /* AMU_GROUP1_NR_COUNTERS */
147
amu_context_save(const void * arg)148 static void *amu_context_save(const void *arg)
149 {
150 struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
151 unsigned int i;
152
153 if (!amu_supported()) {
154 return (void *)-1;
155 }
156
157 #if AMU_GROUP1_NR_COUNTERS
158 if (!amu_group1_supported()) {
159 return (void *)-1;
160 }
161 #endif
162 /* Assert that group 0/1 counter configuration is what we expect */
163 assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
164
165 #if AMU_GROUP1_NR_COUNTERS
166 assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
167 #endif
168 /*
169 * Disable group 0/1 counters to avoid other observers like SCP sampling
170 * counter values from the future via the memory mapped view.
171 */
172 write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
173
174 #if AMU_GROUP1_NR_COUNTERS
175 write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
176 #endif
177 isb();
178
179 /* Save all group 0 counters */
180 for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
181 ctx->group0_cnts[i] = amu_group0_cnt_read(i);
182 }
183
184 #if AMU_GROUP1_NR_COUNTERS
185 /* Save group 1 counters */
186 for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
187 if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
188 ctx->group1_cnts[i] = amu_group1_cnt_read(i);
189 }
190 }
191 #endif
192 return (void *)0;
193 }
194
amu_context_restore(const void * arg)195 static void *amu_context_restore(const void *arg)
196 {
197 struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
198 unsigned int i;
199
200 if (!amu_supported()) {
201 return (void *)-1;
202 }
203
204 #if AMU_GROUP1_NR_COUNTERS
205 if (!amu_group1_supported()) {
206 return (void *)-1;
207 }
208 #endif
209 /* Counters were disabled in `amu_context_save()` */
210 assert(read_amcntenset0_el0() == 0U);
211
212 #if AMU_GROUP1_NR_COUNTERS
213 assert(read_amcntenset1_el0() == 0U);
214 #endif
215
216 /* Restore all group 0 counters */
217 for (i = 0U; i < AMU_GROUP0_NR_COUNTERS; i++) {
218 amu_group0_cnt_write(i, ctx->group0_cnts[i]);
219 }
220
221 /* Restore group 0 counter configuration */
222 write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
223
224 #if AMU_GROUP1_NR_COUNTERS
225 /* Restore group 1 counters */
226 for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
227 if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U) {
228 amu_group1_cnt_write(i, ctx->group1_cnts[i]);
229 }
230 }
231
232 /* Restore group 1 counter configuration */
233 write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
234 #endif
235
236 return (void *)0;
237 }
238
/* Save/restore the AMU context around a PSCI power-down suspend. */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
241