/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/amu_private.h>
#include <plat/common/platform.h>

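/*
 * AMUv1 architects four group 0 counters with fixed functions (CPU
 * cycles, constant-frequency cycles, instructions retired and memory
 * stall cycles). Group 1 counters are implementation defined, so
 * AMU_GROUP1_NR_COUNTERS is expected to be provided by the platform.
 */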
#define AMU_GROUP0_NR_COUNTERS	4

struct amu_ctx {
	uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
	uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};

static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

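/*
 * Check whether AMUv1 is present by reading the AMU field of
 * ID_AA64PFR0_EL1; a value of 1 means the extension is implemented.
 */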
bool amu_supported(void)
{
	uint64_t features;

	features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
	return (features & ID_AA64PFR0_AMU_MASK) == 1U;
}

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
	uint64_t v;

	if (!amu_supported())
		return;

	if (el2_unused) {
		/*
		 * CPTR_EL2.TAM: Set to zero so that any accesses to
		 * the Activity Monitor registers do not trap to EL2.
		 */
		v = read_cptr_el2();
		v &= ~CPTR_EL2_TAM_BIT;
		write_cptr_el2(v);
	}

	/*
	 * CPTR_EL3.TAM: Set to zero so that any accesses to
	 * the Activity Monitor registers do not trap to EL3.
	 */
	v = read_cptr_el3();
	v &= ~TAM_BIT;
	write_cptr_el3(v);

	/* Enable group 0 counters */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
	/* Enable group 1 counters */
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
}

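/*
 * Accessors for the individual counters. As a hypothetical illustration
 * (not a caller that exists in this file), sampling the architected
 * instructions-retired counter of group 0 would be:
 *
 *	uint64_t instr_retired = amu_group0_cnt_read(2);
 */
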
/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
{
	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

	return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

	amu_group0_cnt_write_internal(idx, val);
	isb();
}

/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
{
	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

	return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
{
	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

	amu_group1_cnt_write_internal(idx, val);
	isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`.
 */
void amu_group1_set_evtype(int idx, unsigned int val)
{
	assert(amu_supported());
	assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

	amu_group1_set_evtype_internal(idx, val);
	isb();
}

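/*
 * Save the AMU context when a core is about to be powered down. This is
 * invoked through the pubsub framework on the psci_suspend_pwrdown_start
 * event: it disables all counters and stores their values in this core's
 * amu_ctx entry.
 */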
static void *amu_context_save(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	int i;

	if (!amu_supported())
		return (void *)-1;

	/* Assert that group 0/1 counter configuration is what we expect */
	assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
	       (read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));

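	/*
	 * Sanity check: (sizeof(int) * 8) - __builtin_clz(mask) is one more
	 * than the index of the most significant bit set in the mask, so
	 * this asserts that AMU_GROUP1_COUNTERS_MASK does not enable any
	 * counter beyond AMU_GROUP1_NR_COUNTERS.
	 */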
	assert(((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
		<= AMU_GROUP1_NR_COUNTERS);

	/*
	 * Disable group 0/1 counters so that other observers, such as the
	 * SCP, cannot sample counter values from the future via the
	 * memory-mapped view.
	 */
	write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
	write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
	isb();

	/* Save group 0 counters */
	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);

	/* Save group 1 counters */
	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);

	return (void *)0;
}

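/*
 * Restore the AMU context when a core resumes from a power-down suspend.
 * This is invoked through the pubsub framework on the
 * psci_suspend_pwrdown_finish event: it writes the saved counter values
 * back and re-enables the counters disabled in amu_context_save().
 */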
static void *amu_context_restore(const void *arg)
{
	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
	int i;

	if (!amu_supported())
		return (void *)-1;

	/* Counters were disabled in `amu_context_save()` */
	assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));

	assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
		<= AMU_GROUP1_NR_COUNTERS);

	/* Restore group 0 counters */
	for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
		if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
			amu_group0_cnt_write(i, ctx->group0_cnts[i]);

	/* Restore group 1 counters */
	for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
		if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
			amu_group1_cnt_write(i, ctx->group1_cnts[i]);

	/* Restore group 0/1 counter configuration */
	write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
	write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);

	return (void *)0;
}

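/*
 * Register the save/restore handlers with the PSCI suspend events
 * published by the EL3 runtime pubsub framework.
 */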
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);