/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <context.h>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*******************************************************************************
 * Per cpu non-secure contexts used to program the architectural state prior
 * to returning to the normal world.
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];

/******************************************************************************
 * Define the psci capability variable.
 *****************************************************************************/
unsigned int psci_caps;

/*******************************************************************************
 * Function which initializes the 'psci_non_cpu_pd_nodes' or the
 * 'psci_cpu_pd_nodes' corresponding to the power level.
 ******************************************************************************/
static void __init psci_init_pwr_domain_node(unsigned char node_idx,
					     unsigned int parent_idx,
					     unsigned char level)
{
	if (level > PSCI_CPU_PWR_LVL) {
		psci_non_cpu_pd_nodes[node_idx].level = level;
		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
		psci_non_cpu_pd_nodes[node_idx].local_state =
							PLAT_MAX_OFF_STATE;
	} else {
		psci_cpu_data_t *svc_cpu_data;

		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;

		/* Initialize with an invalid mpidr */
		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;

		svc_cpu_data =
			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);

		/* Set the Affinity Info for the cores as OFF */
		svc_cpu_data->aff_info_state = AFF_STATE_OFF;

		/* Invalidate the suspend level for the cpu */
		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;

		/* Set the power state to OFF state */
		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
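
		/*
		 * Flush this CPU's psci_svc_cpu_data so that the values
		 * written above are visible to observers that may read it
		 * with the data cache disabled during early warm boot.
		 */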
		psci_flush_dcache_range((uintptr_t)svc_cpu_data,
					sizeof(*svc_cpu_data));

		cm_set_context_by_index(node_idx,
					(void *) &psci_ns_context[node_idx],
					NON_SECURE);
	}
}

/*******************************************************************************
 * This function updates the cpu_start_idx and ncpus fields for each node in
 * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
 * the CPUs and checking whether they match the parent of the previous
 * CPU. The basic assumption for this to work is that children of the same
 * parent are allocated adjacent indices. The platform should ensure this
 * through proper mapping of the CPUs to indices via plat_core_pos_by_mpidr()
 * and plat_my_core_pos() APIs.
 *******************************************************************************/
static void __init psci_update_pwrlvl_limits(void)
{
	unsigned int cpu_idx;
	int j;
	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
	unsigned int temp_index[PLAT_MAX_PWR_LVL];

	for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
		psci_get_parent_pwr_domain_nodes(cpu_idx,
						 PLAT_MAX_PWR_LVL,
						 temp_index);
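		/*
		 * Walk the ancestors from the highest power level down. The
		 * first CPU whose ancestor at a given level differs from the
		 * previous CPU's marks the start of that ancestor's CPU
		 * range; every CPU visited bumps the ancestor's ncpus.
		 */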
		for (j = (int)PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
			if (temp_index[j] != nodes_idx[j]) {
				nodes_idx[j] = temp_index[j];
				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
					= cpu_idx;
			}
			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
		}
	}
}

/*******************************************************************************
 * Core routine to populate the power domain tree. The tree descriptor passed
 * by the platform is populated breadth-first and the first entry in the map
 * gives the number of root power domains. The parent nodes of the root nodes
 * will point to an invalid entry (-1).
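 *
 * For example, a platform with a single system-level root domain containing
 * two clusters of two CPUs each would export the breadth-first descriptor
 * {1, 2, 2, 2}: one root node, two cluster children under it, then the
 * per-cluster CPU counts.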
 ******************************************************************************/
static unsigned int __init populate_power_domain_tree(const unsigned char
							*topology)
{
	unsigned int i, j = 0U, num_nodes_at_lvl = 1U, num_nodes_at_next_lvl;
	unsigned int node_index = 0U, num_children;
	unsigned int parent_node_index = 0U;
	int level = (int)PLAT_MAX_PWR_LVL;

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
	while (level >= (int) PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0U;
		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0U; i < num_nodes_at_lvl; i++) {
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

			for (j = node_index;
				j < (node_index + num_children); j++)
				psci_init_pwr_domain_node((unsigned char)j,
							  parent_node_index - 1U,
							  (unsigned char)level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;

		/* Reset the index for the cpu power domain array */
		if (level == (int) PSCI_CPU_PWR_LVL)
			node_index = 0;
	}

	/* Validate the sanity of the array exported by the platform */
	assert(j <= PLATFORM_CORE_COUNT);
	return j;
}
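
/*
 * Illustrative sketch, not part of this file: for the two-cluster example
 * above, a platform port could back plat_get_power_domain_tree_desc() with a
 * static descriptor along these lines (the array name and its contents are
 * hypothetical; each platform defines its own map):
 *
 *	static const unsigned char plat_pd_tree_desc[] = {1, 2, 2, 2};
 *
 *	const unsigned char *plat_get_power_domain_tree_desc(void)
 *	{
 *		return plat_pd_tree_desc;
 *	}
 */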

/*******************************************************************************
 * This function does the architectural setup and takes the warm boot
 * entry-point `mailbox_ep` as an argument. The function also initializes the
 * power domain topology tree by querying the platform. The power domain nodes
 * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
 * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
 * exports its static topology map through the
 * plat_get_power_domain_tree_desc() API, which populate_power_domain_tree()
 * parses to populate psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively.
 * On a platform that implements two clusters of 2 CPUs each and supports
 * 3 domain levels, the populated psci_non_cpu_pd_nodes would look like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * And the populated psci_cpu_pd_nodes would look like this:
 * <-  cpus cluster0   -><-  cpus cluster1   ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
 * ------------------------------------------------
 ******************************************************************************/
int __init psci_setup(const psci_lib_args_t *lib_args)
{
	const unsigned char *topology_tree;

	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));

	/* Do the Architectural initialization */
	psci_arch_setup();

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	psci_plat_core_count = populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	(void) plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep,
				   &psci_plat_pm_ops);
	assert(psci_plat_pm_ops != NULL);

	/*
	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
	 * during warm boot, possibly before data cache is enabled.
	 */
	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
				sizeof(psci_plat_pm_ops));

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;
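
	/*
	 * Each optional platform hook below enables the corresponding PSCI
	 * call: define_psci_cap() turns a PSCI function ID into a bit in
	 * psci_caps, which PSCI_FEATURES queries consult at run time.
	 */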
	if (psci_plat_pm_ops->pwr_domain_off != NULL)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if ((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	    (psci_plat_pm_ops->pwr_domain_on_finish != NULL))
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if ((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	    (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state != NULL)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
	if (psci_plat_pm_ops->get_node_hw_state != NULL)
		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
	if ((psci_plat_pm_ops->read_mem_protect != NULL) &&
	    (psci_plat_pm_ops->write_mem_protect != NULL))
		psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
	if (psci_plat_pm_ops->mem_protect_chk != NULL)
		psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
	if (psci_plat_pm_ops->system_reset2 != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);

#if ENABLE_PSCI_STAT
	psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
	psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64);
#endif

	return 0;
}

/*******************************************************************************
 * This duplicates what the primary cpu did after a cold boot in BL1. The same
 * needs to be done when a cpu is hotplugged in. This function could also
 * override any EL3 setup done by BL1 as this code resides in RW memory.
 ******************************************************************************/
void psci_arch_setup(void)
{
#if (ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
	/* Program the counter frequency */
	write_cntfrq_el0(plat_get_syscnt_freq2());
#endif

	/* Initialize the cpu_ops pointer. */
	init_cpu_ops();

	/* Having initialized cpu_ops, we can now print errata status */
	print_errata_status();

#if ENABLE_PAUTH
	/* Store APIAKey_EL1 key */
	set_cpu_data(apiakey[0], read_apiakeylo_el1());
	set_cpu_data(apiakey[1], read_apiakeyhi_el1());
#endif /* ENABLE_PAUTH */
}

/******************************************************************************
 * PSCI Library interface to initialize the cpu context for the next
 * non-secure image during cold boot. The relevant registers in the cpu
 * context need to be retrieved and programmed on return from this interface.
 *****************************************************************************/
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
	cm_init_my_context(next_image_info);
	cm_prepare_el3_exit(NON_SECURE);
}
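
/*
 * Illustrative usage, not part of this file: an EL3 runtime that links the
 * PSCI library might prepare its first exit to the normal world roughly as
 * follows (get_next_ns_image_ep() is a hypothetical helper; each integration
 * obtains the non-secure entry point in its own way):
 *
 *	entry_point_info_t *ns_ep = get_next_ns_image_ep();
 *
 *	assert(ns_ep != NULL);
 *	psci_prepare_next_non_secure_ctx(ns_ep);
 *
 * On return, the prepared per-CPU context is restored and the exception
 * return to the non-secure image can take place.
 */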