/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <debug.h>
#include <context_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

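/*
 * Signature shared by the per-affinity-level CPU_ON handlers defined below,
 * so that they can be kept in a single table and selected by affinity level.
 */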
typedef int (*afflvl_on_handler_t)(unsigned long target_cpu,
				   aff_map_node_t *node);

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(unsigned int psci_state)
{
	if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
		return PSCI_E_ALREADY_ON;

	if (psci_state == PSCI_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(psci_state == PSCI_STATE_OFF);
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Handler routine to turn a cpu on. It takes care of any generic, architectural
 * or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl0_on(unsigned long target_cpu,
			   aff_map_node_t *cpu_node)
{
	unsigned long psci_entrypoint;

	/* Sanity check to safeguard against data corruption */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;

	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	return psci_plat_pm_ops->affinst_on(target_cpu,
					    psci_entrypoint,
					    cpu_node->level,
					    psci_get_phys_state(cpu_node));
}

/*******************************************************************************
 * Handler routine to turn a cluster on. It takes care of any generic, arch.
 * or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl1_on(unsigned long target_cpu,
			   aff_map_node_t *cluster_node)
{
	unsigned long psci_entrypoint;

	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * There is no generic or arch. specific cluster
	 * management required.
	 */

	/* State management: Is not required while turning a cluster on */

	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
	return psci_plat_pm_ops->affinst_on(target_cpu,
					    psci_entrypoint,
					    cluster_node->level,
					    psci_get_phys_state(cluster_node));
}

/*******************************************************************************
 * Handler routine to turn a cluster of clusters on. It takes care of any
 * generic, arch. or platform specific setup required.
 * TODO: Split this code across separate handlers for each type of setup?
 ******************************************************************************/
static int psci_afflvl2_on(unsigned long target_cpu,
			   aff_map_node_t *system_node)
{
	unsigned long psci_entrypoint;

	/* Cannot go beyond affinity level 2 in this psci implementation */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * There is no generic or arch. specific system
	 * management required.
	 */

	/* State management: Is not required while turning a system on */

	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
	return psci_plat_pm_ops->affinst_on(target_cpu,
					    psci_entrypoint,
					    system_node->level,
					    psci_get_phys_state(system_node));
}

/* Private table to make these handlers accessible through indexing */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
	psci_afflvl0_on,
	psci_afflvl1_on,
	psci_afflvl2_on,
};
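/*
 * Note: psci_call_on_handlers() below indexes this table directly with the
 * affinity level of the node being powered on.
 */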

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the on handler for the corresponding affinity
 * levels.
 ******************************************************************************/
static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[],
				 int start_afflvl,
				 int end_afflvl,
				 unsigned long target_cpu)
{
	int rc = PSCI_E_INVALID_PARAMS, level;
	aff_map_node_t *node;

	for (level = end_afflvl; level >= start_afflvl; level--) {
		node = target_cpu_nodes[level];
		if (node == NULL)
			continue;

		/*
		 * TODO: In case of an error, should there be a way
		 * of undoing what we might have set up at higher
		 * affinity levels?
		 */
		rc = psci_afflvl_on_handlers[level](target_cpu,
						    node);
		if (rc != PSCI_E_SUCCESS)
			break;
	}

	return rc;
}

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It traverses through all the affinity levels performing generic,
 * architectural and platform setup and state management, e.g. for a cpu that
 * is to be powered on, it will ensure that enough information is stashed for
 * it to resume execution in the non-secure security state.
 *
 * The state of all the relevant affinity levels is changed after calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is currently in.
 *
 * The affinity level specific handlers are called in descending order i.e. from
 * the highest to the lowest affinity level implemented by the platform because
 * to turn on affinity level X it is necessary to turn on affinity level X + 1
 * first.
 ******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
		   entry_point_info_t *ep,
		   int start_afflvl,
		   int end_afflvl)
{
	int rc;
	mpidr_aff_map_nodes_t target_cpu_nodes;

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->affinst_on &&
			psci_plat_pm_ops->affinst_on_finish);

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect.
	 */
	rc = psci_get_aff_map_nodes(target_cpu,
				    start_afflvl,
				    end_afflvl,
				    target_cpu_nodes);
	assert(rc == PSCI_E_SUCCESS);

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, the system topology
	 * has been snapshotted and state management can be done safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  target_cpu_nodes);

	/*
	 * Generic management: Ensure that the cpu is off before it can
	 * be turned on.
	 */
	rc = cpu_on_validate_state(psci_get_state(
				    target_cpu_nodes[MPIDR_AFFLVL0]));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);

	/* Perform generic, architecture and platform specific handling. */
	rc = psci_call_on_handlers(target_cpu_nodes,
				   start_afflvl,
				   end_afflvl,
				   target_cpu);

	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	/*
	 * This call updates the state of each affinity instance
	 * corresponding to the mpidr in the range of affinity levels
	 * specified.
	 */
	if (rc == PSCI_E_SUCCESS) {
		psci_do_afflvl_state_mgmt(start_afflvl,
					  end_afflvl,
					  target_cpu_nodes,
					  PSCI_STATE_ON_PENDING);

		/*
		 * Store the re-entry information for the non-secure world.
		 */
		cm_init_context(target_cpu, ep);
	}

exit:
	/*
	 * Release the lock corresponding to each affinity level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  target_cpu_nodes);

	return rc;
}

/*******************************************************************************
 * The following functions finish an earlier affinity power on request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been explicitly woken up by another cpu */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_ON_PENDING);

	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */

	/* Get the physical state of this cpu */
	plat_state = get_phys_state(state);
	psci_plat_pm_ops->affinst_on_finish(cpu_node->level,
					    plat_state);

	/*
	 * Arch. management: Enable the data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	bl31_arch_setup();

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering the normal world */
	dcsw_op_louis(DCCSW);
}

static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node)
{
	unsigned int plat_state;

	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the cluster e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which this cluster was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */
	plat_state = psci_get_phys_state(cluster_node);
	psci_plat_pm_ops->affinst_on_finish(cluster_node->level,
					    plat_state);
}

static void psci_afflvl2_on_finish(aff_map_node_t *system_node)
{
	unsigned int plat_state;

	/* Cannot go beyond this affinity level */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Currently, there are no architectural actions to perform
	 * at the system level.
	 */

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the system e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which the system was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */
	plat_state = psci_get_phys_state(system_node);
	psci_plat_pm_ops->affinst_on_finish(system_node->level,
					    plat_state);
}

const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
	psci_afflvl0_on_finish,
	psci_afflvl1_on_finish,
	psci_afflvl2_on_finish,
};
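/*
 * Like psci_afflvl_on_handlers above, this table is ordered by affinity
 * level so that the common finisher routine in psci_common.c can select
 * the handler matching each level it finishes.
 */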