/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * Helper functions for the CPU level spinlocks
 */
static inline void psci_spin_lock_cpu(unsigned int idx)
{
	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock);
}

static inline void psci_spin_unlock_cpu(unsigned int idx)
{
	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock);
}

/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
	if (aff_state == AFF_STATE_ON)
		return PSCI_E_ALREADY_ON;

	if (aff_state == AFF_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(aff_state == AFF_STATE_OFF);
	return PSCI_E_SUCCESS;
}
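
/*
 * Informal note (not in the original source): the affinity info state of a
 * core moves OFF -> ON_PENDING (set in psci_cpu_on_start() below) -> ON (set
 * by the target core on its warm boot path) and back to OFF in
 * psci_do_cpu_off(). cpu_on_validate_state() therefore only lets a CPU_ON
 * request proceed when the target core is fully OFF.
 */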

/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural and platform setup and
 * state management needed to power on the target cpu, e.g. it will ensure
 * that enough information is stashed for it to resume execution in the
 * non-secure security state.
 *
 * The state of all the relevant power domains is changed after calling the
 * platform handler, as the handler can return an error.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
		      const entry_point_info_t *ep)
{
	int rc;
	aff_info_state_t target_aff_state;
	int ret = plat_core_pos_by_mpidr(target_cpu);
	unsigned int target_idx = (unsigned int)ret;

	/* Calling function must supply valid input arguments */
	assert(ret >= 0);
	assert(ep != NULL);

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_on_finish != NULL));

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu is off before it is
	 * turned on.
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU - this can occur under the following
	 * conditions:
	 * - the target CPU is in another cluster from the current
	 * - the target CPU was the last CPU to shutdown on its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of the
	 * target CPU's shutdown was not seen by the current CPU's cluster. And
	 * so the cache may contain stale data for the target CPU.
	 */
	flush_cpu_data_by_index(target_idx,
				psci_svc_cpu_data.aff_info_state);
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it is expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL))
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx,
				psci_svc_cpu_data.aff_info_state);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()), could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx,
					psci_svc_cpu_data.aff_info_state);

		assert(psci_get_aff_info_state_by_idx(target_idx) ==
		       AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 * Plat. management: Give the platform the current state of the
	 * target cpu to allow it to perform the necessary steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_INTERN_FAIL));

	if (rc == PSCI_E_SUCCESS)
		/* Store the re-entry information for the non-secure world. */
		cm_init_context_by_index(target_idx, ep);
	else {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx,
					psci_svc_cpu_data.aff_info_state);
	}

exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}
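
/*
 * Illustrative caller (a sketch, not part of the original file): in TF-A the
 * PSCI_CPU_ON SMC handler first validates the caller-supplied entry point
 * and context id, and only then hands over to psci_cpu_on_start(). Roughly:
 *
 *	entry_point_info_t ep;
 *	int rc = psci_validate_entry_point(&ep, entrypoint, context_id);
 *
 *	if (rc == PSCI_E_SUCCESS)
 *		rc = psci_cpu_on_start(target_cpu, &ep);
 */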

/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();
#endif
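
	/*
	 * Informal note (not in the original source): when the platform is
	 * built with HW_ASSISTED_COHERENCY or WARMBOOT_ENABLE_DCACHE_EARLY,
	 * the data cache is already enabled this early in the warm boot
	 * path, so the explicit power-up cache maintenance above is
	 * compiled out.
	 */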

	/*
	 * Plat. management: Perform any platform specific actions which
	 * can only be done with the cpu and the cluster guaranteed to
	 * be coherent.
	 */
	if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL)
		psci_plat_pm_ops->pwr_domain_on_finish_late(state_info);

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with psci_cpu_on_start(), it can be
	 * released immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL))
		psci_spd_pm->svc_on_finish(0);

	PUBLISH_EVENT(psci_cpu_on_finish);

	/*
	 * Populate the mpidr field within the cpu node array. This needs
	 * to be done only once.
	 */
	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
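
/*
 * Informal note (not part of the original file): psci_cpu_on_finish() runs
 * on the target core itself. On its warm boot the core enters BL31, and the
 * common PSCI warm boot entry code distinguishes a CPU_ON wake-up from a
 * suspend wake-up (this file's finisher handles the former), invoking it
 * through the common finisher routine in psci_common.c mentioned above.
 */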