/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
					     unsigned int end_pwrlvl)
{
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	psci_power_state_t state_info;

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * Find out which retention states this CPU has exited, up to the
	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
	 * state as a result of state coordination amongst other CPUs post wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  const entry_point_info_t *ep,
					  const psci_power_state_t *state_info)
{
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

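	/*
	 * Notify any subscribers registered with the EL3 pubsub framework
	 * that a power-down suspend sequence is starting on this CPU.
	 */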
	PUBLISH_EVENT(psci_suspend_pwrdown_start);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up
	 * with the data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it is expected to assert within the handler.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush the cache line so that the timestamp update is reflected in
	 * memory even if the CPU powers down.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate power down sequence.
	 * TODO: Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_sequence(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels up to the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each power
 * domain level up to the target power domain level. It then performs the
 * generic, architectural and platform setup and state management required to
 * suspend that power domain level and the power domain levels below it.
 * e.g. For a cpu that is to be suspended, this could mean programming the
 * power controller, whereas for a cluster that is to be suspended it will call
 * the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster, and also program the power
 * controller.
 *
 * All the required parameter checks are performed at the beginning and, once
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(const entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
	int skip_wfi = 0;
	unsigned int idx = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL));

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * is snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1() != 0U) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

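	/*
	 * For a power-down state, prepare this CPU for power off: save the
	 * non-secure re-entry context, notify the Secure Payload Dispatcher
	 * and flush the caches before handing control to the platform.
	 */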
	if (is_power_down_state != 0U)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);

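	/*
	 * A wake-up interrupt was already pending when the locks were taken,
	 * so abandon the suspend and resume execution without entering wfi.
	 */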
	if (skip_wfi == 1)
		return;

	if (is_power_down_state != 0U) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}

/*******************************************************************************
 * The following function finishes an earlier suspend request. It is called by
 * the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert((psci_get_aff_info_state() == AFF_STATE_ON) &&
		(is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

#if ENABLE_PAUTH
	/* Store APIAKey_EL1 key */
	set_cpu_data(apiakey[0], read_apiakeylo_el1());
	set_cpu_data(apiakey[1], read_apiakeyhi_el1());
#endif /* ENABLE_PAUTH */

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within the handler.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

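	/*
	 * Notify any subscribers registered with the EL3 pubsub framework
	 * that the power-down suspend sequence has completed on this CPU.
	 */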
	PUBLISH_EVENT(psci_suspend_pwrdown_finish);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
336