/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <string.h>
#include "psci_private.h"

/******************************************************************************
 * Construct the psci_power_state to request power OFF at all power levels.
 ******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
	unsigned int lvl;

	for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}

/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs the generic, architectural and platform
 * setup and state management required to turn OFF that power domain and the
 * domains below it. e.g. For a cpu that's to be powered OFF, it could mean
 * programming the power controller, whereas for a cluster that's to be
 * powered off, it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster
 * and also program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
	int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
	psci_power_state_t state_info;

	/*
	 * This function must only be called on platforms where the
	 * CPU_OFF platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_off);

	/* Construct the psci_power_state for CPU_OFF */
	psci_set_power_off_state(&state_info);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * has been snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. Assume that the SPD always reports an
	 * E_DENIED error if the SP refuses to power down.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level up to end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush the cache line so that the timestamp update is reflected in
	 * memory even if the CPU powers down.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management: initiate the power down sequence for the deepest
	 * power level that will be turned OFF. This runs the CPU-specific
	 * power down handlers and any cache maintenance required before this
	 * cpu exits coherency.
	 */
	psci_do_pwrdown_sequence(psci_find_max_off_lvl(&state_info));

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_off(&state_info);

#if ENABLE_PSCI_STAT
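	/* Start accounting for the residency time in the low power state. */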
	plat_psci_stat_accounting_start(&state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the reverse
	 * of the order in which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Check if all actions needed to safely power down this cpu have
	 * successfully completed.
	 */
	if (rc == PSCI_E_SUCCESS) {
		/*
		 * Set the affinity info state to OFF. When caches are disabled,
		 * this writes directly to main memory, so cache maintenance is
		 * required to ensure that later cached reads of aff_info_state
		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
		 * update to the affinity info state prior to cache line
		 * invalidation.
		 */
		psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
		psci_set_aff_info_state(AFF_STATE_OFF);
		psci_dsbish();
		psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with caches off. We assume this
		 * timestamp can only be read from the current CPU and that the
		 * timestamp cache line will be flushed before returning to the
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
			/* This function must not return */
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
		} else {
			/*
			 * Enter a wfi loop which will allow the power
			 * controller to physically power down this cpu.
			 */
			psci_power_down_wfi();
		}
	}

	return rc;
}
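
/******************************************************************************
 * Illustrative only (not compiled): a minimal sketch of how the PSCI SMC
 * layer is expected to drive psci_do_cpu_off(), assuming a caller along the
 * lines of the psci_cpu_off() handler in psci_main.c. The exact body of the
 * real handler may differ from this sketch.
 ******************************************************************************/
#if 0
int psci_cpu_off(void)
{
	int rc;
	unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;

	/*
	 * Attempt to power off this cpu and, where possible, power domains at
	 * higher levels up to PLAT_MAX_PWR_LVL. On success this call does not
	 * return: the cpu enters its final wfi.
	 */
	rc = psci_do_cpu_off(target_pwrlvl);

	/* The only expected failure is a denial by the Secure Payload. */
	assert((rc == PSCI_E_SUCCESS) || (rc == PSCI_E_DENIED));

	return rc;
}
#endif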