/*
 * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
8
9 #include <assert.h>
10
11 #include <common/debug.h>
12 #include <common/runtime_svc.h>
13 #include <lib/mmio.h>
14 #include <lib/psci/psci.h>
15 #include <plat/arm/common/plat_arm.h>
16 #include <plat/common/platform.h>
17 #include <plat_arm.h>
18
19 #include <plat_private.h>
20 #include <pm_defs.h>
21
22 #define PM_RET_ERROR_NOFEATURE U(19)
23 #define ALWAYSTRUE true
24 #define LINEAR_MODE BIT(1)
25
26 static uintptr_t _sec_entry;
27
/*
 * Enter standby on the calling CPU: drain outstanding memory accesses,
 * then wait for an interrupt. The requested cpu_state is not consulted;
 * wfi is the only standby mechanism implemented here.
 */
static void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	(void)cpu_state;

	/* Ensure all prior transactions complete before suspending. */
	dsb();
	wfi();
}
33
34 #define MPIDR_MT_BIT (24)
35
/*
 * Power on the core identified by mpidr without PMU/PMC assistance.
 *
 * Enables the target cluster clock, requests the cluster power state via
 * the APU PCLI interface, cycles the core reset while programming RVBAR
 * with the warm-boot entry point saved by plat_setup_psci_ops(), and
 * finally requests the per-core power-state change.
 *
 * Returns PSCI_E_SUCCESS on success, PSCI_E_INTERN_FAIL for an invalid
 * MPIDR; panics on an out-of-range cluster number.
 */
static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
{
	int32_t core_pos = plat_core_pos_by_mpidr(mpidr);
	uint32_t cpu_id, cpu, cluster;
	uintptr_t apu_cluster_base, apu_pcli_base, apu_pcli_cluster;
	uintptr_t rst_apu_cluster;

	/*
	 * Validate the raw return value BEFORE masking off the MT bit.
	 * The previous code masked first, so an error return of -1
	 * (0xFFFFFFFF) became 0xFEFFFFFF and the "== -1" comparison was
	 * dead code: invalid MPIDRs were never rejected.
	 */
	if (core_pos < 0) {
		return PSCI_E_INTERN_FAIL;
	}

	cpu_id = (uint32_t)core_pos & ~BIT(MPIDR_MT_BIT);
	cpu = cpu_id % PLATFORM_CORE_COUNT_PER_CLUSTER;
	cluster = cpu_id / PLATFORM_CORE_COUNT_PER_CLUSTER;

	VERBOSE("%s: mpidr: 0x%lx, cpuid: %x, cpu: %x, cluster: %x\n",
		__func__, mpidr, cpu_id, cpu, cluster);

	if (cluster > 3) {
		panic();
	}

	rst_apu_cluster = PSX_CRF + RST_APU0_OFFSET + ((uint64_t)cluster * 0x4U);
	apu_pcli_cluster = APU_PCLI + APU_PCLI_CLUSTER_OFFSET + ((uint64_t)cluster * APU_PCLI_CLUSTER_STEP);
	apu_cluster_base = APU_CLUSTER0 + ((uint64_t)cluster * APU_CLUSTER_STEP);

	/* Enable clock */
	mmio_setbits_32(PSX_CRF + ACPU0_CLK_CTRL + ((uint64_t)cluster * 0x4U), ACPU_CLK_CTRL_CLKACT);

	/* Enable cluster states */
	mmio_setbits_32(apu_pcli_cluster + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_SET);
	mmio_setbits_32(apu_pcli_cluster + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);

	/* assert core reset */
	mmio_setbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));

	/* program RVBAR with the warm-boot entry point (low/high halves) */
	mmio_write_32(apu_cluster_base + APU_RVBAR_L_0 + (cpu << 3),
		      (uint32_t)_sec_entry);
	mmio_write_32(apu_cluster_base + APU_RVBAR_H_0 + (cpu << 3),
		      _sec_entry >> 32);

	/* de-assert core reset */
	mmio_clrbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));

	/* clear cluster resets */
	mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_WARM_RESET);
	mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_COLD_RESET);

	/* request the per-core power-state change through PCLI */
	apu_pcli_base = APU_PCLI + (APU_PCLI_CPU_STEP * cpu) +
			(APU_PCLI_CLUSTER_CPU_STEP * cluster);

	mmio_write_32(apu_pcli_base + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_CLEAR);
	mmio_write_32(apu_pcli_base + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);

	return PSCI_E_SUCCESS;
}
89
/*
 * Core power-down hook: disable the GIC CPU interface so the core stops
 * receiving interrupts. target_state is not consulted.
 */
static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
	(void)target_state;

	plat_gic_cpuif_disable();
}
94
zynqmp_nopmu_system_reset(void)95 static void __dead2 zynqmp_nopmu_system_reset(void)
96 {
97 while (ALWAYSTRUE) {
98 wfi();
99 }
100 }
101
zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)102 static int32_t zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)
103 {
104 VERBOSE("Validate ns_entry point %lx\n", ns_entrypoint);
105
106 if ((ns_entrypoint) != 0U) {
107 return PSCI_E_SUCCESS;
108 } else {
109 return PSCI_E_INVALID_ADDRESS;
110 }
111 }
112
/*
 * Late power-on hook, run on the freshly powered core: perform per-CPU
 * GIC initialization, then enable this core's CPU interface.
 */
static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	(void)target_state;

	plat_gic_pcpu_init();
	plat_gic_cpuif_enable();
}
118
zynqmp_system_off(void)119 static void __dead2 zynqmp_system_off(void)
120 {
121 while (ALWAYSTRUE) {
122 wfi();
123 }
124 }
125
/*
 * Power-state validation hook: every requested state is accepted as-is;
 * req_state is left untouched for the generic PSCI layer to interpret.
 */
static int32_t zynqmp_validate_power_state(uint32_t power_state, psci_power_state_t *req_state)
{
	(void)power_state;
	(void)req_state;

	return PSCI_E_SUCCESS;
}
130
/*
 * PSCI operation table used when no PMU/PMC firmware is present: all
 * hooks above are implemented directly in EL3 via MMIO accesses.
 */
static const struct plat_psci_ops _nopmc_psci_ops = {
	.cpu_standby = zynqmp_cpu_standby,
	.pwr_domain_on = zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off = zynqmp_nopmu_pwr_domain_off,
	.system_reset = zynqmp_nopmu_system_reset,
	.validate_ns_entrypoint = zynqmp_validate_ns_entrypoint,
	.pwr_domain_on_finish = zynqmp_pwr_domain_on_finish,
	.system_off = zynqmp_system_off,
	.validate_power_state = zynqmp_validate_power_state,
};
141
142 /*******************************************************************************
143 * Export the platform specific power ops.
144 ******************************************************************************/
plat_setup_psci_ops(uintptr_t sec_entrypoint,const struct plat_psci_ops ** psci_ops)145 int32_t plat_setup_psci_ops(uintptr_t sec_entrypoint,
146 const struct plat_psci_ops **psci_ops)
147 {
148 _sec_entry = sec_entrypoint;
149
150 VERBOSE("Setting up entry point %lx\n", _sec_entry);
151
152 *psci_ops = &_nopmc_psci_ops;
153
154 return 0;
155 }
156
/* SiP service setup hook: nothing to initialize here; always succeeds. */
int sip_svc_setup_init(void)
{
	return 0;
}
161
no_pm_ioctl(uint32_t device_id,uint32_t ioctl_id,uint32_t arg1,uint32_t arg2)162 static int32_t no_pm_ioctl(uint32_t device_id, uint32_t ioctl_id,
163 uint32_t arg1, uint32_t arg2)
164 {
165 int32_t ret = 0;
166 VERBOSE("%s: ioctl_id: %x, arg1: %x\n", __func__, ioctl_id, arg1);
167
168 switch (ioctl_id) {
169 case IOCTL_OSPI_MUX_SELECT:
170 if ((arg1 == 0) || (arg1 == 1)) {
171 mmio_clrsetbits_32(SLCR_OSPI_QSPI_IOU_AXI_MUX_SEL, LINEAR_MODE,
172 (arg1 ? LINEAR_MODE : 0));
173 } else {
174 ret = PM_RET_ERROR_ARGS;
175 }
176 break;
177 case IOCTL_UFS_TXRX_CFGRDY_GET:
178 ret = (int32_t) mmio_read_32(PMXC_IOU_SLCR_TX_RX_CONFIG_RDY);
179 break;
180 case IOCTL_UFS_SRAM_CSR_SEL:
181 if (arg1 == 1) {
182 ret = (int32_t) mmio_read_32(PMXC_IOU_SLCR_SRAM_CSR);
183 } else if (arg1 == 0) {
184 mmio_write_32(PMXC_IOU_SLCR_SRAM_CSR, arg2);
185 }
186 break;
187 case IOCTL_USB_SET_STATE:
188 break;
189 default:
190 ret = PM_RET_ERROR_NOFEATURE;
191 break;
192 }
193
194 return ret;
195 }
196
/*
 * Handle PM service SMCs when no PM firmware is running.
 *
 * x1 and x2 each carry two packed 32-bit PM arguments (low word first);
 * they are unpacked into arg[0..3] before dispatch. x3, x4, cookie and
 * flags are unused by the supported calls. Every path returns through an
 * SMC_RETx macro, so the function never falls through.
 */
static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			      uint64_t x4, void *cookie, void *handle, uint64_t flags)
{
	int32_t ret;
	uint32_t arg[4], api_id;

	/* Unpack four 32-bit PM arguments from the two 64-bit registers. */
	arg[0] = (uint32_t)x1;
	arg[1] = (uint32_t)(x1 >> 32);
	arg[2] = (uint32_t)x2;
	arg[3] = (uint32_t)(x2 >> 32);

	api_id = smc_fid & FUNCID_NUM_MASK;
	VERBOSE("%s: smc_fid: %x, api_id=0x%x\n", __func__, smc_fid, api_id);

	switch (api_id) {
	case PM_IOCTL:
	{
		ret = no_pm_ioctl(arg[0], arg[1], arg[2], arg[3]);
		/* Firmware driver expects return code in upper 32 bits and
		 * status in lower 32 bits.
		 * status is always SUCCESS(0) for mmio low level register
		 * r/w calls and return value is the value returned from
		 * no_pm_ioctl
		 */
		SMC_RET1(handle, ((uint64_t)ret << 32));
	}
	case PM_GET_CHIPID:
	{
		uint32_t idcode, version_type;

		/* Read the silicon IDCODE and version from the PMC TAP. */
		idcode = mmio_read_32(PMC_TAP);
		version_type = mmio_read_32(PMC_TAP_VERSION);
		SMC_RET2(handle, ((uint64_t)idcode << 32), version_type);
	}
	default:
		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
236
/*
 * Top-level SMC dispatch entry for PM services: with no PM firmware
 * present, every call is delegated to the no-PM fallback handler.
 */
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
		     void *cookie, void *handle, uint64_t flags)
{
	return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}
242