1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020-2024 Intel Corporation
4 */
5
6 #include "ivpu_drv.h"
7 #include "ivpu_fw.h"
8 #include "ivpu_hw.h"
9 #include "ivpu_hw_37xx_reg.h"
10 #include "ivpu_hw_40xx_reg.h"
11 #include "ivpu_hw_btrs.h"
12 #include "ivpu_hw_ip.h"
13 #include "ivpu_hw_reg_io.h"
14 #include "ivpu_mmu.h"
15 #include "ivpu_pm.h"
16
/* Max time to wait for the power island status to change, in microseconds */
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)

/* Magic values written to the CPU timer block to unlock / reset the watchdog */
#define TIM_SAFE_ENABLE 0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE 0xffffffff

/* 37xx interrupt concentrator block (ICB) status 0 bits serviced by the driver */
#define ICB_0_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

/* 37xx ICB status 1 bits serviced by the driver */
#define ICB_1_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
			     (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

/* Combined 64-bit view: ICB status 1 in the upper 32 bits, status 0 in the lower */
#define ICB_0_1_IRQ_MASK_37XX ((((u64)ICB_1_IRQ_MASK_37XX) << 32) | ICB_0_IRQ_MASK_37XX)

/* 40xx ICB status 0 bits serviced by the driver */
#define ICB_0_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

/* 40xx ICB status 1 bits serviced by the driver */
#define ICB_1_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
			     (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

/* Combined 64-bit view: ICB status 1 in the upper 32 bits, status 0 in the lower */
#define ICB_0_1_IRQ_MASK_40XX ((((u64)ICB_1_IRQ_MASK_40XX) << 32) | ICB_0_IRQ_MASK_40XX)

/* 37xx interface firewall violation interrupt sources to enable */
#define ITF_FIREWALL_VIOLATION_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
					  (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))

/* 40xx interface firewall violation interrupt sources to enable */
#define ITF_FIREWALL_VIOLATION_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
					  (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
65
/* Poll (up to 100 us) until the 37xx always-on (AON) reset bit reads back 0 */
static int wait_for_ip_bar(struct ivpu_device *vdev)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
70
host_ss_rst_clr(struct ivpu_device * vdev)71 static void host_ss_rst_clr(struct ivpu_device *vdev)
72 {
73 u32 val = 0;
74
75 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
76 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
77 val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
78
79 REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
80 }
81
/* Verify the 37xx host SS NoC qreqn TOP_SOCMMIO field equals exp_val */
static int host_ss_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	return REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
91
/* Verify the 40xx host SS NoC qreqn TOP_SOCMMIO field equals exp_val */
static int host_ss_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

	return REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
101
/* Generation dispatch for the host SS NoC qreqn check */
static int host_ss_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	return ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ?
	       host_ss_noc_qreqn_check_37xx(vdev, exp_val) :
	       host_ss_noc_qreqn_check_40xx(vdev, exp_val);
}
109
/* Verify the 37xx host SS NoC qacceptn TOP_SOCMMIO field equals exp_val */
static int host_ss_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);

	return REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
119
/* Verify the 40xx host SS NoC qacceptn TOP_SOCMMIO field equals exp_val */
static int host_ss_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);

	return REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
129
/* Generation dispatch for the host SS NoC qacceptn check */
static int host_ss_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	return ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ?
	       host_ss_noc_qacceptn_check_37xx(vdev, exp_val) :
	       host_ss_noc_qacceptn_check_40xx(vdev, exp_val);
}
137
/* Verify the 37xx host SS NoC qdeny TOP_SOCMMIO field equals exp_val */
static int host_ss_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);

	return REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
147
/* Verify the 40xx host SS NoC qdeny TOP_SOCMMIO field equals exp_val */
static int host_ss_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);

	return REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
157
/* Generation dispatch for the host SS NoC qdeny check */
static int host_ss_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	return ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ?
	       host_ss_noc_qdeny_check_37xx(vdev, exp_val) :
	       host_ss_noc_qdeny_check_40xx(vdev, exp_val);
}
165
/* Verify both monitored 37xx TOP NoC qreqn fields equal exp_val */
static int top_noc_qrenqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
	bool ok = REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
176
/* Verify both monitored 40xx TOP NoC qreqn fields equal exp_val */
static int top_noc_qrenqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
	bool ok = REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
187
/* Generation dispatch for the TOP NoC qreqn check */
static int top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	return ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ?
	       top_noc_qrenqn_check_37xx(vdev, exp_val) :
	       top_noc_qrenqn_check_40xx(vdev, exp_val);
}
195
/*
 * Bring the host subsystem into a known state after power-up.
 *
 * On 37xx, first wait for the NPU IP always-on block to leave reset, then
 * release the CPR resets. On all generations, verify the NoC handshake
 * signals (qreqn/qacceptn/qdeny) are all deasserted (0).
 *
 * Return: 0 on success, negative errno on poll timeout or handshake mismatch.
 */
int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev)
{
	int ret;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ret = wait_for_ip_bar(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
			return ret;
		}
		host_ss_rst_clr(vdev);
	}

	ret = host_ss_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qacceptn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check %d\n", ret);

	return ret;
}
227
/* Set or clear the 37xx idle generator enable bit */
static void idle_gen_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN);

	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, reg);

	REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, reg);
}
239
/* Set or clear the 40xx idle generator enable bit */
static void idle_gen_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);

	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, reg);

	REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, reg);
}
251
ivpu_hw_ip_idle_gen_enable(struct ivpu_device * vdev)252 void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev)
253 {
254 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
255 idle_gen_drive_37xx(vdev, true);
256 else
257 idle_gen_drive_40xx(vdev, true);
258 }
259
ivpu_hw_ip_idle_gen_disable(struct ivpu_device * vdev)260 void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
261 {
262 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
263 idle_gen_drive_37xx(vdev, false);
264 else
265 idle_gen_drive_40xx(vdev, false);
266 }
267
/*
 * Program the 50xx power island enable delays (post/post1/post2) and the
 * status sampling delay. Delay units are HW-defined ticks — see the 50xx
 * register specification for the exact time base (TODO confirm).
 */
static void
pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
{
	u32 val;

	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);

	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, STATUS_DLY, status, val);
	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, val);
}
283
/* Set or clear the 37xx MSS CPU power island trickle-charge enable */
static void pwr_island_trickle_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, reg);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, reg);
}
295
/* Set or clear the 40xx CSS CPU power island trickle-charge enable */
static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);

	/* Give the trickle charge time to settle before the island is fully enabled */
	if (enable)
		ndelay(500);
}
310
/*
 * Set or clear the 37xx MSS CPU power island enable.
 *
 * Fix: this function is dispatched for IVPU_HW_IP_37XX (see pwr_island_enable())
 * but was programming the 40xx register (VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0,
 * CSS_CPU field). Use the 37xx register and MSS_CPU field, consistent with
 * pwr_island_trickle_drive_37xx() and the 37xx path of
 * wait_for_pwr_island_status(). The post-disable delay belongs to the 40xx
 * variant, which pairs it with the trickle-charge delay.
 */
static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
325
/*
 * Set or clear the 40xx CSS CPU power island enable.
 *
 * Fix: this function is dispatched for non-37xx generations (see
 * pwr_island_enable()) but was programming the 37xx register
 * (VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU field). Use the 40xx
 * register and CSS_CPU field, consistent with pwr_island_trickle_drive_40xx()
 * and the 40xx path of wait_for_pwr_island_status(). The 500 ns settle delay
 * after disabling the island also belongs here, mirroring the post-enable
 * delay in the 40xx trickle drive.
 */
static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);

	/* Let the island settle after it is switched off */
	if (!enable)
		ndelay(500);
}
337
/*
 * Power up the CPU power island: the trickle charge is driven first, then the
 * full island enable — this order is required by the power-up sequence.
 */
static void pwr_island_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		pwr_island_trickle_drive_37xx(vdev, true);
		pwr_island_drive_37xx(vdev, true);
	} else {
		pwr_island_trickle_drive_40xx(vdev, true);
		pwr_island_drive_40xx(vdev, true);
	}
}
348
/*
 * Poll the power island status field until it equals exp_val or
 * PWR_ISLAND_STATUS_TIMEOUT_US elapses.
 *
 * Skipped entirely when the punit_disabled workaround is active, since the
 * status cannot be trusted in that configuration (per the WA flag).
 */
static int wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, exp_val,
				     PWR_ISLAND_STATUS_TIMEOUT_US);
	else
		return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, exp_val,
				     PWR_ISLAND_STATUS_TIMEOUT_US);
}
361
/* Set or clear the 37xx MSS CPU power island isolation bit */
static void pwr_island_isolation_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);

	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, reg);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, reg);
}
373
/* Set or clear the 40xx CSS CPU power island isolation bit */
static void pwr_island_isolation_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);

	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, reg);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, reg);
}
385
/* Generation dispatch for the power island isolation drive */
static void pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
	void (*drive)(struct ivpu_device *vdev, bool enable) =
		ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ? pwr_island_isolation_drive_37xx :
							  pwr_island_isolation_drive_40xx;

	drive(vdev, enable);
}
393
/* Remove the power island isolation (part of the power-up sequence) */
static void pwr_island_isolation_disable(struct ivpu_device *vdev)
{
	pwr_island_isolation_drive(vdev, false);
}
398
/* Gate or ungate the 37xx host subsystem clocks (TOP_NOC, DSS and MSS masters) */
static void host_ss_clk_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);

	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, reg);
	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, reg);
	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, reg);

	REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, reg);
}
415
/* Gate or ungate the 40xx host subsystem clocks (TOP_NOC, DSS and CSS masters) */
static void host_ss_clk_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);

	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, reg);
	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, reg);
	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, reg);

	REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, reg);
}
432
/* Generation dispatch for the host subsystem clock drive */
static void host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
	void (*drive)(struct ivpu_device *vdev, bool enable) =
		ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ? host_ss_clk_drive_37xx :
							  host_ss_clk_drive_40xx;

	drive(vdev, enable);
}
440
/* Ungate the host subsystem clocks (part of the power-up sequence) */
static void host_ss_clk_enable(struct ivpu_device *vdev)
{
	host_ss_clk_drive(vdev, true);
}
445
/* Assert or deassert the 37xx host subsystem resets (TOP_NOC, DSS and MSS masters) */
static void host_ss_rst_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);

	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, reg);
	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, reg);
	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, reg);

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, reg);
}
462
/* Assert or deassert the 40xx host subsystem resets (TOP_NOC, DSS and CSS masters) */
static void host_ss_rst_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);

	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, reg);
	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, reg);
	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, reg);

	REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, reg);
}
479
/* Generation dispatch for the host subsystem reset drive */
static void host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
	void (*drive)(struct ivpu_device *vdev, bool enable) =
		ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ? host_ss_rst_drive_37xx :
							  host_ss_rst_drive_40xx;

	drive(vdev, enable);
}
487
/* Drive the host subsystem resets (part of the power-up sequence) */
static void host_ss_rst_enable(struct ivpu_device *vdev)
{
	host_ss_rst_drive(vdev, true);
}
492
/* Drive the 37xx host SS NoC qreqn TOP_SOCMMIO request bit */
static void host_ss_noc_qreqn_top_socmmio_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, reg);
	REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, reg);
}
503
/* Drive the 40xx host SS NoC qreqn TOP_SOCMMIO request bit */
static void host_ss_noc_qreqn_top_socmmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

	reg = enable ? REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, reg) :
		       REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, reg);
	REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, reg);
}
514
/* Generation dispatch for the host SS NoC TOP_SOCMMIO qreqn drive */
static void host_ss_noc_qreqn_top_socmmio_drive(struct ivpu_device *vdev, bool enable)
{
	void (*drive)(struct ivpu_device *vdev, bool enable) =
		ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ? host_ss_noc_qreqn_top_socmmio_drive_37xx :
							  host_ss_noc_qreqn_top_socmmio_drive_40xx;

	drive(vdev, enable);
}
522
/*
 * Drive the host SS AXI NoC handshake: request the new state via qreqn,
 * then verify the fabric acknowledged it (qacceptn follows the request,
 * qdeny stays deasserted).
 *
 * Return: 0 on success, -EIO if the handshake did not complete.
 */
static int host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;

	host_ss_noc_qreqn_top_socmmio_drive(vdev, enable);

	ret = host_ss_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed HOST SS NOC QACCEPTN check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed HOST SS NOC QDENY check: %d\n", ret);

	return ret;
}
541
/* Drive the 40xx TOP NoC qreqn bits for CPU_CTRL and HOSTIF_L2CACHE */
static void top_noc_qreqn_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);

	reg = enable ? REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, reg) :
		       REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, reg);
	reg = enable ? REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, reg) :
		       REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, reg);

	REGV_WR32(VPU_40XX_TOP_NOC_QREQN, reg);
}
556
/* Drive the 37xx TOP NoC qreqn bits for CPU_CTRL and HOSTIF_L2CACHE */
static void top_noc_qreqn_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);

	reg = enable ? REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, reg) :
		       REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, reg);
	reg = enable ? REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, reg) :
		       REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, reg);

	REGV_WR32(VPU_37XX_TOP_NOC_QREQN, reg);
}
571
/* Generation dispatch for the TOP NoC qreqn drive */
static void top_noc_qreqn_drive(struct ivpu_device *vdev, bool enable)
{
	void (*drive)(struct ivpu_device *vdev, bool enable) =
		ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ? top_noc_qreqn_drive_37xx :
							  top_noc_qreqn_drive_40xx;

	drive(vdev, enable);
}
579
/* Enable the host SS AXI NoC path. Return: 0 on success, -EIO on handshake failure. */
int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev)
{
	return host_ss_axi_drive(vdev, true);
}
584
/* Verify both monitored 37xx TOP NoC qacceptn fields equal exp_val */
static int top_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);
	bool ok = REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
595
/* Verify both monitored 40xx TOP NoC qacceptn fields equal exp_val */
static int top_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
	bool ok = REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
606
/* Generation dispatch for the TOP NoC qacceptn check */
static int top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	return ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ?
	       top_noc_qacceptn_check_37xx(vdev, exp_val) :
	       top_noc_qacceptn_check_40xx(vdev, exp_val);
}
614
/* Verify both monitored 37xx TOP NoC qdeny fields equal exp_val */
static int top_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);
	bool ok = REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
625
/* Verify both monitored 40xx TOP NoC qdeny fields equal exp_val */
static int top_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
	bool ok = REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
636
/* Generation dispatch for the TOP NoC qdeny check */
static int top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	return ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX ?
	       top_noc_qdeny_check_37xx(vdev, exp_val) :
	       top_noc_qdeny_check_40xx(vdev, exp_val);
}
644
/*
 * Drive the TOP NoC handshake: request the new state via qreqn, then verify
 * qacceptn follows the request and qdeny stays deasserted.
 *
 * Return: 0 on success, -EIO if the handshake did not complete.
 */
static int top_noc_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;

	top_noc_qreqn_drive(vdev, enable);

	ret = top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed TOP NOC QACCEPTN check: %d\n", ret);
		return ret;
	}

	ret = top_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed TOP NOC QDENY check: %d\n", ret);

	return ret;
}
663
/* Enable the TOP NoC path. Return: 0 on success, -EIO on handshake failure. */
int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev)
{
	return top_noc_drive(vdev, true);
}
668
/* Set or clear the 37xx DPU_ACTIVE bit */
static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);

	reg = enable ? REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, reg) :
		       REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, reg);

	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, reg);
}
680
/*
 * Program per-device power island delays. Only applies to 50xx and later IP;
 * earlier generations return immediately. The delay values depend on whether
 * the PLL runs at the high profiling frequency. An unknown device ID is a
 * driver bug — log it with a stack dump and leave the HW defaults in place.
 */
static void pwr_island_delay_set(struct ivpu_device *vdev)
{
	bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
	u32 post, post1, post2, status;

	if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
		return;

	switch (ivpu_device_id(vdev)) {
	case PCI_DEVICE_ID_PTL_P:
		post = high ? 18 : 0;
		post1 = 0;
		post2 = 0;
		status = high ? 46 : 3;
		break;

	default:
		dump_stack();
		ivpu_err(vdev, "Unknown device ID\n");
		return;
	}

	pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
}
705
/*
 * Power up the NPU IP power domain.
 *
 * Sequence: program island delays (50xx+), enable the island (trickle then
 * full), wait for the island status to report enabled, verify the TOP NoC
 * qreqn lines are deasserted, then ungate clocks, drop isolation and drive
 * the subsystem resets. On 37xx the DPU is additionally marked active.
 *
 * Return: 0 on success, negative errno on timeout or handshake failure.
 */
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
{
	int ret;

	pwr_island_delay_set(vdev);
	pwr_island_enable(vdev);

	ret = wait_for_pwr_island_status(vdev, 0x1);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for power island status\n");
		return ret;
	}

	ret = top_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed TOP NOC QREQN check %d\n", ret);
		return ret;
	}

	host_ss_clk_enable(vdev);
	pwr_island_isolation_disable(vdev);
	host_ss_rst_enable(vdev);

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		dpu_active_drive_37xx(vdev, true);

	return ret;
}
734
ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device * vdev)735 u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev)
736 {
737 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
738 return REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
739 else
740 return REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
741 }
742
/*
 * Configure the 37xx TCU page-table-walk snoop overrides.
 *
 * NOTE(review): 37xx fields use inverted "NOSNOOP" polarity relative to the
 * 40xx SNOOP fields below — here AW_NOSNOOP_OVERRIDE is cleared while the
 * 40xx variant sets AW_SNOOP_OVERRIDE, and the force-snoop branches are
 * likewise mirrored. Confirm against the 37xx register spec.
 */
static void ivpu_hw_ip_snoop_disable_37xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

	/* Make the override fields below take effect */
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
	val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);

	if (ivpu_is_force_snoop_enabled(vdev))
		val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
	else
		val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

	REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
757
/*
 * Configure the 40xx TCU page-table-walk snoop overrides.
 *
 * NOTE(review): 40xx fields use direct "SNOOP" polarity (the 37xx variant
 * above uses inverted NOSNOOP fields); the AR override follows the
 * force-snoop debug setting. Confirm against the 40xx register spec.
 */
static void ivpu_hw_ip_snoop_disable_40xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);

	/* Make the override fields below take effect */
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);

	if (ivpu_is_force_snoop_enabled(vdev))
		val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);

	REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
772
ivpu_hw_ip_snoop_disable(struct ivpu_device * vdev)773 void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev)
774 {
775 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
776 return ivpu_hw_ip_snoop_disable_37xx(vdev);
777 else
778 return ivpu_hw_ip_snoop_disable_40xx(vdev);
779 }
780
/* Mark the MMU stream IDs valid for the 37xx TBU0 and TBU2 read/write channels */
static void ivpu_hw_ip_tbu_mmu_enable_37xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
792
/* Mark the MMU stream IDs valid for the 40xx TBU0, TBU1 and TBU2 read/write channels */
static void ivpu_hw_ip_tbu_mmu_enable_40xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
}
806
ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device * vdev)807 void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev)
808 {
809 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
810 return ivpu_hw_ip_tbu_mmu_enable_37xx(vdev);
811 else
812 return ivpu_hw_ip_tbu_mmu_enable_40xx(vdev);
813 }
814
/*
 * Boot the 37xx SoC CPU: pulse the Leon RT reset/resume vector controls,
 * then hand the firmware entry point to the boot ROM via the loading
 * address register. Each intermediate write below is part of the required
 * boot handshake sequence — do not reorder.
 *
 * Return: always 0.
 */
static int soc_cpu_boot_37xx(struct ivpu_device *vdev)
{
	u32 val;

	val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	/* Pulse IRQI_RESUME0: set ... */
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	/* ... then clear */
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	/* Entry point shifted by 9 — the register presumably holds it at 512-byte granularity */
	val = vdev->fw->entry_point >> 9;
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	/* Setting DONE tells the boot ROM the loading address is valid */
	val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
		 vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");

	return 0;
}
842
/* Verify the 40xx CPU SS NoC qacceptn TOP_MMIO field equals exp_val */
static int cpu_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);

	return REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, reg) ? 0 : -EIO;
}
852
/* Verify the 40xx CPU SS NoC qdeny TOP_MMIO field equals exp_val */
static int cpu_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);

	return REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, reg) ? 0 : -EIO;
}
862
/* Drive the CPU NOC QREQN TOP_MMIO request bit high or low. */
static void cpu_noc_top_mmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 qreqn = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);

	if (enable)
		qreqn = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, qreqn);
	else
		qreqn = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, qreqn);

	REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, qreqn);
}
873
/*
 * Request the CPU NOC interface on/off and verify the handshake:
 * QACCEPTN must follow the request and QDENY must stay clear.
 */
static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 exp_qacceptn = enable ? 0x1 : 0x0;
	int ret;

	cpu_noc_top_mmio_drive_40xx(vdev, enable);

	ret = cpu_noc_qacceptn_check_40xx(vdev, exp_qacceptn);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = cpu_noc_qdeny_check_40xx(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}
892
/* Enable the 40xx SoC CPU NOC interface; returns 0 or a negative errno. */
static int soc_cpu_enable(struct ivpu_device *vdev)
{
	return soc_cpu_drive_40xx(vdev, true);
}
897
/*
 * Boot the 40xx SoC CPU: enable the CPU NOC interface, program the
 * firmware entry point into the VERIFICATION_ADDRESS register pair,
 * then flag the address as valid via the DONE bit.
 *
 * Return: 0 on success, negative errno if the SoC CPU could not be enabled.
 */
static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
{
	int ret;
	u32 val;
	u64 val64;

	ret = soc_cpu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
		return ret;
	}

	/* Shift the entry point into the IMAGE_LOCATION field position. */
	val64 = vdev->fw->entry_point;
	val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
	REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);

	/* Set DONE in the low word after the full 64-bit address is written. */
	val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
	val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);

	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
		 ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");

	return 0;
}
923
ivpu_hw_ip_soc_cpu_boot(struct ivpu_device * vdev)924 int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev)
925 {
926 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
927 return soc_cpu_boot_37xx(vdev);
928 else
929 return soc_cpu_boot_40xx(vdev);
930 }
931
/*
 * Disable the 37xx CPU watchdog timer.
 *
 * Each WDT register write must be preceded by a write of TIM_SAFE_ENABLE
 * to the TIM_SAFE register to unlock it.
 */
static void wdt_disable_37xx(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}
949
/*
 * Disable the 40xx CPU watchdog timer.
 *
 * Same unlock protocol as the 37xx variant: each WDT register write must
 * be preceded by writing TIM_SAFE_ENABLE to the TIM_SAFE register.
 */
static void wdt_disable_40xx(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
964
ivpu_hw_ip_wdt_disable(struct ivpu_device * vdev)965 void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
966 {
967 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
968 return wdt_disable_37xx(vdev);
969 else
970 return wdt_disable_40xx(vdev);
971 }
972
ipc_rx_count_get_37xx(struct ivpu_device * vdev)973 static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
974 {
975 u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
976
977 return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
978 }
979
ipc_rx_count_get_40xx(struct ivpu_device * vdev)980 static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
981 {
982 u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
983
984 return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
985 }
986
ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device * vdev)987 u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev)
988 {
989 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
990 return ipc_rx_count_get_37xx(vdev);
991 else
992 return ipc_rx_count_get_40xx(vdev);
993 }
994
ivpu_hw_ip_irq_enable(struct ivpu_device * vdev)995 void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev)
996 {
997 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
998 REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_37XX);
999 REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_37XX);
1000 } else {
1001 REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_40XX);
1002 REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_40XX);
1003 }
1004 }
1005
ivpu_hw_ip_irq_disable(struct ivpu_device * vdev)1006 void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev)
1007 {
1008 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
1009 REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
1010 REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
1011 } else {
1012 REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
1013 REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
1014 }
1015 }
1016
diagnose_failure_37xx(struct ivpu_device * vdev)1017 static void diagnose_failure_37xx(struct ivpu_device *vdev)
1018 {
1019 u32 reg = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;
1020
1021 if (ipc_rx_count_get_37xx(vdev))
1022 ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
1023
1024 if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
1025 ivpu_err(vdev, "WDT MSS timeout detected\n");
1026
1027 if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
1028 ivpu_err(vdev, "WDT NCE timeout detected\n");
1029
1030 if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
1031 ivpu_err(vdev, "NOC Firewall irq detected\n");
1032 }
1033
diagnose_failure_40xx(struct ivpu_device * vdev)1034 static void diagnose_failure_40xx(struct ivpu_device *vdev)
1035 {
1036 u32 reg = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;
1037
1038 if (ipc_rx_count_get_40xx(vdev))
1039 ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
1040
1041 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
1042 ivpu_err(vdev, "WDT MSS timeout detected\n");
1043
1044 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
1045 ivpu_err(vdev, "WDT NCE timeout detected\n");
1046
1047 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
1048 ivpu_err(vdev, "NOC Firewall irq detected\n");
1049 }
1050
ivpu_hw_ip_diagnose_failure(struct ivpu_device * vdev)1051 void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev)
1052 {
1053 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1054 diagnose_failure_37xx(vdev);
1055 else
1056 diagnose_failure_40xx(vdev);
1057 }
1058
ivpu_hw_ip_irq_clear(struct ivpu_device * vdev)1059 void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev)
1060 {
1061 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1062 REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_37XX);
1063 else
1064 REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_40XX);
1065 }
1066
/* NCE watchdog timeout: schedule device recovery. */
static void irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
1071
/*
 * MSS watchdog timeout: disable the watchdog first so the interrupt does
 * not keep firing, then schedule device recovery.
 */
static void irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_ip_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
1077
irq_noc_firewall_handler(struct ivpu_device * vdev)1078 static void irq_noc_firewall_handler(struct ivpu_device *vdev)
1079 {
1080 atomic_inc(&vdev->hw->firewall_irq_counter);
1081
1082 ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
1083 atomic_read(&vdev->hw->firewall_irq_counter));
1084 }
1085
/*
 * Handler for IRQs from the NPU core (37xx).
 *
 * Returns true if any interrupt enabled in ICB_0_IRQ_MASK_37XX was
 * pending and handled, false if the interrupt was not ours.
 */
bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq)
{
	/* Only consider sources enabled in the ICB_0 mask. */
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;

	if (!status)
		return false;

	/* Clear the latched status bits before dispatching the handlers. */
	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}
1119
/*
 * Handler for IRQs from the NPU core (40xx).
 *
 * Returns true if any interrupt enabled in ICB_0_IRQ_MASK_40XX was
 * pending and handled, false if the interrupt was not ours.
 */
bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq)
{
	/* Only consider sources enabled in the ICB_0 mask. */
	u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;

	if (!status)
		return false;

	/* Clear the latched status bits before dispatching the handlers. */
	REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}
1153
/* Ring doorbell @db_id on 37xx by writing SET into its doorbell register. */
static void db_set_37xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
	u32 set_bit = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, stride, db_id, set_bit);
}
1161
/* Ring doorbell @db_id on 40xx by writing SET into its doorbell register. */
static void db_set_40xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
	u32 set_bit = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);

	REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, stride, db_id, set_bit);
}
1169
/* Ring doorbell @db_id on the current hardware generation. */
void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		db_set_37xx(vdev, db_id);
		return;
	}

	db_set_40xx(vdev, db_id);
}
1177
ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device * vdev)1178 u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev)
1179 {
1180 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1181 return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
1182 else
1183 return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
1184 }
1185
/* Push @vpu_addr into the generation's IPC TX FIFO register. */
void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
		return;
	}

	REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
1193