1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020 - 2024 Intel Corporation
4 */
5
6 #include "ivpu_drv.h"
7 #include "ivpu_hw.h"
8 #include "ivpu_hw_btrs.h"
9 #include "ivpu_hw_ip.h"
10
11 #include <linux/dmi.h>
12
/* Map an IVPU_PLATFORM_* value to a human-readable name for debug logging. */
static char *platform_to_str(u32 platform)
{
	if (platform == IVPU_PLATFORM_SILICON)
		return "SILICON";
	if (platform == IVPU_PLATFORM_SIMICS)
		return "SIMICS";
	if (platform == IVPU_PLATFORM_FPGA)
		return "FPGA";

	return "Invalid platform";
}
26
/*
 * DMI board identifiers that indicate the driver is running under the
 * Simics simulator rather than on real silicon. Matched by platform_init().
 */
static const struct dmi_system_id dmi_platform_simulation[] = {
	{
		/* LNL reference validation platform as exposed by Simics */
		.ident = "Intel Simics",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
			DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
		},
	},
	{
		/* Generic Simics board name */
		.ident = "Intel Simics",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "Simics"),
		},
	},
	{ } /* terminator */
};
44
platform_init(struct ivpu_device * vdev)45 static void platform_init(struct ivpu_device *vdev)
46 {
47 if (dmi_check_system(dmi_platform_simulation))
48 vdev->platform = IVPU_PLATFORM_SIMICS;
49 else
50 vdev->platform = IVPU_PLATFORM_SILICON;
51
52 ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
53 platform_to_str(vdev->platform), vdev->platform);
54 }
55
wa_init(struct ivpu_device * vdev)56 static void wa_init(struct ivpu_device *vdev)
57 {
58 vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
59 vdev->wa.clear_runtime_mem = false;
60
61 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
62 vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);
63
64 if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
65 ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
66 vdev->wa.disable_clock_relinquish = true;
67
68 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
69 vdev->wa.wp0_during_power_up = true;
70
71 IVPU_PRINT_WA(punit_disabled);
72 IVPU_PRINT_WA(clear_runtime_mem);
73 IVPU_PRINT_WA(interrupt_clear_with_0);
74 IVPU_PRINT_WA(disable_clock_relinquish);
75 IVPU_PRINT_WA(wp0_during_power_up);
76 }
77
timeouts_init(struct ivpu_device * vdev)78 static void timeouts_init(struct ivpu_device *vdev)
79 {
80 if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) {
81 vdev->timeout.boot = -1;
82 vdev->timeout.jsm = -1;
83 vdev->timeout.tdr = -1;
84 vdev->timeout.autosuspend = -1;
85 vdev->timeout.d0i3_entry_msg = -1;
86 } else if (ivpu_is_fpga(vdev)) {
87 vdev->timeout.boot = 100000;
88 vdev->timeout.jsm = 50000;
89 vdev->timeout.tdr = 2000000;
90 vdev->timeout.autosuspend = -1;
91 vdev->timeout.d0i3_entry_msg = 500;
92 vdev->timeout.state_dump_msg = 10;
93 } else if (ivpu_is_simics(vdev)) {
94 vdev->timeout.boot = 50;
95 vdev->timeout.jsm = 500;
96 vdev->timeout.tdr = 10000;
97 vdev->timeout.autosuspend = -1;
98 vdev->timeout.d0i3_entry_msg = 100;
99 vdev->timeout.state_dump_msg = 10;
100 } else {
101 vdev->timeout.boot = 1000;
102 vdev->timeout.jsm = 500;
103 vdev->timeout.tdr = 2000;
104 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
105 vdev->timeout.autosuspend = 10;
106 else
107 vdev->timeout.autosuspend = 100;
108 vdev->timeout.d0i3_entry_msg = 5;
109 vdev->timeout.state_dump_msg = 100;
110 }
111 }
112
priority_bands_init(struct ivpu_device * vdev)113 static void priority_bands_init(struct ivpu_device *vdev)
114 {
115 /* Idle */
116 vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
117 vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
118 vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
119 /* Normal */
120 vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
121 vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
122 vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
123 /* Focus */
124 vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
125 vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
126 vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
127 /* Realtime */
128 vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
129 vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
130 vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
131 }
132
memory_ranges_init(struct ivpu_device * vdev)133 static void memory_ranges_init(struct ivpu_device *vdev)
134 {
135 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
136 ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
137 ivpu_hw_range_init(&vdev->hw->ranges.user, 0xc0000000, 255 * SZ_1M);
138 ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
139 ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
140 } else {
141 ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
142 ivpu_hw_range_init(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
143 ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
144 ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
145 }
146 }
147
/* Drive the NPU workpoint up (enable = true). Returns 0 or a negative errno. */
static int wp_enable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, true);
}
152
/* Drive the NPU workpoint down (enable = false). Returns 0 or a negative errno. */
static int wp_disable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, false);
}
157
/*
 * Power up the NPU: exit D0i3, raise the workpoint, then bring up the IP
 * blocks (host subsystem, power domain, AXI, TOP NOC). The step order is
 * part of the hardware bring-up contract — do not reorder.
 *
 * Returns 0 on success, negative errno on the first fatal failure.
 * D0i3-disable and the optional WP0 write are best-effort (warn only).
 */
int ivpu_hw_power_up(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(wp0_during_power_up)) {
		/* WP requests may fail when powering down, so issue WP 0 here */
		ret = wp_disable(vdev);
		if (ret)
			ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret);
	}

	/* Best-effort: continue even if D0i3 exit reports failure */
	ret = ivpu_hw_btrs_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = wp_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
		return ret;
	}

	/* LNL-and-later buttress quirks/configuration */
	if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
		if (IVPU_WA(disable_clock_relinquish))
			ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
		ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
		ivpu_hw_btrs_ats_print_lnl(vdev);
	}

	ret = ivpu_hw_ip_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	ivpu_hw_ip_idle_gen_disable(vdev);

	ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
		return ret;
	}

	ret = ivpu_hw_ip_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
		ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);

	/* Non-fatal here only in the sense that ret is propagated as-is */
	ret = ivpu_hw_ip_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}
221
/*
 * Capture matching host (CLOCK_BOOTTIME) and NPU perf-counter timestamps at
 * D0i3 entry — presumably so the two clock domains can be correlated after
 * resume (TODO confirm with consumers). Keep the two reads adjacent to
 * minimize skew between them.
 */
static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
}
227
ivpu_hw_reset(struct ivpu_device * vdev)228 int ivpu_hw_reset(struct ivpu_device *vdev)
229 {
230 int ret = 0;
231
232 if (ivpu_hw_btrs_ip_reset(vdev)) {
233 ivpu_err(vdev, "Failed to reset NPU IP\n");
234 ret = -EIO;
235 }
236
237 if (wp_disable(vdev)) {
238 ivpu_err(vdev, "Failed to disable workpoint\n");
239 ret = -EIO;
240 }
241
242 return ret;
243 }
244
ivpu_hw_power_down(struct ivpu_device * vdev)245 int ivpu_hw_power_down(struct ivpu_device *vdev)
246 {
247 int ret = 0;
248
249 save_d0i3_entry_timestamp(vdev);
250
251 if (!ivpu_hw_is_idle(vdev))
252 ivpu_warn(vdev, "NPU not idle during power down\n");
253
254 if (ivpu_hw_reset(vdev)) {
255 ivpu_err(vdev, "Failed to reset NPU\n");
256 ret = -EIO;
257 }
258
259 if (ivpu_hw_btrs_d0i3_enable(vdev)) {
260 ivpu_err(vdev, "Failed to enter D0I3\n");
261 ret = -EIO;
262 }
263
264 return ret;
265 }
266
/*
 * One-time initialization of the driver's HW description state.
 * Order matters: platform_init() must run before wa_init() and
 * timeouts_init(), which branch on the detected platform.
 *
 * Always returns 0; the int return is kept for caller compatibility.
 */
int ivpu_hw_init(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_info_init(vdev);
	ivpu_hw_btrs_freq_ratios_init(vdev);
	priority_bands_init(vdev);
	memory_ranges_init(vdev);
	platform_init(vdev);
	wa_init(vdev);
	timeouts_init(vdev);
	atomic_set(&vdev->hw->firewall_irq_counter, 0);

	return 0;
}
280
/*
 * Boot the NPU firmware: disable snooping and enable the TBU/MMU before
 * releasing the SOC CPU. The two setup calls must precede the boot call.
 * Returns 0 on success or the SOC CPU boot error.
 */
int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
	int ret;

	ivpu_hw_ip_snoop_disable(vdev);
	ivpu_hw_ip_tbu_mmu_enable(vdev);
	ret = ivpu_hw_ip_soc_cpu_boot(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

	return ret;
}
293
/*
 * Select the PLL profiling frequency. 37XX has no high profiling
 * frequency, so the default is forced there regardless of @enable.
 */
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
		return;
	}

	vdev->hw->pll.profiling_freq = enable ? PLL_PROFILING_FREQ_HIGH :
						PLL_PROFILING_FREQ_DEFAULT;
}
306
ivpu_irq_handlers_init(struct ivpu_device * vdev)307 void ivpu_irq_handlers_init(struct ivpu_device *vdev)
308 {
309 INIT_KFIFO(vdev->hw->irq.fifo);
310
311 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
312 vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
313 else
314 vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;
315
316 if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
317 vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
318 else
319 vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
320 }
321
/*
 * Enable NPU interrupt delivery. The fifo is reset first so no stale
 * deferred-IRQ events survive; IP interrupts are unmasked before the
 * buttress. Order is the inverse of ivpu_hw_irq_disable().
 */
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
	kfifo_reset(&vdev->hw->irq.fifo);
	ivpu_hw_ip_irq_enable(vdev);
	ivpu_hw_btrs_irq_enable(vdev);
}
328
/* Disable NPU interrupt delivery: buttress first, then the IP (reverse of enable). */
void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_irq_disable(vdev);
	ivpu_hw_ip_irq_disable(vdev);
}
334
ivpu_hw_irq_handler(int irq,void * ptr)335 irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
336 {
337 struct ivpu_device *vdev = ptr;
338 bool ip_handled, btrs_handled;
339
340 ivpu_hw_btrs_global_int_disable(vdev);
341
342 btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
343 if (!ivpu_hw_is_idle((vdev)) || !btrs_handled)
344 ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
345 else
346 ip_handled = false;
347
348 /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
349 ivpu_hw_btrs_global_int_enable(vdev);
350
351 if (!kfifo_is_empty(&vdev->hw->irq.fifo))
352 return IRQ_WAKE_THREAD;
353 if (ip_handled || btrs_handled)
354 return IRQ_HANDLED;
355 return IRQ_NONE;
356 }
357