// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>

#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_pm.h"

#define FW_GLOBAL_MEM_START	(2ull * SZ_1G)
#define FW_GLOBAL_MEM_END	(3ull * SZ_1G)
#define FW_SHARED_MEM_SIZE	SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
#define FW_SHARED_MEM_ALIGNMENT	SZ_128K /* VPU MTRR limitation */
#define FW_RUNTIME_MAX_SIZE	SZ_512M
#define FW_SHAVE_NN_MAX_SIZE	SZ_2M
#define FW_RUNTIME_MIN_ADDR	(FW_GLOBAL_MEM_START)
#define FW_RUNTIME_MAX_ADDR	(FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_FILE_IMAGE_OFFSET	(VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)

#define WATCHDOG_MSS_REDIRECT	32
#define WATCHDOG_NCE_REDIRECT	33

#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)

/* Check if FW API is compatible with the driver */
#define IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, name, min_major) \
	ivpu_fw_check_api(vdev, fw_hdr, #name, \
			  VPU_##name##_API_VER_INDEX, \
			  VPU_##name##_API_VER_MAJOR, \
			  VPU_##name##_API_VER_MINOR, min_major)

/* Check if the API version is lower than the given version */
#define IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, name, major, minor) \
	ivpu_fw_check_api_ver_lt(vdev, fw_hdr, #name, VPU_##name##_API_VER_INDEX, major, minor)

#define IVPU_FOCUS_PRESENT_TIMER_MS 1000

static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");

static struct {
	int gen;
	const char *name;
} fw_names[] = {
	{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v1.bin" },
	{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
	{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v1.bin" },
	{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
	{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
	{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
};

/* Production fw_names from the table above */
MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");

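/*
 * Request the NPU firmware image: use the "firmware" module parameter override
 * when it is set, otherwise try the per-generation names from fw_names[] in order.
 */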
static int ivpu_fw_request(struct ivpu_device *vdev)
{
	int ret = -ENOENT;
	int i;

	if (ivpu_firmware) {
		ret = request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
		if (!ret)
			vdev->fw->name = ivpu_firmware;
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
		if (fw_names[i].gen != ivpu_hw_ip_gen(vdev))
			continue;

		ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
		if (!ret) {
			vdev->fw->name = fw_names[i].name;
			return 0;
		}
	}

	ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
	return ret;
}

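/*
 * Compare a FW API version from the header against the version the driver was
 * built with: error out if the major version is below min_major, warn on any
 * other major mismatch.
 */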
static int
ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
		  const char *str, int index, u16 expected_major, u16 expected_minor,
		  u16 min_major)
{
	u16 major = (u16)(fw_hdr->api_version[index] >> 16);
	u16 minor = (u16)(fw_hdr->api_version[index]);

	if (major < min_major) {
		ivpu_err(vdev, "Incompatible FW %s API version: %d.%d, required %d.0 or later\n",
			 str, major, minor, min_major);
		return -EINVAL;
	}
	if (major != expected_major) {
		ivpu_warn(vdev, "Major FW %s API version different: %d.%d (expected %d.%d)\n",
			  str, major, minor, expected_major, expected_minor);
	}
	ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
		 str, major, minor, expected_major, expected_minor);

	return 0;
}

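/* Return true if the FW API version in the header is lower than major.minor */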
static bool
ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
			 const char *str, int index, u16 major, u16 minor)
{
	u16 fw_major = (u16)(fw_hdr->api_version[index] >> 16);
	u16 fw_minor = (u16)(fw_hdr->api_version[index]);

	if (fw_major < major || (fw_major == major && fw_minor < minor))
		return true;

	return false;
}

static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
{
	if (addr < range_start || addr + size > range_start + range_size)
		return false;

	return true;
}

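/* Honor an explicitly requested scheduling mode; default to OS scheduling otherwise */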
static u32
ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
{
	if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
		return ivpu_sched_mode;

	return VPU_SCHEDULING_MODE_OS;
}

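/*
 * Validate the firmware header (runtime/image layout, entry point, read-only
 * section) and cache the parsed values in vdev->fw for buffer setup and boot.
 */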
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
	u64 runtime_addr, image_load_addr, runtime_size, image_size;

	if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
		ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
		return -EINVAL;
	}

	if (fw_hdr->header_version != VPU_FW_HEADER_VERSION) {
		ivpu_err(vdev, "Invalid firmware header version: %u\n", fw_hdr->header_version);
		return -EINVAL;
	}

	runtime_addr = fw_hdr->boot_params_load_address;
	runtime_size = fw_hdr->runtime_size;
	image_load_addr = fw_hdr->image_load_address;
	image_size = fw_hdr->image_size;

	if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
		ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
		return -EINVAL;
	}

	if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
		ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
		return -EINVAL;
	}

	if (FW_FILE_IMAGE_OFFSET + image_size > fw->file->size) {
		ivpu_err(vdev, "Invalid image size: %llu\n", image_size);
		return -EINVAL;
	}

	if (image_load_addr < runtime_addr ||
	    image_load_addr + image_size > runtime_addr + runtime_size) {
		ivpu_err(vdev, "Invalid firmware load address 0x%llx or size %llu\n",
			 image_load_addr, image_size);
		return -EINVAL;
	}

	if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
		ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
		return -EINVAL;
	}

	if (fw_hdr->entry_point < image_load_addr ||
	    fw_hdr->entry_point >= image_load_addr + image_size) {
		ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
		return -EINVAL;
	}
	ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
		 fw_hdr->header_version, fw_hdr->image_format);

	if (!scnprintf(fw->version, sizeof(fw->version), "%s", fw->file->data + VPU_FW_HEADER_SIZE))
		ivpu_warn(vdev, "Missing firmware version\n");

	ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name, fw->version);

	if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3))
		return -EINVAL;
	if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3))
		return -EINVAL;

	fw->runtime_addr = runtime_addr;
	fw->runtime_size = runtime_size;
	fw->image_load_offset = image_load_addr - runtime_addr;
	fw->image_size = image_size;
	fw->shave_nn_size = PAGE_ALIGN(fw_hdr->shave_nn_fw_size);

	fw->cold_boot_entry_point = fw_hdr->entry_point;
	fw->entry_point = fw->cold_boot_entry_point;

	fw->trace_level = min_t(u32, ivpu_fw_log_level, IVPU_FW_LOG_FATAL);
	fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
	fw->trace_hw_component_mask = -1;

	fw->dvfs_mode = 0;

	fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
	fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
	fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
	ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");

	if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
								  fw_hdr->ro_section_size,
								  fw_hdr->image_load_address,
								  fw_hdr->image_size)) {
		ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
			 fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
		return -EINVAL;
	}

	fw->read_only_addr = fw_hdr->ro_section_start_address;
	fw->read_only_size = fw_hdr->ro_section_size;

	ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
		 fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
	ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
		 fw->runtime_addr, image_load_addr, fw->entry_point);
	ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
		 fw->read_only_addr, fw->read_only_size);

	return 0;
}

static void ivpu_fw_release(struct ivpu_device *vdev)
{
	release_firmware(vdev->fw->file);
}

/* Initialize workarounds that depend on FW version */
static void
ivpu_fw_init_wa(struct ivpu_device *vdev)
{
	const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;

	if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
	    (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
		vdev->wa.disable_d0i3_msg = true;

	/* Force enable the feature for testing purposes */
	if (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_ENABLE)
		vdev->wa.disable_d0i3_msg = false;

	IVPU_PRINT_WA(disable_d0i3_msg);
}

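/*
 * Place the shared (global) memory region right after the FW runtime region,
 * aligned to the VPU MTRR requirement, and check that it still fits below
 * FW_GLOBAL_MEM_END.
 */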
static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
	u64 size = FW_SHARED_MEM_SIZE;

	if (start + size > FW_GLOBAL_MEM_END) {
		ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
		return -EINVAL;
	}

	ivpu_hw_range_init(&vdev->hw->ranges.global, start, size);
	return 0;
}

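/*
 * Allocate the buffers needed before boot: the FW runtime region (with its
 * read-only image pages), the critical and verbose log buffers and, if the
 * header requests it, the SHAVE NN buffer.
 */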
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	struct ivpu_addr_range fw_range;
	int log_verb_size;
	int ret;

	ret = ivpu_fw_update_global_range(vdev);
	if (ret)
		return ret;

	fw_range.start = fw->runtime_addr;
	fw_range.end = fw->runtime_addr + fw->runtime_size;
	fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
				 DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!fw->mem) {
		ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
		return -ENOMEM;
	}

	ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,
					    fw->read_only_size);
	if (ret) {
		ivpu_err(vdev, "Failed to set firmware image read-only\n");
		goto err_free_fw_mem;
	}

	fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
						 DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
	if (!fw->mem_log_crit) {
		ivpu_err(vdev, "Failed to create critical log buffer\n");
		ret = -ENOMEM;
		goto err_free_fw_mem;
	}

	if (ivpu_fw_log_level <= IVPU_FW_LOG_INFO)
		log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
	else
		log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;

	fw->mem_log_verb = ivpu_bo_create_global(vdev, log_verb_size,
						 DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
	if (!fw->mem_log_verb) {
		ivpu_err(vdev, "Failed to create verbose log buffer\n");
		ret = -ENOMEM;
		goto err_free_log_crit;
	}

	if (fw->shave_nn_size) {
		fw->mem_shave_nn = ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.shave,
						  fw->shave_nn_size, DRM_IVPU_BO_WC);
		if (!fw->mem_shave_nn) {
			ivpu_err(vdev, "Failed to create shavenn buffer\n");
			ret = -ENOMEM;
			goto err_free_log_verb;
		}
	}

	return 0;

err_free_log_verb:
	ivpu_bo_free(fw->mem_log_verb);
err_free_log_crit:
	ivpu_bo_free(fw->mem_log_crit);
err_free_fw_mem:
	ivpu_bo_free(fw->mem);
	return ret;
}

static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;

	if (fw->mem_shave_nn) {
		ivpu_bo_free(fw->mem_shave_nn);
		fw->mem_shave_nn = NULL;
	}

	ivpu_bo_free(fw->mem_log_verb);
	ivpu_bo_free(fw->mem_log_crit);
	ivpu_bo_free(fw->mem);

	fw->mem_log_verb = NULL;
	fw->mem_log_crit = NULL;
	fw->mem = NULL;
}

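/* Request, parse and load the firmware image and set up the buffers it needs */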
int ivpu_fw_init(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_fw_request(vdev);
	if (ret)
		return ret;

	ret = ivpu_fw_parse(vdev);
	if (ret)
		goto err_fw_release;

	ivpu_fw_init_wa(vdev);

	ret = ivpu_fw_mem_init(vdev);
	if (ret)
		goto err_fw_release;

	ivpu_fw_load(vdev);

	return 0;

err_fw_release:
	ivpu_fw_release(vdev);
	return ret;
}

void ivpu_fw_fini(struct ivpu_device *vdev)
{
	ivpu_fw_mem_fini(vdev);
	ivpu_fw_release(vdev);
}

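/*
 * Copy the FW image into the runtime buffer at its load offset, zero the area
 * in front of the image and, with the clear_runtime_mem workaround, also zero
 * everything after it.
 */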
void ivpu_fw_load(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	u64 image_end_offset = fw->image_load_offset + fw->image_size;

	memset(ivpu_bo_vaddr(fw->mem), 0, fw->image_load_offset);
	memcpy(ivpu_bo_vaddr(fw->mem) + fw->image_load_offset,
	       fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size);

	if (IVPU_WA(clear_runtime_mem)) {
		u8 *start = ivpu_bo_vaddr(fw->mem) + image_end_offset;
		u64 size = ivpu_bo_size(fw->mem) - image_end_offset;

		memset(start, 0, size);
	}

	wmb(); /* Flush WC buffers after writing fw->mem */
}

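/* Dump all boot parameters to the FW_BOOT debug log */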
static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
	ivpu_dbg(vdev, FW_BOOT, "boot_params.magic = 0x%x\n",
		 boot_params->magic);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_id = 0x%x\n",
		 boot_params->vpu_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_count = 0x%x\n",
		 boot_params->vpu_count);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.frequency = %u\n",
		 boot_params->frequency);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.perf_clk_frequency = %u\n",
		 boot_params->perf_clk_frequency);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_start = 0x%llx\n",
		 boot_params->ipc_header_area_start);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_size = 0x%x\n",
		 boot_params->ipc_header_area_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_base = 0x%llx\n",
		 boot_params->shared_region_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_size = 0x%x\n",
		 boot_params->shared_region_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_start = 0x%llx\n",
		 boot_params->ipc_payload_area_start);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_size = 0x%x\n",
		 boot_params->ipc_payload_area_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_base = 0x%llx\n",
		 boot_params->global_aliased_pio_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_size = 0x%x\n",
		 boot_params->global_aliased_pio_size);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.autoconfig = 0x%x\n",
		 boot_params->autoconfig);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 0x%x\n",
		 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
		 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
		 boot_params->global_memory_allocator_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
		 boot_params->global_memory_allocator_size);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
		 boot_params->shave_nn_fw_base);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_mss = 0x%x\n",
		 boot_params->watchdog_irq_mss);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
		 boot_params->watchdog_irq_nce);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
		 boot_params->host_to_vpu_irq);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
		 boot_params->job_done_irq);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
		 boot_params->host_version_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.si_stepping = 0x%x\n",
		 boot_params->si_stepping);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.device_id = 0x%llx\n",
		 boot_params->device_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.feature_exclusion = 0x%llx\n",
		 boot_params->feature_exclusion);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.sku = 0x%llx\n",
		 boot_params->sku);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.min_freq_pll_ratio = 0x%x\n",
		 boot_params->min_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.pn_freq_pll_ratio = 0x%x\n",
		 boot_params->pn_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.max_freq_pll_ratio = 0x%x\n",
		 boot_params->max_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.default_trace_level = 0x%x\n",
		 boot_params->default_trace_level);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.tracing_buff_message_format_mask = 0x%llx\n",
		 boot_params->tracing_buff_message_format_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_destination_mask = 0x%x\n",
		 boot_params->trace_destination_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_hw_component_mask = 0x%llx\n",
		 boot_params->trace_hw_component_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.boot_type = 0x%x\n",
		 boot_params->boot_type);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_base = 0x%llx\n",
		 boot_params->punit_telemetry_sram_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_size = 0x%llx\n",
		 boot_params->punit_telemetry_sram_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
		 boot_params->vpu_telemetry_enable);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_scheduling_mode = 0x%x\n",
		 boot_params->vpu_scheduling_mode);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n",
		 boot_params->dvfs_mode);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_delayed_entry = %d\n",
		 boot_params->d0i3_delayed_entry);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
		 boot_params->d0i3_residency_time_us);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
		 boot_params->d0i3_entry_vpu_ts);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
		 boot_params->system_time_us);
}

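/*
 * Fill in the boot parameters consumed by the FW. On a warm boot only the
 * time-related fields are refreshed; a cold boot programs the full set
 * (shared region, IPC areas, tracing, telemetry, scheduling and DVFS).
 */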
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
	struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;

	/* In case of warm boot only update variable params */
	if (!ivpu_fw_is_cold_boot(vdev)) {
		boot_params->d0i3_residency_time_us =
			ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts);
		boot_params->d0i3_entry_vpu_ts = vdev->hw->d0i3_entry_vpu_ts;
		boot_params->system_time_us = ktime_to_us(ktime_get_real());

		ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
			 boot_params->d0i3_residency_time_us);
		ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
			 boot_params->d0i3_entry_vpu_ts);
		ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
			 boot_params->system_time_us);

		boot_params->save_restore_ret_address = 0;
		vdev->pm->is_warmboot = true;
		wmb(); /* Flush WC buffers after writing save_restore_ret_address */
		return;
	}

	vdev->pm->is_warmboot = false;

	boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
	boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;

	/*
	 * This param is a debug firmware feature. It switches the default clock
	 * to a higher-resolution one for fine-grained and more accurate firmware
	 * task profiling.
	 */
	boot_params->perf_clk_frequency = ivpu_hw_profiling_freq_get(vdev);

	/*
	 * Uncached region of VPU address space, covers IPC buffers, job queues
	 * and log buffers, programmable to L2$ Uncached by VPU MTRR
	 */
	boot_params->shared_region_base = vdev->hw->ranges.global.start;
	boot_params->shared_region_size = vdev->hw->ranges.global.end -
					  vdev->hw->ranges.global.start;

	boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
	boot_params->ipc_header_area_size = ivpu_bo_size(ipc_mem_rx) / 2;

	boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ivpu_bo_size(ipc_mem_rx) / 2;
	boot_params->ipc_payload_area_size = ivpu_bo_size(ipc_mem_rx) / 2;

	boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
	boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);

	/* Allow configuration for L2C_PAGE_TABLE with boot param value */
	boot_params->autoconfig = 1;

	/* Enable L2 cache for first 2GB of high memory */
	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
		ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);

	if (vdev->fw->mem_shave_nn)
		boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;

	boot_params->watchdog_irq_mss = WATCHDOG_MSS_REDIRECT;
	boot_params->watchdog_irq_nce = WATCHDOG_NCE_REDIRECT;
	boot_params->si_stepping = ivpu_revision(vdev);
	boot_params->device_id = ivpu_device_id(vdev);
	boot_params->feature_exclusion = vdev->hw->tile_fuse;
	boot_params->sku = vdev->hw->sku;

	boot_params->min_freq_pll_ratio = vdev->hw->pll.min_ratio;
	boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
	boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;

	boot_params->default_trace_level = vdev->fw->trace_level;
	boot_params->tracing_buff_message_format_mask = BIT(VPU_TRACING_FORMAT_STRING);
	boot_params->trace_destination_mask = vdev->fw->trace_destination_mask;
	boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
	boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr;
	boot_params->crit_tracing_buff_size = ivpu_bo_size(vdev->fw->mem_log_crit);
	boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
	boot_params->verbose_tracing_buff_size = ivpu_bo_size(vdev->fw->mem_log_verb);

	boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
	boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
	boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
	boot_params->vpu_scheduling_mode = vdev->fw->sched_mode;
	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
		boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
	boot_params->dvfs_mode = vdev->fw->dvfs_mode;
	if (!IVPU_WA(disable_d0i3_msg))
		boot_params->d0i3_delayed_entry = 1;
	boot_params->d0i3_residency_time_us = 0;
	boot_params->d0i3_entry_vpu_ts = 0;

	boot_params->system_time_us = ktime_to_us(ktime_get_real());
	wmb(); /* Flush WC buffers after writing bootparams */

	ivpu_fw_boot_params_print(vdev, boot_params);
}
