// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/debugfs.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include <uapi/drm/ivpu_accel.h>

#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

static inline struct ivpu_device *seq_to_ivpu(struct seq_file *s)
{
	struct drm_debugfs_entry *entry = s->private;

	return to_ivpu_device(entry->dev);
}

static int bo_list_show(struct seq_file *s, void *v)
{
	struct drm_printer p = drm_seq_file_printer(s);
	struct ivpu_device *vdev = seq_to_ivpu(s);

	ivpu_bo_list(&vdev->drm, &p);

	return 0;
}

static int fw_name_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%s\n", vdev->fw->name);
	return 0;
}

static int fw_trace_capability_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);
	u64 trace_hw_component_mask;
	u32 trace_destination_mask;
	int ret;

	ret = ivpu_jsm_trace_get_capability(vdev, &trace_destination_mask,
					    &trace_hw_component_mask);
	if (!ret) {
		seq_printf(s,
			   "trace_destination_mask: %#18x\n"
			   "trace_hw_component_mask: %#18llx\n",
			   trace_destination_mask, trace_hw_component_mask);
	}
	return 0;
}

static int fw_trace_config_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);
	/*
	 * WA: VPU_JSM_MSG_TRACE_GET_CONFIG command is not working yet,
	 * so we use values from vdev->fw instead of calling ivpu_jsm_trace_get_config()
	 */
	u32 trace_level = vdev->fw->trace_level;
	u32 trace_destination_mask = vdev->fw->trace_destination_mask;
	u64 trace_hw_component_mask = vdev->fw->trace_hw_component_mask;

	seq_printf(s,
		   "trace_level: %#18x\n"
		   "trace_destination_mask: %#18x\n"
		   "trace_hw_component_mask: %#18llx\n",
		   trace_level, trace_destination_mask, trace_hw_component_mask);

	return 0;
}

static int last_bootmode_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%s\n", (vdev->pm->is_warmboot) ? "warmboot" : "coldboot");

	return 0;
}

static int reset_counter_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_counter));
	return 0;
}

static int reset_pending_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_pending));
	return 0;
}

static int firewall_irq_counter_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = seq_to_ivpu(s);

	seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter));
	return 0;
}

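/* Read-only entries registered through the DRM debugfs helpers. */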
static const struct drm_debugfs_info vdev_debugfs_list[] = {
	{"bo_list", bo_list_show, 0},
	{"fw_name", fw_name_show, 0},
	{"fw_trace_capability", fw_trace_capability_show, 0},
	{"fw_trace_config", fw_trace_config_show, 0},
	{"last_bootmode", last_bootmode_show, 0},
	{"reset_counter", reset_counter_show, 0},
	{"reset_pending", reset_pending_show, 0},
	{"firewall_irq_counter", firewall_irq_counter_show, 0},
};

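/*
 * dvfs_mode - store the requested DVFS mode in the cached FW config and
 * trigger a function level reset so the firmware boots with the new value.
 */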
static ssize_t
dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	struct ivpu_fw_info *fw = vdev->fw;
	u32 dvfs_mode;
	int ret;

	ret = kstrtou32_from_user(user_buf, size, 0, &dvfs_mode);
	if (ret < 0)
		return ret;

	fw->dvfs_mode = dvfs_mode;

	ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
	if (ret)
		return ret;

	return size;
}

static const struct file_operations dvfs_mode_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = dvfs_mode_fops_write,
};

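/*
 * fw_dyndbg - copy a dynamic debug command string from user space and
 * forward it to the firmware via ivpu_jsm_dyndbg_control().
 */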
static ssize_t
fw_dyndbg_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	char buffer[VPU_DYNDBG_CMD_MAX_LEN] = {};
	int ret;

	if (size >= VPU_DYNDBG_CMD_MAX_LEN)
		return -EINVAL;

	ret = strncpy_from_user(buffer, user_buf, size);
	if (ret < 0)
		return ret;

	ivpu_jsm_dyndbg_control(vdev, buffer, size);
	return size;
}

static const struct file_operations fw_dyndbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_dyndbg_fops_write,
};

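/*
 * fw_log - dump the firmware log on read; any non-empty write marks the
 * current log content as read via ivpu_fw_log_mark_read().
 */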
static int fw_log_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = s->private;
	struct drm_printer p = drm_seq_file_printer(s);

	ivpu_fw_log_print(vdev, true, &p);
	return 0;
}

static int fw_log_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, fw_log_show, inode->i_private);
}

static ssize_t
fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct ivpu_device *vdev = s->private;

	if (!size)
		return -EINVAL;

	ivpu_fw_log_mark_read(vdev);
	return size;
}

static const struct file_operations fw_log_fops = {
	.owner = THIS_MODULE,
	.open = fw_log_fops_open,
	.write = fw_log_fops_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

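/*
 * fw_profiling_freq_drive - enable/disable the profiling frequency and
 * reset the device so the firmware is rebooted with the new setting.
 */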
static ssize_t
fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
			     size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(user_buf, size, &enable);
	if (ret < 0)
		return ret;

	ivpu_hw_profiling_freq_drive(vdev, enable);

	ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
	if (ret)
		return ret;

	return size;
}

static const struct file_operations fw_profiling_freq_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_profiling_freq_fops_write,
};

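/*
 * fw_trace_destination_mask - update the cached trace destination mask and
 * push the full trace configuration to the firmware.
 */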
static ssize_t
fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf,
				     size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	struct ivpu_fw_info *fw = vdev->fw;
	u32 trace_destination_mask;
	int ret;

	ret = kstrtou32_from_user(user_buf, size, 0, &trace_destination_mask);
	if (ret < 0)
		return ret;

	fw->trace_destination_mask = trace_destination_mask;

	ivpu_jsm_trace_set_config(vdev, fw->trace_level, trace_destination_mask,
				  fw->trace_hw_component_mask);

	return size;
}

static const struct file_operations fw_trace_destination_mask_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_trace_destination_mask_fops_write,
};

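/*
 * fw_trace_hw_comp_mask - update the cached HW component trace mask and
 * push the full trace configuration to the firmware.
 */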
static ssize_t
fw_trace_hw_comp_mask_fops_write(struct file *file, const char __user *user_buf,
				 size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	struct ivpu_fw_info *fw = vdev->fw;
	u64 trace_hw_component_mask;
	int ret;

	ret = kstrtou64_from_user(user_buf, size, 0, &trace_hw_component_mask);
	if (ret < 0)
		return ret;

	fw->trace_hw_component_mask = trace_hw_component_mask;

	ivpu_jsm_trace_set_config(vdev, fw->trace_level, fw->trace_destination_mask,
				  trace_hw_component_mask);

	return size;
}

static const struct file_operations fw_trace_hw_comp_mask_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_trace_hw_comp_mask_fops_write,
};

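/*
 * fw_trace_level - update the cached firmware trace level and push the
 * full trace configuration to the firmware.
 */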
static ssize_t
fw_trace_level_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	struct ivpu_fw_info *fw = vdev->fw;
	u32 trace_level;
	int ret;

	ret = kstrtou32_from_user(user_buf, size, 0, &trace_level);
	if (ret < 0)
		return ret;

	fw->trace_level = trace_level;

	ivpu_jsm_trace_set_config(vdev, trace_level, fw->trace_destination_mask,
				  fw->trace_hw_component_mask);

	return size;
}

static const struct file_operations fw_trace_level_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = fw_trace_level_fops_write,
};

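/*
 * force_recovery - any non-empty write wakes the device, triggers device
 * recovery and waits for the recovery work to finish.
 */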
static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct ivpu_device *vdev = file->private_data;
	int ret;

	if (!size)
		return -EINVAL;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ivpu_pm_trigger_recovery(vdev, "debugfs");
	flush_work(&vdev->pm->recovery_work);
	ivpu_rpm_put(vdev);
	return size;
}

static const struct file_operations ivpu_force_recovery_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = ivpu_force_recovery_fn,
};

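/* reset_engine - write an engine id to request a firmware engine reset. */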
static int ivpu_reset_engine_fn(void *data, u64 val)
{
	struct ivpu_device *vdev = (struct ivpu_device *)data;

	return ivpu_jsm_reset_engine(vdev, (u32)val);
}

DEFINE_DEBUGFS_ATTRIBUTE(ivpu_reset_engine_fops, NULL, ivpu_reset_engine_fn, "0x%02llx\n");

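/* resume_engine - write an engine id to resume it via the HW scheduler (HWS). */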
static int ivpu_resume_engine_fn(void *data, u64 val)
{
	struct ivpu_device *vdev = (struct ivpu_device *)data;

	return ivpu_jsm_hws_resume_engine(vdev, (u32)val);
}

DEFINE_DEBUGFS_ATTRIBUTE(ivpu_resume_engine_fops, NULL, ivpu_resume_engine_fn, "0x%02llx\n");

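/*
 * dct - get/set the duty cycle throttling (DCT) active percentage.
 * Writing 0 disables DCT, 1-100 enables it with the given duty cycle.
 */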
static int dct_active_get(void *data, u64 *active_percent)
{
	struct ivpu_device *vdev = data;

	*active_percent = vdev->pm->dct_active_percent;

	return 0;
}

static int dct_active_set(void *data, u64 active_percent)
{
	struct ivpu_device *vdev = data;
	int ret;

	if (active_percent > 100)
		return -EINVAL;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	if (active_percent)
		ret = ivpu_pm_dct_enable(vdev, active_percent);
	else
		ret = ivpu_pm_dct_disable(vdev);

	ivpu_rpm_put(vdev);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");

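/* Dump the HWS scheduling parameters configured for each priority band. */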
static int priority_bands_show(struct seq_file *s, void *v)
{
	struct ivpu_device *vdev = s->private;
	struct ivpu_hw_info *hw = vdev->hw;

	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
		switch (band) {
		case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
			seq_puts(s, "Idle: ");
			break;

		case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
			seq_puts(s, "Normal: ");
			break;

		case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
			seq_puts(s, "Focus: ");
			break;

		case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
			seq_puts(s, "Realtime: ");
			break;
		}

		seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
			   hw->hws.grace_period[band], hw->hws.process_grace_period[band],
			   hw->hws.process_quantum[band]);
	}

	return 0;
}

static int priority_bands_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, priority_bands_show, inode->i_private);
}

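/*
 * Update the HWS parameters of one priority band. Expected input format:
 * "<band> <grace_period> <process_grace_period> <process_quantum>"
 */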
static ssize_t
priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct ivpu_device *vdev = s->private;
	char buf[64];
	u32 grace_period;
	u32 process_grace_period;
	u32 process_quantum;
	u32 band;
	int ret;

	if (size >= sizeof(buf))
		return -EINVAL;

	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, pos, user_buf, size);
	if (ret < 0)
		return ret;

	buf[size] = '\0';
	ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
		     &process_quantum);
	if (ret != 4)
		return -EINVAL;

	if (band >= VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT)
		return -EINVAL;

	vdev->hw->hws.grace_period[band] = grace_period;
	vdev->hw->hws.process_grace_period[band] = process_grace_period;
	vdev->hw->hws.process_quantum[band] = process_quantum;

	return size;
}

static const struct file_operations ivpu_hws_priority_bands_fops = {
	.owner = THIS_MODULE,
	.open = priority_bands_fops_open,
	.write = priority_bands_fops_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

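/*
 * Create all debugfs entries under the DRM debugfs root. Entries that are
 * only meaningful on 40xx and newer IP generations are created conditionally.
 */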
void ivpu_debugfs_init(struct ivpu_device *vdev)
{
	struct dentry *debugfs_root = vdev->drm.debugfs_root;

	drm_debugfs_add_files(&vdev->drm, vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list));

	debugfs_create_file("force_recovery", 0200, debugfs_root, vdev,
			    &ivpu_force_recovery_fops);

	debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
			    &dvfs_mode_fops);

	debugfs_create_file("fw_dyndbg", 0200, debugfs_root, vdev,
			    &fw_dyndbg_fops);
	debugfs_create_file("fw_log", 0644, debugfs_root, vdev,
			    &fw_log_fops);
	debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev,
			    &fw_trace_destination_mask_fops);
	debugfs_create_file("fw_trace_hw_comp_mask", 0200, debugfs_root, vdev,
			    &fw_trace_hw_comp_mask_fops);
	debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev,
			    &fw_trace_level_fops);
	debugfs_create_file("hws_priority_bands", 0200, debugfs_root, vdev,
			    &ivpu_hws_priority_bands_fops);

	debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
			    &ivpu_reset_engine_fops);
	debugfs_create_file("resume_engine", 0200, debugfs_root, vdev,
			    &ivpu_resume_engine_fops);

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) {
		debugfs_create_file("fw_profiling_freq_drive", 0200,
				    debugfs_root, vdev, &fw_profiling_freq_fops);
		debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
	}
}