// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */
21
22 #include "mali_kbase.h"
23 #include "mali_kbase_defs.h"
24 #include "mali_kbase_csf_firmware.h"
25 #include "mali_kbase_csf_trace_buffer.h"
26 #include "mali_kbase_reset_gpu.h"
27 #include "mali_kbase_csf_tl_reader.h"
28
29 #include <linux/list.h>
30 #include <linux/mman.h>
31
32 #if IS_ENABLED(CONFIG_DEBUG_FS)
33 #if (KERNEL_VERSION(4, 7, 0) > LINUX_VERSION_CODE)
34 #define DEFINE_DEBUGFS_ATTRIBUTE DEFINE_SIMPLE_ATTRIBUTE
35 #endif
36 #endif
37
/**
 * struct firmware_trace_buffer - Trace Buffer within the MCU firmware
 *
 * @kbdev: Pointer to the Kbase device.
 * @node: List head linking all trace buffers to
 * kbase_device:csf.firmware_trace_buffers
 * @data_mapping: MCU shared memory mapping used for the data buffer.
 * @updatable: Indicates whether config items can be updated with
 * FIRMWARE_CONFIG_UPDATE
 * @type: The type of the trace buffer.
 * @trace_enable_entry_count: Number of Trace Enable bits.
 * @gpu_va: Structure containing all the Firmware addresses
 * that are accessed by the MCU.
 * @gpu_va.size_address: The address where the MCU shall read the size of
 * the data buffer.
 * @gpu_va.insert_address: The address that shall be dereferenced by the MCU
 * to write the Insert offset.
 * @gpu_va.extract_address: The address that shall be dereferenced by the MCU
 * to read the Extract offset.
 * @gpu_va.data_address: The address that shall be dereferenced by the MCU
 * to write the Trace Buffer.
 * @gpu_va.trace_enable: The address where the MCU shall read the array of
 * Trace Enable bits describing which trace points
 * and features shall be enabled.
 * @cpu_va: Structure containing CPU addresses of variables
 * which are permanently mapped on the CPU address
 * space.
 * @cpu_va.insert_cpu_va: CPU virtual address of the Insert variable.
 * @cpu_va.extract_cpu_va: CPU virtual address of the Extract variable.
 * @num_pages: Size of the data buffer, in pages.
 * @trace_enable_init_mask: Initial value for the trace enable bit mask.
 * @name: NULL terminated string which contains the name of the trace buffer.
 *
 * The firmware relays information to the host by writing on memory buffers
 * which are allocated and partially configured by the host. These buffers
 * are called Trace Buffers: each of them has a specific purpose and is
 * identified by a name and a set of memory addresses where the host can
 * set pointers to host-allocated structures.
 */
struct firmware_trace_buffer {
	struct kbase_device *kbdev;
	struct list_head node;
	struct kbase_csf_mapping data_mapping;
	bool updatable;
	u32 type;
	u32 trace_enable_entry_count;
	struct gpu_va {
		u32 size_address;
		u32 insert_address;
		u32 extract_address;
		u32 data_address;
		u32 trace_enable;
	} gpu_va;
	struct cpu_va {
		u32 *insert_cpu_va;
		u32 *extract_cpu_va;
	} cpu_va;
	u32 num_pages;
	u32 trace_enable_init_mask[CSF_FIRMWARE_TRACE_ENABLE_INIT_MASK_MAX];
	/* Must stay the last field: instances are over-allocated so that
	 * the trace buffer name (plus NUL) extends past this 1-byte array
	 * (see kbase_csf_firmware_parse_trace_buffer_entry()).
	 */
	char name[1]; /* this field must be last */
};
99
/**
 * struct firmware_trace_buffer_data - Configuration data for trace buffers
 *
 * @name: Name identifier of the trace buffer
 * @trace_enable_init_mask: Initial value to assign to the trace enable bits
 * @size: Size of the data buffer to allocate for the trace buffer, in pages.
 * The size of a data buffer must always be a power of 2.
 *
 * Describe how to set up a trace buffer interface.
 * Trace buffers are identified by name and they require a data buffer and
 * an initial mask of values for the trace enable bits.
 */
struct firmware_trace_buffer_data {
	char name[64];
	u32 trace_enable_init_mask[CSF_FIRMWARE_TRACE_ENABLE_INIT_MASK_MAX];
	size_t size;
};
117
/*
 * Table of configuration data for trace buffers.
 *
 * This table contains the configuration data for the trace buffers that are
 * expected to be parsed from the firmware. Entries not listed here are
 * ignored by kbase_csf_firmware_parse_trace_buffer_entry().
 *
 * Each entry is: { name, initial trace enable mask, data buffer size in pages }.
 */
static const struct firmware_trace_buffer_data trace_buffer_data[] = {
#if MALI_UNIT_TEST
	{ "fwutf", { 0 }, 1 },
#endif
	{ FW_TRACE_BUF_NAME, { 0 }, 4 },
	{ "benchmark", { 0 }, 2 },
	{ "timeline", { 0 }, KBASE_CSF_TL_BUFFER_NR_PAGES },
};
132
/* Allocate and wire up the shared-memory backing for every trace buffer
 * previously parsed from the firmware image, then publish the buffer
 * addresses and the initial trace enable masks to the firmware.
 * Returns 0 on success or a negative error code if any mapping fails
 * (the caller is expected to invoke the _term function for cleanup).
 */
int kbase_csf_firmware_trace_buffers_init(struct kbase_device *kbdev)
{
	struct firmware_trace_buffer *trace_buffer;
	int ret = 0;
	u32 mcu_rw_offset = 0, mcu_write_offset = 0;
	/* Extract/Insert variables for distinct buffers are laid out in the
	 * shared pages at cache-line distance from each other.
	 */
	const u32 cache_line_alignment = kbase_get_cache_line_alignment(kbdev);

	if (list_empty(&kbdev->csf.firmware_trace_buffers.list)) {
		dev_dbg(kbdev->dev, "No trace buffers to initialise\n");
		return 0;
	}

	/* GPU-readable,writable memory used for Extract variables */
	ret = kbase_csf_firmware_mcu_shared_mapping_init(
			kbdev, 1, PROT_WRITE,
			KBASE_REG_GPU_RD | KBASE_REG_GPU_WR,
			&kbdev->csf.firmware_trace_buffers.mcu_rw);
	if (ret != 0) {
		dev_err(kbdev->dev, "Failed to map GPU-rw MCU shared memory\n");
		goto out;
	}

	/* GPU-writable memory used for Insert variables */
	ret = kbase_csf_firmware_mcu_shared_mapping_init(
			kbdev, 1, PROT_READ, KBASE_REG_GPU_WR,
			&kbdev->csf.firmware_trace_buffers.mcu_write);
	if (ret != 0) {
		dev_err(kbdev->dev, "Failed to map GPU-writable MCU shared memory\n");
		goto out;
	}

	list_for_each_entry(trace_buffer, &kbdev->csf.firmware_trace_buffers.list, node) {
		u32 extract_gpu_va, insert_gpu_va, data_buffer_gpu_va,
			trace_enable_size_dwords;
		u32 *extract_cpu_va, *insert_cpu_va;
		unsigned int i;

		/* GPU-writable data buffer for the individual trace buffer */
		ret = kbase_csf_firmware_mcu_shared_mapping_init(
				kbdev, trace_buffer->num_pages, PROT_READ, KBASE_REG_GPU_WR,
				&trace_buffer->data_mapping);
		if (ret) {
			dev_err(kbdev->dev, "Failed to map GPU-writable MCU shared memory for a trace buffer\n");
			goto out;
		}

		/* Derive the GPU and CPU addresses of this buffer's Extract
		 * and Insert variables from the shared mappings plus the
		 * per-buffer running offsets.
		 */
		extract_gpu_va =
			(kbdev->csf.firmware_trace_buffers.mcu_rw.va_reg->start_pfn << PAGE_SHIFT) +
			mcu_rw_offset;
		extract_cpu_va = (u32*)(
			kbdev->csf.firmware_trace_buffers.mcu_rw.cpu_addr +
			mcu_rw_offset);
		insert_gpu_va =
			(kbdev->csf.firmware_trace_buffers.mcu_write.va_reg->start_pfn << PAGE_SHIFT) +
			mcu_write_offset;
		insert_cpu_va = (u32*)(
			kbdev->csf.firmware_trace_buffers.mcu_write.cpu_addr +
			mcu_write_offset);
		data_buffer_gpu_va =
			(trace_buffer->data_mapping.va_reg->start_pfn << PAGE_SHIFT);

		/* Initialize the Extract variable */
		*extract_cpu_va = 0;

		/* Each FW address shall be mapped and set individually, as we can't
		 * assume anything about their location in the memory address space.
		 */
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.data_address, data_buffer_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.insert_address, insert_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.extract_address, extract_gpu_va);
		kbase_csf_update_firmware_memory(
				kbdev, trace_buffer->gpu_va.size_address,
				trace_buffer->num_pages << PAGE_SHIFT);

		/* One 32-bit enable word per 32 trace enable bits, rounded up */
		trace_enable_size_dwords =
				(trace_buffer->trace_enable_entry_count + 31) >> 5;

		for (i = 0; i < trace_enable_size_dwords; i++) {
			kbase_csf_update_firmware_memory(
					kbdev, trace_buffer->gpu_va.trace_enable + i*4,
					trace_buffer->trace_enable_init_mask[i]);
		}

		/* Store CPU virtual addresses for permanently mapped variables */
		trace_buffer->cpu_va.insert_cpu_va = insert_cpu_va;
		trace_buffer->cpu_va.extract_cpu_va = extract_cpu_va;

		/* Update offsets */
		mcu_write_offset += cache_line_alignment;
		mcu_rw_offset += cache_line_alignment;
	}

out:
	return ret;
}
231
kbase_csf_firmware_trace_buffers_term(struct kbase_device * kbdev)232 void kbase_csf_firmware_trace_buffers_term(struct kbase_device *kbdev)
233 {
234 if (list_empty(&kbdev->csf.firmware_trace_buffers.list))
235 return;
236
237 while (!list_empty(&kbdev->csf.firmware_trace_buffers.list)) {
238 struct firmware_trace_buffer *trace_buffer;
239
240 trace_buffer = list_first_entry(&kbdev->csf.firmware_trace_buffers.list,
241 struct firmware_trace_buffer, node);
242 kbase_csf_firmware_mcu_shared_mapping_term(kbdev, &trace_buffer->data_mapping);
243 list_del(&trace_buffer->node);
244
245 kfree(trace_buffer);
246 }
247
248 kbase_csf_firmware_mcu_shared_mapping_term(
249 kbdev, &kbdev->csf.firmware_trace_buffers.mcu_rw);
250 kbase_csf_firmware_mcu_shared_mapping_term(
251 kbdev, &kbdev->csf.firmware_trace_buffers.mcu_write);
252 }
253
/* Parse one TRACE_BUFFER entry from the firmware image and, if its name
 * matches a known configuration, add a descriptor for it to the device's
 * trace buffer list. Unknown names are silently ignored.
 *
 * Returns 0 on success (including the ignored-entry case), -EINVAL if the
 * entry is too small to contain the fixed header, or -ENOMEM on allocation
 * failure.
 */
int kbase_csf_firmware_parse_trace_buffer_entry(struct kbase_device *kbdev,
						const u32 *entry,
						unsigned int size,
						bool updatable)
{
	/* The name string follows the fixed header; derive its address from
	 * the same offset macro used for the length so the two can't drift
	 * apart (previously the pointer used a hard-coded &entry[7]).
	 */
	const char *name = (const char *)entry + TRACE_BUFFER_ENTRY_NAME_OFFSET;
	struct firmware_trace_buffer *trace_buffer;
	unsigned int name_len;
	unsigned int i;

	/* Reject a truncated entry up front: without this check the
	 * unsigned subtraction below would wrap to a huge name length.
	 */
	if (size < TRACE_BUFFER_ENTRY_NAME_OFFSET)
		return -EINVAL;
	name_len = size - TRACE_BUFFER_ENTRY_NAME_OFFSET;

	/* Allocate enough space for struct firmware_trace_buffer and the
	 * trace buffer name (with NULL termination).
	 */
	trace_buffer =
		kmalloc(sizeof(*trace_buffer) + name_len + 1, GFP_KERNEL);

	if (!trace_buffer)
		return -ENOMEM;

	memcpy(trace_buffer->name, name, name_len);
	trace_buffer->name[name_len] = '\0';

	for (i = 0; i < ARRAY_SIZE(trace_buffer_data); i++) {
		if (!strcmp(trace_buffer_data[i].name, trace_buffer->name)) {
			unsigned int j;

			trace_buffer->kbdev = kbdev;
			trace_buffer->updatable = updatable;
			/* Fixed header layout (32-bit words): type, size
			 * address, insert address, extract address, data
			 * address, trace enable address, enable bit count.
			 */
			trace_buffer->type = entry[0];
			trace_buffer->gpu_va.size_address = entry[1];
			trace_buffer->gpu_va.insert_address = entry[2];
			trace_buffer->gpu_va.extract_address = entry[3];
			trace_buffer->gpu_va.data_address = entry[4];
			trace_buffer->gpu_va.trace_enable = entry[5];
			trace_buffer->trace_enable_entry_count = entry[6];
			trace_buffer->num_pages = trace_buffer_data[i].size;

			for (j = 0; j < CSF_FIRMWARE_TRACE_ENABLE_INIT_MASK_MAX; j++) {
				trace_buffer->trace_enable_init_mask[j] =
					trace_buffer_data[i].trace_enable_init_mask[j];
			}
			break;
		}
	}

	if (i < ARRAY_SIZE(trace_buffer_data)) {
		list_add(&trace_buffer->node, &kbdev->csf.firmware_trace_buffers.list);
		dev_dbg(kbdev->dev, "Trace buffer '%s'", trace_buffer->name);
	} else {
		dev_dbg(kbdev->dev, "Unknown trace buffer '%s'", trace_buffer->name);
		kfree(trace_buffer);
	}

	return 0;
}
309
kbase_csf_firmware_reload_trace_buffers_data(struct kbase_device * kbdev)310 void kbase_csf_firmware_reload_trace_buffers_data(struct kbase_device *kbdev)
311 {
312 struct firmware_trace_buffer *trace_buffer;
313 u32 mcu_rw_offset = 0, mcu_write_offset = 0;
314 const u32 cache_line_alignment = kbase_get_cache_line_alignment(kbdev);
315
316 list_for_each_entry(trace_buffer, &kbdev->csf.firmware_trace_buffers.list, node) {
317 u32 extract_gpu_va, insert_gpu_va, data_buffer_gpu_va,
318 trace_enable_size_dwords;
319 u32 *extract_cpu_va, *insert_cpu_va;
320 unsigned int i;
321
322 /* Rely on the fact that all required mappings already exist */
323 extract_gpu_va =
324 (kbdev->csf.firmware_trace_buffers.mcu_rw.va_reg->start_pfn << PAGE_SHIFT) +
325 mcu_rw_offset;
326 extract_cpu_va = (u32*)(
327 kbdev->csf.firmware_trace_buffers.mcu_rw.cpu_addr +
328 mcu_rw_offset);
329 insert_gpu_va =
330 (kbdev->csf.firmware_trace_buffers.mcu_write.va_reg->start_pfn << PAGE_SHIFT) +
331 mcu_write_offset;
332 insert_cpu_va = (u32*)(
333 kbdev->csf.firmware_trace_buffers.mcu_write.cpu_addr +
334 mcu_write_offset);
335 data_buffer_gpu_va =
336 (trace_buffer->data_mapping.va_reg->start_pfn << PAGE_SHIFT);
337
338 /* Notice that the function only re-updates firmware memory locations
339 * with information that allows access to the trace buffers without
340 * really resetting their state. For instance, the Insert offset will
341 * not change and, as a consequence, the Extract offset is not going
342 * to be reset to keep consistency.
343 */
344
345 /* Each FW address shall be mapped and set individually, as we can't
346 * assume anything about their location in the memory address space.
347 */
348 kbase_csf_update_firmware_memory(
349 kbdev, trace_buffer->gpu_va.data_address, data_buffer_gpu_va);
350 kbase_csf_update_firmware_memory(
351 kbdev, trace_buffer->gpu_va.insert_address, insert_gpu_va);
352 kbase_csf_update_firmware_memory(
353 kbdev, trace_buffer->gpu_va.extract_address, extract_gpu_va);
354 kbase_csf_update_firmware_memory(
355 kbdev, trace_buffer->gpu_va.size_address,
356 trace_buffer->num_pages << PAGE_SHIFT);
357
358 trace_enable_size_dwords =
359 (trace_buffer->trace_enable_entry_count + 31) >> 5;
360
361 for (i = 0; i < trace_enable_size_dwords; i++) {
362 kbase_csf_update_firmware_memory(
363 kbdev, trace_buffer->gpu_va.trace_enable + i*4,
364 trace_buffer->trace_enable_init_mask[i]);
365 }
366
367 /* Store CPU virtual addresses for permanently mapped variables,
368 * as they might have slightly changed.
369 */
370 trace_buffer->cpu_va.insert_cpu_va = insert_cpu_va;
371 trace_buffer->cpu_va.extract_cpu_va = extract_cpu_va;
372
373 /* Update offsets */
374 mcu_write_offset += cache_line_alignment;
375 mcu_rw_offset += cache_line_alignment;
376 }
377 }
378
kbase_csf_firmware_get_trace_buffer(struct kbase_device * kbdev,const char * name)379 struct firmware_trace_buffer *kbase_csf_firmware_get_trace_buffer(
380 struct kbase_device *kbdev, const char *name)
381 {
382 struct firmware_trace_buffer *trace_buffer;
383
384 list_for_each_entry(trace_buffer, &kbdev->csf.firmware_trace_buffers.list, node) {
385 if (!strcmp(trace_buffer->name, name))
386 return trace_buffer;
387 }
388
389 return NULL;
390 }
391 EXPORT_SYMBOL(kbase_csf_firmware_get_trace_buffer);
392
/* Report the number of trace enable bits exposed by a trace buffer, as
 * parsed from the firmware entry (header word 6).
 */
unsigned int kbase_csf_firmware_trace_buffer_get_trace_enable_bits_count(
	const struct firmware_trace_buffer *trace_buffer)
{
	return trace_buffer->trace_enable_entry_count;
}
EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_get_trace_enable_bits_count);
399
/* Set or clear one trace enable bit in the cached mask and push the
 * affected 32-bit word to firmware memory. Out-of-range bits are ignored.
 * Caller must hold kbdev->hwaccess_lock.
 */
static void kbasep_csf_firmware_trace_buffer_update_trace_enable_bit(
	struct firmware_trace_buffer *tb, unsigned int bit, bool value)
{
	struct kbase_device *kbdev = tb->kbdev;
	unsigned int word;
	u32 bit_mask;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (bit >= tb->trace_enable_entry_count)
		return;

	word = bit >> 5;
	bit_mask = 1u << (bit & 0x1F);

	if (value)
		tb->trace_enable_init_mask[word] |= bit_mask;
	else
		tb->trace_enable_init_mask[word] &= ~bit_mask;

	/* This is not strictly needed as the caller is supposed to
	 * reload the firmware image (through GPU reset) after updating
	 * the bitmask. Otherwise there is no guarantee that firmware
	 * will take into account the updated bitmask for all types of
	 * trace buffers, since firmware could continue to use the
	 * value of bitmask it cached after the boot.
	 */
	kbase_csf_update_firmware_memory(
		kbdev,
		tb->gpu_va.trace_enable + word * 4,
		tb->trace_enable_init_mask[word]);
}
432
/* Public wrapper for updating a single trace enable bit.
 * Non-updatable buffers require a silent GPU reset so the firmware picks
 * up the new mask; updatable ones are told via FIRMWARE_CONFIG_UPDATE.
 * Returns 0 on success, -EAGAIN if a GPU reset is already pending, or an
 * error from the config update trigger.
 */
int kbase_csf_firmware_trace_buffer_update_trace_enable_bit(
	struct firmware_trace_buffer *tb, unsigned int bit, bool value)
{
	struct kbase_device *kbdev = tb->kbdev;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);

	/* If trace buffer update cannot be performed with
	 * FIRMWARE_CONFIG_UPDATE then we need to do a
	 * silent reset before we update the memory.
	 */
	if (!tb->updatable && kbase_reset_gpu_silent(kbdev)) {
		/* If there is already a GPU reset pending then inform
		 * the User to retry the update.
		 */
		dev_warn(
			kbdev->dev,
			"GPU reset already in progress when enabling firmware timeline.");
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
		return -EAGAIN;
	}

	kbasep_csf_firmware_trace_buffer_update_trace_enable_bit(tb, bit, value);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);

	if (tb->updatable)
		ret = kbase_csf_trigger_firmware_config_update(kbdev);

	return ret;
}
EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_update_trace_enable_bit);
469
kbase_csf_firmware_trace_buffer_is_empty(const struct firmware_trace_buffer * trace_buffer)470 bool kbase_csf_firmware_trace_buffer_is_empty(
471 const struct firmware_trace_buffer *trace_buffer)
472 {
473 return *(trace_buffer->cpu_va.insert_cpu_va) ==
474 *(trace_buffer->cpu_va.extract_cpu_va);
475 }
476 EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_is_empty);
477
/* Copy up to @num_bytes of pending trace data out of the circular data
 * buffer into @data, advancing the Extract offset past what was consumed.
 * Returns the number of bytes actually copied (possibly 0).
 *
 * The buffer is a ring of buffer_size bytes: the firmware advances Insert,
 * the host advances Extract. No locking is done here; callers serialise
 * access (the debugfs reader takes hwaccess_lock).
 */
unsigned int kbase_csf_firmware_trace_buffer_read_data(
	struct firmware_trace_buffer *trace_buffer, u8 *data, unsigned int num_bytes)
{
	unsigned int bytes_copied;
	u8 *data_cpu_va = trace_buffer->data_mapping.cpu_addr;
	u32 extract_offset = *(trace_buffer->cpu_va.extract_cpu_va);
	u32 insert_offset = *(trace_buffer->cpu_va.insert_cpu_va);
	u32 buffer_size = trace_buffer->num_pages << PAGE_SHIFT;

	if (insert_offset >= extract_offset) {
		/* Pending data is one contiguous run: [extract, insert) */
		bytes_copied = min_t(unsigned int, num_bytes,
				     (insert_offset - extract_offset));
		memcpy(data, &data_cpu_va[extract_offset], bytes_copied);
		extract_offset += bytes_copied;
	} else {
		/* Pending data wraps: tail run [extract, buffer_size) then
		 * head run [0, insert).
		 */
		unsigned int bytes_copied_head, bytes_copied_tail;

		bytes_copied_tail = min_t(unsigned int, num_bytes,
					  (buffer_size - extract_offset));
		memcpy(data, &data_cpu_va[extract_offset], bytes_copied_tail);

		bytes_copied_head = min_t(unsigned int,
					  (num_bytes - bytes_copied_tail), insert_offset);
		memcpy(&data[bytes_copied_tail], data_cpu_va, bytes_copied_head);

		bytes_copied = bytes_copied_head + bytes_copied_tail;
		extract_offset += bytes_copied;
		/* If the tail run was fully consumed, Extract wraps to just
		 * past the head bytes copied from the start of the ring.
		 */
		if (extract_offset >= buffer_size)
			extract_offset = bytes_copied_head;
	}

	/* Publish the new Extract offset so the firmware can reuse the space */
	*(trace_buffer->cpu_va.extract_cpu_va) = extract_offset;

	return bytes_copied;
}
EXPORT_SYMBOL(kbase_csf_firmware_trace_buffer_read_data);
514
515 #if IS_ENABLED(CONFIG_DEBUG_FS)
516
#define U32_BITS 32

/* Collapse the first two 32-bit trace enable words into a u64 for the
 * debugfs interface. Enable bits beyond 64 (if the firmware exposes any)
 * are not representable here and are omitted.
 */
static u64 get_trace_buffer_active_mask64(struct firmware_trace_buffer *tb)
{
	u64 active_mask = tb->trace_enable_init_mask[0];

	if (tb->trace_enable_entry_count > U32_BITS)
		active_mask |= (u64)tb->trace_enable_init_mask[1] << U32_BITS;

	return active_mask;
}
527
/* Apply a 64-bit enable mask to every trace enable bit of @tb.
 * Bits beyond 64 (if trace_enable_entry_count exceeds 64) cannot be
 * expressed in @mask and are cleared.
 * Caller must hold kbdev->hwaccess_lock (asserted by the helper).
 *
 * Note: the shift is guarded because "mask >> i" with i >= 64 is
 * undefined behaviour, and trace_enable_entry_count may exceed 64.
 */
static void update_trace_buffer_active_mask64(struct firmware_trace_buffer *tb,
					      u64 mask)
{
	unsigned int i;

	for (i = 0; i < tb->trace_enable_entry_count; i++) {
		bool value = (i < 64) ? ((mask >> i) & 1) : false;

		kbasep_csf_firmware_trace_buffer_update_trace_enable_bit(
			tb, i, value);
	}
}
537
/* Install a new 64-bit enable mask on @tb.
 * For non-updatable buffers a silent GPU reset is requested first so the
 * firmware reloads with the new mask; updatable buffers are notified via
 * FIRMWARE_CONFIG_UPDATE afterwards.
 * Returns 0 on success, -EAGAIN if a GPU reset is already pending, or an
 * error from the config update trigger.
 */
static int set_trace_buffer_active_mask64(struct firmware_trace_buffer *tb,
					  u64 mask)
{
	struct kbase_device *kbdev = tb->kbdev;
	unsigned long irq_flags;
	int ret = 0;

	if (!tb->updatable) {
		/* If there is already a GPU reset pending, need a retry */
		spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
		if (kbase_reset_gpu_silent(kbdev))
			ret = -EAGAIN;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);

		if (ret)
			return ret;
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
	update_trace_buffer_active_mask64(tb, mask);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);

	/* if we can update the config we need to just trigger
	 * FIRMWARE_CONFIG_UPDATE.
	 */
	if (tb->updatable)
		ret = kbase_csf_trigger_firmware_config_update(kbdev);

	return ret;
}
567
/* debugfs getter: report the current enable mask of the main firmware
 * trace buffer as a u64. Returns -EIO if that buffer does not exist.
 */
static int kbase_csf_firmware_trace_enable_mask_read(void *data, u64 *val)
{
	struct kbase_device *kbdev = data;
	struct firmware_trace_buffer *tb =
		kbase_csf_firmware_get_trace_buffer(kbdev, FW_TRACE_BUF_NAME);

	if (!tb) {
		dev_err(kbdev->dev, "Couldn't get the firmware trace buffer");
		return -EIO;
	}

	/* The enabled traces limited to u64 here, regarded practical */
	*val = get_trace_buffer_active_mask64(tb);
	return 0;
}
582
/* debugfs setter: apply a new enable mask to the main firmware trace
 * buffer. Bits above the buffer's (capped) enable bit count are ignored.
 * Returns 0 on success or a negative error code.
 */
static int kbase_csf_firmware_trace_enable_mask_write(void *data, u64 val)
{
	struct kbase_device *kbdev = (struct kbase_device *)data;
	struct firmware_trace_buffer *tb =
		kbase_csf_firmware_get_trace_buffer(kbdev, FW_TRACE_BUF_NAME);
	u64 new_mask;
	unsigned int enable_bits_count;

	if (tb == NULL) {
		dev_err(kbdev->dev, "Couldn't get the firmware trace buffer");
		return -EIO;
	}

	/* Ignore unsupported types */
	enable_bits_count =
		kbase_csf_firmware_trace_buffer_get_trace_enable_bits_count(tb);
	if (enable_bits_count > 64) {
		dev_dbg(kbdev->dev, "Limit enabled bits count from %u to 64",
			enable_bits_count);
		enable_bits_count = 64;
	}

	/* Mask off bits beyond the supported count. The previous expression
	 * "(1 << enable_bits_count) - 1" shifted a 32-bit int, which is
	 * undefined for counts >= 31 and can never build a full 64-bit mask.
	 */
	if (enable_bits_count == 64)
		new_mask = val;
	else
		new_mask = val & (((u64)1 << enable_bits_count) - 1);

	if (new_mask != get_trace_buffer_active_mask64(tb))
		return set_trace_buffer_active_mask64(tb, new_mask);
	else
		return 0;
}
611
kbasep_csf_firmware_trace_debugfs_open(struct inode * in,struct file * file)612 static int kbasep_csf_firmware_trace_debugfs_open(struct inode *in,
613 struct file *file)
614 {
615 struct kbase_device *kbdev = in->i_private;
616
617 file->private_data = kbdev;
618 dev_dbg(kbdev->dev, "Opened firmware trace buffer dump debugfs file");
619
620 return 0;
621 }
622
kbasep_csf_firmware_trace_debugfs_read(struct file * file,char __user * buf,size_t size,loff_t * ppos)623 static ssize_t kbasep_csf_firmware_trace_debugfs_read(struct file *file,
624 char __user *buf, size_t size, loff_t *ppos)
625 {
626 struct kbase_device *kbdev = file->private_data;
627 u8 *pbyte;
628 unsigned int n_read;
629 unsigned long not_copied;
630 /* Limit the kernel buffer to no more than two pages */
631 size_t mem = MIN(size, 2 * PAGE_SIZE);
632 unsigned long flags;
633
634 struct firmware_trace_buffer *tb =
635 kbase_csf_firmware_get_trace_buffer(kbdev, FW_TRACE_BUF_NAME);
636
637 if (tb == NULL) {
638 dev_err(kbdev->dev, "Couldn't get the firmware trace buffer");
639 return -EIO;
640 }
641
642 pbyte = kmalloc(mem, GFP_KERNEL);
643 if (pbyte == NULL) {
644 dev_err(kbdev->dev, "Couldn't allocate memory for trace buffer dump");
645 return -ENOMEM;
646 }
647
648 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
649 n_read = kbase_csf_firmware_trace_buffer_read_data(tb, pbyte, mem);
650 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
651
652 /* Do the copy, if we have obtained some trace data */
653 not_copied = (n_read) ? copy_to_user(buf, pbyte, n_read) : 0;
654 kfree(pbyte);
655
656 if (!not_copied) {
657 *ppos += n_read;
658 return n_read;
659 }
660
661 dev_err(kbdev->dev, "Couldn't copy trace buffer data to user space buffer");
662 return -EFAULT;
663 }
664
665
666 DEFINE_SIMPLE_ATTRIBUTE(kbase_csf_firmware_trace_enable_mask_fops,
667 kbase_csf_firmware_trace_enable_mask_read,
668 kbase_csf_firmware_trace_enable_mask_write, "%llx\n");
669
670 static const struct file_operations kbasep_csf_firmware_trace_debugfs_fops = {
671 .owner = THIS_MODULE,
672 .open = kbasep_csf_firmware_trace_debugfs_open,
673 .read = kbasep_csf_firmware_trace_debugfs_read,
674 .llseek = no_llseek,
675 };
676
kbase_csf_firmware_trace_buffer_debugfs_init(struct kbase_device * kbdev)677 void kbase_csf_firmware_trace_buffer_debugfs_init(struct kbase_device *kbdev)
678 {
679 debugfs_create_file("fw_trace_enable_mask", 0644,
680 kbdev->mali_debugfs_directory, kbdev,
681 &kbase_csf_firmware_trace_enable_mask_fops);
682
683 debugfs_create_file("fw_traces", 0444,
684 kbdev->mali_debugfs_directory, kbdev,
685 &kbasep_csf_firmware_trace_debugfs_fops);
686 }
687 #endif /* CONFIG_DEBUG_FS */
688