// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_csf_csg_debugfs.h"
#include <mali_kbase.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <csf/mali_kbase_csf_trace_buffer.h>
#include <backend/gpu/mali_kbase_pm_internal.h>

#if IS_ENABLED(CONFIG_DEBUG_FS)
#include "mali_kbase_csf_tl_reader.h"

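/* Size of the buffer used to receive a scheduler state name written through
 * debugfs; large enough for the longest state name plus a NUL terminator.
 */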
#define MAX_SCHED_STATE_STRING_LEN (16)
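
/**
 * scheduler_state_to_string() - Convert a scheduler state to a printable string
 *
 * @kbdev:       Device pointer, used to warn about an unknown state
 * @sched_state: Scheduler state to convert
 *
 * Return: Name of the state, or NULL if the state is not recognized.
 */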
static const char *scheduler_state_to_string(struct kbase_device *kbdev,
			enum kbase_csf_scheduler_state sched_state)
{
	switch (sched_state) {
	case SCHED_BUSY:
		return "BUSY";
	case SCHED_INACTIVE:
		return "INACTIVE";
	case SCHED_SUSPENDED:
		return "SUSPENDED";
#ifdef KBASE_PM_RUNTIME
	case SCHED_SLEEPING:
		return "SLEEPING";
#endif
	default:
		dev_warn(kbdev->dev, "Unknown Scheduler state %d", sched_state);
		return NULL;
	}
}

/**
 * blocked_reason_to_string() - Convert a blocking reason ID to a string
 *
 * @reason_id: Reason ID, as extracted from the CS_STATUS_BLOCKED_REASON output
 *
 * Return: Suitable string
 */
static const char *blocked_reason_to_string(u32 reason_id)
{
	/* possible blocking reasons of a cs */
	static const char *const cs_blocked_reason[] = {
		[CS_STATUS_BLOCKED_REASON_REASON_UNBLOCKED] = "UNBLOCKED",
		[CS_STATUS_BLOCKED_REASON_REASON_WAIT] = "WAIT",
		[CS_STATUS_BLOCKED_REASON_REASON_PROGRESS_WAIT] =
			"PROGRESS_WAIT",
		[CS_STATUS_BLOCKED_REASON_REASON_SYNC_WAIT] = "SYNC_WAIT",
		[CS_STATUS_BLOCKED_REASON_REASON_DEFERRED] = "DEFERRED",
		[CS_STATUS_BLOCKED_REASON_REASON_RESOURCE] = "RESOURCE",
		[CS_STATUS_BLOCKED_REASON_REASON_FLUSH] = "FLUSH"
	};

	if (WARN_ON(reason_id >= ARRAY_SIZE(cs_blocked_reason)))
		return "UNKNOWN_BLOCKED_REASON_ID";

	return cs_blocked_reason[reason_id];
}

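/**
 * kbasep_csf_scheduler_dump_active_queue_cs_status_wait() - Print the status
 *			information of a queue that is blocked on a wait
 *
 * @file:                 seq_file for printing to
 * @wait_status:          Value of the CS_STATUS_WAIT output
 * @wait_sync_value:      Value the queue is waiting for the sync object to reach
 * @wait_sync_live_value: Current value of the sync object
 * @wait_sync_pointer:    GPU address of the sync object
 * @sb_status:            Value of the CS_STATUS_SCOREBOARDS output
 * @blocked_reason:       Value of the CS_STATUS_BLOCKED_REASON output
 */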
static void kbasep_csf_scheduler_dump_active_queue_cs_status_wait(
	struct seq_file *file, u32 wait_status, u32 wait_sync_value,
	u64 wait_sync_live_value, u64 wait_sync_pointer, u32 sb_status,
	u32 blocked_reason)
{
#define WAITING "Waiting"
#define NOT_WAITING "Not waiting"

	seq_printf(file, "SB_MASK: %d\n",
			CS_STATUS_WAIT_SB_MASK_GET(wait_status));
	seq_printf(file, "PROGRESS_WAIT: %s\n",
			CS_STATUS_WAIT_PROGRESS_WAIT_GET(wait_status) ?
			WAITING : NOT_WAITING);
	seq_printf(file, "PROTM_PEND: %s\n",
			CS_STATUS_WAIT_PROTM_PEND_GET(wait_status) ?
			WAITING : NOT_WAITING);
	seq_printf(file, "SYNC_WAIT: %s\n",
			CS_STATUS_WAIT_SYNC_WAIT_GET(wait_status) ?
			WAITING : NOT_WAITING);
	seq_printf(file, "WAIT_CONDITION: %s\n",
			CS_STATUS_WAIT_SYNC_WAIT_CONDITION_GET(wait_status) ?
			"greater than" : "less or equal");
	seq_printf(file, "SYNC_POINTER: 0x%llx\n", wait_sync_pointer);
	seq_printf(file, "SYNC_VALUE: %d\n", wait_sync_value);
	seq_printf(file, "SYNC_LIVE_VALUE: 0x%016llx\n", wait_sync_live_value);
	seq_printf(file, "SB_STATUS: %u\n",
		   CS_STATUS_SCOREBOARDS_NONZERO_GET(sb_status));
	seq_printf(file, "BLOCKED_REASON: %s\n",
		   blocked_reason_to_string(CS_STATUS_BLOCKED_REASON_REASON_GET(
			   blocked_reason)));
}

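/**
 * kbasep_csf_scheduler_dump_active_cs_trace() - Print the cs_trace
 *                                               configuration of a CSI
 *
 * @file:   seq_file for printing to
 * @stream: CSI firmware interface to read the configuration from
 */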
static void kbasep_csf_scheduler_dump_active_cs_trace(struct seq_file *file,
			struct kbase_csf_cmd_stream_info const *const stream)
{
	u32 val = kbase_csf_firmware_cs_input_read(stream,
			CS_INSTR_BUFFER_BASE_LO);
	u64 addr = ((u64)kbase_csf_firmware_cs_input_read(stream,
				CS_INSTR_BUFFER_BASE_HI) << 32) | val;
	val = kbase_csf_firmware_cs_input_read(stream,
				CS_INSTR_BUFFER_SIZE);

	seq_printf(file, "CS_TRACE_BUF_ADDR: 0x%16llx, SIZE: %u\n", addr, val);

	/* Write offset variable address (pointer) */
	val = kbase_csf_firmware_cs_input_read(stream,
			CS_INSTR_BUFFER_OFFSET_POINTER_LO);
	addr = ((u64)kbase_csf_firmware_cs_input_read(stream,
			CS_INSTR_BUFFER_OFFSET_POINTER_HI) << 32) | val;
	seq_printf(file, "CS_TRACE_BUF_OFFSET_PTR: 0x%16llx\n", addr);

	/* EVENT_SIZE and EVENT_STATEs */
	val = kbase_csf_firmware_cs_input_read(stream, CS_INSTR_CONFIG);
	seq_printf(file, "TRACE_EVENT_SIZE: 0x%x, TRACE_EVENT_STATES 0x%x\n",
			CS_INSTR_CONFIG_EVENT_SIZE_GET(val),
			CS_INSTR_CONFIG_EVENT_STATE_GET(val));
}

/**
 * kbasep_csf_scheduler_dump_active_queue() - Print GPU command queue
 *                                            debug information
 *
 * @file:  seq_file for printing to
 * @queue: Address of a GPU command queue to examine
 */
static void kbasep_csf_scheduler_dump_active_queue(struct seq_file *file,
		struct kbase_queue *queue)
{
	u32 *addr;
	u64 cs_extract;
	u64 cs_insert;
	u32 cs_active;
	u64 wait_sync_pointer;
	u32 wait_status, wait_sync_value;
	u32 sb_status;
	u32 blocked_reason;
	struct kbase_vmap_struct *mapping;
	u64 *evt;
	u64 wait_sync_live_value;

	if (!queue)
		return;

	if (WARN_ON(queue->csi_index == KBASEP_IF_NR_INVALID ||
		    !queue->group))
		return;

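	/* The queue's user I/O region spans two pages: the first (input)
	 * page holds the CS_INSERT offset written by userspace, while the
	 * second (output) page holds CS_EXTRACT and CS_ACTIVE updated by
	 * the firmware.
	 */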
	addr = (u32 *)queue->user_io_addr;
	cs_insert = addr[CS_INSERT_LO/4] | ((u64)addr[CS_INSERT_HI/4] << 32);

	addr = (u32 *)(queue->user_io_addr + PAGE_SIZE);
	cs_extract = addr[CS_EXTRACT_LO/4] | ((u64)addr[CS_EXTRACT_HI/4] << 32);
	cs_active = addr[CS_ACTIVE/4];

#define KBASEP_CSF_DEBUGFS_CS_HEADER_USER_IO \
	"Bind Idx,     Ringbuf addr, Prio,    Insert offset,   Extract offset, Active, Doorbell\n"

	seq_printf(file, KBASEP_CSF_DEBUGFS_CS_HEADER_USER_IO "%8d, %16llx, %4u, %16llx, %16llx, %6u, %8d\n",
			queue->csi_index, queue->base_addr, queue->priority,
			cs_insert, cs_extract, cs_active, queue->doorbell_nr);

	/* Print status information for blocked group waiting for sync object. For on-slot queues,
	 * if cs_trace is enabled, dump the interface's cs_trace configuration.
	 */
	if (kbase_csf_scheduler_group_get_slot(queue->group) < 0) {
		if (CS_STATUS_WAIT_SYNC_WAIT_GET(queue->status_wait)) {
			wait_status = queue->status_wait;
			wait_sync_value = queue->sync_value;
			wait_sync_pointer = queue->sync_ptr;
			sb_status = queue->sb_status;
			blocked_reason = queue->blocked_reason;

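			/* Read the sync object's live value through a
			 * temporary kernel mapping; U64_MAX is reported if
			 * the mapping cannot be obtained.
			 */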
			evt = (u64 *)kbase_phy_alloc_mapping_get(queue->kctx, wait_sync_pointer, &mapping);
			if (evt) {
				wait_sync_live_value = evt[0];
				kbase_phy_alloc_mapping_put(queue->kctx, mapping);
			} else {
				wait_sync_live_value = U64_MAX;
			}

			kbasep_csf_scheduler_dump_active_queue_cs_status_wait(
				file, wait_status, wait_sync_value,
				wait_sync_live_value, wait_sync_pointer,
				sb_status, blocked_reason);
		}
	} else {
		struct kbase_device const *const kbdev =
			queue->group->kctx->kbdev;
		struct kbase_csf_cmd_stream_group_info const *const ginfo =
			&kbdev->csf.global_iface.groups[queue->group->csg_nr];
		struct kbase_csf_cmd_stream_info const *const stream =
			&ginfo->streams[queue->csi_index];
		u64 cmd_ptr;
		u32 req_res;

		if (WARN_ON(!stream))
			return;

		cmd_ptr = kbase_csf_firmware_cs_output(stream,
				CS_STATUS_CMD_PTR_LO);
		cmd_ptr |= (u64)kbase_csf_firmware_cs_output(stream,
				CS_STATUS_CMD_PTR_HI) << 32;
		req_res = kbase_csf_firmware_cs_output(stream,
					CS_STATUS_REQ_RESOURCE);

		seq_printf(file, "CMD_PTR: 0x%llx\n", cmd_ptr);
		seq_printf(file, "REQ_RESOURCE [COMPUTE]: %d\n",
			CS_STATUS_REQ_RESOURCE_COMPUTE_RESOURCES_GET(req_res));
		seq_printf(file, "REQ_RESOURCE [FRAGMENT]: %d\n",
			CS_STATUS_REQ_RESOURCE_FRAGMENT_RESOURCES_GET(req_res));
		seq_printf(file, "REQ_RESOURCE [TILER]: %d\n",
			CS_STATUS_REQ_RESOURCE_TILER_RESOURCES_GET(req_res));
		seq_printf(file, "REQ_RESOURCE [IDVS]: %d\n",
			CS_STATUS_REQ_RESOURCE_IDVS_RESOURCES_GET(req_res));

		wait_status = kbase_csf_firmware_cs_output(stream,
				CS_STATUS_WAIT);
		wait_sync_value = kbase_csf_firmware_cs_output(stream,
					CS_STATUS_WAIT_SYNC_VALUE);
		wait_sync_pointer = kbase_csf_firmware_cs_output(stream,
					CS_STATUS_WAIT_SYNC_POINTER_LO);
		wait_sync_pointer |= (u64)kbase_csf_firmware_cs_output(stream,
					CS_STATUS_WAIT_SYNC_POINTER_HI) << 32;

		sb_status = kbase_csf_firmware_cs_output(stream,
							 CS_STATUS_SCOREBOARDS);
		blocked_reason = kbase_csf_firmware_cs_output(
			stream, CS_STATUS_BLOCKED_REASON);

		evt = (u64 *)kbase_phy_alloc_mapping_get(queue->kctx, wait_sync_pointer, &mapping);
		if (evt) {
			wait_sync_live_value = evt[0];
			kbase_phy_alloc_mapping_put(queue->kctx, mapping);
		} else {
			wait_sync_live_value = U64_MAX;
		}

		kbasep_csf_scheduler_dump_active_queue_cs_status_wait(
			file, wait_status, wait_sync_value,
			wait_sync_live_value, wait_sync_pointer, sb_status,
			blocked_reason);
		/* Dealing with cs_trace */
		if (kbase_csf_scheduler_queue_has_trace(queue))
			kbasep_csf_scheduler_dump_active_cs_trace(file, stream);
		else
			seq_puts(file, "NO CS_TRACE\n");
	}

	seq_puts(file, "\n");
}

/* Waiting timeout for STATUS_UPDATE acknowledgment, in milliseconds */
#define CSF_STATUS_UPDATE_TO_MS (100)

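/**
 * update_active_group_status() - Ask the firmware to update the status of an
 *                                on-slot queue group and wait for the ack
 *
 * @file:  seq_file to print a warning to if the wait times out
 * @group: Queue group that is resident on a CSG slot
 */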
static void update_active_group_status(struct seq_file *file,
		struct kbase_queue_group *const group)
{
	struct kbase_device *const kbdev = group->kctx->kbdev;
	struct kbase_csf_cmd_stream_group_info const *const ginfo =
		&kbdev->csf.global_iface.groups[group->csg_nr];
	long remaining =
		kbase_csf_timeout_in_jiffies(CSF_STATUS_UPDATE_TO_MS);
	unsigned long flags;

	/* A global doorbell ring for the CSG STATUS_UPDATE request, or a User
	 * doorbell ring for the Extract offset update, shall not be made when
	 * the MCU has been put to sleep, otherwise it would undesirably make
	 * the MCU exit the sleep state. It isn't really needed either, as FW
	 * will implicitly update the status of all on-slot groups when the
	 * MCU sleep request is sent to it.
	 */
	if (kbdev->csf.scheduler.state == SCHED_SLEEPING)
		return;

	/* Ring the User doorbell shared between the queues bound to this
	 * group, to have FW update the CS_EXTRACT for all the queues
	 * bound to the group. Ring early so that FW gets adequate time
	 * for the handling.
	 */
	kbase_csf_ring_doorbell(kbdev, group->doorbell_nr);

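	/* Request a STATUS_UPDATE by toggling the STATUS_UPDATE bit of
	 * CSG_REQ so that it differs from the current CSG_ACK value; the
	 * firmware acknowledges the request by making the two bits equal
	 * again.
	 */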
	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	kbase_csf_firmware_csg_input_mask(ginfo, CSG_REQ,
			~kbase_csf_firmware_csg_output(ginfo, CSG_ACK),
			CSG_REQ_STATUS_UPDATE_MASK);
	kbase_csf_scheduler_spin_unlock(kbdev, flags);
	kbase_csf_ring_csg_doorbell(kbdev, group->csg_nr);

	remaining = wait_event_timeout(kbdev->csf.event_wait,
		!((kbase_csf_firmware_csg_input_read(ginfo, CSG_REQ) ^
		kbase_csf_firmware_csg_output(ginfo, CSG_ACK)) &
		CSG_REQ_STATUS_UPDATE_MASK), remaining);

	if (!remaining) {
		dev_err(kbdev->dev,
			"Timed out waiting for STATUS_UPDATE on group %d on slot %d",
			group->handle, group->csg_nr);

		seq_printf(file, "*** Warn: Timed out waiting for STATUS_UPDATE on slot %d\n",
			group->csg_nr);
		seq_puts(file, "*** The following group-record is likely stale\n");
	}
}

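/**
 * kbasep_csf_scheduler_dump_active_group() - Print debug information for a
 *                                            GPU command queue group
 *
 * @file:  seq_file for printing to
 * @group: GPU command queue group to examine
 */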
static void kbasep_csf_scheduler_dump_active_group(struct seq_file *file,
		struct kbase_queue_group *const group)
{
	if (kbase_csf_scheduler_group_get_slot(group) >= 0) {
		struct kbase_device *const kbdev = group->kctx->kbdev;
		u32 ep_c, ep_r;
		char exclusive;
		struct kbase_csf_cmd_stream_group_info const *const ginfo =
			&kbdev->csf.global_iface.groups[group->csg_nr];
		u8 slot_priority =
			kbdev->csf.scheduler.csg_slots[group->csg_nr].priority;

		update_active_group_status(file, group);

		ep_c = kbase_csf_firmware_csg_output(ginfo,
				CSG_STATUS_EP_CURRENT);
		ep_r = kbase_csf_firmware_csg_output(ginfo, CSG_STATUS_EP_REQ);

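		/* Summarise endpoint exclusivity as a single character:
		 * 'C' when compute endpoints are exclusive, 'F' when
		 * fragment endpoints are, '0' when there is no exclusivity.
		 */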
		if (CSG_STATUS_EP_REQ_EXCLUSIVE_COMPUTE_GET(ep_r))
			exclusive = 'C';
		else if (CSG_STATUS_EP_REQ_EXCLUSIVE_FRAGMENT_GET(ep_r))
			exclusive = 'F';
		else
			exclusive = '0';

		seq_puts(file, "GroupID, CSG NR, CSG Prio, Run State, Priority, C_EP(Alloc/Req), F_EP(Alloc/Req), T_EP(Alloc/Req), Exclusive\n");
		seq_printf(file, "%7d, %6d, %8d, %9d, %8d, %11d/%3d, %11d/%3d, %11d/%3d, %9c\n",
			group->handle,
			group->csg_nr,
			slot_priority,
			group->run_state,
			group->priority,
			CSG_STATUS_EP_CURRENT_COMPUTE_EP_GET(ep_c),
			CSG_STATUS_EP_REQ_COMPUTE_EP_GET(ep_r),
			CSG_STATUS_EP_CURRENT_FRAGMENT_EP_GET(ep_c),
			CSG_STATUS_EP_REQ_FRAGMENT_EP_GET(ep_r),
			CSG_STATUS_EP_CURRENT_TILER_EP_GET(ep_c),
			CSG_STATUS_EP_REQ_TILER_EP_GET(ep_r),
			exclusive);

		/* Wait for the User doorbell ring to take effect */
		if (kbdev->csf.scheduler.state != SCHED_SLEEPING)
			msleep(100);
	} else {
		seq_puts(file, "GroupID, CSG NR, Run State, Priority\n");
		seq_printf(file, "%7d, %6d, %9d, %8d\n",
			group->handle,
			group->csg_nr,
			group->run_state,
			group->priority);
	}

	if (group->run_state != KBASE_CSF_GROUP_TERMINATED) {
		unsigned int i;

		seq_puts(file, "Bound queues:\n");

		for (i = 0; i < MAX_SUPPORTED_STREAMS_PER_GROUP; i++) {
			kbasep_csf_scheduler_dump_active_queue(file,
					group->bound_queues[i]);
		}
	}

	seq_puts(file, "\n");
}

/**
 * kbasep_csf_queue_group_debugfs_show() - Print per-context GPU command queue
 *					   group debug information
 *
 * @file: The seq_file for printing to
 * @data: The debugfs dentry private data, a pointer to kbase context
 *
 * Return: Negative error code or 0 on success.
 */
static int kbasep_csf_queue_group_debugfs_show(struct seq_file *file,
		void *data)
{
	u32 gr;
	struct kbase_context *const kctx = file->private;
	struct kbase_device *kbdev;

	if (WARN_ON(!kctx))
		return -EINVAL;

	kbdev = kctx->kbdev;

	seq_printf(file, "MALI_CSF_CSG_DEBUGFS_VERSION: v%u\n",
			MALI_CSF_CSG_DEBUGFS_VERSION);

	mutex_lock(&kctx->csf.lock);
	kbase_csf_scheduler_lock(kbdev);
	if (kbdev->csf.scheduler.state == SCHED_SLEEPING) {
		/* Wait for the MCU sleep request to complete. Please refer to
		 * the update_active_group_status() function for an
		 * explanation.
		 */
		kbase_pm_wait_for_desired_state(kbdev);
	}
	for (gr = 0; gr < MAX_QUEUE_GROUP_NUM; gr++) {
		struct kbase_queue_group *const group =
			kctx->csf.queue_groups[gr];

		if (group)
			kbasep_csf_scheduler_dump_active_group(file, group);
	}
	kbase_csf_scheduler_unlock(kbdev);
	mutex_unlock(&kctx->csf.lock);

	return 0;
}

/**
 * kbasep_csf_scheduler_dump_active_groups() - Print debug info for active
 *                                             GPU command queue groups
 *
 * @file: The seq_file for printing to
 * @data: The debugfs dentry private data, a pointer to kbase_device
 *
 * Return: Negative error code or 0 on success.
 */
static int kbasep_csf_scheduler_dump_active_groups(struct seq_file *file,
		void *data)
{
	u32 csg_nr;
	struct kbase_device *kbdev = file->private;
	u32 num_groups = kbdev->csf.global_iface.group_num;

	seq_printf(file, "MALI_CSF_CSG_DEBUGFS_VERSION: v%u\n",
			MALI_CSF_CSG_DEBUGFS_VERSION);

	kbase_csf_scheduler_lock(kbdev);
	if (kbdev->csf.scheduler.state == SCHED_SLEEPING) {
		/* Wait for the MCU sleep request to complete. Please refer to
		 * the update_active_group_status() function for an
		 * explanation.
		 */
		kbase_pm_wait_for_desired_state(kbdev);
	}
	for (csg_nr = 0; csg_nr < num_groups; csg_nr++) {
		struct kbase_queue_group *const group =
			kbdev->csf.scheduler.csg_slots[csg_nr].resident_group;

		if (!group)
			continue;

		seq_printf(file, "\nCtx %d_%d\n", group->kctx->tgid,
				group->kctx->id);

		kbasep_csf_scheduler_dump_active_group(file, group);
	}
	kbase_csf_scheduler_unlock(kbdev);

	return 0;
}

static int kbasep_csf_queue_group_debugfs_open(struct inode *in,
		struct file *file)
{
	return single_open(file, kbasep_csf_queue_group_debugfs_show,
			in->i_private);
}

static int kbasep_csf_active_queue_groups_debugfs_open(struct inode *in,
		struct file *file)
{
	return single_open(file, kbasep_csf_scheduler_dump_active_groups,
			in->i_private);
}

static const struct file_operations kbasep_csf_queue_group_debugfs_fops = {
	.open = kbasep_csf_queue_group_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void kbase_csf_queue_group_debugfs_init(struct kbase_context *kctx)
{
	struct dentry *file;
#if (KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE)
	const mode_t mode = 0444;
#else
	const mode_t mode = 0400;
#endif

	if (WARN_ON(!kctx || IS_ERR_OR_NULL(kctx->kctx_dentry)))
		return;

	file = debugfs_create_file("groups", mode,
		kctx->kctx_dentry, kctx, &kbasep_csf_queue_group_debugfs_fops);

	if (IS_ERR_OR_NULL(file)) {
		dev_warn(kctx->kbdev->dev,
		    "Unable to create per context queue groups debugfs entry");
	}
}

static const struct file_operations
	kbasep_csf_active_queue_groups_debugfs_fops = {
	.open = kbasep_csf_active_queue_groups_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int kbasep_csf_debugfs_scheduling_timer_enabled_get(
		void *data, u64 *val)
{
	struct kbase_device *const kbdev = data;

	*val = kbase_csf_scheduler_timer_is_enabled(kbdev);

	return 0;
}

static int kbasep_csf_debugfs_scheduling_timer_enabled_set(
		void *data, u64 val)
{
	struct kbase_device *const kbdev = data;

	kbase_csf_scheduler_timer_set_enabled(kbdev, val != 0);

	return 0;
}

static int kbasep_csf_debugfs_scheduling_timer_kick_set(
		void *data, u64 val)
{
	struct kbase_device *const kbdev = data;

	kbase_csf_scheduler_kick(kbdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(kbasep_csf_debugfs_scheduling_timer_enabled_fops,
		&kbasep_csf_debugfs_scheduling_timer_enabled_get,
		&kbasep_csf_debugfs_scheduling_timer_enabled_set,
		"%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(kbasep_csf_debugfs_scheduling_timer_kick_fops,
		NULL,
		&kbasep_csf_debugfs_scheduling_timer_kick_set,
		"%llu\n");

/**
 * kbase_csf_debugfs_scheduler_state_get() - Get the state of the scheduler.
 *
 * @file:     Object of the file that is being read.
 * @user_buf: User buffer that contains the string.
 * @count:    Length of user buffer
 * @ppos:     Offset within file object
 *
 * This function will return the current Scheduler state to Userspace.
 * The Scheduler may have exited that state by the time the state string is
 * received by Userspace.
 *
 * Return: 0 if the Scheduler was found in an unexpected state, or the
 *         size of the state string if it was copied successfully to the
 *         User buffer, or a negative value in case of an error.
 */
static ssize_t kbase_csf_debugfs_scheduler_state_get(struct file *file,
		    char __user *user_buf, size_t count, loff_t *ppos)
{
	struct kbase_device *kbdev = file->private_data;
	struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
	const char *state_string;

	kbase_csf_scheduler_lock(kbdev);
	state_string = scheduler_state_to_string(kbdev, scheduler->state);
	kbase_csf_scheduler_unlock(kbdev);

	/* Return 0 early rather than passing a NULL string (and crashing in
	 * strlen()) when the state was not recognised.
	 */
	if (!state_string)
		return 0;

	return simple_read_from_buffer(user_buf, count, ppos,
				       state_string, strlen(state_string));
}

/**
 * kbase_csf_debugfs_scheduler_state_set() - Set the state of the scheduler.
 *
 * @file:  Object of the file that is being written to.
 * @ubuf:  User buffer that contains the string.
 * @count: Length of user buffer
 * @ppos:  Offset within file object
 *
 * This function will update the Scheduler state as per the state string
 * passed by Userspace. The Scheduler may not remain in the new state for
 * long.
 *
 * Return: Negative value if the string doesn't correspond to a valid Scheduler
 *         state or if the copy from the user buffer failed, otherwise the
 *         length of the User buffer.
 */
static ssize_t kbase_csf_debugfs_scheduler_state_set(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{
	struct kbase_device *kbdev = file->private_data;
	char buf[MAX_SCHED_STATE_STRING_LEN];
	ssize_t ret = count;

	CSTD_UNUSED(ppos);

	count = min_t(size_t, sizeof(buf) - 1, count);
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	if (sysfs_streq(buf, "SUSPENDED"))
		kbase_csf_scheduler_pm_suspend(kbdev);
#ifdef KBASE_PM_RUNTIME
	else if (sysfs_streq(buf, "SLEEPING"))
		kbase_csf_scheduler_force_sleep(kbdev);
#endif
	else if (sysfs_streq(buf, "INACTIVE"))
		kbase_csf_scheduler_force_wakeup(kbdev);
	else {
		dev_dbg(kbdev->dev, "Bad scheduler state %s", buf);
		ret = -EINVAL;
	}

	return ret;
}

static const struct file_operations kbasep_csf_debugfs_scheduler_state_fops = {
	.owner = THIS_MODULE,
	.read = kbase_csf_debugfs_scheduler_state_get,
	.write = kbase_csf_debugfs_scheduler_state_set,
	.open = simple_open,
	.llseek = default_llseek,
};

void kbase_csf_debugfs_init(struct kbase_device *kbdev)
{
	debugfs_create_file("active_groups", 0444,
		kbdev->mali_debugfs_directory, kbdev,
		&kbasep_csf_active_queue_groups_debugfs_fops);

	debugfs_create_file("scheduling_timer_enabled", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_csf_debugfs_scheduling_timer_enabled_fops);
	debugfs_create_file("scheduling_timer_kick", 0200,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_csf_debugfs_scheduling_timer_kick_fops);
	debugfs_create_file("scheduler_state", 0644,
			kbdev->mali_debugfs_directory, kbdev,
			&kbasep_csf_debugfs_scheduler_state_fops);

	kbase_csf_tl_reader_debugfs_init(kbdev);
	kbase_csf_firmware_trace_buffer_debugfs_init(kbdev);
}
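
/* Example usage from a shell, assuming debugfs is mounted at
 * /sys/kernel/debug and the device directory has the default "mali0" name
 * (both of which depend on the system's configuration):
 *
 *   cat /sys/kernel/debug/mali0/active_groups
 *   echo SLEEPING > /sys/kernel/debug/mali0/scheduler_state
 *   echo 1 > /sys/kernel/debug/mali0/scheduling_timer_kick
 */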

#else
/*
 * Stub functions for when debugfs is disabled
 */
void kbase_csf_queue_group_debugfs_init(struct kbase_context *kctx)
{
}

void kbase_csf_debugfs_init(struct kbase_device *kbdev)
{
}

#endif /* CONFIG_DEBUG_FS */