// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include "debug/mali_kbase_debug_ktrace_internal.h"
#include "debug/backend/mali_kbase_debug_ktrace_csf.h"

#if KBASE_KTRACE_TARGET_RBUF

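/*
 * Append the CSF-specific column names to the ktrace header line.
 *
 * This and the message formatter below share one pattern: each snprintf()
 * appends at buffer + *written, the remaining size is clamped with
 * MAX(sz - *written, 0) so an exhausted buffer never yields a negative size,
 * and the return value is clamped to zero defensively before being
 * accumulated into *written.
 */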
void kbasep_ktrace_backend_format_header(char *buffer, int sz, s32 *written)
{
	*written += MAX(snprintf(buffer + *written, MAX(sz - *written, 0),
			"group,slot,prio,csi,kcpu"), 0);
}

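/*
 * Format the backend-specific fields of one trace message so that they line
 * up with the "group,slot,prio,csi,kcpu" header above. Fields whose flag is
 * not set are left empty: a group-only message might read "1,2,3,," while a
 * KCPU-only message would read ",,,,kcpu 4 (0x10)".
 */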
void kbasep_ktrace_backend_format_msg(struct kbase_ktrace_msg *trace_msg,
		char *buffer, int sz, s32 *written)
{
	const union kbase_ktrace_backend * const be_msg = &trace_msg->backend;
	/* At present, no need to check for KBASE_KTRACE_FLAG_BACKEND, as the
	 * other backend-specific flags currently imply this anyway
	 */

	/* group parts */
	if (be_msg->gpu.flags & KBASE_KTRACE_FLAG_CSF_GROUP) {
		const s8 slot = be_msg->gpu.csg_nr;
		/* group,slot, */
		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				"%u,%d,", be_msg->gpu.group_handle, slot), 0);

		/* prio */
		if (slot >= 0)
			*written += MAX(snprintf(buffer + *written,
					MAX(sz - *written, 0),
					"%u", be_msg->gpu.slot_prio), 0);

		/* , */
		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				","), 0);
	} else {
		/* No group,slot,prio fields, but ensure ending with "," */
		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				",,,"), 0);
	}

	/* queue parts: csi */
	if (trace_msg->backend.gpu.flags & KBASE_KTRACE_FLAG_CSF_QUEUE)
		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				"%d", be_msg->gpu.csi_index), 0);

	/* , */
	*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				","), 0);

	if (be_msg->gpu.flags & KBASE_KTRACE_FLAG_CSF_KCPU) {
		/* kcpu data */
		*written += MAX(snprintf(buffer + *written,
				MAX(sz - *written, 0),
				"kcpu %d (0x%llx)",
				be_msg->kcpu.id,
				be_msg->kcpu.extra_info_val), 0);
	}

	/* Don't end with a trailing "," - this is a 'standalone' formatted
	 * msg, caller will handle the delimiters
	 */
}

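/*
 * Add a CSF GPU-side trace message to the ktrace ring buffer.
 *
 * The context is taken from @group or, failing that, from @queue, and the
 * CSF-specific flags set here tell the formatter above which backend fields
 * are valid. The whole operation runs under the ktrace lock with interrupts
 * disabled.
 */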
void kbasep_ktrace_add_csf(struct kbase_device *kbdev,
		enum kbase_ktrace_code code, struct kbase_queue_group *group,
		struct kbase_queue *queue, kbase_ktrace_flag_t flags,
		u64 info_val)
{
	unsigned long irqflags;
	struct kbase_ktrace_msg *trace_msg;
	struct kbase_context *kctx = NULL;

	spin_lock_irqsave(&kbdev->ktrace.lock, irqflags);

	/* Reserve and update indices */
	trace_msg = kbasep_ktrace_reserve(&kbdev->ktrace);

	/* Determine the kctx */
	if (group)
		kctx = group->kctx;
	else if (queue)
		kctx = queue->kctx;

	/* Fill the common part of the message (including backend.gpu.flags) */
	kbasep_ktrace_msg_init(&kbdev->ktrace, trace_msg, code, kctx, flags,
			info_val);

	/* Indicate to the common code that backend-specific parts will be
	 * valid
	 */
	trace_msg->backend.gpu.flags |= KBASE_KTRACE_FLAG_BACKEND;

	/* Fill the CSF-specific parts of the message
	 *
	 * Generally, no need to use default initializers when queue/group not
	 * present - can usually check the flags instead.
	 */

	if (queue) {
		trace_msg->backend.gpu.flags |= KBASE_KTRACE_FLAG_CSF_QUEUE;
		trace_msg->backend.gpu.csi_index = queue->csi_index;
	}

	if (group) {
		const s8 slot = group->csg_nr;

		trace_msg->backend.gpu.flags |= KBASE_KTRACE_FLAG_CSF_GROUP;

		trace_msg->backend.gpu.csg_nr = slot;

		if (slot >= 0) {
			struct kbase_csf_csg_slot *csg_slot =
				&kbdev->csf.scheduler.csg_slots[slot];

			trace_msg->backend.gpu.slot_prio =
				csg_slot->priority;
		}
		/* slot >=0 indicates whether slot_prio valid, so no need to
		 * initialize in the case where it's invalid
		 */

		trace_msg->backend.gpu.group_handle = group->handle;
	}

	WARN_ON((trace_msg->backend.gpu.flags & ~KBASE_KTRACE_FLAG_ALL));

	/* Done */
	spin_unlock_irqrestore(&kbdev->ktrace.lock, irqflags);
}

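/*
 * Add a KCPU command queue trace message to the ktrace ring buffer.
 *
 * @info_val1 is stored in the common part of the message, while @info_val2
 * becomes the KCPU-specific extra value printed alongside the queue id.
 */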
void kbasep_ktrace_add_csf_kcpu(struct kbase_device *kbdev,
				enum kbase_ktrace_code code,
				struct kbase_kcpu_command_queue *queue,
				u64 info_val1, u64 info_val2)
{
	unsigned long irqflags;
	struct kbase_ktrace_msg *trace_msg;
	struct kbase_context *kctx = queue->kctx;

	spin_lock_irqsave(&kbdev->ktrace.lock, irqflags);

	/* Reserve and update indices */
	trace_msg = kbasep_ktrace_reserve(&kbdev->ktrace);

	/* Fill the common part of the message */
	kbasep_ktrace_msg_init(&kbdev->ktrace, trace_msg, code, kctx, 0,
		info_val1);

	/* Indicate to the common code that backend-specific parts will be
	 * valid
	 */
	trace_msg->backend.gpu.flags |= KBASE_KTRACE_FLAG_BACKEND;

	/* Fill the KCPU-specific parts of the message */
	trace_msg->backend.kcpu.id = queue->id;
	trace_msg->backend.kcpu.extra_info_val = info_val2;
	trace_msg->backend.gpu.flags |= KBASE_KTRACE_FLAG_CSF_KCPU;

	WARN_ON((trace_msg->backend.gpu.flags & ~KBASE_KTRACE_FLAG_ALL));

	/* Done */
	spin_unlock_irqrestore(&kbdev->ktrace.lock, irqflags);
}

#endif /* KBASE_KTRACE_TARGET_RBUF */