// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_hwcnt_gpu.h"
#include "mali_kbase_hwcnt_gpu_narrow.h"

#include <linux/bug.h>
#include <linux/err.h>
#include <linux/slab.h>

int kbase_hwcnt_gpu_metadata_narrow_create(
	const struct kbase_hwcnt_metadata_narrow **dst_md_narrow,
	const struct kbase_hwcnt_metadata *src_md)
{
	struct kbase_hwcnt_description desc;
	struct kbase_hwcnt_group_description group;
	struct kbase_hwcnt_block_description
		blks[KBASE_HWCNT_V5_BLOCK_TYPE_COUNT];
	size_t prfcnt_values_per_block;
	size_t blk;
	int err;
	struct kbase_hwcnt_metadata_narrow *metadata_narrow;

	if (!dst_md_narrow || !src_md || !src_md->grp_metadata ||
	    !src_md->grp_metadata[0].blk_metadata)
		return -EINVAL;

	/* Only metadata with exactly one group and
	 * KBASE_HWCNT_V5_BLOCK_TYPE_COUNT blocks is supported.
	 */
	if ((kbase_hwcnt_metadata_group_count(src_md) != 1) ||
	    (kbase_hwcnt_metadata_block_count(src_md, 0) !=
	     KBASE_HWCNT_V5_BLOCK_TYPE_COUNT))
		return -EINVAL;

	/* Get the number of values in the first block. */
	prfcnt_values_per_block =
		kbase_hwcnt_metadata_block_values_count(src_md, 0, 0);

	/* Check that all blocks have the same number of values. */
	for (blk = 1; blk < KBASE_HWCNT_V5_BLOCK_TYPE_COUNT; blk++) {
		size_t val_cnt =
			kbase_hwcnt_metadata_block_values_count(src_md, 0, blk);
		if (val_cnt != prfcnt_values_per_block)
			return -EINVAL;
	}

	/* Only 64 or 128 values per block are supported. */
	if ((prfcnt_values_per_block != 64) && (prfcnt_values_per_block != 128))
		return -EINVAL;

	metadata_narrow = kmalloc(sizeof(*metadata_narrow), GFP_KERNEL);
	if (!metadata_narrow)
		return -ENOMEM;

	/* Narrow to 64 values per block to keep the API backward compatible. */
	prfcnt_values_per_block = 64;

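	/* For illustration: if a source block reports 4 header values (the
	 * usual V5 layout), its narrow description below becomes
	 * hdr_cnt = 4 and ctr_cnt = 64 - 4 = 60 counters.
	 */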
	for (blk = 0; blk < KBASE_HWCNT_V5_BLOCK_TYPE_COUNT; blk++) {
		size_t blk_hdr_cnt = kbase_hwcnt_metadata_block_headers_count(
			src_md, 0, blk);
		blks[blk] = (struct kbase_hwcnt_block_description){
			.type = kbase_hwcnt_metadata_block_type(src_md, 0, blk),
			.inst_cnt = kbase_hwcnt_metadata_block_instance_count(
				src_md, 0, blk),
			.hdr_cnt = blk_hdr_cnt,
			.ctr_cnt = prfcnt_values_per_block - blk_hdr_cnt,
		};
	}

	group = (struct kbase_hwcnt_group_description){
		.type = kbase_hwcnt_metadata_group_type(src_md, 0),
		.blk_cnt = KBASE_HWCNT_V5_BLOCK_TYPE_COUNT,
		.blks = blks,
	};

	desc = (struct kbase_hwcnt_description){
		.grp_cnt = kbase_hwcnt_metadata_group_count(src_md),
		.avail_mask = src_md->avail_mask,
		.clk_cnt = src_md->clk_cnt,
		.grps = &group,
	};

	err = kbase_hwcnt_metadata_create(&desc, &metadata_narrow->metadata);
	if (!err) {
		/* Halve the dump buffer size: the narrowed metadata stores
		 * 32-bit values, whereas the metadata just created uses
		 * 64-bit values for each block entry.
		 */
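		/* For example: with 64 values per block, a 64-bit block
		 * occupies 64 * 8 = 512 bytes, while its 32-bit narrow
		 * counterpart needs only 64 * 4 = 256 bytes.
		 */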
		metadata_narrow->dump_buf_bytes =
			metadata_narrow->metadata->dump_buf_bytes >> 1;
		*dst_md_narrow = metadata_narrow;
	} else {
		kfree(metadata_narrow);
	}

	return err;
}
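
/*
 * Illustrative call sequence (a sketch only; the caller-side variable names
 * are hypothetical):
 *
 *	const struct kbase_hwcnt_metadata_narrow *md_narrow;
 *	struct kbase_hwcnt_dump_buffer_narrow dump_buf;
 *
 *	if (!kbase_hwcnt_gpu_metadata_narrow_create(&md_narrow, src_md)) {
 *		if (!kbase_hwcnt_dump_buffer_narrow_alloc(md_narrow,
 *							  &dump_buf)) {
 *			... populate dump_buf, e.g. via
 *			kbase_hwcnt_dump_buffer_copy_strict_narrow() ...
 *			kbase_hwcnt_dump_buffer_narrow_free(&dump_buf);
 *		}
 *		kbase_hwcnt_gpu_metadata_narrow_destroy(md_narrow);
 *	}
 */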

void kbase_hwcnt_gpu_metadata_narrow_destroy(
	const struct kbase_hwcnt_metadata_narrow *md_narrow)
{
	if (!md_narrow)
		return;

	kbase_hwcnt_metadata_destroy(md_narrow->metadata);
	kfree(md_narrow);
}

int kbase_hwcnt_dump_buffer_narrow_alloc(
	const struct kbase_hwcnt_metadata_narrow *md_narrow,
	struct kbase_hwcnt_dump_buffer_narrow *dump_buf)
{
	size_t dump_buf_bytes;
	size_t clk_cnt_buf_bytes;
	u8 *buf;

	if (!md_narrow || !dump_buf)
		return -EINVAL;

	dump_buf_bytes = md_narrow->dump_buf_bytes;
	clk_cnt_buf_bytes =
		sizeof(*dump_buf->clk_cnt_buf) * md_narrow->metadata->clk_cnt;

	/* Make a single allocation for both dump_buf and clk_cnt_buf. */
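	/* Resulting layout (illustrative):
	 *   buf:  [ dump_buf (dump_buf_bytes) ][ clk_cnt_buf (clk_cnt * 8) ]
	 */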
	buf = kmalloc(dump_buf_bytes + clk_cnt_buf_bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*dump_buf = (struct kbase_hwcnt_dump_buffer_narrow){
		.md_narrow = md_narrow,
		.dump_buf = (u32 *)buf,
		.clk_cnt_buf = (u64 *)(buf + dump_buf_bytes),
	};

	return 0;
}

void kbase_hwcnt_dump_buffer_narrow_free(
	struct kbase_hwcnt_dump_buffer_narrow *dump_buf_narrow)
{
	if (!dump_buf_narrow)
		return;

	kfree(dump_buf_narrow->dump_buf);
	*dump_buf_narrow = (struct kbase_hwcnt_dump_buffer_narrow){ 0 };
}

int kbase_hwcnt_dump_buffer_narrow_array_alloc(
	const struct kbase_hwcnt_metadata_narrow *md_narrow, size_t n,
	struct kbase_hwcnt_dump_buffer_narrow_array *dump_bufs)
{
	struct kbase_hwcnt_dump_buffer_narrow *buffers;
	size_t buf_idx;
	unsigned int order;
	unsigned long addr;
	size_t dump_buf_bytes;
	size_t clk_cnt_buf_bytes;
	size_t total_dump_buf_size;

	if (!md_narrow || !dump_bufs)
		return -EINVAL;

	dump_buf_bytes = md_narrow->dump_buf_bytes;
	clk_cnt_buf_bytes = sizeof(*dump_bufs->bufs->clk_cnt_buf) *
			    md_narrow->metadata->clk_cnt;

	/* Allocate memory for the dump buffer struct array. */
	buffers = kmalloc_array(n, sizeof(*buffers), GFP_KERNEL);
	if (!buffers)
		return -ENOMEM;

	/* Allocate pages for the actual dump buffers, as they tend to be
	 * fairly large.
	 */
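	/* get_order() rounds the total size up to a power-of-two number of
	 * pages, so the allocation may be somewhat larger than strictly
	 * required.
	 */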
	order = get_order((dump_buf_bytes + clk_cnt_buf_bytes) * n);
	addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!addr) {
		kfree(buffers);
		return -ENOMEM;
	}

	*dump_bufs = (struct kbase_hwcnt_dump_buffer_narrow_array){
		.page_addr = addr,
		.page_order = order,
		.buf_cnt = n,
		.bufs = buffers,
	};

	total_dump_buf_size = dump_buf_bytes * n;
	/* Set the backing buffer pointers of each dump buffer. */
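	/* Layout within the page allocation (illustrative):
	 *   [ dump 0 ][ dump 1 ]...[ dump n-1 ][ clk 0 ][ clk 1 ]...[ clk n-1 ]
	 */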
	for (buf_idx = 0; buf_idx < n; buf_idx++) {
		const size_t dump_buf_offset = dump_buf_bytes * buf_idx;
		const size_t clk_cnt_buf_offset =
			total_dump_buf_size + (clk_cnt_buf_bytes * buf_idx);

		buffers[buf_idx] = (struct kbase_hwcnt_dump_buffer_narrow){
			.md_narrow = md_narrow,
			.dump_buf = (u32 *)(addr + dump_buf_offset),
			.clk_cnt_buf = (u64 *)(addr + clk_cnt_buf_offset),
		};
	}

	return 0;
}
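
/* Note: the entries of dump_bufs->bufs alias the single page allocation made
 * above, so they must be released with
 * kbase_hwcnt_dump_buffer_narrow_array_free(), never individually with
 * kbase_hwcnt_dump_buffer_narrow_free().
 */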

void kbase_hwcnt_dump_buffer_narrow_array_free(
	struct kbase_hwcnt_dump_buffer_narrow_array *dump_bufs)
{
	if (!dump_bufs)
		return;

	kfree(dump_bufs->bufs);
	free_pages(dump_bufs->page_addr, dump_bufs->page_order);
	memset(dump_bufs, 0, sizeof(*dump_bufs));
}

void kbase_hwcnt_dump_buffer_block_copy_strict_narrow(u32 *dst_blk,
						      const u64 *src_blk,
						      const u64 *blk_em,
						      size_t val_cnt)
{
	size_t val;

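	/* Each enabled 64-bit source value is saturated to U32_MAX rather
	 * than truncated, so an overflowed counter reads as the maximum
	 * 32-bit value instead of wrapping.
	 */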
	for (val = 0; val < val_cnt; val++) {
		bool val_enabled =
			kbase_hwcnt_enable_map_block_value_enabled(blk_em, val);
		u32 src_val =
			(src_blk[val] > U32_MAX) ? U32_MAX : (u32)src_blk[val];

		dst_blk[val] = val_enabled ? src_val : 0;
	}
}

void kbase_hwcnt_dump_buffer_copy_strict_narrow(
	struct kbase_hwcnt_dump_buffer_narrow *dst_narrow,
	const struct kbase_hwcnt_dump_buffer *src,
	const struct kbase_hwcnt_enable_map *dst_enable_map)
{
	const struct kbase_hwcnt_metadata_narrow *metadata_narrow;
	size_t grp;
	size_t clk;

	if (WARN_ON(!dst_narrow) || WARN_ON(!src) || WARN_ON(!dst_enable_map) ||
	    WARN_ON(dst_narrow->md_narrow->metadata == src->metadata) ||
	    WARN_ON(dst_narrow->md_narrow->metadata->grp_cnt !=
		    src->metadata->grp_cnt) ||
	    WARN_ON(src->metadata->grp_cnt != 1) ||
	    WARN_ON(dst_narrow->md_narrow->metadata->grp_metadata[0].blk_cnt !=
		    src->metadata->grp_metadata[0].blk_cnt) ||
	    WARN_ON(dst_narrow->md_narrow->metadata->grp_metadata[0].blk_cnt !=
		    KBASE_HWCNT_V5_BLOCK_TYPE_COUNT) ||
	    WARN_ON(dst_narrow->md_narrow->metadata->grp_metadata[0]
			    .blk_metadata[0]
			    .ctr_cnt >
		    src->metadata->grp_metadata[0].blk_metadata[0].ctr_cnt))
		return;

	/* Don't use the src metadata here, since the src buffer is bigger
	 * than the dst buffer.
	 */
	metadata_narrow = dst_narrow->md_narrow;

	for (grp = 0;
	     grp < kbase_hwcnt_metadata_narrow_group_count(metadata_narrow);
	     grp++) {
		size_t blk;
		size_t blk_cnt = kbase_hwcnt_metadata_narrow_block_count(
			metadata_narrow, grp);

		for (blk = 0; blk < blk_cnt; blk++) {
			size_t blk_inst;
			size_t blk_inst_cnt =
				kbase_hwcnt_metadata_narrow_block_instance_count(
					metadata_narrow, grp, blk);

			for (blk_inst = 0; blk_inst < blk_inst_cnt;
			     blk_inst++) {
				/* The narrowed down buffer is only 32-bit. */
				u32 *dst_blk =
					kbase_hwcnt_dump_buffer_narrow_block_instance(
						dst_narrow, grp, blk, blk_inst);
				const u64 *src_blk =
					kbase_hwcnt_dump_buffer_block_instance(
						src, grp, blk, blk_inst);
				const u64 *blk_em =
					kbase_hwcnt_enable_map_block_instance(
						dst_enable_map, grp, blk,
						blk_inst);
				size_t val_cnt =
					kbase_hwcnt_metadata_narrow_block_values_count(
						metadata_narrow, grp, blk);
				/* Align upwards to include padding bytes. */
				val_cnt = KBASE_HWCNT_ALIGN_UPWARDS(
					val_cnt,
					(KBASE_HWCNT_BLOCK_BYTE_ALIGNMENT /
					 KBASE_HWCNT_VALUE_BYTES));

				kbase_hwcnt_dump_buffer_block_copy_strict_narrow(
					dst_blk, src_blk, blk_em, val_cnt);
			}
		}
	}

	for (clk = 0; clk < metadata_narrow->metadata->clk_cnt; clk++) {
		bool clk_enabled = kbase_hwcnt_clk_enable_map_enabled(
			dst_enable_map->clk_enable_map, clk);

		dst_narrow->clk_cnt_buf[clk] =
			clk_enabled ? src->clk_cnt_buf[clk] : 0;
	}
}