// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018, 2020-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */
21
22 #include "mali_kbase_hwcnt_virtualizer.h"
23 #include "mali_kbase_hwcnt_accumulator.h"
24 #include "mali_kbase_hwcnt_context.h"
25 #include "mali_kbase_hwcnt_types.h"
26
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29
30 /**
31 * struct kbase_hwcnt_virtualizer - Hardware counter virtualizer structure.
32 * @hctx: Hardware counter context being virtualized.
33 * @dump_threshold_ns: Minimum threshold period for dumps between different
34 * clients where a new accumulator dump will not be
35 * performed, and instead accumulated values will be used.
36 * If 0, rate limiting is disabled.
37 * @metadata: Hardware counter metadata.
38 * @lock: Lock acquired at all entrypoints, to protect mutable
39 * state.
40 * @client_count: Current number of virtualizer clients.
41 * @clients: List of virtualizer clients.
42 * @accum: Hardware counter accumulator. NULL if no clients.
43 * @scratch_map: Enable map used as scratch space during counter changes.
44 * @scratch_buf: Dump buffer used as scratch space during dumps.
45 * @ts_last_dump_ns: End time of most recent dump across all clients.
46 */
47 struct kbase_hwcnt_virtualizer {
48 struct kbase_hwcnt_context *hctx;
49 u64 dump_threshold_ns;
50 const struct kbase_hwcnt_metadata *metadata;
51 struct mutex lock;
52 size_t client_count;
53 struct list_head clients;
54 struct kbase_hwcnt_accumulator *accum;
55 struct kbase_hwcnt_enable_map scratch_map;
56 struct kbase_hwcnt_dump_buffer scratch_buf;
57 u64 ts_last_dump_ns;
58 };
59
60 /**
61 * struct kbase_hwcnt_virtualizer_client - Virtualizer client structure.
62 * @node: List node used for virtualizer client list.
63 * @hvirt: Hardware counter virtualizer.
64 * @enable_map: Enable map with client's current enabled counters.
65 * @accum_buf: Dump buffer with client's current accumulated counters.
66 * @has_accum: True if accum_buf contains any accumulated counters.
67 * @ts_start_ns: Counter collection start time of current dump.
68 */
69 struct kbase_hwcnt_virtualizer_client {
70 struct list_head node;
71 struct kbase_hwcnt_virtualizer *hvirt;
72 struct kbase_hwcnt_enable_map enable_map;
73 struct kbase_hwcnt_dump_buffer accum_buf;
74 bool has_accum;
75 u64 ts_start_ns;
76 };
77
kbase_hwcnt_virtualizer_metadata(struct kbase_hwcnt_virtualizer * hvirt)78 const struct kbase_hwcnt_metadata *kbase_hwcnt_virtualizer_metadata(
79 struct kbase_hwcnt_virtualizer *hvirt)
80 {
81 if (!hvirt)
82 return NULL;
83
84 return hvirt->metadata;
85 }
86
87 /**
88 * kbasep_hwcnt_virtualizer_client_free - Free a virtualizer client's memory.
89 * @hvcli: Pointer to virtualizer client.
90 *
91 * Will safely free a client in any partial state of construction.
92 */
kbasep_hwcnt_virtualizer_client_free(struct kbase_hwcnt_virtualizer_client * hvcli)93 static void kbasep_hwcnt_virtualizer_client_free(
94 struct kbase_hwcnt_virtualizer_client *hvcli)
95 {
96 if (!hvcli)
97 return;
98
99 kbase_hwcnt_dump_buffer_free(&hvcli->accum_buf);
100 kbase_hwcnt_enable_map_free(&hvcli->enable_map);
101 kfree(hvcli);
102 }
103
104 /**
105 * kbasep_hwcnt_virtualizer_client_alloc - Allocate memory for a virtualizer
106 * client.
107 * @metadata: Non-NULL pointer to counter metadata.
108 * @out_hvcli: Non-NULL pointer to where created client will be stored on
109 * success.
110 *
111 * Return: 0 on success, else error code.
112 */
kbasep_hwcnt_virtualizer_client_alloc(const struct kbase_hwcnt_metadata * metadata,struct kbase_hwcnt_virtualizer_client ** out_hvcli)113 static int kbasep_hwcnt_virtualizer_client_alloc(
114 const struct kbase_hwcnt_metadata *metadata,
115 struct kbase_hwcnt_virtualizer_client **out_hvcli)
116 {
117 int errcode;
118 struct kbase_hwcnt_virtualizer_client *hvcli = NULL;
119
120 WARN_ON(!metadata);
121 WARN_ON(!out_hvcli);
122
123 hvcli = kzalloc(sizeof(*hvcli), GFP_KERNEL);
124 if (!hvcli)
125 return -ENOMEM;
126
127 errcode = kbase_hwcnt_enable_map_alloc(metadata, &hvcli->enable_map);
128 if (errcode)
129 goto error;
130
131 errcode = kbase_hwcnt_dump_buffer_alloc(metadata, &hvcli->accum_buf);
132 if (errcode)
133 goto error;
134
135 *out_hvcli = hvcli;
136 return 0;
137 error:
138 kbasep_hwcnt_virtualizer_client_free(hvcli);
139 return errcode;
140 }
141
142 /**
143 * kbasep_hwcnt_virtualizer_client_accumulate - Accumulate a dump buffer into a
144 * client's accumulation buffer.
145 * @hvcli: Non-NULL pointer to virtualizer client.
146 * @dump_buf: Non-NULL pointer to dump buffer to accumulate from.
147 */
kbasep_hwcnt_virtualizer_client_accumulate(struct kbase_hwcnt_virtualizer_client * hvcli,const struct kbase_hwcnt_dump_buffer * dump_buf)148 static void kbasep_hwcnt_virtualizer_client_accumulate(
149 struct kbase_hwcnt_virtualizer_client *hvcli,
150 const struct kbase_hwcnt_dump_buffer *dump_buf)
151 {
152 WARN_ON(!hvcli);
153 WARN_ON(!dump_buf);
154 lockdep_assert_held(&hvcli->hvirt->lock);
155
156 if (hvcli->has_accum) {
157 /* If already some accumulation, accumulate */
158 kbase_hwcnt_dump_buffer_accumulate(
159 &hvcli->accum_buf, dump_buf, &hvcli->enable_map);
160 } else {
161 /* If no accumulation, copy */
162 kbase_hwcnt_dump_buffer_copy(
163 &hvcli->accum_buf, dump_buf, &hvcli->enable_map);
164 }
165 hvcli->has_accum = true;
166 }
167
168 /**
169 * kbasep_hwcnt_virtualizer_accumulator_term - Terminate the hardware counter
170 * accumulator after final client
171 * removal.
172 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
173 *
174 * Will safely terminate the accumulator in any partial state of initialisation.
175 */
kbasep_hwcnt_virtualizer_accumulator_term(struct kbase_hwcnt_virtualizer * hvirt)176 static void kbasep_hwcnt_virtualizer_accumulator_term(
177 struct kbase_hwcnt_virtualizer *hvirt)
178 {
179 WARN_ON(!hvirt);
180 lockdep_assert_held(&hvirt->lock);
181 WARN_ON(hvirt->client_count);
182
183 kbase_hwcnt_dump_buffer_free(&hvirt->scratch_buf);
184 kbase_hwcnt_enable_map_free(&hvirt->scratch_map);
185 kbase_hwcnt_accumulator_release(hvirt->accum);
186 hvirt->accum = NULL;
187 }
188
189 /**
190 * kbasep_hwcnt_virtualizer_accumulator_init - Initialise the hardware counter
191 * accumulator before first client
192 * addition.
193 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
194 *
195 * Return: 0 on success, else error code.
196 */
kbasep_hwcnt_virtualizer_accumulator_init(struct kbase_hwcnt_virtualizer * hvirt)197 static int kbasep_hwcnt_virtualizer_accumulator_init(
198 struct kbase_hwcnt_virtualizer *hvirt)
199 {
200 int errcode;
201
202 WARN_ON(!hvirt);
203 lockdep_assert_held(&hvirt->lock);
204 WARN_ON(hvirt->client_count);
205 WARN_ON(hvirt->accum);
206
207 errcode = kbase_hwcnt_accumulator_acquire(
208 hvirt->hctx, &hvirt->accum);
209 if (errcode)
210 goto error;
211
212 errcode = kbase_hwcnt_enable_map_alloc(
213 hvirt->metadata, &hvirt->scratch_map);
214 if (errcode)
215 goto error;
216
217 errcode = kbase_hwcnt_dump_buffer_alloc(
218 hvirt->metadata, &hvirt->scratch_buf);
219 if (errcode)
220 goto error;
221
222 return 0;
223 error:
224 kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
225 return errcode;
226 }
227
228 /**
229 * kbasep_hwcnt_virtualizer_client_add - Add a newly allocated client to the
230 * virtualizer.
231 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
232 * @hvcli: Non-NULL pointer to the virtualizer client to add.
233 * @enable_map: Non-NULL pointer to client's initial enable map.
234 *
235 * Return: 0 on success, else error code.
236 */
kbasep_hwcnt_virtualizer_client_add(struct kbase_hwcnt_virtualizer * hvirt,struct kbase_hwcnt_virtualizer_client * hvcli,const struct kbase_hwcnt_enable_map * enable_map)237 static int kbasep_hwcnt_virtualizer_client_add(
238 struct kbase_hwcnt_virtualizer *hvirt,
239 struct kbase_hwcnt_virtualizer_client *hvcli,
240 const struct kbase_hwcnt_enable_map *enable_map)
241 {
242 int errcode = 0;
243 u64 ts_start_ns;
244 u64 ts_end_ns;
245
246 WARN_ON(!hvirt);
247 WARN_ON(!hvcli);
248 WARN_ON(!enable_map);
249 lockdep_assert_held(&hvirt->lock);
250
251 if (hvirt->client_count == 0)
252 /* First client added, so initialise the accumulator */
253 errcode = kbasep_hwcnt_virtualizer_accumulator_init(hvirt);
254 if (errcode)
255 return errcode;
256
257 hvirt->client_count += 1;
258
259 if (hvirt->client_count == 1) {
260 /* First client, so just pass the enable map onwards as is */
261 errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
262 enable_map, &ts_start_ns, &ts_end_ns, NULL);
263 } else {
264 struct kbase_hwcnt_virtualizer_client *pos;
265
266 /* Make the scratch enable map the union of all enable maps */
267 kbase_hwcnt_enable_map_copy(
268 &hvirt->scratch_map, enable_map);
269 list_for_each_entry(pos, &hvirt->clients, node)
270 kbase_hwcnt_enable_map_union(
271 &hvirt->scratch_map, &pos->enable_map);
272
273 /* Set the counters with the new union enable map */
274 errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
275 &hvirt->scratch_map,
276 &ts_start_ns, &ts_end_ns,
277 &hvirt->scratch_buf);
278 /* Accumulate into only existing clients' accumulation bufs */
279 if (!errcode)
280 list_for_each_entry(pos, &hvirt->clients, node)
281 kbasep_hwcnt_virtualizer_client_accumulate(
282 pos, &hvirt->scratch_buf);
283 }
284 if (errcode)
285 goto error;
286
287 list_add(&hvcli->node, &hvirt->clients);
288 hvcli->hvirt = hvirt;
289 kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);
290 hvcli->has_accum = false;
291 hvcli->ts_start_ns = ts_end_ns;
292
293 /* Store the most recent dump time for rate limiting */
294 hvirt->ts_last_dump_ns = ts_end_ns;
295
296 return 0;
297 error:
298 hvirt->client_count -= 1;
299 if (hvirt->client_count == 0)
300 kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
301 return errcode;
302 }
303
304 /**
305 * kbasep_hwcnt_virtualizer_client_remove - Remove a client from the
306 * virtualizer.
307 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
308 * @hvcli: Non-NULL pointer to the virtualizer client to remove.
309 */
kbasep_hwcnt_virtualizer_client_remove(struct kbase_hwcnt_virtualizer * hvirt,struct kbase_hwcnt_virtualizer_client * hvcli)310 static void kbasep_hwcnt_virtualizer_client_remove(
311 struct kbase_hwcnt_virtualizer *hvirt,
312 struct kbase_hwcnt_virtualizer_client *hvcli)
313 {
314 int errcode = 0;
315 u64 ts_start_ns;
316 u64 ts_end_ns;
317
318 WARN_ON(!hvirt);
319 WARN_ON(!hvcli);
320 lockdep_assert_held(&hvirt->lock);
321
322 list_del(&hvcli->node);
323 hvirt->client_count -= 1;
324
325 if (hvirt->client_count == 0) {
326 /* Last client removed, so terminate the accumulator */
327 kbasep_hwcnt_virtualizer_accumulator_term(hvirt);
328 } else {
329 struct kbase_hwcnt_virtualizer_client *pos;
330 /* Make the scratch enable map the union of all enable maps */
331 kbase_hwcnt_enable_map_disable_all(&hvirt->scratch_map);
332 list_for_each_entry(pos, &hvirt->clients, node)
333 kbase_hwcnt_enable_map_union(
334 &hvirt->scratch_map, &pos->enable_map);
335 /* Set the counters with the new union enable map */
336 errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
337 &hvirt->scratch_map,
338 &ts_start_ns, &ts_end_ns,
339 &hvirt->scratch_buf);
340 /* Accumulate into remaining clients' accumulation bufs */
341 if (!errcode)
342 list_for_each_entry(pos, &hvirt->clients, node)
343 kbasep_hwcnt_virtualizer_client_accumulate(
344 pos, &hvirt->scratch_buf);
345
346 /* Store the most recent dump time for rate limiting */
347 hvirt->ts_last_dump_ns = ts_end_ns;
348 }
349 WARN_ON(errcode);
350 }
351
352 /**
353 * kbasep_hwcnt_virtualizer_client_set_counters - Perform a dump of the client's
354 * currently enabled counters,
355 * and enable a new set of
356 * counters that will be used for
357 * subsequent dumps.
358 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
359 * @hvcli: Non-NULL pointer to the virtualizer client.
360 * @enable_map: Non-NULL pointer to the new counter enable map for the client.
361 * Must have the same metadata as the virtualizer.
362 * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
363 * be written out to on success.
364 * @ts_end_ns: Non-NULL pointer where the end timestamp of the dump will
365 * be written out to on success.
366 * @dump_buf: Pointer to the buffer where the dump will be written out to on
367 * success. If non-NULL, must have the same metadata as the
368 * accumulator. If NULL, the dump will be discarded.
369 *
370 * Return: 0 on success or error code.
371 */
kbasep_hwcnt_virtualizer_client_set_counters(struct kbase_hwcnt_virtualizer * hvirt,struct kbase_hwcnt_virtualizer_client * hvcli,const struct kbase_hwcnt_enable_map * enable_map,u64 * ts_start_ns,u64 * ts_end_ns,struct kbase_hwcnt_dump_buffer * dump_buf)372 static int kbasep_hwcnt_virtualizer_client_set_counters(
373 struct kbase_hwcnt_virtualizer *hvirt,
374 struct kbase_hwcnt_virtualizer_client *hvcli,
375 const struct kbase_hwcnt_enable_map *enable_map,
376 u64 *ts_start_ns,
377 u64 *ts_end_ns,
378 struct kbase_hwcnt_dump_buffer *dump_buf)
379 {
380 int errcode;
381 struct kbase_hwcnt_virtualizer_client *pos;
382
383 WARN_ON(!hvirt);
384 WARN_ON(!hvcli);
385 WARN_ON(!enable_map);
386 WARN_ON(!ts_start_ns);
387 WARN_ON(!ts_end_ns);
388 WARN_ON(enable_map->metadata != hvirt->metadata);
389 WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
390 lockdep_assert_held(&hvirt->lock);
391
392 /* Make the scratch enable map the union of all enable maps */
393 kbase_hwcnt_enable_map_copy(&hvirt->scratch_map, enable_map);
394 list_for_each_entry(pos, &hvirt->clients, node)
395 /* Ignore the enable map of the selected client */
396 if (pos != hvcli)
397 kbase_hwcnt_enable_map_union(
398 &hvirt->scratch_map, &pos->enable_map);
399
400 /* Set the counters with the new union enable map */
401 errcode = kbase_hwcnt_accumulator_set_counters(hvirt->accum,
402 &hvirt->scratch_map, ts_start_ns, ts_end_ns,
403 &hvirt->scratch_buf);
404 if (errcode)
405 return errcode;
406
407 /* Accumulate into all accumulation bufs except the selected client's */
408 list_for_each_entry(pos, &hvirt->clients, node)
409 if (pos != hvcli)
410 kbasep_hwcnt_virtualizer_client_accumulate(
411 pos, &hvirt->scratch_buf);
412
413 /* Finally, write into the dump buf */
414 if (dump_buf) {
415 const struct kbase_hwcnt_dump_buffer *src = &hvirt->scratch_buf;
416
417 if (hvcli->has_accum) {
418 kbase_hwcnt_dump_buffer_accumulate(
419 &hvcli->accum_buf, src, &hvcli->enable_map);
420 src = &hvcli->accum_buf;
421 }
422 kbase_hwcnt_dump_buffer_copy(dump_buf, src, &hvcli->enable_map);
423 }
424 hvcli->has_accum = false;
425
426 /* Update the selected client's enable map */
427 kbase_hwcnt_enable_map_copy(&hvcli->enable_map, enable_map);
428
429 /* Fix up the timestamps */
430 *ts_start_ns = hvcli->ts_start_ns;
431 hvcli->ts_start_ns = *ts_end_ns;
432
433 /* Store the most recent dump time for rate limiting */
434 hvirt->ts_last_dump_ns = *ts_end_ns;
435
436 return errcode;
437 }
438
kbase_hwcnt_virtualizer_client_set_counters(struct kbase_hwcnt_virtualizer_client * hvcli,const struct kbase_hwcnt_enable_map * enable_map,u64 * ts_start_ns,u64 * ts_end_ns,struct kbase_hwcnt_dump_buffer * dump_buf)439 int kbase_hwcnt_virtualizer_client_set_counters(
440 struct kbase_hwcnt_virtualizer_client *hvcli,
441 const struct kbase_hwcnt_enable_map *enable_map,
442 u64 *ts_start_ns,
443 u64 *ts_end_ns,
444 struct kbase_hwcnt_dump_buffer *dump_buf)
445 {
446 int errcode;
447 struct kbase_hwcnt_virtualizer *hvirt;
448
449 if (!hvcli || !enable_map || !ts_start_ns || !ts_end_ns)
450 return -EINVAL;
451
452 hvirt = hvcli->hvirt;
453
454 if ((enable_map->metadata != hvirt->metadata) ||
455 (dump_buf && (dump_buf->metadata != hvirt->metadata)))
456 return -EINVAL;
457
458 mutex_lock(&hvirt->lock);
459
460 if ((hvirt->client_count == 1) && (!hvcli->has_accum)) {
461 /*
462 * If there's only one client with no prior accumulation, we can
463 * completely skip the virtualize and just pass through the call
464 * to the accumulator, saving a fair few copies and
465 * accumulations.
466 */
467 errcode = kbase_hwcnt_accumulator_set_counters(
468 hvirt->accum, enable_map,
469 ts_start_ns, ts_end_ns, dump_buf);
470
471 if (!errcode) {
472 /* Update the selected client's enable map */
473 kbase_hwcnt_enable_map_copy(
474 &hvcli->enable_map, enable_map);
475
476 /* Fix up the timestamps */
477 *ts_start_ns = hvcli->ts_start_ns;
478 hvcli->ts_start_ns = *ts_end_ns;
479
480 /* Store the most recent dump time for rate limiting */
481 hvirt->ts_last_dump_ns = *ts_end_ns;
482 }
483 } else {
484 /* Otherwise, do the full virtualize */
485 errcode = kbasep_hwcnt_virtualizer_client_set_counters(
486 hvirt, hvcli, enable_map,
487 ts_start_ns, ts_end_ns, dump_buf);
488 }
489
490 mutex_unlock(&hvirt->lock);
491
492 return errcode;
493 }
494
495 /**
496 * kbasep_hwcnt_virtualizer_client_dump - Perform a dump of the client's
497 * currently enabled counters.
498 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
499 * @hvcli: Non-NULL pointer to the virtualizer client.
500 * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
501 * be written out to on success.
502 * @ts_end_ns: Non-NULL pointer where the end timestamp of the dump will
503 * be written out to on success.
504 * @dump_buf: Pointer to the buffer where the dump will be written out to on
505 * success. If non-NULL, must have the same metadata as the
506 * accumulator. If NULL, the dump will be discarded.
507 *
508 * Return: 0 on success or error code.
509 */
kbasep_hwcnt_virtualizer_client_dump(struct kbase_hwcnt_virtualizer * hvirt,struct kbase_hwcnt_virtualizer_client * hvcli,u64 * ts_start_ns,u64 * ts_end_ns,struct kbase_hwcnt_dump_buffer * dump_buf)510 static int kbasep_hwcnt_virtualizer_client_dump(
511 struct kbase_hwcnt_virtualizer *hvirt,
512 struct kbase_hwcnt_virtualizer_client *hvcli,
513 u64 *ts_start_ns,
514 u64 *ts_end_ns,
515 struct kbase_hwcnt_dump_buffer *dump_buf)
516 {
517 int errcode;
518 struct kbase_hwcnt_virtualizer_client *pos;
519
520 WARN_ON(!hvirt);
521 WARN_ON(!hvcli);
522 WARN_ON(!ts_start_ns);
523 WARN_ON(!ts_end_ns);
524 WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
525 lockdep_assert_held(&hvirt->lock);
526
527 /* Perform the dump */
528 errcode = kbase_hwcnt_accumulator_dump(hvirt->accum,
529 ts_start_ns, ts_end_ns, &hvirt->scratch_buf);
530 if (errcode)
531 return errcode;
532
533 /* Accumulate into all accumulation bufs except the selected client's */
534 list_for_each_entry(pos, &hvirt->clients, node)
535 if (pos != hvcli)
536 kbasep_hwcnt_virtualizer_client_accumulate(
537 pos, &hvirt->scratch_buf);
538
539 /* Finally, write into the dump buf */
540 if (dump_buf) {
541 const struct kbase_hwcnt_dump_buffer *src = &hvirt->scratch_buf;
542
543 if (hvcli->has_accum) {
544 kbase_hwcnt_dump_buffer_accumulate(
545 &hvcli->accum_buf, src, &hvcli->enable_map);
546 src = &hvcli->accum_buf;
547 }
548 kbase_hwcnt_dump_buffer_copy(dump_buf, src, &hvcli->enable_map);
549 }
550 hvcli->has_accum = false;
551
552 /* Fix up the timestamps */
553 *ts_start_ns = hvcli->ts_start_ns;
554 hvcli->ts_start_ns = *ts_end_ns;
555
556 /* Store the most recent dump time for rate limiting */
557 hvirt->ts_last_dump_ns = *ts_end_ns;
558
559 return errcode;
560 }
561
562 /**
563 * kbasep_hwcnt_virtualizer_client_dump_rate_limited - Perform a dump of the
564 * client's currently enabled counters
565 * if it hasn't been rate limited,
566 * otherwise return the client's most
567 * recent accumulation.
568 * @hvirt: Non-NULL pointer to the hardware counter virtualizer.
569 * @hvcli: Non-NULL pointer to the virtualizer client.
570 * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
571 * be written out to on success.
572 * @ts_end_ns: Non-NULL pointer where the end timestamp of the dump will
573 * be written out to on success.
574 * @dump_buf: Pointer to the buffer where the dump will be written out to on
575 * success. If non-NULL, must have the same metadata as the
576 * accumulator. If NULL, the dump will be discarded.
577 *
578 * Return: 0 on success or error code.
579 */
kbasep_hwcnt_virtualizer_client_dump_rate_limited(struct kbase_hwcnt_virtualizer * hvirt,struct kbase_hwcnt_virtualizer_client * hvcli,u64 * ts_start_ns,u64 * ts_end_ns,struct kbase_hwcnt_dump_buffer * dump_buf)580 static int kbasep_hwcnt_virtualizer_client_dump_rate_limited(
581 struct kbase_hwcnt_virtualizer *hvirt,
582 struct kbase_hwcnt_virtualizer_client *hvcli,
583 u64 *ts_start_ns,
584 u64 *ts_end_ns,
585 struct kbase_hwcnt_dump_buffer *dump_buf)
586 {
587 bool rate_limited = true;
588
589 WARN_ON(!hvirt);
590 WARN_ON(!hvcli);
591 WARN_ON(!ts_start_ns);
592 WARN_ON(!ts_end_ns);
593 WARN_ON(dump_buf && (dump_buf->metadata != hvirt->metadata));
594 lockdep_assert_held(&hvirt->lock);
595
596 if (hvirt->dump_threshold_ns == 0) {
597 /* Threshold == 0, so rate limiting disabled */
598 rate_limited = false;
599 } else if (hvirt->ts_last_dump_ns == hvcli->ts_start_ns) {
600 /* Last dump was performed by this client, and dumps from an
601 * individual client are never rate limited
602 */
603 rate_limited = false;
604 } else {
605 const u64 ts_ns =
606 kbase_hwcnt_accumulator_timestamp_ns(hvirt->accum);
607 const u64 time_since_last_dump_ns =
608 ts_ns - hvirt->ts_last_dump_ns;
609
610 /* Dump period equals or exceeds the threshold */
611 if (time_since_last_dump_ns >= hvirt->dump_threshold_ns)
612 rate_limited = false;
613 }
614
615 if (!rate_limited)
616 return kbasep_hwcnt_virtualizer_client_dump(
617 hvirt, hvcli, ts_start_ns, ts_end_ns, dump_buf);
618
619 /* If we've gotten this far, the client must have something accumulated
620 * otherwise it is a logic error
621 */
622 WARN_ON(!hvcli->has_accum);
623
624 if (dump_buf)
625 kbase_hwcnt_dump_buffer_copy(
626 dump_buf, &hvcli->accum_buf, &hvcli->enable_map);
627 hvcli->has_accum = false;
628
629 *ts_start_ns = hvcli->ts_start_ns;
630 *ts_end_ns = hvirt->ts_last_dump_ns;
631 hvcli->ts_start_ns = hvirt->ts_last_dump_ns;
632
633 return 0;
634 }
635
kbase_hwcnt_virtualizer_client_dump(struct kbase_hwcnt_virtualizer_client * hvcli,u64 * ts_start_ns,u64 * ts_end_ns,struct kbase_hwcnt_dump_buffer * dump_buf)636 int kbase_hwcnt_virtualizer_client_dump(
637 struct kbase_hwcnt_virtualizer_client *hvcli,
638 u64 *ts_start_ns,
639 u64 *ts_end_ns,
640 struct kbase_hwcnt_dump_buffer *dump_buf)
641 {
642 int errcode;
643 struct kbase_hwcnt_virtualizer *hvirt;
644
645 if (!hvcli || !ts_start_ns || !ts_end_ns)
646 return -EINVAL;
647
648 hvirt = hvcli->hvirt;
649
650 if (dump_buf && (dump_buf->metadata != hvirt->metadata))
651 return -EINVAL;
652
653 mutex_lock(&hvirt->lock);
654
655 if ((hvirt->client_count == 1) && (!hvcli->has_accum)) {
656 /*
657 * If there's only one client with no prior accumulation, we can
658 * completely skip the virtualize and just pass through the call
659 * to the accumulator, saving a fair few copies and
660 * accumulations.
661 */
662 errcode = kbase_hwcnt_accumulator_dump(
663 hvirt->accum, ts_start_ns, ts_end_ns, dump_buf);
664
665 if (!errcode) {
666 /* Fix up the timestamps */
667 *ts_start_ns = hvcli->ts_start_ns;
668 hvcli->ts_start_ns = *ts_end_ns;
669
670 /* Store the most recent dump time for rate limiting */
671 hvirt->ts_last_dump_ns = *ts_end_ns;
672 }
673 } else {
674 /* Otherwise, do the full virtualize */
675 errcode = kbasep_hwcnt_virtualizer_client_dump_rate_limited(
676 hvirt, hvcli, ts_start_ns, ts_end_ns, dump_buf);
677 }
678
679 mutex_unlock(&hvirt->lock);
680
681 return errcode;
682 }
683
kbase_hwcnt_virtualizer_client_create(struct kbase_hwcnt_virtualizer * hvirt,const struct kbase_hwcnt_enable_map * enable_map,struct kbase_hwcnt_virtualizer_client ** out_hvcli)684 int kbase_hwcnt_virtualizer_client_create(
685 struct kbase_hwcnt_virtualizer *hvirt,
686 const struct kbase_hwcnt_enable_map *enable_map,
687 struct kbase_hwcnt_virtualizer_client **out_hvcli)
688 {
689 int errcode;
690 struct kbase_hwcnt_virtualizer_client *hvcli;
691
692 if (!hvirt || !enable_map || !out_hvcli ||
693 (enable_map->metadata != hvirt->metadata))
694 return -EINVAL;
695
696 errcode = kbasep_hwcnt_virtualizer_client_alloc(
697 hvirt->metadata, &hvcli);
698 if (errcode)
699 return errcode;
700
701 mutex_lock(&hvirt->lock);
702
703 errcode = kbasep_hwcnt_virtualizer_client_add(hvirt, hvcli, enable_map);
704
705 mutex_unlock(&hvirt->lock);
706
707 if (errcode) {
708 kbasep_hwcnt_virtualizer_client_free(hvcli);
709 return errcode;
710 }
711
712 *out_hvcli = hvcli;
713 return 0;
714 }
715
kbase_hwcnt_virtualizer_client_destroy(struct kbase_hwcnt_virtualizer_client * hvcli)716 void kbase_hwcnt_virtualizer_client_destroy(
717 struct kbase_hwcnt_virtualizer_client *hvcli)
718 {
719 if (!hvcli)
720 return;
721
722 mutex_lock(&hvcli->hvirt->lock);
723
724 kbasep_hwcnt_virtualizer_client_remove(hvcli->hvirt, hvcli);
725
726 mutex_unlock(&hvcli->hvirt->lock);
727
728 kbasep_hwcnt_virtualizer_client_free(hvcli);
729 }
730
kbase_hwcnt_virtualizer_init(struct kbase_hwcnt_context * hctx,u64 dump_threshold_ns,struct kbase_hwcnt_virtualizer ** out_hvirt)731 int kbase_hwcnt_virtualizer_init(
732 struct kbase_hwcnt_context *hctx,
733 u64 dump_threshold_ns,
734 struct kbase_hwcnt_virtualizer **out_hvirt)
735 {
736 struct kbase_hwcnt_virtualizer *virt;
737 const struct kbase_hwcnt_metadata *metadata;
738
739 if (!hctx || !out_hvirt)
740 return -EINVAL;
741
742 metadata = kbase_hwcnt_context_metadata(hctx);
743 if (!metadata)
744 return -EINVAL;
745
746 virt = kzalloc(sizeof(*virt), GFP_KERNEL);
747 if (!virt)
748 return -ENOMEM;
749
750 virt->hctx = hctx;
751 virt->dump_threshold_ns = dump_threshold_ns;
752 virt->metadata = metadata;
753
754 mutex_init(&virt->lock);
755 INIT_LIST_HEAD(&virt->clients);
756
757 *out_hvirt = virt;
758 return 0;
759 }
760
kbase_hwcnt_virtualizer_term(struct kbase_hwcnt_virtualizer * hvirt)761 void kbase_hwcnt_virtualizer_term(
762 struct kbase_hwcnt_virtualizer *hvirt)
763 {
764 if (!hvirt)
765 return;
766
767 /* Non-zero client count implies client leak */
768 if (WARN_ON(hvirt->client_count != 0)) {
769 struct kbase_hwcnt_virtualizer_client *pos, *n;
770
771 list_for_each_entry_safe(pos, n, &hvirt->clients, node)
772 kbase_hwcnt_virtualizer_client_destroy(pos);
773 }
774
775 WARN_ON(hvirt->client_count != 0);
776 WARN_ON(hvirt->accum);
777
778 kfree(hvirt);
779 }
780
kbase_hwcnt_virtualizer_queue_work(struct kbase_hwcnt_virtualizer * hvirt,struct work_struct * work)781 bool kbase_hwcnt_virtualizer_queue_work(struct kbase_hwcnt_virtualizer *hvirt,
782 struct work_struct *work)
783 {
784 if (WARN_ON(!hvirt) || WARN_ON(!work))
785 return false;
786
787 return kbase_hwcnt_context_queue_work(hvirt->hctx, work);
788 }
789