// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2018, 2020-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Implementation of hardware counter context and accumulator APIs.
 */

#include "mali_kbase_hwcnt_context.h"
#include "mali_kbase_hwcnt_accumulator.h"
#include "mali_kbase_hwcnt_backend.h"
#include "mali_kbase_hwcnt_types.h"

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

/**
 * enum kbase_hwcnt_accum_state - Hardware counter accumulator states.
 * @ACCUM_STATE_ERROR:    Error state, where all accumulator operations fail.
 * @ACCUM_STATE_DISABLED: Disabled state, where dumping is always disabled.
 * @ACCUM_STATE_ENABLED:  Enabled state, where dumping is enabled if there are
 *                        any enabled counters.
 */
enum kbase_hwcnt_accum_state {
        ACCUM_STATE_ERROR,
        ACCUM_STATE_DISABLED,
        ACCUM_STATE_ENABLED
};
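
/*
 * Informal summary of the state transitions implemented below (see the
 * locking notes on struct kbase_hwcnt_accumulator for which locks each
 * transition requires):
 *
 *   DISABLED -> ENABLED   kbasep_hwcnt_accumulator_enable() succeeds
 *   DISABLED -> ERROR     kbasep_hwcnt_accumulator_enable() fails
 *   ENABLED  -> DISABLED  kbasep_hwcnt_accumulator_disable()
 *   ENABLED  -> ERROR     a dump operation fails
 *   ERROR    -> DISABLED  kbasep_hwcnt_accumulator_disable()
 */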

/**
 * struct kbase_hwcnt_accumulator - Hardware counter accumulator structure.
 * @metadata:               Pointer to immutable hwcnt metadata.
 * @backend:                Pointer to created counter backend.
 * @state:                  The current state of the accumulator.
 *                          - State transition from disabled->enabled or
 *                            disabled->error requires state_lock.
 *                          - State transition from enabled->disabled or
 *                            enabled->error requires both accum_lock and
 *                            state_lock.
 *                          - Error state persists until next disable.
 * @enable_map:             The current set of enabled counters.
 *                          - Must only be modified while holding both
 *                            accum_lock and state_lock.
 *                          - Can be read while holding either lock.
 *                          - Must stay in sync with enable_map_any_enabled.
 * @enable_map_any_enabled: True if any counters in the map are enabled, else
 *                          false. If true, and state is ACCUM_STATE_ENABLED,
 *                          then the counter backend will be enabled.
 *                          - Must only be modified while holding both
 *                            accum_lock and state_lock.
 *                          - Can be read while holding either lock.
 *                          - Must stay in sync with enable_map.
 * @scratch_map:            Scratch enable map, used as temporary enable map
 *                          storage during dumps.
 *                          - Must only be read or modified while holding
 *                            accum_lock.
 * @accum_buf:              Accumulation buffer, where dumps will be
 *                          accumulated into on transition to a disable state.
 *                          - Must only be read or modified while holding
 *                            accum_lock.
 * @accumulated:            True if the accumulation buffer has been
 *                          accumulated into and not subsequently read from
 *                          yet, else false.
 *                          - Must only be read or modified while holding
 *                            accum_lock.
 * @ts_last_dump_ns:        Timestamp (ns) of the end time of the most recent
 *                          dump that was requested by the user.
 *                          - Must only be read or modified while holding
 *                            accum_lock.
 */
struct kbase_hwcnt_accumulator {
        const struct kbase_hwcnt_metadata *metadata;
        struct kbase_hwcnt_backend *backend;
        enum kbase_hwcnt_accum_state state;
        struct kbase_hwcnt_enable_map enable_map;
        bool enable_map_any_enabled;
        struct kbase_hwcnt_enable_map scratch_map;
        struct kbase_hwcnt_dump_buffer accum_buf;
        bool accumulated;
        u64 ts_last_dump_ns;
};

/**
 * struct kbase_hwcnt_context - Hardware counter context structure.
 * @iface:          Pointer to hardware counter backend interface.
 * @state_lock:     Spinlock protecting state.
 * @disable_count:  Disable count of the context. Initialised to 1.
 *                  Decremented when the accumulator is acquired, and
 *                  incremented on release. Incremented on calls to
 *                  kbase_hwcnt_context_disable[_atomic], and decremented on
 *                  calls to kbase_hwcnt_context_enable.
 *                  - Must only be read or modified while holding state_lock.
 * @accum_lock:     Mutex protecting accumulator.
 * @accum_inited:   Flag to prevent concurrent accumulator initialisation
 *                  and/or termination. Set to true before accumulator
 *                  initialisation, and false after accumulator termination.
 *                  - Must only be modified while holding both accum_lock and
 *                    state_lock.
 *                  - Can be read while holding either lock.
 * @accum:          Hardware counter accumulator structure.
 * @wq:             Centralized workqueue for users of hardware counters to
 *                  submit async hardware counter related work. Never directly
 *                  called, but it's expected that a lot of the functions in
 *                  this API will end up called from the enqueued async work.
 */
struct kbase_hwcnt_context {
        const struct kbase_hwcnt_backend_interface *iface;
        spinlock_t state_lock;
        size_t disable_count;
        struct mutex accum_lock;
        bool accum_inited;
        struct kbase_hwcnt_accumulator accum;
        struct workqueue_struct *wq;
};
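
/*
 * Lock ordering note (derived from the code below): wherever both locks are
 * needed, accum_lock (mutex) is always acquired before state_lock (spinlock,
 * taken with spin_lock_irqsave).
 */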

int kbase_hwcnt_context_init(
        const struct kbase_hwcnt_backend_interface *iface,
        struct kbase_hwcnt_context **out_hctx)
{
        struct kbase_hwcnt_context *hctx = NULL;

        if (!iface || !out_hctx)
                return -EINVAL;

        hctx = kzalloc(sizeof(*hctx), GFP_KERNEL);
        if (!hctx)
                goto err_alloc_hctx;

        hctx->iface = iface;
        spin_lock_init(&hctx->state_lock);
        hctx->disable_count = 1;
        mutex_init(&hctx->accum_lock);
        hctx->accum_inited = false;

        hctx->wq =
                alloc_workqueue("mali_kbase_hwcnt", WQ_HIGHPRI | WQ_UNBOUND, 0);
        if (!hctx->wq)
                goto err_alloc_workqueue;

        *out_hctx = hctx;

        return 0;

err_alloc_workqueue:
        kfree(hctx);
err_alloc_hctx:
        return -ENOMEM;
}

void kbase_hwcnt_context_term(struct kbase_hwcnt_context *hctx)
{
        if (!hctx)
                return;

        /* Make sure we didn't leak the accumulator */
        WARN_ON(hctx->accum_inited);

        /* We don't expect any work to be pending on this workqueue.
         * Regardless, this will safely drain and complete the work.
         */
        destroy_workqueue(hctx->wq);
        kfree(hctx);
}
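
/*
 * Minimal usage sketch for the context lifecycle (hypothetical caller code;
 * `iface` is assumed to be a backend interface created elsewhere):
 *
 *      struct kbase_hwcnt_context *hctx;
 *
 *      if (!kbase_hwcnt_context_init(iface, &hctx)) {
 *              ... acquire/release the accumulator, queue work, etc. ...
 *              kbase_hwcnt_context_term(hctx);
 *      }
 */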

/**
 * kbasep_hwcnt_accumulator_term() - Terminate the accumulator for the context.
 * @hctx: Non-NULL pointer to hardware counter context.
 */
static void kbasep_hwcnt_accumulator_term(struct kbase_hwcnt_context *hctx)
{
        WARN_ON(!hctx);
        WARN_ON(!hctx->accum_inited);

        kbase_hwcnt_enable_map_free(&hctx->accum.scratch_map);
        kbase_hwcnt_dump_buffer_free(&hctx->accum.accum_buf);
        kbase_hwcnt_enable_map_free(&hctx->accum.enable_map);
        hctx->iface->term(hctx->accum.backend);
        memset(&hctx->accum, 0, sizeof(hctx->accum));
}

/**
 * kbasep_hwcnt_accumulator_init() - Initialise the accumulator for the context.
 * @hctx: Non-NULL pointer to hardware counter context.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_hwcnt_accumulator_init(struct kbase_hwcnt_context *hctx)
{
        int errcode;

        WARN_ON(!hctx);
        WARN_ON(!hctx->accum_inited);

        errcode = hctx->iface->init(
                hctx->iface->info, &hctx->accum.backend);
        if (errcode)
                goto error;

        hctx->accum.metadata = hctx->iface->metadata(hctx->iface->info);
        hctx->accum.state = ACCUM_STATE_ERROR;

        errcode = kbase_hwcnt_enable_map_alloc(hctx->accum.metadata,
                                               &hctx->accum.enable_map);
        if (errcode)
                goto error;

        hctx->accum.enable_map_any_enabled = false;

        errcode = kbase_hwcnt_dump_buffer_alloc(hctx->accum.metadata,
                                                &hctx->accum.accum_buf);
        if (errcode)
                goto error;

        errcode = kbase_hwcnt_enable_map_alloc(hctx->accum.metadata,
                                               &hctx->accum.scratch_map);
        if (errcode)
                goto error;

        hctx->accum.accumulated = false;

        hctx->accum.ts_last_dump_ns =
                hctx->iface->timestamp_ns(hctx->accum.backend);

        return 0;

error:
        kbasep_hwcnt_accumulator_term(hctx);
        return errcode;
}

/**
 * kbasep_hwcnt_accumulator_disable() - Transition the accumulator into the
 *                                      disabled state, from the enabled or
 *                                      error states.
 * @hctx:       Non-NULL pointer to hardware counter context.
 * @accumulate: True if we should accumulate before disabling, else false.
 */
static void kbasep_hwcnt_accumulator_disable(
        struct kbase_hwcnt_context *hctx, bool accumulate)
{
        int errcode = 0;
        bool backend_enabled = false;
        struct kbase_hwcnt_accumulator *accum;
        unsigned long flags;
        u64 dump_time_ns;

        WARN_ON(!hctx);
        lockdep_assert_held(&hctx->accum_lock);
        WARN_ON(!hctx->accum_inited);

        accum = &hctx->accum;

        spin_lock_irqsave(&hctx->state_lock, flags);

        WARN_ON(hctx->disable_count != 0);
        WARN_ON(hctx->accum.state == ACCUM_STATE_DISABLED);

        if ((hctx->accum.state == ACCUM_STATE_ENABLED) &&
            (accum->enable_map_any_enabled))
                backend_enabled = true;

        if (!backend_enabled)
                hctx->accum.state = ACCUM_STATE_DISABLED;

        spin_unlock_irqrestore(&hctx->state_lock, flags);

        /* Early out if the backend is not already enabled */
        if (!backend_enabled)
                return;

        if (!accumulate)
                goto disable;

        /* Try and accumulate before disabling */
        errcode = hctx->iface->dump_request(accum->backend, &dump_time_ns);
        if (errcode)
                goto disable;

        errcode = hctx->iface->dump_wait(accum->backend);
        if (errcode)
                goto disable;

        errcode = hctx->iface->dump_get(accum->backend,
                &accum->accum_buf, &accum->enable_map, accum->accumulated);
        if (errcode)
                goto disable;

        accum->accumulated = true;

disable:
        hctx->iface->dump_disable(accum->backend);

        /* Regardless of any errors during the accumulate, put the accumulator
         * in the disabled state.
         */
        spin_lock_irqsave(&hctx->state_lock, flags);

        hctx->accum.state = ACCUM_STATE_DISABLED;

        spin_unlock_irqrestore(&hctx->state_lock, flags);
}

/**
 * kbasep_hwcnt_accumulator_enable() - Transition the accumulator into the
 *                                     enabled state, from the disabled state.
 * @hctx: Non-NULL pointer to hardware counter context.
 */
static void kbasep_hwcnt_accumulator_enable(struct kbase_hwcnt_context *hctx)
{
        int errcode = 0;
        struct kbase_hwcnt_accumulator *accum;

        WARN_ON(!hctx);
        lockdep_assert_held(&hctx->state_lock);
        WARN_ON(!hctx->accum_inited);
        WARN_ON(hctx->accum.state != ACCUM_STATE_DISABLED);

        accum = &hctx->accum;

        /* The backend only needs enabling if any counters are enabled */
        if (accum->enable_map_any_enabled)
                errcode = hctx->iface->dump_enable_nolock(
                        accum->backend, &accum->enable_map);

        if (!errcode)
                accum->state = ACCUM_STATE_ENABLED;
        else
                accum->state = ACCUM_STATE_ERROR;
}

/**
 * kbasep_hwcnt_accumulator_dump() - Perform a dump with the most up-to-date
 *                                   values of enabled counters possible, and
 *                                   optionally update the set of enabled
 *                                   counters.
 * @hctx:        Non-NULL pointer to the hardware counter context
 * @ts_start_ns: Non-NULL pointer where the start timestamp of the dump will
 *               be written out to on success
 * @ts_end_ns:   Non-NULL pointer where the end timestamp of the dump will
 *               be written out to on success
 * @dump_buf:    Pointer to the buffer where the dump will be written out to on
 *               success. If non-NULL, must have the same metadata as the
 *               accumulator. If NULL, the dump will be discarded.
 * @new_map:     Pointer to the new counter enable map. If non-NULL, must have
 *               the same metadata as the accumulator. If NULL, the set of
 *               enabled counters will be unchanged.
 *
 * Return: 0 on success, else error code.
 */
static int kbasep_hwcnt_accumulator_dump(
        struct kbase_hwcnt_context *hctx,
        u64 *ts_start_ns,
        u64 *ts_end_ns,
        struct kbase_hwcnt_dump_buffer *dump_buf,
        const struct kbase_hwcnt_enable_map *new_map)
{
        int errcode = 0;
        unsigned long flags;
        enum kbase_hwcnt_accum_state state;
        bool dump_requested = false;
        bool dump_written = false;
        bool cur_map_any_enabled;
        struct kbase_hwcnt_enable_map *cur_map;
        bool new_map_any_enabled = false;
        u64 dump_time_ns;
        struct kbase_hwcnt_accumulator *accum;

        WARN_ON(!hctx);
        WARN_ON(!ts_start_ns);
        WARN_ON(!ts_end_ns);
        WARN_ON(dump_buf && (dump_buf->metadata != hctx->accum.metadata));
        WARN_ON(new_map && (new_map->metadata != hctx->accum.metadata));
        WARN_ON(!hctx->accum_inited);
        lockdep_assert_held(&hctx->accum_lock);

        accum = &hctx->accum;
        cur_map = &accum->scratch_map;

        /* Save out info about the current enable map */
        cur_map_any_enabled = accum->enable_map_any_enabled;
        kbase_hwcnt_enable_map_copy(cur_map, &accum->enable_map);

        if (new_map)
                new_map_any_enabled =
                        kbase_hwcnt_enable_map_any_enabled(new_map);

        /*
         * We're holding accum_lock, so the accumulator state might transition
         * from disabled to enabled during this function (as enabling is lock
         * free), but it will never disable (as disabling needs to hold the
         * accum_lock), nor will it ever transition from enabled to error (as
         * an enable while we're already enabled is impossible).
         *
         * If we're already disabled, we'll only look at the accumulation buffer
         * rather than do a real dump, so a concurrent enable does not affect
         * us.
         *
         * If a concurrent enable fails, we might transition to the error
         * state, but again, as we're only looking at the accumulation buffer,
         * it's not an issue.
         */
        spin_lock_irqsave(&hctx->state_lock, flags);

        state = accum->state;

        /*
         * Update the new map now, such that if an enable occurs during this
         * dump then that enable will set the new map. If we're already enabled,
         * then we'll do it ourselves after the dump.
         */
        if (new_map) {
                kbase_hwcnt_enable_map_copy(
                        &accum->enable_map, new_map);
                accum->enable_map_any_enabled = new_map_any_enabled;
        }

        spin_unlock_irqrestore(&hctx->state_lock, flags);

        /* Error state, so early out. No need to roll back any map updates */
        if (state == ACCUM_STATE_ERROR)
                return -EIO;

        /* Initiate the dump if the backend is enabled. */
        if ((state == ACCUM_STATE_ENABLED) && cur_map_any_enabled) {
                if (dump_buf) {
                        errcode = hctx->iface->dump_request(
                                accum->backend, &dump_time_ns);
                        dump_requested = true;
                } else {
                        dump_time_ns = hctx->iface->timestamp_ns(
                                accum->backend);
                        errcode = hctx->iface->dump_clear(accum->backend);
                }

                if (errcode)
                        goto error;
        } else {
                dump_time_ns = hctx->iface->timestamp_ns(accum->backend);
        }

        /* Copy any accumulation into the dest buffer */
        if (accum->accumulated && dump_buf) {
                kbase_hwcnt_dump_buffer_copy(
                        dump_buf, &accum->accum_buf, cur_map);
                dump_written = true;
        }

        /* Wait for any requested dumps to complete */
        if (dump_requested) {
                WARN_ON(state != ACCUM_STATE_ENABLED);
                errcode = hctx->iface->dump_wait(accum->backend);
                if (errcode)
                        goto error;
        }

        /* If we're enabled and there's a new enable map, change the enabled set
         * as soon after the dump has completed as possible.
         */
        if ((state == ACCUM_STATE_ENABLED) && new_map) {
                /* Backend is only enabled if there were any enabled counters */
                if (cur_map_any_enabled)
                        hctx->iface->dump_disable(accum->backend);

                /* (Re-)enable the backend if the new map has enabled counters.
                 * No need to acquire the spinlock, as concurrent enable while
                 * we're already enabled and holding accum_lock is impossible.
                 */
                if (new_map_any_enabled) {
                        errcode = hctx->iface->dump_enable(
                                accum->backend, new_map);
                        if (errcode)
                                goto error;
                }
        }

        /* Copy, accumulate, or zero into the dest buffer to finish */
        if (dump_buf) {
                /* If we dumped, copy or accumulate it into the destination */
                if (dump_requested) {
                        WARN_ON(state != ACCUM_STATE_ENABLED);
                        errcode = hctx->iface->dump_get(
                                accum->backend,
                                dump_buf,
                                cur_map,
                                dump_written);
                        if (errcode)
                                goto error;
                        dump_written = true;
                }

                /* If we've not written anything into the dump buffer so far, it
                 * means there was nothing to write. Zero any enabled counters.
                 */
                if (!dump_written)
                        kbase_hwcnt_dump_buffer_zero(dump_buf, cur_map);
        }

        /* Write out timestamps */
        *ts_start_ns = accum->ts_last_dump_ns;
        *ts_end_ns = dump_time_ns;

        accum->accumulated = false;
        accum->ts_last_dump_ns = dump_time_ns;

        return 0;
error:
        /* An error was only physically possible if the backend was enabled */
        WARN_ON(state != ACCUM_STATE_ENABLED);

        /* Disable the backend, and transition to the error state */
        hctx->iface->dump_disable(accum->backend);
        spin_lock_irqsave(&hctx->state_lock, flags);

        accum->state = ACCUM_STATE_ERROR;

        spin_unlock_irqrestore(&hctx->state_lock, flags);

        return errcode;
}

/**
 * kbasep_hwcnt_context_disable() - Increment the disable count of the context.
 * @hctx:       Non-NULL pointer to hardware counter context.
 * @accumulate: True if we should accumulate before disabling, else false.
 */
static void kbasep_hwcnt_context_disable(
        struct kbase_hwcnt_context *hctx, bool accumulate)
{
        unsigned long flags;

        WARN_ON(!hctx);
        lockdep_assert_held(&hctx->accum_lock);

        if (!kbase_hwcnt_context_disable_atomic(hctx)) {
                kbasep_hwcnt_accumulator_disable(hctx, accumulate);

                spin_lock_irqsave(&hctx->state_lock, flags);

                /* Atomic disable failed and we're holding the mutex, so current
                 * disable count must be 0.
                 */
                WARN_ON(hctx->disable_count != 0);
                hctx->disable_count++;

                spin_unlock_irqrestore(&hctx->state_lock, flags);
        }
}

int kbase_hwcnt_accumulator_acquire(
        struct kbase_hwcnt_context *hctx,
        struct kbase_hwcnt_accumulator **accum)
{
        int errcode = 0;
        unsigned long flags;

        if (!hctx || !accum)
                return -EINVAL;

        mutex_lock(&hctx->accum_lock);
        spin_lock_irqsave(&hctx->state_lock, flags);

        if (!hctx->accum_inited)
                /* Set accum initing now to prevent concurrent init */
                hctx->accum_inited = true;
        else
                /* Already have an accum, or already being inited */
                errcode = -EBUSY;

        spin_unlock_irqrestore(&hctx->state_lock, flags);
        mutex_unlock(&hctx->accum_lock);

        if (errcode)
                return errcode;

        errcode = kbasep_hwcnt_accumulator_init(hctx);

        if (errcode) {
                mutex_lock(&hctx->accum_lock);
                spin_lock_irqsave(&hctx->state_lock, flags);

                hctx->accum_inited = false;

                spin_unlock_irqrestore(&hctx->state_lock, flags);
                mutex_unlock(&hctx->accum_lock);

                return errcode;
        }

        spin_lock_irqsave(&hctx->state_lock, flags);

        WARN_ON(hctx->disable_count == 0);
        WARN_ON(hctx->accum.enable_map_any_enabled);

        /* Decrement the disable count to allow the accumulator to be accessible
         * now that it's fully constructed.
         */
        hctx->disable_count--;

        /*
         * Make sure the accumulator is initialised to the correct state.
         * Regardless of initial state, counters don't need to be enabled via
         * the backend, as the initial enable map has no enabled counters.
         */
        hctx->accum.state = (hctx->disable_count == 0) ?
                ACCUM_STATE_ENABLED :
                ACCUM_STATE_DISABLED;

        spin_unlock_irqrestore(&hctx->state_lock, flags);

        *accum = &hctx->accum;

        return 0;
}

void kbase_hwcnt_accumulator_release(struct kbase_hwcnt_accumulator *accum)
{
        unsigned long flags;
        struct kbase_hwcnt_context *hctx;

        if (!accum)
                return;

        hctx = container_of(accum, struct kbase_hwcnt_context, accum);

        mutex_lock(&hctx->accum_lock);

        /* Double release is a programming error */
        WARN_ON(!hctx->accum_inited);

        /* Disable the context to ensure the accumulator is inaccessible while
         * we're destroying it. This performs the corresponding disable count
         * increment to the decrement done during acquisition.
         */
        kbasep_hwcnt_context_disable(hctx, false);

        mutex_unlock(&hctx->accum_lock);

        kbasep_hwcnt_accumulator_term(hctx);

        mutex_lock(&hctx->accum_lock);
        spin_lock_irqsave(&hctx->state_lock, flags);

        hctx->accum_inited = false;

        spin_unlock_irqrestore(&hctx->state_lock, flags);
        mutex_unlock(&hctx->accum_lock);
}

void kbase_hwcnt_context_disable(struct kbase_hwcnt_context *hctx)
{
        if (WARN_ON(!hctx))
                return;

        /* Try and atomically disable first, so we can avoid locking the mutex
         * if we don't need to.
         */
        if (kbase_hwcnt_context_disable_atomic(hctx))
                return;

        mutex_lock(&hctx->accum_lock);

        kbasep_hwcnt_context_disable(hctx, true);

        mutex_unlock(&hctx->accum_lock);
}

bool kbase_hwcnt_context_disable_atomic(struct kbase_hwcnt_context *hctx)
{
        unsigned long flags;
        bool atomic_disabled = false;

        if (WARN_ON(!hctx))
                return false;

        spin_lock_irqsave(&hctx->state_lock, flags);

        if (!WARN_ON(hctx->disable_count == SIZE_MAX)) {
                /*
                 * If disable count is non-zero, we can just bump the disable
                 * count.
                 *
                 * Otherwise, we can't disable in an atomic context.
                 */
                if (hctx->disable_count != 0) {
                        hctx->disable_count++;
                        atomic_disabled = true;
                }
        }

        spin_unlock_irqrestore(&hctx->state_lock, flags);

        return atomic_disabled;
}

void kbase_hwcnt_context_enable(struct kbase_hwcnt_context *hctx)
{
        unsigned long flags;

        if (WARN_ON(!hctx))
                return;

        spin_lock_irqsave(&hctx->state_lock, flags);

        if (!WARN_ON(hctx->disable_count == 0)) {
                if (hctx->disable_count == 1)
                        kbasep_hwcnt_accumulator_enable(hctx);

                hctx->disable_count--;
        }

        spin_unlock_irqrestore(&hctx->state_lock, flags);
}
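
/*
 * Sketch of balanced disable/enable usage by a hypothetical caller: every
 * successful kbase_hwcnt_context_disable[_atomic]() must eventually be
 * matched by a kbase_hwcnt_context_enable():
 *
 *      kbase_hwcnt_context_disable(hctx);
 *      ... counter dumping is guaranteed to be disabled here ...
 *      kbase_hwcnt_context_enable(hctx);
 */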

const struct kbase_hwcnt_metadata *kbase_hwcnt_context_metadata(
        struct kbase_hwcnt_context *hctx)
{
        if (!hctx)
                return NULL;

        return hctx->iface->metadata(hctx->iface->info);
}

bool kbase_hwcnt_context_queue_work(struct kbase_hwcnt_context *hctx,
                                    struct work_struct *work)
{
        if (WARN_ON(!hctx) || WARN_ON(!work))
                return false;

        return queue_work(hctx->wq, work);
}
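
/*
 * Hypothetical sketch of how a hardware counter user might submit async work
 * through the centralized workqueue (the worker function and work item are
 * made up for illustration):
 *
 *      static void my_hwcnt_worker(struct work_struct *work)
 *      {
 *              ... perform dumps or other hwcnt API calls ...
 *      }
 *
 *      INIT_WORK(&my_work, my_hwcnt_worker);
 *      if (!kbase_hwcnt_context_queue_work(hctx, &my_work))
 *              ... the work was already queued (or an argument was NULL) ...
 */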

int kbase_hwcnt_accumulator_set_counters(
        struct kbase_hwcnt_accumulator *accum,
        const struct kbase_hwcnt_enable_map *new_map,
        u64 *ts_start_ns,
        u64 *ts_end_ns,
        struct kbase_hwcnt_dump_buffer *dump_buf)
{
        int errcode;
        struct kbase_hwcnt_context *hctx;

        if (!accum || !new_map || !ts_start_ns || !ts_end_ns)
                return -EINVAL;

        hctx = container_of(accum, struct kbase_hwcnt_context, accum);

        if ((new_map->metadata != hctx->accum.metadata) ||
            (dump_buf && (dump_buf->metadata != hctx->accum.metadata)))
                return -EINVAL;

        mutex_lock(&hctx->accum_lock);

        errcode = kbasep_hwcnt_accumulator_dump(
                hctx, ts_start_ns, ts_end_ns, dump_buf, new_map);

        mutex_unlock(&hctx->accum_lock);

        return errcode;
}

int kbase_hwcnt_accumulator_dump(
        struct kbase_hwcnt_accumulator *accum,
        u64 *ts_start_ns,
        u64 *ts_end_ns,
        struct kbase_hwcnt_dump_buffer *dump_buf)
{
        int errcode;
        struct kbase_hwcnt_context *hctx;

        if (!accum || !ts_start_ns || !ts_end_ns)
                return -EINVAL;

        hctx = container_of(accum, struct kbase_hwcnt_context, accum);

        if (dump_buf && (dump_buf->metadata != hctx->accum.metadata))
                return -EINVAL;

        mutex_lock(&hctx->accum_lock);

        errcode = kbasep_hwcnt_accumulator_dump(
                hctx, ts_start_ns, ts_end_ns, dump_buf, NULL);

        mutex_unlock(&hctx->accum_lock);

        return errcode;
}
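
/*
 * Minimal sketch of a full acquire -> dump -> release sequence from a
 * hypothetical caller. The enable map and dump buffer are assumed to have
 * been allocated against the context's metadata, obtained via
 * kbase_hwcnt_context_metadata(); the initial enable map after acquisition
 * has no counters enabled, so counters are enabled via set_counters:
 *
 *      struct kbase_hwcnt_accumulator *acc;
 *      u64 ts_start, ts_end;
 *
 *      if (!kbase_hwcnt_accumulator_acquire(hctx, &acc)) {
 *              kbase_hwcnt_accumulator_set_counters(acc, &enable_map,
 *                                                   &ts_start, &ts_end, NULL);
 *              ... later ...
 *              kbase_hwcnt_accumulator_dump(acc, &ts_start, &ts_end,
 *                                           &dump_buf);
 *              kbase_hwcnt_accumulator_release(acc);
 *      }
 */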

u64 kbase_hwcnt_accumulator_timestamp_ns(struct kbase_hwcnt_accumulator *accum)
{
        struct kbase_hwcnt_context *hctx;

        if (WARN_ON(!accum))
                return 0;

        hctx = container_of(accum, struct kbase_hwcnt_context, accum);
        return hctx->iface->timestamp_ns(accum->backend);
}
