// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Mali arbiter power manager state machine and APIs
 */

#include <mali_kbase.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <tl/mali_kbase_tracepoints.h>
#include <mali_kbase_gpuprops.h>

/* A dmesg warning will occur if the GPU is not granted
 * after the following time (in milliseconds) has elapsed.
 */
#define GPU_REQUEST_TIMEOUT 1000
#define KHZ_TO_HZ 1000

#define MAX_L2_SLICES_MASK 0xFF

/* Maximum time in ms, before deferring probe in case
 * the GPU_GRANTED message is not received
 */
static int gpu_req_timeout = 1;
module_param(gpu_req_timeout, int, 0644);
MODULE_PARM_DESC(gpu_req_timeout,
	"On a virtualized platform, if the GPU is not granted within this time (ms) kbase will defer the probe");

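/* Illustrative usage (module and sysfs path names assumed, adjust to the
 * platform's actual kbase module name): the timeout can be raised at
 * load time, e.g.
 *
 *	insmod mali_kbase.ko gpu_req_timeout=100
 *
 * or changed at runtime via
 * /sys/module/mali_kbase/parameters/gpu_req_timeout (the 0644 mode
 * above makes the parameter writable from sysfs).
 */
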
static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev);
static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(
	struct kbase_device *kbdev);

/**
 * kbase_arbiter_pm_vm_state_str() - Helper function to get string
 * for kbase VM state (debug).
 * @state: kbase VM state
 *
 * Return: string representation of kbase_vm_state
 */
static inline const char *kbase_arbiter_pm_vm_state_str(
	enum kbase_vm_state state)
{
	switch (state) {
	case KBASE_VM_STATE_INITIALIZING:
		return "KBASE_VM_STATE_INITIALIZING";
	case KBASE_VM_STATE_INITIALIZING_WITH_GPU:
		return "KBASE_VM_STATE_INITIALIZING_WITH_GPU";
	case KBASE_VM_STATE_SUSPENDED:
		return "KBASE_VM_STATE_SUSPENDED";
	case KBASE_VM_STATE_STOPPED:
		return "KBASE_VM_STATE_STOPPED";
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
		return "KBASE_VM_STATE_STOPPED_GPU_REQUESTED";
	case KBASE_VM_STATE_STARTING:
		return "KBASE_VM_STATE_STARTING";
	case KBASE_VM_STATE_IDLE:
		return "KBASE_VM_STATE_IDLE";
	case KBASE_VM_STATE_ACTIVE:
		return "KBASE_VM_STATE_ACTIVE";
	case KBASE_VM_STATE_STOPPING_IDLE:
		return "KBASE_VM_STATE_STOPPING_IDLE";
	case KBASE_VM_STATE_STOPPING_ACTIVE:
		return "KBASE_VM_STATE_STOPPING_ACTIVE";
	case KBASE_VM_STATE_SUSPEND_PENDING:
		return "KBASE_VM_STATE_SUSPEND_PENDING";
	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
		return "KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT";
	default:
		KBASE_DEBUG_ASSERT(false);
		return "[UnknownState]";
	}
}

/**
 * kbase_arbiter_pm_vm_event_str() - Helper function to get string
 * for kbase VM event (debug).
 * @evt: kbase VM event
 *
 * Return: string representation of kbase_arbif_evt
 */
static inline const char *kbase_arbiter_pm_vm_event_str(
	enum kbase_arbif_evt evt)
{
	switch (evt) {
	case KBASE_VM_GPU_INITIALIZED_EVT:
		return "KBASE_VM_GPU_INITIALIZED_EVT";
	case KBASE_VM_GPU_STOP_EVT:
		return "KBASE_VM_GPU_STOP_EVT";
	case KBASE_VM_GPU_GRANTED_EVT:
		return "KBASE_VM_GPU_GRANTED_EVT";
	case KBASE_VM_GPU_LOST_EVT:
		return "KBASE_VM_GPU_LOST_EVT";
	case KBASE_VM_OS_SUSPEND_EVENT:
		return "KBASE_VM_OS_SUSPEND_EVENT";
	case KBASE_VM_OS_RESUME_EVENT:
		return "KBASE_VM_OS_RESUME_EVENT";
	case KBASE_VM_GPU_IDLE_EVENT:
		return "KBASE_VM_GPU_IDLE_EVENT";
	case KBASE_VM_REF_EVENT:
		return "KBASE_VM_REF_EVENT";
	default:
		KBASE_DEBUG_ASSERT(false);
		return "[UnknownEvent]";
	}
}

/**
 * kbase_arbiter_pm_vm_set_state() - Sets new kbase_arbiter_vm_state
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @new_state: kbase VM new state
 *
 * This function sets the new state for the VM
 */
static void kbase_arbiter_pm_vm_set_state(struct kbase_device *kbdev,
	enum kbase_vm_state new_state)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	dev_dbg(kbdev->dev, "VM set_state %s -> %s",
		kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state),
		kbase_arbiter_pm_vm_state_str(new_state));

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	arb_vm_state->vm_state = new_state;
	if (new_state != KBASE_VM_STATE_INITIALIZING_WITH_GPU &&
	    new_state != KBASE_VM_STATE_INITIALIZING)
		KBASE_KTRACE_ADD(kbdev, ARB_VM_STATE, NULL, new_state);
	wake_up(&arb_vm_state->vm_state_wait);
}
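
/* Every state transition wakes all vm_state_wait waiters; both the
 * probe-time wait in kbase_arbiter_pm_early_init() and
 * kbase_arbiter_pm_vm_wait_gpu_assignment() rely on this wake-up.
 */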

/**
 * kbase_arbiter_pm_suspend_wq() - Suspend work queue of the driver.
 * @data: Pointer to the vm_suspend_work item
 *
 * Suspends the driver when the VM is in the SUSPEND_PENDING,
 * STOPPING_IDLE or STOPPING_ACTIVE state.
 */
static void kbase_arbiter_pm_suspend_wq(struct work_struct *data)
{
	struct kbase_arbiter_vm_state *arb_vm_state = container_of(data,
			struct kbase_arbiter_vm_state,
			vm_suspend_work);
	struct kbase_device *kbdev = arb_vm_state->kbdev;

	mutex_lock(&arb_vm_state->vm_state_lock);
	dev_dbg(kbdev->dev, ">%s\n", __func__);
	if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_IDLE ||
	    arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_ACTIVE ||
	    arb_vm_state->vm_state == KBASE_VM_STATE_SUSPEND_PENDING) {
		mutex_unlock(&arb_vm_state->vm_state_lock);
		dev_dbg(kbdev->dev, ">kbase_pm_driver_suspend\n");
		kbase_pm_driver_suspend(kbdev);
		dev_dbg(kbdev->dev, "<kbase_pm_driver_suspend\n");
		mutex_lock(&arb_vm_state->vm_state_lock);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
	dev_dbg(kbdev->dev, "<%s\n", __func__);
}

/**
 * kbase_arbiter_pm_resume_wq() - Kbase resume work queue.
 * @data: Pointer to the vm_resume_work item
 *
 * Resumes the driver when the VM is in the STARTING state; if it is in
 * the STOPPING_ACTIVE state, a stop event is requested instead.
 */
static void kbase_arbiter_pm_resume_wq(struct work_struct *data)
{
	struct kbase_arbiter_vm_state *arb_vm_state = container_of(data,
			struct kbase_arbiter_vm_state,
			vm_resume_work);
	struct kbase_device *kbdev = arb_vm_state->kbdev;

	mutex_lock(&arb_vm_state->vm_state_lock);
	dev_dbg(kbdev->dev, ">%s\n", __func__);
	arb_vm_state->vm_arb_starting = true;
	if (arb_vm_state->vm_state == KBASE_VM_STATE_STARTING) {
		mutex_unlock(&arb_vm_state->vm_state_lock);
		dev_dbg(kbdev->dev, ">kbase_pm_driver_resume\n");
		kbase_pm_driver_resume(kbdev, true);
		dev_dbg(kbdev->dev, "<kbase_pm_driver_resume\n");
		mutex_lock(&arb_vm_state->vm_state_lock);
	} else if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_ACTIVE) {
		kbase_arbiter_pm_vm_stopped(kbdev);
	}
	arb_vm_state->vm_arb_starting = false;
	mutex_unlock(&arb_vm_state->vm_state_lock);
	KBASE_TLSTREAM_TL_ARBITER_STARTED(kbdev, kbdev);
	dev_dbg(kbdev->dev, "<%s\n", __func__);
}

/**
 * request_timer_callback() - Issue warning on request timer expiration
 * @timer: Request hr timer data
 *
 * Called when the Arbiter takes too long to grant the GPU after a
 * request has been made. Issues a warning in dmesg.
 *
 * Return: Always returns HRTIMER_NORESTART
 */
static enum hrtimer_restart request_timer_callback(struct hrtimer *timer)
{
	struct kbase_arbiter_vm_state *arb_vm_state = container_of(timer,
			struct kbase_arbiter_vm_state, vm_request_timer);

	KBASE_DEBUG_ASSERT(arb_vm_state);
	KBASE_DEBUG_ASSERT(arb_vm_state->kbdev);

	dev_warn(arb_vm_state->kbdev->dev,
		 "Still waiting for GPU to be granted from Arbiter after %d ms\n",
		 GPU_REQUEST_TIMEOUT);
	return HRTIMER_NORESTART;
}

/**
 * start_request_timer() - Start a timer after requesting GPU
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Start a timer to track when kbase is waiting for the GPU from the
 * Arbiter. If the timer expires before the GPU is granted, a warning
 * is issued in dmesg.
 */
static void start_request_timer(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	hrtimer_start(&arb_vm_state->vm_request_timer,
		      HR_TIMER_DELAY_MSEC(GPU_REQUEST_TIMEOUT),
		      HRTIMER_MODE_REL);
}

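/* The request timer is one-shot: request_timer_callback() returns
 * HRTIMER_NORESTART, so it fires at most once per GPU request and is
 * cancelled by cancel_request_timer() once the GPU has been granted.
 */
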
/**
 * cancel_request_timer() - Stop the request timer
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Stops the request timer once the GPU has been granted. Safe to call
 * even if the timer is no longer running.
 */
static void cancel_request_timer(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	hrtimer_cancel(&arb_vm_state->vm_request_timer);
}

/**
 * kbase_arbiter_pm_early_init() - Initialize the arbiter for VM
 * paravirtualized use.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Initialize the arbiter and other required resources during runtime
 * and request the GPU for the VM for the first time.
 *
 * Return: 0 on success, or a Linux error code
 */
int kbase_arbiter_pm_early_init(struct kbase_device *kbdev)
{
	int err;
	struct kbase_arbiter_vm_state *arb_vm_state = NULL;

	arb_vm_state = kmalloc(sizeof(struct kbase_arbiter_vm_state),
			       GFP_KERNEL);
	if (arb_vm_state == NULL)
		return -ENOMEM;

	arb_vm_state->kbdev = kbdev;
	arb_vm_state->vm_state = KBASE_VM_STATE_INITIALIZING;

	mutex_init(&arb_vm_state->vm_state_lock);
	init_waitqueue_head(&arb_vm_state->vm_state_wait);
	arb_vm_state->vm_arb_wq = alloc_ordered_workqueue("kbase_vm_arb_wq",
							  WQ_HIGHPRI);
	if (!arb_vm_state->vm_arb_wq) {
		dev_err(kbdev->dev, "Failed to allocate vm_arb workqueue\n");
		kfree(arb_vm_state);
		return -ENOMEM;
	}
	INIT_WORK(&arb_vm_state->vm_suspend_work, kbase_arbiter_pm_suspend_wq);
	INIT_WORK(&arb_vm_state->vm_resume_work, kbase_arbiter_pm_resume_wq);
	arb_vm_state->vm_arb_starting = false;
	atomic_set(&kbdev->pm.gpu_users_waiting, 0);
	hrtimer_init(&arb_vm_state->vm_request_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	arb_vm_state->vm_request_timer.function = request_timer_callback;
	kbdev->pm.arb_vm_state = arb_vm_state;

	err = kbase_arbif_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Failed to initialise arbif module\n");
		goto arbif_init_fail;
	}

	if (kbdev->arb.arb_if) {
		kbase_arbif_gpu_request(kbdev);
		dev_dbg(kbdev->dev, "Waiting for initial GPU assignment...\n");

		err = wait_event_timeout(arb_vm_state->vm_state_wait,
			arb_vm_state->vm_state ==
					KBASE_VM_STATE_INITIALIZING_WITH_GPU,
			msecs_to_jiffies(gpu_req_timeout));

		if (!err) {
			dev_dbg(kbdev->dev,
				"Kbase probe deferred after waiting %d ms to receive GPU_GRANT\n",
				gpu_req_timeout);

			err = -ENODEV;
			goto arbif_timeout;
		}

		dev_dbg(kbdev->dev,
			"Waiting for initial GPU assignment - done\n");
	}
	return 0;

arbif_timeout:
	kbase_arbiter_pm_early_term(kbdev);
	return err;

arbif_init_fail:
	destroy_workqueue(arb_vm_state->vm_arb_wq);
	kfree(arb_vm_state);
	kbdev->pm.arb_vm_state = NULL;
	return err;
}

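/*
 * Summary of the probe-time handshake performed above by
 * kbase_arbiter_pm_early_init():
 *	kbase_arbif_init()        - bind to the arbiter interface
 *	kbase_arbif_gpu_request() - ask the arbiter for the GPU
 *	wait_event_timeout(...)   - wait up to gpu_req_timeout ms for
 *				    KBASE_VM_STATE_INITIALIZING_WITH_GPU
 * If the grant does not arrive in time, the probe is deferred with
 * -ENODEV and all resources are released again.
 */
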
/**
 * kbase_arbiter_pm_early_term() - Shutdown arbiter and free resources
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Clean up all the resources
 */
void kbase_arbiter_pm_early_term(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	cancel_request_timer(kbdev);
	mutex_lock(&arb_vm_state->vm_state_lock);
	if (arb_vm_state->vm_state > KBASE_VM_STATE_STOPPED_GPU_REQUESTED) {
		kbase_pm_set_gpu_lost(kbdev, false);
		kbase_arbif_gpu_stopped(kbdev, false);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
	destroy_workqueue(arb_vm_state->vm_arb_wq);
	kbase_arbif_destroy(kbdev);
	arb_vm_state->vm_arb_wq = NULL;
	kfree(kbdev->pm.arb_vm_state);
	kbdev->pm.arb_vm_state = NULL;
}

/**
 * kbase_arbiter_pm_release_interrupts() - Release the GPU interrupts
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Releases interrupts and sets the interrupts_installed flag to false
 */
void kbase_arbiter_pm_release_interrupts(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	mutex_lock(&arb_vm_state->vm_state_lock);
	if (arb_vm_state->interrupts_installed) {
		arb_vm_state->interrupts_installed = false;
		kbase_release_interrupts(kbdev);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
}

/**
 * kbase_arbiter_pm_install_interrupts() - Install the GPU interrupts
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Install interrupts and set the interrupts_installed flag to true.
 *
 * Return: 0 on success, or the error code from kbase_install_interrupts()
 */
int kbase_arbiter_pm_install_interrupts(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	int err;

	mutex_lock(&arb_vm_state->vm_state_lock);
	arb_vm_state->interrupts_installed = true;
	err = kbase_install_interrupts(kbdev);
	mutex_unlock(&arb_vm_state->vm_state_lock);
	return err;
}

/**
 * kbase_arbiter_pm_vm_stopped() - Handle stop state for the VM
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Handles a stop state for the VM
 */
void kbase_arbiter_pm_vm_stopped(struct kbase_device *kbdev)
{
	bool request_gpu = false;
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);

	if (atomic_read(&kbdev->pm.gpu_users_waiting) > 0 &&
	    arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_IDLE)
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPING_ACTIVE);

	dev_dbg(kbdev->dev, "%s %s\n", __func__,
		kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));

	if (arb_vm_state->interrupts_installed) {
		arb_vm_state->interrupts_installed = false;
		kbase_release_interrupts(kbdev);
	}

	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_STOPPING_ACTIVE:
		request_gpu = true;
		kbase_arbiter_pm_vm_set_state(kbdev,
				KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
		break;
	case KBASE_VM_STATE_STOPPING_IDLE:
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_STOPPED);
		break;
	case KBASE_VM_STATE_SUSPEND_PENDING:
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_SUSPENDED);
		break;
	default:
		dev_warn(kbdev->dev, "unexpected pm_stop VM state %u",
			 arb_vm_state->vm_state);
		break;
	}

	kbase_pm_set_gpu_lost(kbdev, false);
	kbase_arbif_gpu_stopped(kbdev, request_gpu);
	if (request_gpu)
		start_request_timer(kbdev);
}

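/* Stop-path transitions handled by kbase_arbiter_pm_vm_stopped() above:
 *	STOPPING_ACTIVE -> STOPPED_GPU_REQUESTED (the GPU is re-requested)
 *	STOPPING_IDLE   -> STOPPED
 *	SUSPEND_PENDING -> SUSPENDED
 */

/**
 * kbase_arbiter_set_max_config() - Set the max config data in kbase
 * @kbdev: The kbase device structure for the device (may be NULL)
 * @max_l2_slices: Maximum number of L2 slices allocated to the VM
 * @max_core_mask: Largest core mask allocated to the VM
 *
 * Stores the maximum configuration reported by the arbiter; it is only
 * acted upon while the VM is still in the INITIALIZING state.
 */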
void kbase_arbiter_set_max_config(struct kbase_device *kbdev,
				  uint32_t max_l2_slices,
				  uint32_t max_core_mask)
{
	struct kbase_arbiter_vm_state *arb_vm_state;
	struct max_config_props max_config;

	if (!kbdev)
		return;

	/* Mask the max_l2_slices as it is stored as 8 bits into kbase */
	max_config.l2_slices = max_l2_slices & MAX_L2_SLICES_MASK;
	max_config.core_mask = max_core_mask;
	arb_vm_state = kbdev->pm.arb_vm_state;

	mutex_lock(&arb_vm_state->vm_state_lock);
	/* Just set the max_props in kbase during initialization. */
	if (arb_vm_state->vm_state == KBASE_VM_STATE_INITIALIZING)
		kbase_gpuprops_set_max_config(kbdev, &max_config);
	else
		dev_dbg(kbdev->dev, "Unexpected max_config on VM state %s",
			kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));

	mutex_unlock(&arb_vm_state->vm_state_lock);
}

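/**
 * kbase_arbiter_pm_gpu_assigned() - Check whether the GPU is assigned
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: 0 if the GPU is not assigned (stopped, suspended or lost),
 * 1 if it is assigned, or -EINVAL if @kbdev is not valid.
 */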
int kbase_arbiter_pm_gpu_assigned(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state;
	int result = -EINVAL;

	if (!kbdev)
		return result;

	/* First check the GPU_LOST state */
	kbase_pm_lock(kbdev);
	if (kbase_pm_is_gpu_lost(kbdev)) {
		kbase_pm_unlock(kbdev);
		return 0;
	}
	kbase_pm_unlock(kbdev);

	/* Then the arbitration state machine */
	arb_vm_state = kbdev->pm.arb_vm_state;

	mutex_lock(&arb_vm_state->vm_state_lock);
	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_INITIALIZING:
	case KBASE_VM_STATE_SUSPENDED:
	case KBASE_VM_STATE_STOPPED:
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
		result = 0;
		break;
	default:
		result = 1;
		break;
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);

	return result;
}

/**
 * kbase_arbiter_pm_vm_gpu_start() - Handles the start state of the VM
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Handles the start state of the VM
 */
static void kbase_arbiter_pm_vm_gpu_start(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	bool freq_updated = false;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	mutex_lock(&kbdev->arb.arb_freq.arb_freq_lock);
	if (kbdev->arb.arb_freq.freq_updated) {
		kbdev->arb.arb_freq.freq_updated = false;
		freq_updated = true;
	}
	mutex_unlock(&kbdev->arb.arb_freq.arb_freq_lock);

	cancel_request_timer(kbdev);
	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_INITIALIZING:
		kbase_arbiter_pm_vm_set_state(kbdev,
				KBASE_VM_STATE_INITIALIZING_WITH_GPU);
		break;
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_STARTING);
		arb_vm_state->interrupts_installed = true;
		kbase_install_interrupts(kbdev);
		/*
		 * GPU GRANTED received while in stop can be a result of a
		 * repartitioning.
		 */
		kbase_gpuprops_req_curr_config_update(kbdev);
		/* curr_config will be updated while resuming the PM. */
		queue_work(arb_vm_state->vm_arb_wq,
			   &arb_vm_state->vm_resume_work);
		break;
	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
		kbase_pm_set_gpu_lost(kbdev, false);
		kbase_arbif_gpu_stopped(kbdev, false);
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_SUSPENDED);
		break;
	default:
		/*
		 * GPU_GRANTED can be received when there is a frequency
		 * update. Only show a warning if received in an unexpected
		 * state without a frequency update.
		 */
		if (!freq_updated)
			dev_warn(kbdev->dev,
				 "GPU_GRANTED when not expected - state %s\n",
				 kbase_arbiter_pm_vm_state_str(
					arb_vm_state->vm_state));
		break;
	}
}

/**
 * kbase_arbiter_pm_vm_gpu_stop() - Handles the stop state of the VM
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Handles the stop state of the VM
 */
static void kbase_arbiter_pm_vm_gpu_stop(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	if (arb_vm_state->vm_state == KBASE_VM_STATE_INITIALIZING_WITH_GPU) {
		mutex_unlock(&arb_vm_state->vm_state_lock);
		kbase_arbiter_pm_vm_wait_gpu_assignment(kbdev);
		mutex_lock(&arb_vm_state->vm_state_lock);
	}

	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_IDLE:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPING_IDLE);
		queue_work(arb_vm_state->vm_arb_wq,
			   &arb_vm_state->vm_suspend_work);
		break;
	case KBASE_VM_STATE_ACTIVE:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPING_ACTIVE);
		queue_work(arb_vm_state->vm_arb_wq,
			   &arb_vm_state->vm_suspend_work);
		break;
	case KBASE_VM_STATE_STARTING:
		dev_dbg(kbdev->dev, "Got GPU_STOP event while STARTING.");
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_STOPPING_ACTIVE);
		if (arb_vm_state->vm_arb_starting)
			queue_work(arb_vm_state->vm_arb_wq,
				   &arb_vm_state->vm_suspend_work);
		break;
	case KBASE_VM_STATE_SUSPEND_PENDING:
		/* Suspend finishes with a stop so nothing else to do */
		break;
	case KBASE_VM_STATE_INITIALIZING:
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
		/*
		 * When stop() is received in a GPU REQUESTED state, it means
		 * that the granted() was missed, so the GPU needs to be
		 * requested again.
		 */
		dev_dbg(kbdev->dev,
			"GPU stop while already stopped with GPU requested");
		kbase_arbif_gpu_stopped(kbdev, true);
		start_request_timer(kbdev);
		break;
	default:
		dev_warn(kbdev->dev, "GPU_STOP when not expected - state %s\n",
			 kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));
		break;
	}
}

/**
 * kbase_gpu_lost() - Handle a GPU lost event
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Handles the GPU_LOST event signalled by the arbiter
 */
static void kbase_gpu_lost(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	bool handle_gpu_lost = false;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);

	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_STARTING:
	case KBASE_VM_STATE_ACTIVE:
	case KBASE_VM_STATE_IDLE:
		dev_warn(kbdev->dev, "GPU lost in state %s",
			 kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));
		kbase_arbiter_pm_vm_gpu_stop(kbdev);
		handle_gpu_lost = true;
		break;
	case KBASE_VM_STATE_STOPPING_IDLE:
	case KBASE_VM_STATE_STOPPING_ACTIVE:
	case KBASE_VM_STATE_SUSPEND_PENDING:
		dev_dbg(kbdev->dev, "GPU lost while stopping");
		handle_gpu_lost = true;
		break;
	case KBASE_VM_STATE_SUSPENDED:
	case KBASE_VM_STATE_STOPPED:
		dev_dbg(kbdev->dev, "GPU lost while already stopped");
		break;
	case KBASE_VM_STATE_INITIALIZING:
	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
		/*
		 * When lost() is received in a GPU REQUESTED state, it means
		 * that the granted() and stop() were missed, so the GPU
		 * needs to be requested again. Very unlikely to happen.
		 */
		dev_dbg(kbdev->dev,
			"GPU lost while already stopped with GPU requested");
		kbase_arbif_gpu_request(kbdev);
		start_request_timer(kbdev);
		break;
	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
		dev_dbg(kbdev->dev, "GPU lost while waiting to suspend");
		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_SUSPENDED);
		break;
	default:
		break;
	}
	if (handle_gpu_lost) {
		/* Releasing the VM state lock here is safe because
		 * we are guaranteed to be in either STOPPING_IDLE,
		 * STOPPING_ACTIVE or SUSPEND_PENDING at this point.
		 * The only transitions that are valid from here are to
		 * STOPPED, STOPPED_GPU_REQUESTED or SUSPENDED, which can
		 * only happen at the completion of the GPU lost handling.
		 */
		mutex_unlock(&arb_vm_state->vm_state_lock);
		kbase_pm_handle_gpu_lost(kbdev);
		mutex_lock(&arb_vm_state->vm_state_lock);
	}
}

/**
 * kbase_arbiter_pm_vm_os_suspend_ready_state() - Checks if the VM is ready
 * to be moved to the suspended state.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: true if it is ready to be suspended, else false.
 */
static inline bool kbase_arbiter_pm_vm_os_suspend_ready_state(
	struct kbase_device *kbdev)
{
	switch (kbdev->pm.arb_vm_state->vm_state) {
	case KBASE_VM_STATE_SUSPENDED:
	case KBASE_VM_STATE_STOPPED:
	case KBASE_VM_STATE_IDLE:
	case KBASE_VM_STATE_ACTIVE:
		return true;
	default:
		return false;
	}
}

/**
 * kbase_arbiter_pm_vm_os_prepare_suspend() - Prepare OS to be in suspend state
 * until it receives the grant message from the arbiter
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Prepares the OS to be in the suspend state until it receives the GRANT
 * message from the Arbiter asynchronously.
 */
static void kbase_arbiter_pm_vm_os_prepare_suspend(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	enum kbase_vm_state prev_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	if (kbdev->arb.arb_if) {
		if (kbdev->pm.arb_vm_state->vm_state ==
		    KBASE_VM_STATE_SUSPENDED)
			return;
	}
	/* Block the OS suspend function until we are in a stable state
	 * with vm_state_lock held
	 */
	while (!kbase_arbiter_pm_vm_os_suspend_ready_state(kbdev)) {
		prev_state = arb_vm_state->vm_state;
		switch (arb_vm_state->vm_state) {
		case KBASE_VM_STATE_STOPPING_ACTIVE:
		case KBASE_VM_STATE_STOPPING_IDLE:
			kbase_arbiter_pm_vm_set_state(kbdev,
				KBASE_VM_STATE_SUSPEND_PENDING);
			break;
		case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
			kbase_arbiter_pm_vm_set_state(kbdev,
				KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT);
			break;
		case KBASE_VM_STATE_STARTING:
			if (!arb_vm_state->vm_arb_starting) {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_SUSPEND_PENDING);
				kbase_arbiter_pm_vm_stopped(kbdev);
			}
			break;
		default:
			break;
		}
		mutex_unlock(&arb_vm_state->vm_state_lock);
		wait_event(arb_vm_state->vm_state_wait,
			   arb_vm_state->vm_state != prev_state);
		mutex_lock(&arb_vm_state->vm_state_lock);
	}

	switch (arb_vm_state->vm_state) {
	case KBASE_VM_STATE_STOPPED:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_SUSPENDED);
		break;
	case KBASE_VM_STATE_IDLE:
	case KBASE_VM_STATE_ACTIVE:
		kbase_arbiter_pm_vm_set_state(kbdev,
					      KBASE_VM_STATE_SUSPEND_PENDING);
		mutex_unlock(&arb_vm_state->vm_state_lock);
		/* Ensure resume has completed fully before starting suspend */
		flush_work(&arb_vm_state->vm_resume_work);
		kbase_pm_driver_suspend(kbdev);
		mutex_lock(&arb_vm_state->vm_state_lock);
		break;
	case KBASE_VM_STATE_SUSPENDED:
		break;
	default:
		KBASE_DEBUG_ASSERT_MSG(false, "Unexpected state to suspend");
		break;
	}
}

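/* The wait loop in kbase_arbiter_pm_vm_os_prepare_suspend() may drop and
 * re-take vm_state_lock several times; it only exits once the VM has
 * settled in a suspend-ready state (SUSPENDED, STOPPED, IDLE or ACTIVE).
 */
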
/**
 * kbase_arbiter_pm_vm_os_resume() - Resume OS function once it receives
 * a grant message from the arbiter
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Resumes the OS function once it receives the GRANT message
 * from the Arbiter asynchronously.
 */
static void kbase_arbiter_pm_vm_os_resume(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	KBASE_DEBUG_ASSERT_MSG(arb_vm_state->vm_state ==
			       KBASE_VM_STATE_SUSPENDED,
			       "Unexpected state to resume");

	kbase_arbiter_pm_vm_set_state(kbdev,
				      KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
	kbase_arbif_gpu_request(kbdev);
	start_request_timer(kbdev);

	/* Release the lock and block the OS resume function until we have
	 * asynchronously received the GRANT message from the Arbiter and
	 * fully resumed
	 */
	mutex_unlock(&arb_vm_state->vm_state_lock);
	kbase_arbiter_pm_vm_wait_gpu_assignment(kbdev);
	flush_work(&arb_vm_state->vm_resume_work);
	mutex_lock(&arb_vm_state->vm_state_lock);
}

/**
 * kbase_arbiter_pm_vm_event() - Dispatch VM event to the state machine.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @evt: VM event
 *
 * The state machine function. Receives events and transitions states
 * according to the event received and the current state.
 */
void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev,
	enum kbase_arbif_evt evt)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	if (!kbdev->arb.arb_if)
		return;

	mutex_lock(&arb_vm_state->vm_state_lock);
	dev_dbg(kbdev->dev, "%s %s\n", __func__,
		kbase_arbiter_pm_vm_event_str(evt));
	if (arb_vm_state->vm_state != KBASE_VM_STATE_INITIALIZING_WITH_GPU &&
	    arb_vm_state->vm_state != KBASE_VM_STATE_INITIALIZING)
		KBASE_KTRACE_ADD(kbdev, ARB_VM_EVT, NULL, evt);
	switch (evt) {
	case KBASE_VM_GPU_GRANTED_EVT:
		kbase_arbiter_pm_vm_gpu_start(kbdev);
		break;
	case KBASE_VM_GPU_STOP_EVT:
		kbase_arbiter_pm_vm_gpu_stop(kbdev);
		break;
	case KBASE_VM_GPU_LOST_EVT:
		dev_dbg(kbdev->dev, "KBASE_VM_GPU_LOST_EVT!");
		kbase_gpu_lost(kbdev);
		break;
	case KBASE_VM_OS_SUSPEND_EVENT:
		kbase_arbiter_pm_vm_os_prepare_suspend(kbdev);
		break;
	case KBASE_VM_OS_RESUME_EVENT:
		kbase_arbiter_pm_vm_os_resume(kbdev);
		break;
	case KBASE_VM_GPU_IDLE_EVENT:
		switch (arb_vm_state->vm_state) {
		case KBASE_VM_STATE_ACTIVE:
			kbase_arbiter_pm_vm_set_state(kbdev,
						      KBASE_VM_STATE_IDLE);
			kbase_arbif_gpu_idle(kbdev);
			break;
		default:
			break;
		}
		break;

	case KBASE_VM_REF_EVENT:
		switch (arb_vm_state->vm_state) {
		case KBASE_VM_STATE_STARTING:
		case KBASE_VM_STATE_IDLE:
			kbase_arbiter_pm_vm_set_state(kbdev,
						      KBASE_VM_STATE_ACTIVE);
			kbase_arbif_gpu_active(kbdev);
			break;
		case KBASE_VM_STATE_STOPPING_IDLE:
			kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_STOPPING_ACTIVE);
			break;
		default:
			break;
		}
		break;

	case KBASE_VM_GPU_INITIALIZED_EVT:
		switch (arb_vm_state->vm_state) {
		case KBASE_VM_STATE_INITIALIZING_WITH_GPU:
			lockdep_assert_held(&kbdev->pm.lock);
			if (kbdev->pm.active_count > 0) {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_ACTIVE);
				kbase_arbif_gpu_active(kbdev);
			} else {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_IDLE);
				kbase_arbif_gpu_idle(kbdev);
			}
			break;
		default:
			break;
		}
		break;

	default:
		dev_alert(kbdev->dev, "Got Unknown Event!");
		break;
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
}

KBASE_EXPORT_TEST_API(kbase_arbiter_pm_vm_event);

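/*
 * Illustrative only: the arbiter interface glue is expected to feed
 * messages into the state machine above by dispatching events, e.g.
 *
 *	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_GRANTED_EVT);
 *
 * from its message-handling context.
 */
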
/**
 * kbase_arbiter_pm_vm_wait_gpu_assignment() - VM waits for a GPU assignment.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * VM waits for a GPU assignment.
 */
static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	dev_dbg(kbdev->dev, "Waiting for GPU assignment...\n");
	wait_event(arb_vm_state->vm_state_wait,
		   arb_vm_state->vm_state == KBASE_VM_STATE_IDLE ||
		   arb_vm_state->vm_state == KBASE_VM_STATE_ACTIVE);
	dev_dbg(kbdev->dev, "Waiting for GPU assignment - done\n");
}

/**
 * kbase_arbiter_pm_vm_gpu_assigned_lockheld() - Check if the GPU is assigned
 * to the VM
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Checks if the GPU is assigned to the VM. The caller must hold the VM
 * state lock.
 *
 * Return: true if the GPU is assigned, else false.
 */
static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(
	struct kbase_device *kbdev)
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	return (arb_vm_state->vm_state == KBASE_VM_STATE_IDLE ||
		arb_vm_state->vm_state == KBASE_VM_STATE_ACTIVE);
}

/**
 * kbase_arbiter_pm_ctx_active_handle_suspend() - Handle suspend operation for
 * arbitration mode
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @suspend_handler: The handler code for how to handle a suspend
 *                   that might occur
 *
 * This function handles a suspend event from the driver,
 * communicating with the arbiter and waiting synchronously for the GPU
 * to be granted again depending on the VM state.
 *
 * Return: 0 on success, or 1 if the suspend handler is not possible.
 */
int kbase_arbiter_pm_ctx_active_handle_suspend(struct kbase_device *kbdev,
	enum kbase_pm_suspend_handler suspend_handler)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	int res = 0;

	if (kbdev->arb.arb_if) {
		mutex_lock(&arb_vm_state->vm_state_lock);
		while (!kbase_arbiter_pm_vm_gpu_assigned_lockheld(kbdev)) {
			/* Update VM state since we have GPU work to do */
			if (arb_vm_state->vm_state ==
			    KBASE_VM_STATE_STOPPING_IDLE)
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_STOPPING_ACTIVE);
			else if (arb_vm_state->vm_state ==
				 KBASE_VM_STATE_STOPPED) {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
				kbase_arbif_gpu_request(kbdev);
				start_request_timer(kbdev);
			} else if (arb_vm_state->vm_state ==
				   KBASE_VM_STATE_INITIALIZING_WITH_GPU)
				break;

			if (suspend_handler !=
			    KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE) {
				/* In case of GPU lost, even if
				 * active_count > 0, we no longer have GPU
				 * access
				 */
				if (kbase_pm_is_gpu_lost(kbdev))
					res = 1;

				switch (suspend_handler) {
				case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
					res = 1;
					break;
				case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
					if (kbdev->pm.active_count == 0)
						res = 1;
					break;
				case KBASE_PM_SUSPEND_HANDLER_VM_GPU_GRANTED:
					break;
				default:
					WARN(1, "Unknown suspend_handler\n");
					res = 1;
					break;
				}
				break;
			}

			/* Need to synchronously wait for GPU assignment */
			atomic_inc(&kbdev->pm.gpu_users_waiting);
			mutex_unlock(&arb_vm_state->vm_state_lock);
			mutex_unlock(&kbdev->pm.lock);
			mutex_unlock(&js_devdata->runpool_mutex);
			kbase_arbiter_pm_vm_wait_gpu_assignment(kbdev);
			mutex_lock(&js_devdata->runpool_mutex);
			mutex_lock(&kbdev->pm.lock);
			mutex_lock(&arb_vm_state->vm_state_lock);
			atomic_dec(&kbdev->pm.gpu_users_waiting);
		}
		mutex_unlock(&arb_vm_state->vm_state_lock);
	}
	return res;
}

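/* Note the lock ordering used above: runpool_mutex -> pm.lock ->
 * vm_state_lock. All three are dropped before blocking on the GPU
 * assignment and re-taken in the same order afterwards.
 */
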
/**
 * kbase_arbiter_pm_update_gpu_freq() - Updates GPU clock frequency received
 * from the arbiter.
 * @arb_freq: Pointer to structure holding GPU clock frequency data
 * @freq: New frequency value in KHz
 */
void kbase_arbiter_pm_update_gpu_freq(struct kbase_arbiter_freq *arb_freq,
	uint32_t freq)
{
	struct kbase_gpu_clk_notifier_data ndata;

	mutex_lock(&arb_freq->arb_freq_lock);
	if (arb_freq->arb_freq != freq) {
		ndata.new_rate = (unsigned long)freq * KHZ_TO_HZ;
		ndata.old_rate = (unsigned long)arb_freq->arb_freq * KHZ_TO_HZ;
		ndata.gpu_clk_handle = arb_freq;
		arb_freq->arb_freq = freq;
		arb_freq->freq_updated = true;
		if (arb_freq->nb)
			arb_freq->nb->notifier_call(arb_freq->nb,
						    POST_RATE_CHANGE, &ndata);
	}

	mutex_unlock(&arb_freq->arb_freq_lock);
}

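/* Note: the arbiter reports frequencies in KHz, while the clock rate
 * change notifiers and get_arb_gpu_clk_rate() below work in Hz, hence
 * the KHZ_TO_HZ conversions.
 */
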
/**
 * enumerate_arb_gpu_clk() - Enumerate a GPU clock on the given index
 * @kbdev: kbase_device pointer
 * @index: GPU clock index
 *
 * Return: Pointer to structure holding GPU clock frequency data reported
 * from the arbiter; only index 0 is valid.
 */
static void *enumerate_arb_gpu_clk(struct kbase_device *kbdev,
		unsigned int index)
{
	if (index == 0)
		return &kbdev->arb.arb_freq;
	return NULL;
}

/**
 * get_arb_gpu_clk_rate() - Get the current rate of the GPU clock
 * @kbdev: kbase_device pointer
 * @gpu_clk_handle: Handle unique to the enumerated GPU clock
 *
 * Return: The GPU clock frequency value saved when the GPU is granted
 * from the arbiter
 */
static unsigned long get_arb_gpu_clk_rate(struct kbase_device *kbdev,
		void *gpu_clk_handle)
{
	uint32_t freq;
	struct kbase_arbiter_freq *arb_dev_freq =
			(struct kbase_arbiter_freq *)gpu_clk_handle;

	mutex_lock(&arb_dev_freq->arb_freq_lock);
	/* Convert from KHz to Hz */
	freq = arb_dev_freq->arb_freq * KHZ_TO_HZ;
	mutex_unlock(&arb_dev_freq->arb_freq_lock);
	return freq;
}

/**
 * arb_gpu_clk_notifier_register() - Register a clock rate change notifier.
 * @kbdev: kbase_device pointer
 * @gpu_clk_handle: Handle unique to the enumerated GPU clock
 * @nb: notifier block containing the callback function pointer
 *
 * This function registers a callback function that is invoked whenever the
 * frequency of the clock corresponding to @gpu_clk_handle changes.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int arb_gpu_clk_notifier_register(struct kbase_device *kbdev,
		void *gpu_clk_handle, struct notifier_block *nb)
{
	int ret = 0;
	struct kbase_arbiter_freq *arb_dev_freq =
			(struct kbase_arbiter_freq *)gpu_clk_handle;

	if (!arb_dev_freq->nb)
		arb_dev_freq->nb = nb;
	else
		ret = -EBUSY;

	return ret;
}

/**
 * arb_gpu_clk_notifier_unregister() - Unregister clock rate change notifier
 * @kbdev: kbase_device pointer
 * @gpu_clk_handle: Handle unique to the enumerated GPU clock
 * @nb: notifier block containing the callback function pointer
 *
 * This function is used to unregister a callback function that was
 * previously registered to get notified of a frequency change of the
 * clock corresponding to @gpu_clk_handle.
 */
static void arb_gpu_clk_notifier_unregister(struct kbase_device *kbdev,
		void *gpu_clk_handle, struct notifier_block *nb)
{
	struct kbase_arbiter_freq *arb_dev_freq =
			(struct kbase_arbiter_freq *)gpu_clk_handle;

	if (arb_dev_freq->nb == nb) {
		arb_dev_freq->nb = NULL;
	} else {
		dev_err(kbdev->dev, "%s - notifier did not match\n",
			__func__);
	}
}

struct kbase_clk_rate_trace_op_conf arb_clk_rate_trace_ops = {
	.get_gpu_clk_rate = get_arb_gpu_clk_rate,
	.enumerate_gpu_clk = enumerate_arb_gpu_clk,
	.gpu_clk_notifier_register = arb_gpu_clk_notifier_register,
	.gpu_clk_notifier_unregister = arb_gpu_clk_notifier_unregister
};

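/* These ops are exported so that the clock rate trace manager can
 * enumerate and track the arbiter-reported GPU clock (assumption: wired
 * up wherever kbase selects its kbase_clk_rate_trace_op_conf).
 */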