/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2018-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_CSF_H_
#define _KBASE_CSF_H_

#include "mali_kbase_csf_kcpu.h"
#include "mali_kbase_csf_scheduler.h"
#include "mali_kbase_csf_firmware.h"
#include "mali_kbase_csf_protected_memory.h"
#include "mali_kbase_hwaccess_time.h"

/* Indicate invalid CS h/w interface */
#define KBASEP_IF_NR_INVALID ((s8)-1)

/* Indicate invalid CSG number for a GPU command queue group */
#define KBASEP_CSG_NR_INVALID ((s8)-1)

/* Indicate invalid user doorbell number for a GPU command queue */
#define KBASEP_USER_DB_NR_INVALID ((s8)-1)

#define FIRMWARE_PING_INTERVAL_MS (8000) /* 8 seconds */

#define FIRMWARE_IDLE_HYSTERESIS_TIME_MS (10) /* Default 10 milliseconds */

/* Idle hysteresis time can be scaled down when GPU sleep feature is used */
#define FIRMWARE_IDLE_HYSTERESIS_GPU_SLEEP_SCALER (5)
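
/*
 * Illustrative sketch (not part of the driver API; "gpu_sleep_allowed" is a
 * hypothetical flag): the effective idle hysteresis time is assumed to be
 * the default scaled down by the factor above when GPU sleep is in use:
 *
 *   u32 hysteresis_ms = FIRMWARE_IDLE_HYSTERESIS_TIME_MS;
 *   if (gpu_sleep_allowed)
 *       hysteresis_ms /= FIRMWARE_IDLE_HYSTERESIS_GPU_SLEEP_SCALER;
 *
 * i.e. 10 ms / 5 = 2 ms of idle hysteresis under GPU sleep.
 */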

/**
 * kbase_csf_ctx_init - Initialize the CSF interface for a GPU address space.
 *
 * @kctx: Pointer to the kbase context which is being initialized.
 *
 * Return: 0 if successful or a negative error code on failure.
 */
int kbase_csf_ctx_init(struct kbase_context *kctx);

/**
 * kbase_csf_ctx_handle_fault - Terminate queue groups & notify fault upon
 *                              GPU bus fault, MMU page fault or similar.
 *
 * @kctx:  Pointer to the faulty kbase context.
 * @fault: Pointer to the fault.
 *
 * This function terminates all GPU command queue groups in the context and
 * notifies the event notification thread of the fault.
 */
void kbase_csf_ctx_handle_fault(struct kbase_context *kctx,
				struct kbase_fault *fault);

/**
 * kbase_csf_ctx_term - Terminate the CSF interface for a GPU address space.
 *
 * @kctx: Pointer to the kbase context which is being terminated.
 *
 * This function terminates any remaining CSGs and CSs which weren't destroyed
 * before context termination.
 */
void kbase_csf_ctx_term(struct kbase_context *kctx);
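
/*
 * Usage sketch (hypothetical caller; not code from the driver):
 * kbase_csf_ctx_init() and kbase_csf_ctx_term() are expected to bracket the
 * CSF lifetime of a context:
 *
 *   int err = kbase_csf_ctx_init(kctx);
 *   if (err)
 *       return err;
 *   ... use the context: create queue groups, register queues, ...
 *   kbase_csf_ctx_term(kctx);
 */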

/**
 * kbase_csf_queue_register - Register a GPU command queue.
 *
 * @kctx: Pointer to the kbase context within which the
 *        queue is to be registered.
 * @reg:  Pointer to the structure which contains details of the
 *        queue to be registered within the provided
 *        context.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_queue_register(struct kbase_context *kctx,
			     struct kbase_ioctl_cs_queue_register *reg);

/**
 * kbase_csf_queue_register_ex - Register a GPU command queue with
 *                               extended format.
 *
 * @kctx: Pointer to the kbase context within which the
 *        queue is to be registered.
 * @reg:  Pointer to the structure which contains details of the
 *        queue to be registered within the provided
 *        context, together with the extended parameter fields
 *        for supporting the CS trace command.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_queue_register_ex(struct kbase_context *kctx,
				struct kbase_ioctl_cs_queue_register_ex *reg);

/**
 * kbase_csf_queue_terminate - Terminate a GPU command queue.
 *
 * @kctx: Pointer to the kbase context within which the
 *        queue is to be terminated.
 * @term: Pointer to the structure which identifies which
 *        queue is to be terminated.
 */
void kbase_csf_queue_terminate(struct kbase_context *kctx,
			       struct kbase_ioctl_cs_queue_terminate *term);

/**
 * kbase_csf_alloc_command_stream_user_pages - Allocate resources for a
 *                                             GPU command queue.
 *
 * @kctx:  Pointer to the kbase context within which the resources
 *         for the queue are being allocated.
 * @queue: Pointer to the queue for which to allocate resources.
 *
 * This function allocates a pair of User mode input/output pages for a
 * GPU command queue and maps them in the shared interface segment of MCU
 * firmware address space. It also reserves a hardware doorbell page for the
 * queue.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_alloc_command_stream_user_pages(struct kbase_context *kctx,
					      struct kbase_queue *queue);

/**
 * kbase_csf_queue_bind - Bind a GPU command queue to a queue group.
 *
 * @kctx: The kbase context.
 * @bind: Pointer to the union which specifies a queue group and a
 *        queue to be bound to that group.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_queue_bind(struct kbase_context *kctx,
			 union kbase_ioctl_cs_queue_bind *bind);

/**
 * kbase_csf_queue_unbind - Unbind a GPU command queue from the queue group
 *                          to which it has been bound and free any
 *                          resources allocated for the queue.
 *
 * @queue: Pointer to the queue to be unbound.
 */
void kbase_csf_queue_unbind(struct kbase_queue *queue);

/**
 * kbase_csf_queue_unbind_stopped - Unbind a GPU command queue in the case
 *                                  where it was never started.
 * @queue: Pointer to the queue to be unbound.
 *
 * Variant of kbase_csf_queue_unbind() for use on error paths for cleaning up
 * queues that failed to fully bind.
 */
void kbase_csf_queue_unbind_stopped(struct kbase_queue *queue);

/**
 * kbase_csf_queue_kick - Schedule a GPU command queue on the firmware.
 *
 * @kctx: The kbase context.
 * @kick: Pointer to the struct which specifies the queue
 *        that needs to be scheduled.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_queue_kick(struct kbase_context *kctx,
			 struct kbase_ioctl_cs_queue_kick *kick);
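
/*
 * Usage sketch of the expected queue lifecycle (hypothetical caller; the
 * ioctl argument structures are assumed to be filled in by the ioctl layer,
 * error handling omitted):
 *
 *   kbase_csf_queue_register(kctx, &reg);    register the queue
 *   kbase_csf_queue_bind(kctx, &bind);       bind it to a created group
 *   kbase_csf_queue_kick(kctx, &kick);       ask firmware to schedule it
 *   ...
 *   kbase_csf_queue_terminate(kctx, &term);  tear the queue down
 */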

/**
 * kbase_csf_queue_group_handle_is_valid - Find if the given queue group handle
 *                                         is valid.
 *
 * @kctx:         The kbase context under which the queue group exists.
 * @group_handle: Handle for the group which uniquely identifies it within
 *                the context with which it was created.
 *
 * This function is used to determine if the queue group handle is valid.
 *
 * Return: 0 if the group handle is valid, or a negative error code on failure.
 */
int kbase_csf_queue_group_handle_is_valid(struct kbase_context *kctx,
					  u8 group_handle);

/**
 * kbase_csf_queue_group_create - Create a GPU command queue group.
 *
 * @kctx:   Pointer to the kbase context within which the
 *          queue group is to be created.
 * @create: Pointer to the structure which contains details of the
 *          queue group which is to be created within the
 *          provided kbase context.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_queue_group_create(struct kbase_context *kctx,
				 union kbase_ioctl_cs_queue_group_create *create);

/**
 * kbase_csf_queue_group_terminate - Terminate a GPU command queue group.
 *
 * @kctx:         Pointer to the kbase context within which the
 *                queue group is to be terminated.
 * @group_handle: Handle for the group which uniquely identifies it within
 *                the context with which it was created.
 */
void kbase_csf_queue_group_terminate(struct kbase_context *kctx,
				     u8 group_handle);
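
/*
 * Sketch (hypothetical caller; the in/out union layout is assumed): a group
 * is created from ioctl parameters and later terminated by the handle that
 * the create call returns:
 *
 *   union kbase_ioctl_cs_queue_group_create create = { ... };
 *   if (!kbase_csf_queue_group_create(kctx, &create))
 *       group_handle = create.out.group_handle;
 *   ...
 *   kbase_csf_queue_group_terminate(kctx, group_handle);
 */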

/**
 * kbase_csf_term_descheduled_queue_group - Terminate a GPU command queue
 *                                          group that is not operational
 *                                          inside the scheduler.
 *
 * @group: Pointer to the queue group to be terminated. The function assumes
 *         that the caller is sure that the given group is not operational
 *         inside the scheduler. If in doubt, use its alternative:
 *         @ref kbase_csf_queue_group_terminate().
 */
void kbase_csf_term_descheduled_queue_group(struct kbase_queue_group *group);

/**
 * kbase_csf_queue_group_suspend - Suspend a GPU command queue group.
 *
 * @kctx:         The kbase context for which the queue group is to be
 *                suspended.
 * @sus_buf:      Pointer to the structure which contains details of the
 *                user buffer and its kernel pinned pages.
 * @group_handle: Handle for the group which uniquely identifies it within
 *                the context within which it was created.
 *
 * This function is used to suspend a queue group and copy the suspend buffer.
 *
 * Return: 0 on success, or a negative value if it failed to suspend the
 * queue group and copy the suspend buffer contents.
 */
int kbase_csf_queue_group_suspend(struct kbase_context *kctx,
				  struct kbase_suspend_copy_buffer *sus_buf,
				  u8 group_handle);

/**
 * kbase_csf_add_group_fatal_error - Report a fatal group error to userspace.
 *
 * @group:       GPU command queue group.
 * @err_payload: Error payload to report.
 */
void kbase_csf_add_group_fatal_error(
	struct kbase_queue_group *const group,
	struct base_gpu_queue_group_error const *const err_payload);

/**
 * kbase_csf_interrupt - Handle interrupts issued by CSF firmware.
 *
 * @kbdev: The kbase device to handle an IRQ for.
 * @val:   The value of the JOB IRQ status register which triggered the
 *         interrupt.
 */
void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val);

/**
 * kbase_csf_doorbell_mapping_init - Initialize the fields that facilitate
 *                                   the update of the userspace mapping of
 *                                   the HW doorbell page.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * The function creates a file and allocates a dummy page to facilitate the
 * update of the userspace mapping to point to the dummy page instead of the
 * real HW doorbell page after the suspend of a queue group.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_doorbell_mapping_init(struct kbase_device *kbdev);

/**
 * kbase_csf_doorbell_mapping_term - Free the dummy page & close the file used
 *                                   to update the userspace mapping of the HW
 *                                   doorbell page.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
void kbase_csf_doorbell_mapping_term(struct kbase_device *kbdev);

/**
 * kbase_csf_setup_dummy_user_reg_page - Set up the dummy page that is accessed
 *                                       instead of the User register page after
 *                                       the GPU power down.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * The function allocates a dummy page which is used to replace the User
 * register page in the userspace mapping after the power down of the GPU.
 * On the power up of the GPU, the mapping is updated to point to the real
 * User register page. The mapping is used to allow access to the LATEST_FLUSH
 * register from userspace.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_setup_dummy_user_reg_page(struct kbase_device *kbdev);

/**
 * kbase_csf_free_dummy_user_reg_page - Free the dummy page that was used
 *                                      to replace the User register page.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
void kbase_csf_free_dummy_user_reg_page(struct kbase_device *kbdev);

/**
 * kbase_csf_ring_csg_doorbell - Ring the doorbell for a CSG interface.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @slot:  Index of the CSG interface for ringing the doorbell.
 *
 * The function kicks a notification on the CSG interface to firmware.
 */
void kbase_csf_ring_csg_doorbell(struct kbase_device *kbdev, int slot);

/**
 * kbase_csf_ring_csg_slots_doorbell - Ring the doorbell for a set of CSG
 *                                     interfaces.
 *
 * @kbdev:       Instance of a GPU platform device that implements a CSF
 *               interface.
 * @slot_bitmap: Bitmap for the given slots, slot-0 on bit-0, etc.
 *
 * The function kicks a notification on a set of CSG interfaces to firmware.
 */
void kbase_csf_ring_csg_slots_doorbell(struct kbase_device *kbdev,
				       u32 slot_bitmap);
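
/*
 * Sketch: ringing the doorbells for a set of CSG slots, e.g. slots 0 and 3
 * (the slot numbers are purely illustrative):
 *
 *   kbase_csf_ring_csg_slots_doorbell(kbdev, BIT(0) | BIT(3));
 */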

/**
 * kbase_csf_ring_cs_kernel_doorbell - Ring the kernel doorbell for a CSI
 *                                     assigned to a GPU queue.
 *
 * @kbdev:             Instance of a GPU platform device that implements a CSF
 *                     interface.
 * @csi_index:         ID of the CSI assigned to the GPU queue.
 * @csg_nr:            Index of the CSG slot assigned to the queue
 *                     group to which the GPU queue is bound.
 * @ring_csg_doorbell: Flag to indicate if the CSG doorbell needs to be rung
 *                     after updating CSG_DB_REQ. If this flag is false, the
 *                     doorbell interrupt will not be sent to the FW. The flag
 *                     is supposed to be false only when the input page for
 *                     the bound GPU queues is programmed at the time of
 *                     starting/resuming the group on a CSG slot.
 *
 * The function sends a doorbell interrupt notification to the firmware for
 * a CSI assigned to a GPU queue.
 */
void kbase_csf_ring_cs_kernel_doorbell(struct kbase_device *kbdev,
				       int csi_index, int csg_nr,
				       bool ring_csg_doorbell);

/**
 * kbase_csf_ring_cs_user_doorbell - Ring the user doorbell allocated for a
 *                                   queue.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @queue: Pointer to the queue for ringing the doorbell.
 *
 * The function kicks a notification to the firmware on the doorbell assigned
 * to the queue.
 */
void kbase_csf_ring_cs_user_doorbell(struct kbase_device *kbdev,
				     struct kbase_queue *queue);

/**
 * kbase_csf_active_queue_groups_reset - Reset the state of all active GPU
 *                                       command queue groups associated with
 *                                       the context.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @kctx:  The kbase context.
 *
 * This function will iterate through all the active/scheduled GPU command
 * queue groups associated with the context, deschedule them and mark them as
 * terminated (which will then lead to the unbinding of all the queues bound
 * to them); no further work is allowed to execute for them.
 *
 * This is similar to the action taken in response to an unexpected OoM event.
 */
void kbase_csf_active_queue_groups_reset(struct kbase_device *kbdev,
					 struct kbase_context *kctx);

/**
 * kbase_csf_priority_check - Check the priority requested.
 *
 * @kbdev:        Device pointer.
 * @req_priority: Requested priority.
 *
 * This will determine whether the requested priority can be satisfied.
 *
 * Return: The same or a lower priority than requested.
 */
u8 kbase_csf_priority_check(struct kbase_device *kbdev, u8 req_priority);
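
/*
 * Sketch (hypothetical caller): a requested priority is expected to be
 * clamped through the check before it is applied to a group:
 *
 *   u8 prio = kbase_csf_priority_check(kbdev, req_priority);
 *   (prio is req_priority itself, or a lower priority the device can satisfy)
 */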

extern const u8 kbasep_csf_queue_group_priority_to_relative[BASE_QUEUE_GROUP_PRIORITY_COUNT];
extern const u8 kbasep_csf_relative_to_queue_group_priority[KBASE_QUEUE_GROUP_PRIORITY_COUNT];

/**
 * kbase_csf_priority_relative_to_queue_group_priority - Convert relative to base priority
 *
 * @priority: kbase relative priority
 *
 * This will convert the monotonically increasing relative priority to the
 * fixed base priority list.
 *
 * Return: base_queue_group_priority priority.
 */
static inline u8 kbase_csf_priority_relative_to_queue_group_priority(u8 priority)
{
	/* Apply low priority in case of invalid priority */
	if (priority >= KBASE_QUEUE_GROUP_PRIORITY_COUNT)
		priority = KBASE_QUEUE_GROUP_PRIORITY_LOW;
	return kbasep_csf_relative_to_queue_group_priority[priority];
}

/**
 * kbase_csf_priority_queue_group_priority_to_relative - Convert base priority to relative
 *
 * @priority: base_queue_group_priority priority
 *
 * This will convert the fixed base priority list to a monotonically
 * increasing relative priority.
 *
 * Return: kbase relative priority.
 */
static inline u8 kbase_csf_priority_queue_group_priority_to_relative(u8 priority)
{
	/* Apply low priority in case of invalid priority */
	if (priority >= BASE_QUEUE_GROUP_PRIORITY_COUNT)
		priority = BASE_QUEUE_GROUP_PRIORITY_LOW;
	return kbasep_csf_queue_group_priority_to_relative[priority];
}
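
/*
 * Sketch: for a valid input the two helpers above are assumed to be inverses
 * of each other, e.g.:
 *
 *   u8 rel = kbase_csf_priority_queue_group_priority_to_relative(
 *		BASE_QUEUE_GROUP_PRIORITY_LOW);
 *   u8 base = kbase_csf_priority_relative_to_queue_group_priority(rel);
 *   (base == BASE_QUEUE_GROUP_PRIORITY_LOW holds here)
 */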

/**
 * kbase_csf_ktrace_gpu_cycle_cnt - Wrapper to retrieve the GPU cycle counter
 *                                  value for KTrace purposes.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function is just a wrapper to retrieve the GPU cycle counter value, to
 * avoid any overhead on Release builds where KTrace is disabled by default.
 *
 * Return: Snapshot of the GPU cycle count register.
 */
static inline u64 kbase_csf_ktrace_gpu_cycle_cnt(struct kbase_device *kbdev)
{
#if KBASE_KTRACE_ENABLE
	return kbase_backend_get_cycle_cnt(kbdev);
#else
	return 0;
#endif
}
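
/*
 * Sketch: a trace call site can sample the cycle counter unconditionally; on
 * builds with KBASE_KTRACE_ENABLE unset the wrapper compiles to a constant 0
 * and the register read is avoided (the trace macro shown is hypothetical):
 *
 *   u64 cycles = kbase_csf_ktrace_gpu_cycle_cnt(kbdev);
 *   KTRACE_ADD_EVENT(kbdev, cycles);
 */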
#endif /* _KBASE_CSF_H_ */