1 /*
2 *
3 * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
13 *
14 */
15
16 #ifndef KBASE_H_
17 #define KBASE_H_
18
19 #include <mali_malisw.h>
20
21 #include <mali_kbase_debug.h>
22
23 #include <asm/page.h>
24
25 #include <linux/atomic.h>
26 #include <linux/highmem.h>
27 #include <linux/hrtimer.h>
28 #include <linux/ktime.h>
29 #include <linux/list.h>
30 #include <linux/mm_types.h>
31 #include <linux/mutex.h>
32 #include <linux/rwsem.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/vmalloc.h>
37 #include <linux/wait.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched/task_stack.h>
40
41 #include "mali_base_kernel.h"
42 #include <mali_kbase_uku.h>
43 #include <mali_kbase_linux.h>
44
45 /*
46 * Include mali_kbase_defs.h first as this provides types needed by other local
47 * header files.
48 */
49 #include "mali_kbase_defs.h"
50 #include "mali_kbase_context.h"
51 #include "mali_kbase_strings.h"
52 #include "mali_kbase_mem_lowlevel.h"
53 #include "mali_kbase_trace_timeline.h"
54 #include "mali_kbase_js.h"
55 #include "mali_kbase_mem.h"
56 #include "mali_kbase_utility.h"
57 #include "mali_kbase_gpu_memory_debugfs.h"
58 #include "mali_kbase_mem_profile_debugfs.h"
59 #include "mali_kbase_debug_job_fault.h"
60 #include "mali_kbase_gpuprops.h"
61 #include "mali_kbase_jm.h"
62 #include "ipa/mali_kbase_ipa.h"
63
64 #ifdef CONFIG_GPU_TRACEPOINTS
65 #include <trace/events/gpu.h>
66 #endif
67 /**
68 * @page page_base_kernel_main Kernel-side Base (KBase) APIs
69 */
70
71 /**
72 * @defgroup base_kbase_api Kernel-side Base (KBase) APIs
73 */
74
75 struct kbase_device *kbase_device_alloc(void);
76 /*
77 * note: configuration attributes member of kbdev needs to have
78 * been setup before calling kbase_device_init
79 */
80
81 /*
82 * API to acquire device list semaphore and return pointer
83 * to the device list head
84 */
85 const struct list_head *kbase_dev_list_get(void);
86 /* API to release the device list semaphore */
87 void kbase_dev_list_put(const struct list_head *dev_list);
88
89 int kbase_device_init(struct kbase_device *const kbdev);
90 void kbase_device_term(struct kbase_device *kbdev);
91 void kbase_device_free(struct kbase_device *kbdev);
92 int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
93
94 /* Needed for gator integration and for reporting vsync information */
95 struct kbase_device *kbase_find_device(int minor);
96 void kbase_release_device(struct kbase_device *kbdev);
97
98 void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value);
99
100 struct kbase_context *kbase_create_context(struct kbase_device *kbdev, bool is_compat);
101 void kbase_destroy_context(struct kbase_context *kctx);
102
103 int kbase_jd_init(struct kbase_context *kctx);
104 void kbase_jd_exit(struct kbase_context *kctx);
105
106 /**
107 * kbase_jd_submit - Submit atoms to the job dispatcher
108 *
109 * @kctx: The kbase context to submit to
110 * @user_addr: The address in user space of the struct base_jd_atom_v2 array
111 * @nr_atoms: The number of atoms in the array
112 * @stride: sizeof(struct base_jd_atom_v2)
113 * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
114 *
115 * Return: 0 on success or error code
116 */
117 int kbase_jd_submit(struct kbase_context *kctx, void __user *user_addr, u32 nr_atoms, u32 stride, bool uk6_atom);
118
119 /**
120 * kbase_jd_done_worker - Handle a job completion
121 * @data: a &struct work_struct
122 *
123 * This function requeues the job from the runpool (if it was soft-stopped or
124 * removed from NEXT registers).
125 *
126 * Removes it from the system if it finished/failed/was cancelled.
127 *
128 * Resolves dependencies to add dependent jobs to the context, potentially
129 * starting them if necessary (which may add more references to the context)
130 *
131 * Releases the reference to the context from the no-longer-running job.
132 *
133 * Handles retrying submission outside of IRQ context if it failed from within
134 * IRQ context.
135 */
136 void kbase_jd_done_worker(struct work_struct *data);
137
138 void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
139 kbasep_js_atom_done_code done_code);
140 void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
141 void kbase_jd_zap_context(struct kbase_context *kctx);
142 bool jd_done_nolock(struct kbase_jd_atom *katom, struct list_head *completed_jobs_ctx);
143 void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
144 bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *user_atom, struct kbase_jd_atom *katom);
145 void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);
146
147 void kbase_job_done(struct kbase_device *kbdev, u32 done);
148
149 /**
150 * kbase_job_slot_ctx_priority_check_locked(): - Check for lower priority atoms
151 * and soft stop them
152 * @kctx: Pointer to context to check.
153 * @katom: Pointer to priority atom.
154 *
155 * Atoms from @kctx on the same job slot as @katom, which have lower priority
156 * than @katom will be soft stopped and put back in the queue, so that atoms
157 * with higher priority can run.
158 *
159 * The hwaccess_lock must be held when calling this function.
160 */
161 void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx, struct kbase_jd_atom *katom);
162
163 void kbase_job_slot_softstop(struct kbase_device *kbdev, int js, struct kbase_jd_atom *target_katom);
164 void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js, struct kbase_jd_atom *target_katom,
165 u32 sw_flags);
166 void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, struct kbase_jd_atom *target_katom);
167 void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action, base_jd_core_req core_reqs,
168 struct kbase_jd_atom *target_katom);
169 void kbase_job_check_leave_disjoint(struct kbase_device *kbdev, struct kbase_jd_atom *target_katom);
170
171 void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
172 int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
173 int kbase_event_pending(struct kbase_context *ctx);
174 int kbase_event_init(struct kbase_context *kctx);
175 void kbase_event_close(struct kbase_context *kctx);
176 void kbase_event_cleanup(struct kbase_context *kctx);
177 void kbase_event_wakeup(struct kbase_context *kctx);
178
179 int kbase_process_soft_job(struct kbase_jd_atom *katom);
180 int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
181 void kbase_finish_soft_job(struct kbase_jd_atom *katom);
182 void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
183 void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
184 void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
185 #if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
186 void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
187 #endif
188 int kbase_soft_event_update(struct kbase_context *kctx, u64 event, unsigned char new_status);
189
190 bool kbase_replay_process(struct kbase_jd_atom *katom);
191
192 void kbasep_soft_job_timeout_worker(struct timer_list *t);
193 void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
194
195 /* api used internally for register access. Contains validation and tracing */
196 void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset,
197 u32 reg_value);
198 int kbase_device_trace_buffer_install(struct kbase_context *kctx, u32 *tb, size_t size);
199 void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx);
200
201 /* api to be ported per OS, only need to do the raw register access */
202 void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value);
203 u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset);
204
205 void kbasep_as_do_poke(struct work_struct *work);
206
207 /** Returns the name associated with a Mali exception code
208 *
209 * This function is called from the interrupt handler when a GPU fault occurs.
210 * It reports the details of the fault using KBASE_DEBUG_PRINT_WARN.
211 *
212 * @param[in] kbdev The kbase device that the GPU fault occurred from.
213 * @param[in] exception_code exception code
214 * @return name associated with the exception code
215 */
216 const char *kbase_exception_name(struct kbase_device *kbdev, u32 exception_code);
217
218 /**
219 * Check whether a system suspend is in progress, or has already been suspended
220 *
221 * The caller should ensure that either kbdev->pm.active_count_lock is held, or
222 * a dmb was executed recently (to ensure the value is most
223 * up-to-date). However, without a lock the value could change afterwards.
224 *
225 * @return false if a suspend is not in progress
226 * @return !=false otherwise
227 */
kbase_pm_is_suspending(struct kbase_device * kbdev)228 static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
229 {
230 return kbdev->pm.suspending;
231 }
232
233 /**
234 * Return the atom's ID, as was originally supplied by userspace in
235 * base_jd_atom_v2::atom_number
236 */
kbase_jd_atom_id(struct kbase_context * kctx,struct kbase_jd_atom * katom)237 static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
238 {
239 int result;
240
241 KBASE_DEBUG_ASSERT(kctx);
242 KBASE_DEBUG_ASSERT(katom);
243 KBASE_DEBUG_ASSERT(katom->kctx == kctx);
244
245 result = katom - &kctx->jctx.atoms[0];
246 KBASE_DEBUG_ASSERT(result >= 0 && result <= BASE_JD_ATOM_COUNT);
247 return result;
248 }
249
250 /**
251 * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
252 * @kctx: Context pointer
253 * @id: ID of atom to retrieve
254 *
255 * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
256 */
kbase_jd_atom_from_id(struct kbase_context * kctx,int id)257 static inline struct kbase_jd_atom *kbase_jd_atom_from_id(struct kbase_context *kctx, int id)
258 {
259 return &kctx->jctx.atoms[id];
260 }
261
262 /**
263 * Initialize the disjoint state
264 *
265 * The disjoint event count and state are both set to zero.
266 *
267 * Disjoint functions usage
268 *
269 * The disjoint event count should be incremented whenever a disjoint event occurs.
270 *
271 * There are several cases which are regarded as disjoint behavior. Rather than just increment
272 * the counter during disjoint events we also increment the counter when jobs may be affected
273 * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
274 *
275 * Disjoint state is entered during GPU reset and for the entire time that an atom is replaying
276 * (as part of the replay workaround). Increasing the disjoint state also increases the count of
277 * disjoint events.
278 *
279 * The disjoint state is then used to increase the count of disjoint events during job submission
280 * and job completion. Any atom submitted or completed while the disjoint state is greater than
281 * zero is regarded as a disjoint event.
282 *
283 * The disjoint event counter is also incremented immediately whenever a job is soft stopped
284 * and during context creation.
285 *
286 * @param kbdev The kbase device
287 */
288 void kbase_disjoint_init(struct kbase_device *kbdev);
289
290 /**
291 * Increase the count of disjoint events
292 * called when a disjoint event has happened
293 *
294 * @param kbdev The kbase device
295 */
296 void kbase_disjoint_event(struct kbase_device *kbdev);
297
298 /**
299 * Increase the count of disjoint events only if the GPU is in a disjoint state
300 *
301 * This should be called when something happens which could be disjoint if the GPU
302 * is in a disjoint state. The state refcount keeps track of this.
303 *
304 * @param kbdev The kbase device
305 */
306 void kbase_disjoint_event_potential(struct kbase_device *kbdev);
307
308 /**
309 * Returns the count of disjoint events
310 *
311 * @param kbdev The kbase device
312 * @return the count of disjoint events
313 */
314 u32 kbase_disjoint_event_get(struct kbase_device *kbdev);
315
316 /**
317 * Increment the refcount state indicating that the GPU is in a disjoint state.
318 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 * Once the disjoint state has completed, @ref kbase_disjoint_state_down
 * should eventually be called.
322 *
323 * @param kbdev The kbase device
324 */
325 void kbase_disjoint_state_up(struct kbase_device *kbdev);
326
327 /**
328 * Decrement the refcount state
329 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
331 *
332 * Called after @ref kbase_disjoint_state_up once the disjoint state is over
333 *
334 * @param kbdev The kbase device
335 */
336 void kbase_disjoint_state_down(struct kbase_device *kbdev);
337
338 /**
339 * If a job is soft stopped and the number of contexts is >= this value
340 * it is reported as a disjoint event
341 */
342 #define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2
343
344 #if !defined(UINT64_MAX)
345 #define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
346 #endif
347
348 #if KBASE_TRACE_ENABLE
349 void kbasep_trace_debugfs_init(struct kbase_device *kbdev);
350
351 #ifndef CONFIG_MALI_SYSTEM_TRACE
352 /** Add trace values about a job-slot
353 *
354 * @note Any functions called through this macro will still be evaluated in
355 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
356 * functions called to get the parameters supplied to this macro must:
357 * - be static or static inline
358 * - must just return 0 and have no other statements present in the body.
359 */
360 #define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
361 kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0)
362
363 /** Add trace values about a job-slot, with info
364 *
365 * @note Any functions called through this macro will still be evaluated in
366 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
367 * functions called to get the parameters supplied to this macro must:
368 * - be static or static inline
369 * - must just return 0 and have no other statements present in the body.
370 */
371 #define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
372 kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, \
373 info_val)
374
375 /** Add trace values about a ctx refcount
376 *
377 * @note Any functions called through this macro will still be evaluated in
378 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
379 * functions called to get the parameters supplied to this macro must:
380 * - be static or static inline
381 * - must just return 0 and have no other statements present in the body.
382 */
383 #define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
384 kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0)
385 /** Add trace values about a ctx refcount, and info
386 *
387 * @note Any functions called through this macro will still be evaluated in
388 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
389 * functions called to get the parameters supplied to this macro must:
390 * - be static or static inline
391 * - must just return 0 and have no other statements present in the body.
392 */
393 #define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
394 kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, \
395 info_val)
396
397 /** Add trace values (no slot or refcount)
398 *
399 * @note Any functions called through this macro will still be evaluated in
400 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when KBASE_TRACE_ENABLE == 0 any
401 * functions called to get the parameters supplied to this macro must:
402 * - be static or static inline
403 * - must just return 0 and have no other statements present in the body.
404 */
405 #define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \
406 kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, 0, 0, 0, info_val)
407
408 /** Clear the trace */
409 #define KBASE_TRACE_CLEAR(kbdev) kbasep_trace_clear(kbdev)
410
411 /** Dump the slot trace */
412 #define KBASE_TRACE_DUMP(kbdev) kbasep_trace_dump(kbdev)
413
414 /** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
415 void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom,
416 u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
417 /** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
418 void kbasep_trace_clear(struct kbase_device *kbdev);
419 #else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
420 /* Dispatch kbase trace events as system trace events */
421 #include <mali_linux_kbase_trace.h>
422 #define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) trace_mali_##code(jobslot, 0)
423
424 #define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
425 trace_mali_##code(jobslot, info_val)
426
427 #define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) trace_mali_##code(refcount, 0)
428
429 #define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
430 trace_mali_##code(refcount, info_val)
431
432 #define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) trace_mali_##code(gpu_addr, info_val)
433
434 #define KBASE_TRACE_CLEAR(kbdev) \
435 do { \
436 CSTD_UNUSED(kbdev); \
437 CSTD_NOP(0); \
438 } while (0)
439 #define KBASE_TRACE_DUMP(kbdev) \
440 do { \
441 CSTD_UNUSED(kbdev); \
442 CSTD_NOP(0); \
443 } while (0)
444
445 #endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
446 #else
447 #define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
448 do { \
449 CSTD_UNUSED(kbdev); \
450 CSTD_NOP(code); \
451 CSTD_UNUSED(ctx); \
452 CSTD_UNUSED(katom); \
453 CSTD_UNUSED(gpu_addr); \
454 CSTD_UNUSED(jobslot); \
455 } while (0)
456
457 #define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
458 do { \
459 CSTD_UNUSED(kbdev); \
460 CSTD_NOP(code); \
461 CSTD_UNUSED(ctx); \
462 CSTD_UNUSED(katom); \
463 CSTD_UNUSED(gpu_addr); \
464 CSTD_UNUSED(jobslot); \
465 CSTD_UNUSED(info_val); \
466 CSTD_NOP(0); \
467 } while (0)
468
469 #define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
470 do { \
471 CSTD_UNUSED(kbdev); \
472 CSTD_NOP(code); \
473 CSTD_UNUSED(ctx); \
474 CSTD_UNUSED(katom); \
475 CSTD_UNUSED(gpu_addr); \
476 CSTD_UNUSED(refcount); \
477 CSTD_NOP(0); \
478 } while (0)
479
480 #define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
481 do { \
482 CSTD_UNUSED(kbdev); \
483 CSTD_NOP(code); \
484 CSTD_UNUSED(ctx); \
485 CSTD_UNUSED(katom); \
486 CSTD_UNUSED(gpu_addr); \
487 CSTD_UNUSED(info_val); \
488 CSTD_NOP(0); \
489 } while (0)
490
491 #define KBASE_TRACE_ADD(kbdev, code, subcode, ctx, katom, val) \
492 do { \
493 CSTD_UNUSED(kbdev); \
494 CSTD_NOP(code); \
495 CSTD_UNUSED(subcode); \
496 CSTD_UNUSED(ctx); \
497 CSTD_UNUSED(katom); \
498 CSTD_UNUSED(val); \
499 CSTD_NOP(0); \
500 } while (0)
501
502 #define KBASE_TRACE_CLEAR(kbdev) \
503 do { \
504 CSTD_UNUSED(kbdev); \
505 CSTD_NOP(0); \
506 } while (0)
507 #define KBASE_TRACE_DUMP(kbdev) \
508 do { \
509 CSTD_UNUSED(kbdev); \
510 CSTD_NOP(0); \
511 } while (0)
512 #endif /* KBASE_TRACE_ENABLE */
513 /** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
514 void kbasep_trace_dump(struct kbase_device *kbdev);
515
516 #ifdef CONFIG_MALI_DEBUG
517 /**
518 * kbase_set_driver_inactive - Force driver to go inactive
519 * @kbdev: Device pointer
520 * @inactive: true if driver should go inactive, false otherwise
521 *
522 * Forcing the driver inactive will cause all future IOCTLs to wait until the
523 * driver is made active again. This is intended solely for the use of tests
524 * which require that no jobs are running while the test executes.
525 */
526 void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive);
527 #endif /* CONFIG_MALI_DEBUG */
528
529 #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)
530
531 /* kbase_io_history_init - initialize data struct for register access history
532 *
533 * @kbdev The register history to initialize
534 * @n The number of register accesses that the buffer could hold
535 *
536 * @return 0 if successfully initialized, failure otherwise
537 */
538 int kbase_io_history_init(struct kbase_io_history *h, u16 n);
539
540 /* kbase_io_history_term - uninit all resources for the register access history
541 *
542 * @h The register history to terminate
543 */
544 void kbase_io_history_term(struct kbase_io_history *h);
545
546 /* kbase_io_history_dump - print the register history to the kernel ring buffer
547 *
548 * @kbdev Pointer to kbase_device containing the register history to dump
549 */
550 void kbase_io_history_dump(struct kbase_device *kbdev);
551
552 /**
553 * kbase_io_history_resize - resize the register access history buffer.
554 *
555 * @h: Pointer to a valid register history to resize
556 * @new_size: Number of accesses the buffer could hold
557 *
558 * A successful resize will clear all recent register accesses.
559 * If resizing fails for any reason (e.g., could not allocate memory, invalid
560 * buffer size) then the original buffer will be kept intact.
561 *
562 * @return 0 if the buffer was resized, failure otherwise
563 */
564 int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size);
565
566 #else /* CONFIG_DEBUG_FS */
567
568 #define kbase_io_history_init(...) ((int)0)
569
570 #define kbase_io_history_term CSTD_NOP
571
572 #define kbase_io_history_dump CSTD_NOP
573
574 #define kbase_io_history_resize CSTD_NOP
575
576 #endif /* CONFIG_DEBUG_FS */
577
578 #endif
579