/*
 *
 * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */



/**
 * @file
 * Base structures shared with the kernel.
 */

#ifndef _BASE_KERNEL_H_
#define _BASE_KERNEL_H_

#ifndef __user
#define __user
#endif

/* Support UK6 IOCTLS */
#define BASE_LEGACY_UK6_SUPPORT 1

/* Support UK7 IOCTLS */
/* NB: To support UK6 we also need to support UK7 */
#define BASE_LEGACY_UK7_SUPPORT 1

/* Support UK8 IOCTLS */
#define BASE_LEGACY_UK8_SUPPORT 1

/* Support UK9 IOCTLS */
#define BASE_LEGACY_UK9_SUPPORT 1

/* Support UK10_2 IOCTLS */
#define BASE_LEGACY_UK10_2_SUPPORT 1

/* Support UK10_4 IOCTLS */
#define BASE_LEGACY_UK10_4_SUPPORT 1

typedef struct base_mem_handle {
	struct {
		u64 handle;
	} basep;
} base_mem_handle;

#include "mali_base_mem_priv.h"
#include "mali_kbase_profiling_gator_api.h"
#include "mali_midg_coherency.h"
#include "mali_kbase_gpu_id.h"

/*
 * Dependency stuff, keep it private for now. May want to expose it if
 * we decide to make the number of semaphores a configurable
 * option.
 */
#define BASE_JD_ATOM_COUNT              512

#define BASEP_JD_SEM_PER_WORD_LOG2      5
#define BASEP_JD_SEM_PER_WORD           (1 << BASEP_JD_SEM_PER_WORD_LOG2)
#define BASEP_JD_SEM_WORD_NR(x)         ((x) >> BASEP_JD_SEM_PER_WORD_LOG2)
#define BASEP_JD_SEM_MASK_IN_WORD(x)    (1 << ((x) & (BASEP_JD_SEM_PER_WORD - 1)))
#define BASEP_JD_SEM_ARRAY_SIZE         BASEP_JD_SEM_WORD_NR(BASE_JD_ATOM_COUNT)

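/* Illustrative sketch (not part of this header, assumptions noted inline):
 * how the BASEP_JD_SEM_* helpers above combine to keep one bit per atom in a
 * bitmap of u32 words. The 'example_sem' array and both helper names are
 * hypothetical, used only for this example.
 */
#if 0 /* documentation-only example */
static u32 example_sem[BASEP_JD_SEM_ARRAY_SIZE];

static inline void example_sem_set(u32 atom_nr)
{
	/* select the 32-bit word, then OR in the bit for this atom */
	example_sem[BASEP_JD_SEM_WORD_NR(atom_nr)] |=
			BASEP_JD_SEM_MASK_IN_WORD(atom_nr);
}

static inline int example_sem_test(u32 atom_nr)
{
	return !!(example_sem[BASEP_JD_SEM_WORD_NR(atom_nr)] &
			BASEP_JD_SEM_MASK_IN_WORD(atom_nr));
}
#endif
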
/* Set/reset values for a software event */
#define BASE_JD_SOFT_EVENT_SET             ((unsigned char)1)
#define BASE_JD_SOFT_EVENT_RESET           ((unsigned char)0)

#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3

#define BASE_MAX_COHERENT_GROUPS 16

#if defined CDBG_ASSERT
#define LOCAL_ASSERT CDBG_ASSERT
#elif defined KBASE_DEBUG_ASSERT
#define LOCAL_ASSERT KBASE_DEBUG_ASSERT
#else
#error assert macro not defined!
#endif

#if defined PAGE_MASK
#define LOCAL_PAGE_LSB ~PAGE_MASK
#else
#include <osu/mali_osu.h>

#if defined OSU_CONFIG_CPU_PAGE_SIZE_LOG2
#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1)
#else
#error Failed to find page size
#endif
#endif

/** 32/64-bit neutral way to represent pointers */
typedef union kbase_pointer {
	void __user *value;	  /**< client should store their pointers here */
	u32 compat_value; /**< 64-bit kernels should fetch value here when handling 32-bit clients */
	u64 sizer;	  /**< Force 64-bit storage for all clients regardless */
} kbase_pointer;

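/* Illustrative sketch (not part of this header): a 64-bit kernel handling a
 * 32-bit client reads 'compat_value', while native clients use 'value'. The
 * 'is_compat' flag is a hypothetical stand-in for however the caller detects
 * a 32-bit client.
 */
#if 0 /* documentation-only example */
static inline void __user *example_unpack_pointer(union kbase_pointer p,
		int is_compat)
{
	if (is_compat)
		return (void __user *)(uintptr_t)p.compat_value;
	return p.value;
}
#endif
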
/**
 * @addtogroup base_user_api User-side Base APIs
 * @{
 */

/**
 * @addtogroup base_user_api_memory User-side Base Memory APIs
 * @{
 */

/**
 * typedef base_mem_alloc_flags - Memory allocation, access/hint flags.
 *
 * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator
 * in order to determine the best cache policy. Some combinations are
 * inherently invalid, e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD, which would
 * describe a region that is write-only on the CPU side yet heavily read by
 * the CPU.
 * Other flags are only meaningful to a particular allocator.
 * More flags can be added to this list, as long as they don't clash
 * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit).
 */
typedef u32 base_mem_alloc_flags;

/* Memory allocation, access/hint flags.
 *
 * See base_mem_alloc_flags.
 */

/* IN */
/* Read access CPU side
 */
#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)

/* Write access CPU side
 */
#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)

/* Read access GPU side
 */
#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)

/* Write access GPU side
 */
#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)

/* Execute allowed on the GPU side
 */
#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)

	/* BASE_MEM_HINT flags have been removed, but their values are reserved
	 * for backwards compatibility with older user-space drivers. The values
	 * can be re-used once support for r5p0 user-space drivers is removed,
	 * presumably in r7p0.
	 *
	 * RESERVED: (1U << 5)
	 * RESERVED: (1U << 6)
	 * RESERVED: (1U << 7)
	 * RESERVED: (1U << 8)
	 */

/* Grow backing store on GPU Page Fault
 */
#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)

/* Page coherence Outer shareable, if available
 */
#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)

/* Page coherence Inner shareable
 */
#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)

/* Should be cached on the CPU
 */
#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)

/* IN/OUT */
/* Must have same VA on both the GPU and the CPU
 */
#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)

/* OUT */
/* Must call mmap to acquire a GPU address for the alloc
 */
#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)

/* IN */
/* Page coherence Outer shareable, required.
 */
#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)

/* Secure memory
 */
#define BASE_MEM_SECURE ((base_mem_alloc_flags)1 << 16)

/* Physical memory backing is not needed
 */
#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)

/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
 * addresses to be the same
 */
#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)

/* Number of bits used as flags for base memory management
 *
 * Must be kept in sync with the base_mem_alloc_flags flags
 */
#define BASE_MEM_FLAGS_NR_BITS 19

/* A mask for all output bits, excluding IN/OUT bits.
 */
#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP

/* A mask for all input bits, including IN/OUT bits.
 */
#define BASE_MEM_FLAGS_INPUT_MASK \
	(((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)

/* A mask for all the flags which are modifiable via the base_mem_set_flags
 * interface.
 */
#define BASE_MEM_FLAGS_MODIFIABLE \
	(BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \
	 BASE_MEM_COHERENT_LOCAL)

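/* Illustrative sketch (not part of this header): composing allocation flags
 * for a buffer the CPU writes and the GPU reads, then checking that only
 * input bits are set before handing them to an allocator. The function name
 * is hypothetical.
 */
#if 0 /* documentation-only example */
static inline base_mem_alloc_flags example_flags(void)
{
	base_mem_alloc_flags flags =
		BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_RD |
		BASE_MEM_COHERENT_LOCAL;

	/* output-only bits (e.g. BASE_MEM_NEED_MMAP) must not be set by the caller */
	LOCAL_ASSERT((flags & ~BASE_MEM_FLAGS_INPUT_MASK) == 0);
	return flags;
}
#endif
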
/**
 * enum base_mem_import_type - Memory types supported by @a base_mem_import
 *
 * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
 * @BASE_MEM_IMPORT_TYPE_UMP: UMP import. Handle type is ump_secure_id.
 * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
 * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
 * base_mem_import_user_buffer
 *
 * Each type defines what the supported handle type is.
 *
 * If any new type is added here, ARM must be contacted
 * to allocate a numeric value for it.
 * Do not just add a new type without synchronizing with ARM,
 * as future releases from ARM might include other new types
 * which could clash with your custom types.
 */
typedef enum base_mem_import_type {
	BASE_MEM_IMPORT_TYPE_INVALID = 0,
	BASE_MEM_IMPORT_TYPE_UMP = 1,
	BASE_MEM_IMPORT_TYPE_UMM = 2,
	BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
} base_mem_import_type;

/**
 * struct base_mem_import_user_buffer - Handle of an imported user buffer
 *
 * @ptr:	kbase_pointer to imported user buffer
 * @length:	length of imported user buffer in bytes
 *
 * This structure is used to represent a handle of an imported user buffer.
 */

struct base_mem_import_user_buffer {
	kbase_pointer ptr;
	u64 length;
};

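/* Illustrative sketch (not part of this header): describing a user buffer
 * for import. 'buf' and 'len' are hypothetical values; a real caller would
 * pass the resulting structure as the handle for a
 * BASE_MEM_IMPORT_TYPE_USER_BUFFER import.
 */
#if 0 /* documentation-only example */
static inline struct base_mem_import_user_buffer example_describe(
		void __user *buf, u64 len)
{
	struct base_mem_import_user_buffer handle;

	handle.ptr.value = buf; /* stored 32/64-bit neutrally via kbase_pointer */
	handle.length = len;    /* length in bytes */
	return handle;
}
#endif
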
/**
 * @brief Invalid memory handle.
 *
 * Return value from functions returning @ref base_mem_handle on error.
 *
 * @warning @ref base_mem_handle_new_invalid must be used instead of this macro
 *          in C++ code or other situations where compound literals cannot be used.
 */
#define BASE_MEM_INVALID_HANDLE ((base_mem_handle) { {BASEP_MEM_INVALID_HANDLE} })

/**
 * @brief Special write-alloc memory handle.
 *
 * A special handle is used to represent a region where a special page is mapped
 * with a write-alloc cache setup, typically used when the write result of the
 * GPU isn't needed, but the GPU must write anyway.
 *
 * @warning @ref base_mem_handle_new_write_alloc must be used instead of this macro
 *          in C++ code or other situations where compound literals cannot be used.
 */
#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE ((base_mem_handle) { {BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE} })

#define BASEP_MEM_INVALID_HANDLE               (0ull  << 12)
#define BASE_MEM_MMU_DUMP_HANDLE               (1ull  << 12)
#define BASE_MEM_TRACE_BUFFER_HANDLE           (2ull  << 12)
#define BASE_MEM_MAP_TRACKING_HANDLE           (3ull  << 12)
#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE     (4ull  << 12)
/* reserved handles ..-64<<PAGE_SHIFT> for future special handles */
#define BASE_MEM_COOKIE_BASE                   (64ul  << 12)
#define BASE_MEM_FIRST_FREE_ADDRESS            ((BITS_PER_LONG << 12) + \
						BASE_MEM_COOKIE_BASE)

/* Mask to detect 4GB boundary alignment */
#define BASE_MEM_MASK_4GB  0xfffff000UL


/* Bit mask of cookies used for memory allocation setup */
#define KBASE_COOKIE_MASK  ~1UL /* bit 0 is reserved */


/**
 * @brief Result codes of changing the size of the backing store allocated to a tmem region
 */
typedef enum base_backing_threshold_status {
	BASE_BACKING_THRESHOLD_OK = 0,			    /**< Resize successful */
	BASE_BACKING_THRESHOLD_ERROR_OOM = -2,		    /**< Increase failed due to an out-of-memory condition */
	BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS = -4 /**< Invalid arguments (not tmem, illegal size request, etc.) */
} base_backing_threshold_status;

/**
 * @addtogroup base_user_api_memory_defered User-side Base Deferred Memory Coherency APIs
 * @{
 */

/**
 * @brief a basic memory operation (sync-set).
 *
 * The content of this structure is private, and should only be used
 * by the accessors.
 */
typedef struct base_syncset {
	struct basep_syncset basep_sset;
} base_syncset;

/** @} end group base_user_api_memory_defered */

/**
 * Handle to represent imported memory object.
 * Simple opaque handle to imported memory, can't be used
 * with anything but base_external_resource_init to bind to an atom.
 */
typedef struct base_import_handle {
	struct {
		u64 handle;
	} basep;
} base_import_handle;

/** @} end group base_user_api_memory */

/**
 * @addtogroup base_user_api_job_dispatch User-side Base Job Dispatcher APIs
 * @{
 */

typedef int platform_fence_type;
#define INVALID_PLATFORM_FENCE ((platform_fence_type)-1)

/**
 * Base stream handle.
 *
 * References an underlying base stream object.
 */
typedef struct base_stream {
	struct {
		int fd;
	} basep;
} base_stream;

/**
 * Base fence handle.
 *
 * References an underlying base fence object.
 */
typedef struct base_fence {
	struct {
		int fd;
		int stream_fd;
	} basep;
} base_fence;

/**
 * @brief Per-job data
 *
 * This structure is used to store per-job data, and is completely unused
 * by the Base driver. It can be used to store things such as a callback
 * function pointer and data used to handle job completion. It is guaranteed
 * to be untouched by the Base driver.
 */
typedef struct base_jd_udata {
	u64 blob[2];	 /**< per-job data array */
} base_jd_udata;

/**
 * @brief Memory aliasing info
 *
 * Describes a memory handle to be aliased.
 * A subset of the handle can be chosen for aliasing, given an offset and a
 * length.
 * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a
 * region where a special page is mapped with a write-alloc cache setup,
 * typically used when the write result of the GPU isn't needed, but the GPU
 * must write anyway.
 *
 * Offset and length are specified in pages.
 * Offset must be within the size of the handle.
 * Offset+length must not overrun the size of the handle.
 *
 * @handle Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
 * @offset Offset within the handle to start aliasing from, in pages.
 *         Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE.
 * @length Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
 *         specifies the number of times the special page is needed.
 */
struct base_mem_aliasing_info {
	base_mem_handle handle;
	u64 offset;
	u64 length;
};

/**
 * struct base_jit_alloc_info - Structure which describes a JIT allocation
 *                              request.
 * @gpu_alloc_addr:             The GPU virtual address to write the JIT
 *                              allocated GPU virtual address to.
 * @va_pages:                   The minimum number of virtual pages required.
 * @commit_pages:               The minimum number of physical pages which
 *                              should back the allocation.
 * @extent:                     Granularity of physical pages to grow the
 *                              allocation by during a fault.
 * @id:                         Unique ID provided by the caller, this is used
 *                              to pair allocation and free requests.
 *                              Zero is not a valid value.
 */
struct base_jit_alloc_info {
	u64 gpu_alloc_addr;
	u64 va_pages;
	u64 commit_pages;
	u64 extent;
	u8 id;
};

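/* Illustrative sketch (not part of this header): requesting a JIT allocation
 * of at least 16 virtual pages with 4 pages committed up front. The jc field
 * of a BASE_JD_REQ_SOFT_JIT_ALLOC atom would point at memory holding this
 * structure; 'result_addr' is a hypothetical GPU address of the output slot.
 */
#if 0 /* documentation-only example */
static inline struct base_jit_alloc_info example_jit_request(u64 result_addr)
{
	struct base_jit_alloc_info info;

	info.gpu_alloc_addr = result_addr; /* where the allocated GPU VA is written */
	info.va_pages = 16;                /* minimum virtual range, in pages */
	info.commit_pages = 4;             /* physical pages committed up front */
	info.extent = 4;                   /* grow granularity on fault, in pages */
	info.id = 1;                       /* caller-chosen, non-zero, unique until freed */
	return info;
}
#endif
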
/**
 * @brief Job dependency type.
 *
 * A flags field is inserted into the atom structure to specify whether a
 * dependency is a data or ordering dependency (by putting it before/after
 * 'core_req' in the structure, it should be possible to add it without
 * changing the structure size).
 * When the flag marks a particular dependency as ordering-only,
 * errors are not propagated across that dependency.
 */
typedef u8 base_jd_dep_type;


#define BASE_JD_DEP_TYPE_INVALID  (0)       /**< Invalid dependency */
#define BASE_JD_DEP_TYPE_DATA     (1U << 0) /**< Data dependency */
#define BASE_JD_DEP_TYPE_ORDER    (1U << 1) /**< Order dependency */

/**
 * @brief Job chain hardware requirements.
 *
 * A job chain must specify what GPU features it needs to allow the
 * driver to schedule the job correctly.  Failing to specify the
 * correct settings can/will cause an early job termination.  Multiple
 * values can be ORed together to specify multiple requirements.
 * Special case is ::BASE_JD_REQ_DEP, which is used to express complex
 * dependencies, and that doesn't execute anything on the hardware.
 */
typedef u32 base_jd_core_req;

/* Requirements that come from the HW */

/**
 * No requirement, dependency only
 */
#define BASE_JD_REQ_DEP ((base_jd_core_req)0)

/**
 * Requires fragment shaders
 */
#define BASE_JD_REQ_FS  ((base_jd_core_req)1 << 0)

/**
 * Requires compute shaders
 * This covers any of the following Midgard Job types:
 * - Vertex Shader Job
 * - Geometry Shader Job
 * - An actual Compute Shader Job
 *
 * Compare this with @ref BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
 * job is specifically just the "Compute Shader" job type, and not the "Vertex
 * Shader" nor the "Geometry Shader" job type.
 */
#define BASE_JD_REQ_CS  ((base_jd_core_req)1 << 1)
#define BASE_JD_REQ_T   ((base_jd_core_req)1 << 2)   /**< Requires tiling */
#define BASE_JD_REQ_CF  ((base_jd_core_req)1 << 3)   /**< Requires cache flushes */
#define BASE_JD_REQ_V   ((base_jd_core_req)1 << 4)   /**< Requires value writeback */

/* SW-only requirements - the HW does not expose these as part of the job slot capabilities */

/* Requires fragment job with AFBC encoding */
#define BASE_JD_REQ_FS_AFBC  ((base_jd_core_req)1 << 13)

/**
 * SW-only requirement: coalesce completion events.
 * If this bit is set then completion of this atom will not cause an event to
 * be sent to userspace, whether successful or not; completion events will be
 * deferred until an atom completes which does not have this bit set.
 *
 * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
 */
#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)

/**
 * SW Only requirement: the job chain requires a coherent core group. We don't
 * mind which coherent core group is used.
 */
#define BASE_JD_REQ_COHERENT_GROUP  ((base_jd_core_req)1 << 6)

/**
 * SW Only requirement: The performance counters should be enabled only when
 * they are needed, to reduce power consumption.
 */

#define BASE_JD_REQ_PERMON               ((base_jd_core_req)1 << 7)

/**
 * SW Only requirement: External resources are referenced by this atom.
 * When external resources are referenced, no syncsets can be bundled with the
 * atom; they should instead be part of NULL jobs inserted into the dependency
 * tree. The first pre_dep object must be configured for the external
 * resources to use; the second pre_dep object can be used to create other
 * dependencies.
 *
 * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE.
 */
#define BASE_JD_REQ_EXTERNAL_RESOURCES   ((base_jd_core_req)1 << 8)

/**
 * SW Only requirement: Software defined job. Jobs with this bit set will not be submitted
 * to the hardware but will cause some action to happen within the driver
 */
#define BASE_JD_REQ_SOFT_JOB        ((base_jd_core_req)1 << 9)

#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME      (BASE_JD_REQ_SOFT_JOB | 0x1)
#define BASE_JD_REQ_SOFT_FENCE_TRIGGER          (BASE_JD_REQ_SOFT_JOB | 0x2)
#define BASE_JD_REQ_SOFT_FENCE_WAIT             (BASE_JD_REQ_SOFT_JOB | 0x3)

/**
 * SW Only requirement: Replay job.
 *
 * If the preceding job fails, the replay job will cause the jobs specified in
 * the list of base_jd_replay_payload pointed to by the jc pointer to be
 * replayed.
 *
 * A replay job will only cause jobs to be replayed up to BASEP_JD_REPLAY_LIMIT
 * times. If a job fails more than BASEP_JD_REPLAY_LIMIT times then the replay
 * job is failed, as well as any following dependencies.
 *
 * The replayed jobs will require a number of atom IDs. If there are not enough
 * free atom IDs then the replay job will fail.
 *
 * If the preceding job does not fail, then the replay job is returned as
 * completed.
 *
 * The replayed jobs will never be returned to userspace. The preceding failed
 * job will be returned to userspace as failed; the status of this job should
 * be ignored. Completion should be determined by the status of the replay soft
 * job.
 *
 * In order for the jobs to be replayed, the job headers will have to be
 * modified. The Status field will be reset to NOT_STARTED. If the Job Type
 * field indicates a Vertex Shader Job then it will be changed to Null Job.
 *
 * The replayed jobs have the following assumptions:
 *
 * - No external resources. Any required external resources will be held by the
 *   replay atom.
 * - Pre-dependencies are created based on job order.
 * - Atom numbers are automatically assigned.
 * - device_nr is set to 0. This is not relevant as
 *   BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
 * - Priority is inherited from the replay job.
 */
#define BASE_JD_REQ_SOFT_REPLAY                 (BASE_JD_REQ_SOFT_JOB | 0x4)
/**
 * SW only requirement: event wait/trigger job.
 *
 * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
 * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
 *   other waiting jobs. It completes immediately.
 * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
 *   possible for other jobs to wait upon. It completes immediately.
 */
#define BASE_JD_REQ_SOFT_EVENT_WAIT             (BASE_JD_REQ_SOFT_JOB | 0x5)
#define BASE_JD_REQ_SOFT_EVENT_SET              (BASE_JD_REQ_SOFT_JOB | 0x6)
#define BASE_JD_REQ_SOFT_EVENT_RESET            (BASE_JD_REQ_SOFT_JOB | 0x7)

#define BASE_JD_REQ_SOFT_DEBUG_COPY             (BASE_JD_REQ_SOFT_JOB | 0x8)

/**
 * SW only requirement: Just In Time allocation
 *
 * This job requests a JIT allocation based on the request in the
 * @base_jit_alloc_info structure which is passed via the jc element of
 * the atom.
 *
 * It should be noted that the id entry in @base_jit_alloc_info must not
 * be reused until it has been released via @BASE_JD_REQ_SOFT_JIT_FREE.
 *
 * Should this soft job fail, it is expected that a @BASE_JD_REQ_SOFT_JIT_FREE
 * soft job to free the JIT allocation is still made.
 *
 * The job will complete immediately.
 */
#define BASE_JD_REQ_SOFT_JIT_ALLOC              (BASE_JD_REQ_SOFT_JOB | 0x9)
/**
 * SW only requirement: Just In Time free
 *
 * This job requests that a JIT allocation created by @BASE_JD_REQ_SOFT_JIT_ALLOC
 * be freed. The ID of the JIT allocation is passed via the jc element of
 * the atom.
 *
 * The job will complete immediately.
 */
#define BASE_JD_REQ_SOFT_JIT_FREE               (BASE_JD_REQ_SOFT_JOB | 0xa)

/**
 * SW only requirement: Map external resource
 *
 * This job requests that external resource(s) be mapped once the dependencies
 * of the job have been satisfied. The list of external resources is
 * passed via the jc element of the atom, which is a pointer to a
 * @base_external_resource_list.
 */
#define BASE_JD_REQ_SOFT_EXT_RES_MAP            (BASE_JD_REQ_SOFT_JOB | 0xb)
/**
 * SW only requirement: Unmap external resource
 *
 * This job requests that external resource(s) be unmapped once the dependencies
 * of the job have been satisfied. The list of external resources is
 * passed via the jc element of the atom, which is a pointer to a
 * @base_external_resource_list.
 */
#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP          (BASE_JD_REQ_SOFT_JOB | 0xc)

/**
 * HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
 *
 * This indicates that the Job Chain contains Midgard Jobs of the 'Compute Shaders' type.
 *
 * In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job
 * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
 */
#define BASE_JD_REQ_ONLY_COMPUTE    ((base_jd_core_req)1 << 10)

/**
 * HW Requirement: Use the base_jd_atom::device_nr field to specify a
 * particular core group
 *
 * If both @ref BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag takes priority
 *
 * This is only guaranteed to work for @ref BASE_JD_REQ_ONLY_COMPUTE atoms.
 *
 * If the core availability policy is keeping the required core group turned off, then
 * the job will fail with a @ref BASE_JD_EVENT_PM_EVENT error code.
 */
#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)

/**
 * SW Flag: If this bit is set then the successful completion of this atom
 * will not cause an event to be sent to userspace
 */
#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE   ((base_jd_core_req)1 << 12)

/**
 * SW Flag: If this bit is set then completion of this atom will not cause an
 * event to be sent to userspace, whether successful or not.
 */
#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)

/**
 * SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
 *
 * If this bit is set then the GPU's cache will not be cleaned and invalidated
 * until a GPU job starts which does not have this bit set or a job completes
 * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use if
 * the CPU may have written to memory addressed by the job since the last job
 * without this bit set was submitted.
 */
#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)

/**
 * SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
 *
 * If this bit is set then the GPU's cache will not be cleaned and invalidated
 * until a GPU job completes which does not have this bit set or a job starts
 * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use if
 * the CPU may read from or partially overwrite memory addressed by the job
 * before the next job without this bit set completes.
 */
#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)

/**
 * These requirement bits are currently unused in base_jd_core_req
 */
#define BASEP_JD_REQ_RESERVED \
	(~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
	BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
	BASE_JD_REQ_EVENT_COALESCE | \
	BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
	BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
	BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END))

/**
 * Mask of all bits in base_jd_core_req that control the type of the atom.
 *
 * This allows dependency only atoms to have flags set
 */
#define BASE_JD_REQ_ATOM_TYPE \
	(BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
	BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)

/**
 * Mask of all bits in base_jd_core_req that control the type of a soft job.
 */
#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)

/*
 * Returns non-zero value if core requirements passed define a soft job or
 * a dependency only job.
 */
#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
	((core_req & BASE_JD_REQ_SOFT_JOB) || \
	(core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)

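/* Illustrative sketch (not part of this header): classifying an atom's core
 * requirements using the masks above. Both function names are hypothetical.
 */
#if 0 /* documentation-only example */
static inline int example_is_fence_wait(base_jd_core_req core_req)
{
	/* mask off flag bits, then compare against one soft job type */
	return (core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
			BASE_JD_REQ_SOFT_FENCE_WAIT;
}

static inline int example_runs_on_hw(base_jd_core_req core_req)
{
	/* neither a soft job nor a dependency-only atom reaches the hardware */
	return !BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req);
}
#endif
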
/**
 * @brief States to model state machine processed by kbasep_js_job_check_ref_cores(), which
 * handles retaining cores for power management and affinity management.
 *
 * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
 * where lots of atoms could be submitted before powerup, and each has an
 * affinity chosen that causes other atoms to have an affinity
 * violation. Whilst the affinity was not causing violations at the time it
 * was chosen, it could cause violations thereafter. For example, 1000 jobs
 * could have had their affinity chosen during the powerup time, so any of
 * those 1000 jobs could cause an affinity violation later on.
 *
 * The attack would otherwise occur because other atoms/contexts have to wait for:
 * -# the currently running atoms (which are causing the violation) to
 * finish
 * -# and, the atoms that had their affinity chosen during powerup to
 * finish. These are run preferentially because they don't cause a
 * violation, but instead continue to cause the violation in others.
 * -# or, the attacker is scheduled out (which might not happen for just 2
 * contexts)
 *
 * By re-choosing the affinity (which is designed to avoid violations at the
 * time it's chosen), we break condition (2) of the wait, which minimizes the
 * problem to just waiting for current jobs to finish (which can be bounded if
 * the Job Scheduling Policy has a timer).
 */
enum kbase_atom_coreref_state {
	/** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
	KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
	/** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
	KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
	/** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
	KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
	/** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
	KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
	/** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
	KBASE_ATOM_COREREF_STATE_READY
};

/*
 * Base Atom priority
 *
 * Only certain priority levels are actually implemented, as specified by the
 * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
 * level that is not one of those defined below.
 *
 * Priority levels only affect scheduling between atoms of the same type within
 * a base context, and only after the atoms have had dependencies resolved.
 * Fragment atoms do not affect non-fragment atoms with lower priorities, and
 * vice versa. For example, a low priority atom that has had its
 * dependencies resolved might run before a higher priority atom that has not
 * had its dependencies resolved.
 *
 * The scheduling between base contexts/processes and between atoms from
 * different base contexts/processes is unaffected by atom priority.
 *
 * The atoms are scheduled as follows with respect to their priorities:
 * - Let atoms 'X' and 'Y' be for the same job slot who have dependencies
 *   resolved, and atom 'X' has a higher priority than atom 'Y'
 * - If atom 'Y' is currently running on the HW, then it is interrupted to
 *   allow atom 'X' to run soon after
 * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing
 *   the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
 * - Any two atoms that have the same priority could run in any order with
 *   respect to each other. That is, there is no ordering constraint between
 *   atoms of the same priority.
 */
typedef u8 base_jd_prio;

/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
#define BASE_JD_PRIO_MEDIUM  ((base_jd_prio)0)
/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
 * BASE_JD_PRIO_LOW */
#define BASE_JD_PRIO_HIGH    ((base_jd_prio)1)
/* Low atom priority. */
#define BASE_JD_PRIO_LOW     ((base_jd_prio)2)

/* Count of the number of priority levels. This itself is not a valid
 * base_jd_prio setting */
#define BASE_JD_NR_PRIO_LEVELS 3

enum kbase_jd_atom_state {
	/** Atom is not used */
	KBASE_JD_ATOM_STATE_UNUSED,
	/** Atom is queued in JD */
	KBASE_JD_ATOM_STATE_QUEUED,
	/** Atom has been given to JS (is runnable/running) */
	KBASE_JD_ATOM_STATE_IN_JS,
	/** Atom has been completed, but not yet handed back to job dispatcher
	 *  for dependency resolution */
	KBASE_JD_ATOM_STATE_HW_COMPLETED,
	/** Atom has been completed, but not yet handed back to userspace */
	KBASE_JD_ATOM_STATE_COMPLETED
};

typedef u16 base_atom_id; /**< Type big enough to store an atom number in */

struct base_dependency {
	base_atom_id  atom_id;               /**< An atom number */
	base_jd_dep_type dependency_type;    /**< Dependency type */
};

/* This structure has changed since UK 10.2, for which base_jd_core_req was a
 * u16 value. In order to keep the size of the structure the same, the padding
 * field has been adjusted accordingly and a core_req field of type u32 (which
 * UK 10.3 defines base_jd_core_req to be) has been added at the end of the
 * structure. The place in the structure previously occupied by the u16
 * core_req is kept but renamed to compat_core_req, and as such it can be used
 * in ioctl calls for job submission as long as UK 10.2 legacy is supported.
 * Once this support ends, the field can be kept for possible future use. */
typedef struct base_jd_atom_v2 {
	u64 jc;			    /**< job-chain GPU address */
	struct base_jd_udata udata;		    /**< user data */
	kbase_pointer extres_list;	    /**< list of external resources */
	u16 nr_extres;			    /**< nr of external resources */
	u16 compat_core_req;	            /**< core requirements which correspond to the legacy support for UK 10.2 */
	struct base_dependency pre_dep[2];  /**< pre-dependencies; use the setter function to assign this field,
	in order to reduce the possibility of improper assignment of a dependency field */
	base_atom_id atom_number;	    /**< unique number to identify the atom */
	base_jd_prio prio;                  /**< Atom priority. Refer to @ref base_jd_prio for more details */
	u8 device_nr;			    /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
	u8 padding[1];
	base_jd_core_req core_req;          /**< core requirements */
} base_jd_atom_v2;

#ifdef BASE_LEGACY_UK6_SUPPORT
struct base_jd_atom_v2_uk6 {
	u64 jc;			    /**< job-chain GPU address */
	struct base_jd_udata udata;		    /**< user data */
	kbase_pointer extres_list;	    /**< list of external resources */
	u16 nr_extres;			    /**< nr of external resources */
	u16 core_req;                       /**< core requirements */
	base_atom_id pre_dep[2]; /**< pre-dependencies */
	base_atom_id atom_number;	    /**< unique number to identify the atom */
	base_jd_prio prio;		    /**< priority - smaller is higher priority */
	u8 device_nr;			    /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
	u8 padding[7];
};
#endif /* BASE_LEGACY_UK6_SUPPORT */

typedef enum base_external_resource_access {
	BASE_EXT_RES_ACCESS_SHARED,
	BASE_EXT_RES_ACCESS_EXCLUSIVE
} base_external_resource_access;

typedef struct base_external_resource {
	u64 ext_resource;
} base_external_resource;


/**
 * The maximum number of external resources which can be mapped/unmapped
 * in a single request.
 */
#define BASE_EXT_RES_COUNT_MAX 10

/**
 * struct base_external_resource_list - Structure which describes a list of
 *                                      external resources.
 * @count:                              The number of resources.
 * @ext_res:                            Array of external resources which is
 *                                      sized at allocation time.
 */
struct base_external_resource_list {
	u64 count;
	struct base_external_resource ext_res[1];
};

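/* Illustrative sketch (not part of this header): ext_res[1] acts as a
 * C89-style flexible array, so a list for 'n' resources is allocated with
 * room for n-1 extra elements. 'example_alloc' is a hypothetical allocator
 * standing in for whatever the caller uses.
 */
#if 0 /* documentation-only example */
static inline struct base_external_resource_list *example_make_list(u64 n)
{
	struct base_external_resource_list *list;
	size_t size = sizeof(*list) + (n - 1) * sizeof(list->ext_res[0]);

	list = example_alloc(size); /* hypothetical allocator */
	if (list)
		list->count = n;
	return list;
}
#endif
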
struct base_jd_debug_copy_buffer {
	u64 address;
	u64 size;
	struct base_external_resource extres;
};

/**
 * @brief Setter for a dependency structure
 *
 * @param[in] dep          The kbase jd atom dependency to be initialized.
 * @param     id           The atom_id to be assigned.
 * @param     dep_type     The dep_type to be assigned.
 *
 */
static inline void base_jd_atom_dep_set(struct base_dependency *dep,
		base_atom_id id, base_jd_dep_type dep_type)
{
	LOCAL_ASSERT(dep != NULL);

	/*
	 * make sure we don't set a disallowed combination
	 * of atom_id/dependency_type.
	 */
	LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) ||
			(id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID));

	dep->atom_id = id;
	dep->dependency_type = dep_type;
}

/**
 * @brief Make a copy of a dependency structure
 *
 * @param[in,out] dep          The kbase jd atom dependency to be written.
 * @param[in]     from         The dependency to make a copy from.
 *
 */
static inline void base_jd_atom_dep_copy(struct base_dependency *dep,
		const struct base_dependency *from)
{
	LOCAL_ASSERT(dep != NULL);

	base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type);
}

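/* Illustrative sketch (not part of this header): making atom 'a' wait on
 * atom 5 as a data dependency and leaving the second slot empty. 'a' is a
 * hypothetical, already zero-initialised base_jd_atom_v2.
 */
#if 0 /* documentation-only example */
static inline void example_set_deps(struct base_jd_atom_v2 *a)
{
	base_jd_atom_dep_set(&a->pre_dep[0], 5, BASE_JD_DEP_TYPE_DATA);
	base_jd_atom_dep_set(&a->pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
}
#endif
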
/**
 * @brief Soft-atom fence trigger setup.
 *
 * Sets up an atom to be a SW-only atom signaling a fence
 * when it reaches the run state.
 *
 * Using the existing base dependency system the fence can
 * be set to trigger when a GPU job has finished.
 *
 * The base fence object must not be terminated until the atom
 * has been submitted to @a base_jd_submit and @a base_jd_submit has returned.
 *
 * @a fence must be a valid fence set up with @a base_fence_init.
 * Calling this function with an uninitialized fence results in undefined behavior.
 *
 * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom
 * @param[in] fence The base fence object to trigger.
 */
static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
{
	LOCAL_ASSERT(atom);
	LOCAL_ASSERT(fence);
	LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE);
	LOCAL_ASSERT(fence->basep.stream_fd >= 0);
	atom->jc = (uintptr_t) fence;
	atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER;
}

/**
 * @brief Soft-atom fence wait setup.
 *
 * Sets up an atom to be a SW-only atom waiting on a fence.
 * When the fence becomes triggered the atom becomes runnable
 * and completes immediately.
 *
 * Using the existing base dependency system the fence can
 * be set to block a GPU job until it has been triggered.
 *
 * The base fence object must not be terminated until the atom
 * has been submitted to @a base_jd_submit and @a base_jd_submit has returned.
 *
 * @a fence must be a valid fence set up with @a base_fence_init or @a base_fence_import.
 * Calling this function with an uninitialized fence results in undefined behavior.
 *
 * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom
 * @param[in] fence The base fence object to wait on
 */
static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
{
	LOCAL_ASSERT(atom);
	LOCAL_ASSERT(fence);
	LOCAL_ASSERT(fence->basep.fd >= 0);
	atom->jc = (uintptr_t) fence;
	atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
}

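/* Illustrative sketch (not part of this header): chaining a GPU atom after a
 * fence-wait soft atom. 'wait_atom' and 'gpu_atom' are hypothetical atoms
 * whose atom_number fields are assumed to be assigned already; the fence is
 * assumed to be valid (fd >= 0), e.g. set up via base_fence_import.
 */
#if 0 /* documentation-only example */
static inline void example_wait_then_run(struct base_jd_atom_v2 *wait_atom,
		struct base_jd_atom_v2 *gpu_atom, struct base_fence *fence)
{
	base_jd_fence_wait_setup_v2(wait_atom, fence);
	/* make the GPU atom depend on the fence-wait soft atom */
	base_jd_atom_dep_set(&gpu_atom->pre_dep[0],
			wait_atom->atom_number, BASE_JD_DEP_TYPE_ORDER);
}
#endif
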
/**
 * @brief External resource info initialization.
 *
 * Sets up an external resource object to reference
 * a memory allocation and the type of access requested.
 *
 * @param[in] res     The resource object to initialize
 * @param     handle  The handle to the imported memory object, must be
 *                    obtained by calling @ref base_mem_as_import_handle().
 * @param     access  The type of access requested
 */
static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access)
{
	u64 address;

	address = handle.basep.handle;

	LOCAL_ASSERT(res != NULL);
	LOCAL_ASSERT(0 == (address & LOCAL_PAGE_LSB));
	LOCAL_ASSERT(access == BASE_EXT_RES_ACCESS_SHARED || access == BASE_EXT_RES_ACCESS_EXCLUSIVE);

	res->ext_resource = address | (access & LOCAL_PAGE_LSB);
}

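/* Illustrative sketch (not part of this header): the access type is packed
 * into the low (sub-page) bits of the resource address, which is why the
 * handle must be page aligned (asserted above). 'h' is a hypothetical import
 * handle assumed to come from base_mem_as_import_handle().
 */
#if 0 /* documentation-only example */
static inline struct base_external_resource example_res(
		struct base_import_handle h)
{
	struct base_external_resource res;

	base_external_resource_init(&res, h, BASE_EXT_RES_ACCESS_SHARED);
	return res;
}
#endif
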
/**
 * @brief Job chain event code bits
 * Defines the bits used to create ::base_jd_event_code
 */
enum {
	BASE_JD_SW_EVENT_KERNEL = (1u << 15), /**< Kernel side event */
	BASE_JD_SW_EVENT = (1u << 14), /**< SW defined event */
	BASE_JD_SW_EVENT_SUCCESS = (1u << 13), /**< Event indicates success (SW events only) */
	BASE_JD_SW_EVENT_JOB = (0u << 11), /**< Job related event */
	BASE_JD_SW_EVENT_BAG = (1u << 11), /**< Bag related event */
	BASE_JD_SW_EVENT_INFO = (2u << 11), /**< Misc/info event */
	BASE_JD_SW_EVENT_RESERVED = (3u << 11),	/**< Reserved event type */
	BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11)	    /**< Mask to extract the type from an event code */
};

/**
 * @brief Job chain event codes
 *
 * HW and low-level SW events are represented by event codes.
 * The status of jobs which succeeded are also represented by
 * an event code (see ::BASE_JD_EVENT_DONE).
 * Events are usually reported as part of a ::base_jd_event.
 *
 * The event codes are encoded in the following way:
 * @li 10:0  - subtype
 * @li 12:11 - type
 * @li 13    - SW success (only valid if the SW bit is set)
 * @li 14    - SW event (HW event if not set)
 * @li 15    - Kernel event (should never be seen in userspace)
 *
 * Events are split up into ranges as follows:
 * - BASE_JD_EVENT_RANGE_\<description\>_START
 * - BASE_JD_EVENT_RANGE_\<description\>_END
 *
 * \a code is in \<description\>'s range when:
 * - <tt>BASE_JD_EVENT_RANGE_\<description\>_START <= code < BASE_JD_EVENT_RANGE_\<description\>_END </tt>
 *
 * Ranges can be asserted for adjacency by testing that the END of the previous
 * is equal to the START of the next. This is useful for optimizing some tests
 * for range.
 *
 * A limitation is that the last member of this enum must explicitly be handled
 * (with an assert-unreachable statement) in switch statements that use
 * variables of this type. Otherwise, the compiler warns that we have not
 * handled that enum value.
 */
typedef enum base_jd_event_code {
	/* HW defined exceptions */

	/** Start of HW Non-fault status codes
	 *
	 * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
	 * because the job was hard-stopped
	 */
	BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,

	/* non-fatal exceptions */
	BASE_JD_EVENT_NOT_STARTED = 0x00, /**< Can't be seen by userspace, treated as 'previous job done' */
	BASE_JD_EVENT_DONE = 0x01,
	BASE_JD_EVENT_STOPPED = 0x03,	  /**< Can't be seen by userspace, becomes TERMINATED, DONE or JOB_CANCELLED */
	BASE_JD_EVENT_TERMINATED = 0x04,  /**< This is actually a fault status code - the job was hard stopped */
	BASE_JD_EVENT_ACTIVE = 0x08,	  /**< Can't be seen by userspace, jobs only returned on complete/fail/cancel */

	/** End of HW Non-fault status codes
	 *
	 * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
	 * because the job was hard-stopped
	 */
	BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,

	/** Start of HW fault and SW Error status codes */
	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,

	/* job exceptions */
	BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
	BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
	BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
	BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
	BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
	BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
	BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
	BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
	BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
	BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
	BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
	BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
	BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
	BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
	BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
	BASE_JD_EVENT_STATE_FAULT = 0x5A,
	BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
	BASE_JD_EVENT_UNKNOWN = 0x7F,

	/* GPU exceptions */
	BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
	BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,

	/* MMU exceptions */
	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
	BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
	BASE_JD_EVENT_ACCESS_FLAG = 0xD8,

	/* SW defined exceptions */
	BASE_JD_EVENT_MEM_GROWTH_FAILED	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
	BASE_JD_EVENT_TIMED_OUT		= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
	BASE_JD_EVENT_JOB_CANCELLED	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
	BASE_JD_EVENT_JOB_INVALID	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
	BASE_JD_EVENT_PM_EVENT		= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
	BASE_JD_EVENT_FORCE_REPLAY	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005,

	BASE_JD_EVENT_BAG_INVALID	= BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,

	/** End of HW fault and SW Error status codes */
	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF,

	/** Start of SW Success status codes */
	BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000,

	BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
	BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_BAG | 0x000,
	BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,

	/** End of SW Success status codes */
	BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,

	/** Start of Kernel-only status codes. Such codes are never returned to user-space */
	BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000,
	BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,

	/** End of Kernel-only status codes. */
	BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
} base_jd_event_code;

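/* Illustrative sketch (not part of this header): classifying an event code
 * using the ranges and the type mask documented above. Both function names
 * are hypothetical.
 */
#if 0 /* documentation-only example */
static inline int example_is_hw_fault_or_sw_error(base_jd_event_code code)
{
	return code >= BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START &&
	       code < BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END;
}

static inline int example_is_sw_job_event(base_jd_event_code code)
{
	/* the type field only applies when the SW event bit is set */
	return (code & BASE_JD_SW_EVENT) &&
	       (code & BASE_JD_SW_EVENT_TYPE_MASK) == BASE_JD_SW_EVENT_JOB;
}
#endif
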
/**
 * @brief Event reporting structure
 *
 * This structure is used by the kernel driver to report information
 * about GPU events. These can either be HW-specific events or low-level
 * SW events, such as job-chain completion.
 *
 * The event code contains an event type field which can be extracted
 * by ANDing with ::BASE_JD_SW_EVENT_TYPE_MASK.
 *
 * Based on the event type base_jd_event::data holds:
 * @li ::BASE_JD_SW_EVENT_JOB : the offset in the ring-buffer for the completed
 * job-chain
 * @li ::BASE_JD_SW_EVENT_BAG : The address of the ::base_jd_bag that has
 * been completed (i.e. all contained job-chains have been completed).
 * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used
 */
typedef struct base_jd_event_v2 {
	base_jd_event_code event_code;  /**< event code */
	base_atom_id atom_number;       /**< the atom number that has completed */
	struct base_jd_udata udata;     /**< user data */
} base_jd_event_v2;

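/* Illustrative sketch (not part of this header): a user-space loop matching
 * completed atoms back to their per-job udata. 'events' and 'n' are
 * hypothetical; the events are assumed to have already been fetched from
 * the driver by whatever mechanism the client library uses.
 */
#if 0 /* documentation-only example */
static inline void example_handle_events(const struct base_jd_event_v2 *events,
		size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (events[i].event_code == BASE_JD_EVENT_DONE) {
			/* udata.blob[] is guaranteed untouched by the driver */
			u64 user_cookie = events[i].udata.blob[0];
			(void)user_cookie; /* dispatch completion here */
		}
	}
}
#endif
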
/**
 * Padding required to ensure that the @ref struct base_dump_cpu_gpu_counters structure fills
 * a full cache line.
 */

#define BASE_CPU_GPU_CACHE_LINE_PADDING (36)


/**
 * @brief Structure for BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS jobs.
 *
 * This structure is stored into the memory pointed to by the @c jc field of @ref base_jd_atom.
 *
 * This structure must be padded to ensure that it will occupy whole cache lines. This is to avoid
 * cases where access to pages containing the structure is shared between cached and un-cached
 * memory regions, which would cause memory corruption. Here we set the structure size to be
 * 64 bytes, which is the cache line size of ARM Cortex-A15 processors.
 */

typedef struct base_dump_cpu_gpu_counters {
	u64 system_time;
	u64 cycle_counter;
	u64 sec;
	u32 usec;
	u8 padding[BASE_CPU_GPU_CACHE_LINE_PADDING];
} base_dump_cpu_gpu_counters;



/** @} end group base_user_api_job_dispatch */

#define GPU_MAX_JOB_SLOTS 16

1227 /**
1228  * @page page_base_user_api_gpuprops User-side Base GPU Property Query API
1229  *
1230  * The User-side Base GPU Property Query API encapsulates two
1231  * sub-modules:
1232  *
1233  * - @ref base_user_api_gpuprops_dyn "Dynamic GPU Properties"
1234  * - @ref base_plat_config_gpuprops "Base Platform Config GPU Properties"
1235  *
1236  * There is a related third module outside of Base, which is owned by the MIDG
1237  * module:
1238  * - @ref gpu_props_static "Midgard Compile-time GPU Properties"
1239  *
1240  * Base only deals with properties that vary between different Midgard
1241  * implementations - the Dynamic GPU properties and the Platform Config
1242  * properties.
1243  *
1244  * For properties that are constant for the Midgard Architecture, refer to the
1245  * MIDG module. However, we will discuss their relevance here <b>just to
1246  * provide background information.</b>
1247  *
1248  * @section sec_base_user_api_gpuprops_about About the GPU Properties in Base and MIDG modules
1249  *
1250  * The compile-time properties (Platform Config, Midgard Compile-time
1251  * properties) are exposed as pre-processor macros.
1252  *
1253  * Complementing the compile-time properties are the Dynamic GPU
1254  * Properties, which act as a conduit for the Midgard Configuration
1255  * Discovery.
1256  *
1257  * In general, the dynamic properties are present to verify that the platform
1258  * has been configured correctly with the right set of Platform Config
1259  * Compile-time Properties.
1260  *
1261  * As a consistent guide across the entire DDK, the choice for dynamic or
1262  * compile-time should consider the following, in order:
1263  * -# Can the code be written so that it doesn't need to know the
1264  * implementation limits at all?
1265  * -# If you need the limits, get the information from the Dynamic Property
1266  * lookup. This should be done once as you fetch the context, and then cached
1267  * as part of the context data structure, so it's cheap to access.
1268  * -# If there's a clear and arguable inefficiency in using Dynamic Properties,
1269  * then use a Compile-Time Property (Platform Config, or Midgard Compile-time
1270  * property). Examples of where this might be sensible follow:
1271  *  - Part of a critical inner-loop
1272  *  - Frequent re-use throughout the driver, causing significant extra load
1273  * instructions or control flow that would be worthwhile optimizing out.
1274  *
1275  * We cannot provide an exhaustive set of examples, neither can we provide a
1276  * rule for every possible situation. Use common sense, and think about: what
1277  * the rest of the driver will be doing; how the compiler might represent the
1278  * value if it is a compile-time constant; whether an OEM shipping multiple
1279  * devices would benefit much more from a single DDK binary, instead of
1280  * insignificant micro-optimizations.
1281  *
1282  * @section sec_base_user_api_gpuprops_dyn Dynamic GPU Properties
1283  *
1284  * Dynamic GPU properties are presented in two sets:
1285  * -# the commonly used properties in @ref base_gpu_props, which have been
1286  * unpacked from GPU register bitfields.
1287  * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props
1288  * (also a member of @ref base_gpu_props). All of these are presented in
1289  * the packed form, as presented by the GPU  registers themselves.
1290  *
1291  * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
1292  * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
1293  * behaving differently?". In this case, all information about the
1294  * configuration is potentially useful, but it <b>does not need to be processed
1295  * by the driver</b>. Instead, the raw registers can be processed by the Mali
1296  * Tools software on the host PC.
1297  *
1298  * The properties returned extend the Midgard Configuration Discovery
1299  * registers. For example, GPU clock speed is not specified in the Midgard
1300  * Architecture, but is <b>necessary for OpenCL's clGetDeviceInfo() function</b>.
1301  *
1302  * The GPU properties are obtained by a call to
1303  * _mali_base_get_gpu_props(). This simply returns a pointer to a const
1304  * base_gpu_props structure. It is constant for the life of a base
1305  * context. Multiple calls to _mali_base_get_gpu_props() to a base context
1306  * return the same pointer to a constant structure. This avoids cache pollution
1307  * of the common data.
1308  *
1309  * This pointer must not be freed, because it does not point to the start of a
1310  * region allocated by the memory allocator; instead, just close the @ref
1311  * base_context.
 *
 *
 * @section sec_base_user_api_gpuprops_config Platform Config Compile-time Properties
 *
 * The Platform Config File sets up GPU properties that are specific to a
 * certain platform. Properties that are 'Implementation Defined' in the
 * Midgard Architecture spec are placed here.
 *
 * @note Reference configurations are provided for Midgard Implementations, such as
 * the Mali-T600 family. The customer need not repeat this information, and can select one of
 * these reference configurations. For example, VA_BITS, PA_BITS and the
 * maximum number of samples per pixel might vary between Midgard Implementations, but
 * \b not between platforms using the Mali-T604. This information is placed in
 * the reference configuration files.
 *
 * The System Integrator creates the following structure:
 * - platform_XYZ
 * - platform_XYZ/plat
 * - platform_XYZ/plat/plat_config.h
 *
 * They then edit plat_config.h, using the example plat_config.h files as a
 * guide.
 *
 * At the very least, the customer must set @ref CONFIG_GPU_CORE_TYPE, and will
 * receive a helpful \#error message if they do not do this correctly. This
 * selects the Reference Configuration for the Midgard Implementation. The rationale
 * behind this decision (against asking the customer to write \#include
 * <gpus/mali_t600.h> in their plat_config.h) is as follows:
 * - This mechanism 'looks' like a regular config file (such as Linux's
 * .config)
 * - It is difficult to get wrong in a way that will produce strange build
 * errors:
 *  - They need not know where the mali_t600.h, other_midg_gpu.h etc. files are stored - and
 *  so they won't accidentally pick another file with 'mali_t600' in its name
 *  - When the build doesn't work, the System Integrator may think the DDK
 *  doesn't work, and attempt to fix it themselves:
 *   - For the @ref CONFIG_GPU_CORE_TYPE mechanism, the only way to get past the
 *   error is to set @ref CONFIG_GPU_CORE_TYPE, and this is what the \#error tells
 *   you.
 *   - For a \#include mechanism, checks must still be made elsewhere, which the
 *   System Integrator may try working around by setting \#defines (such as
 *   VA_BITS) themselves in their plat_config.h. In the worst case, they may
 *   set the prevention-mechanism \#define of
 *   "A_CORRECT_MIDGARD_CORE_WAS_CHOSEN".
 *   - In this case, they would believe they are on the right track, because
 *   the build progresses with their fix, but with errors elsewhere.
 *
 * However, there is nothing to prevent the customer from using \#include to
 * organize their own configuration files hierarchically.
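 *
 * For illustration, a minimal plat_config.h might then contain no more
 * than the following (the core type value shown is hypothetical):
 *
 * @code
 * // platform_XYZ/plat/plat_config.h
 * #define CONFIG_GPU_CORE_TYPE GPU_CORE_TYPE_T60X
 * @endcode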
 *
 * The mechanism for the header file processing is as follows:
 *
 * @dot
   digraph plat_config_mechanism {
	   rankdir=BT
	   size="6,6"

       "mali_base.h";
	   "gpu/mali_gpu.h";

	   node [ shape=box ];
	   {
	       rank = same; ordering = out;

		   "gpu/mali_gpu_props.h";
		   "base/midg_gpus/mali_t600.h";
		   "base/midg_gpus/other_midg_gpu.h";
	   }
	   { rank = same; "plat/plat_config.h"; }
	   {
	       rank = same;
		   "gpu/mali_gpu.h" [ shape=box ];
		   gpu_chooser [ label="" style="invisible" width=0 height=0 fixedsize=true ];
		   select_gpu [ label="Mali-T600 | Other\n(select_gpu.h)" shape=polygon,sides=4,distortion=0.25 width=3.3 height=0.99 fixedsize=true ] ;
	   }
	   node [ shape=box ];
	   { rank = same; "plat/plat_config.h"; }
	   { rank = same; "mali_base.h"; }

	   "mali_base.h" -> "gpu/mali_gpu.h" -> "gpu/mali_gpu_props.h";
	   "mali_base.h" -> "plat/plat_config.h" ;
	   "mali_base.h" -> select_gpu ;

	   "plat/plat_config.h" -> gpu_chooser [style="dotted,bold" dir=none weight=4] ;
	   gpu_chooser -> select_gpu [style="dotted,bold"] ;

	   select_gpu -> "base/midg_gpus/mali_t600.h" ;
	   select_gpu -> "base/midg_gpus/other_midg_gpu.h" ;
   }
   @enddot
 *
 *
 * @section sec_base_user_api_gpuprops_kernel Kernel Operation
 *
 * At Base Context Create time, user-side makes a single kernel call:
 * - A call to fill user memory with GPU information structures
 *
 * The kernel-side will fill the provided memory with the entire processed
 * @ref base_gpu_props structure, because this information is required on
 * both the user and kernel sides; it does not make sense to decode it twice.
 *
 * Coherency groups must be derived from the bitmasks, but this can be done
 * kernel side, and just once at kernel startup: coherency groups must already
 * be known kernel-side, to support chains that specify an 'Only Coherent Group'
 * SW requirement, or an 'Only Coherent Group with Tiler' SW requirement.
 *
 * @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation
 * Creation of the coherent group data is done at device-driver startup, and so
 * is a one-time operation. This will most likely involve a loop with CLZ,
 * shifting, and bit clearing on the L2_PRESENT mask, depending on whether the
 * system is L2 Coherent. The number of shader cores is found by a
 * population count, since faulty cores may be disabled during production,
 * producing a non-contiguous mask.
 *
 * The memory requirements for this algorithm can be determined either by a u64
 * population count on the L2_PRESENT mask (a LUT helper is already
 * required for the above), or by the simple assumption that there can be no
 * more than 16 coherent groups, since core groups are typically 4 cores.
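 *
 * A minimal sketch of the group derivation (assuming an hweight64()
 * popcount helper, and ignoring the choice between L2_PRESENT and
 * SHADER_PRESENT that depends on whether the system is L2 Coherent):
 *
 * @code
 * u64 l2 = raw->l2_present;
 * u64 first = l2 & ~(l2 - 1); // lowest set bit starts the first group
 * u32 n = 0;
 *
 * while (l2 != 0 && n < BASE_MAX_COHERENT_GROUPS) {
 *         u64 prev = first;
 *         l2 -= first;
 *         first = l2 & ~(l2 - 1);
 *         // Shader cores between consecutive L2 bits form one group;
 *         // when first == 0, (first - 1) wraps to all-ones so the last
 *         // group takes every remaining core.
 *         u64 mask = (first - 1) & ~(prev - 1);
 *         info->group[n].core_mask = mask & raw->shader_present;
 *         info->group[n].num_cores = hweight64(info->group[n].core_mask);
 *         n++;
 * }
 * info->num_groups = n;
 * @endcode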
 */

/**
 * @addtogroup base_user_api_gpuprops User-side Base GPU Property Query APIs
 * @{
 */

/**
 * @addtogroup base_user_api_gpuprops_dyn Dynamic HW Properties
 * @{
 */

struct mali_base_gpu_core_props {
	/**
	 * Product specific value.
	 */
	u32 product_id;

	/**
	 * Status of the GPU release.
	 * No defined values, but starts at 0 and increases by one for each
	 * release status (alpha, beta, EAC, etc.).
	 * 4 bit values (0-15).
	 */
	u16 version_status;

	/**
	 * Minor release number of the GPU. "P" part of an "RnPn" release number.
	 * 8 bit values (0-255).
	 */
	u16 minor_revision;

	/**
	 * Major release number of the GPU. "R" part of an "RnPn" release number.
	 * 4 bit values (0-15).
	 */
	u16 major_revision;

	u16 padding;

	/**
	 * This property is deprecated because it does not contain the real
	 * current value of the GPU clock speed. It is kept here only for
	 * backwards compatibility. For the new ioctl interface it is ignored
	 * and treated as padding, to keep the structure the same size and to
	 * retain the placement of its members.
	 */
	u32 gpu_speed_mhz;

	/**
	 * @usecase GPU clock max/min speed is required for computing
	 * best/worst case in tasks such as job scheduling and irq_throttling.
	 * (It is not specified in the Midgard Architecture.)
	 * Also, the GPU clock max speed is used for OpenCL's clGetDeviceInfo()
	 * function.
	 */
	u32 gpu_freq_khz_max;
	u32 gpu_freq_khz_min;

	/**
	 * Size of the shader program counter, in bits.
	 */
	u32 log2_program_counter_size;

	/**
	 * TEXTURE_FEATURES_x registers, as exposed by the GPU. This is a
	 * bitpattern where a set bit indicates that the format is supported.
	 *
	 * Before using a texture format, it is recommended that the
	 * corresponding bit be checked.
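	 *
	 * For example (sketch only; @c fmt is a hypothetical format index in
	 * the range covered by these registers):
	 * @code
	 * u32 word = props->core_props.texture_features[fmt / 32];
	 * int supported = (word >> (fmt % 32)) & 1;
	 * @endcode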
	 */
	u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];

	/**
	 * Theoretical maximum memory available to the GPU. It is unlikely that a
	 * client will be able to allocate all of this memory for their own
	 * purposes, but this at least provides an upper bound on the memory
	 * available to the GPU.
	 *
	 * This is required for OpenCL's clGetDeviceInfo() call when
	 * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The
	 * client will not be expecting to allocate anywhere near this value.
	 */
	u64 gpu_available_memory_size;
};

/**
 * More information is possible - but associativity and bus width are not
 * required by upper-level APIs.
 */
struct mali_base_gpu_l2_cache_props {
	u8 log2_line_size;
	u8 log2_cache_size;
	u8 num_l2_slices; /* Number of L2C slices. 1 or higher */
	u8 padding[5];
};

struct mali_base_gpu_tiler_props {
	u32 bin_size_bytes;	/* Max is 4*2^15 */
	u32 max_active_levels;	/* Max is 2^15 */
};

/**
 * GPU threading system details.
 */
struct mali_base_gpu_thread_props {
	u32 max_threads;            /* Max. number of threads per core */
	u32 max_workgroup_size;     /* Max. number of threads per workgroup */
	u32 max_barrier_size;       /* Max. number of threads that can synchronize on a simple barrier */
	u16 max_registers;          /* Total size [1..65535] of the register file available per core. */
	u8  max_task_queue;         /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
	u8  max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
	u8  impl_tech;              /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
	u8  padding[7];
};

/**
 * @brief Descriptor for a coherent group
 *
 * \c core_mask exposes all cores in that coherent group, and \c num_cores
 * provides a cached population-count for that mask.
 *
 * @note Whilst all cores are exposed in the mask, not all may be available to
 * the application, depending on the Kernel Power policy.
 *
 * @note If u64s must be 8-byte aligned, then this structure has 32 bits of wastage.
 */
struct mali_base_gpu_coherent_group {
	u64 core_mask;	       /**< Core restriction mask required for the group */
	u16 num_cores;	       /**< Number of cores in the group */
	u16 padding[3];
};

/**
 * @brief Coherency group information
 *
 * Note that the sizes of the members could be reduced. However, the \c group
 * member might be 8-byte aligned to ensure the u64 core_mask is 8-byte
 * aligned, thus leading to wastage if the other members' sizes were reduced.
 *
 * The groups are sorted by core mask. The core masks are non-repeating and do
 * not intersect.
 */
struct mali_base_gpu_coherent_group_info {
	u32 num_groups;

	/**
	 * Number of core groups (coherent or not) in the GPU. Equivalent to the number of L2 Caches.
	 *
	 * The GPU Counter dumping writes 2048 bytes per core group, regardless of
	 * whether the core groups are coherent or not. Hence this member is needed
	 * to calculate how much memory is required for dumping.
	 *
	 * @note Do not use it to work out how many valid elements are in the
	 * group[] member. Use num_groups instead.
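	 *
	 * For example, a counter dump buffer sizing sketch based on the
	 * figure above (2048 bytes per core group):
	 * @code
	 * size_t dump_bytes = (size_t)info->num_core_groups * 2048;
	 * @endcode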
	 */
	u32 num_core_groups;

	/**
	 * Coherency features of the memory, accessed by @ref gpu_mem_features
	 * methods
	 */
	u32 coherency;

	u32 padding;

	/**
	 * Descriptors of coherent groups
	 */
	struct mali_base_gpu_coherent_group group[BASE_MAX_COHERENT_GROUPS];
};

/**
 * A complete description of the GPU's Hardware Configuration Discovery
 * registers.
 *
 * The information is presented in a form that is inefficient for frequent
 * access; for that, the values are better expressed in the unpacked form of
 * the base_gpu_props structure.
 *
 * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
 * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
 * behaving differently?". In this case, all information about the
 * configuration is potentially useful, but it <b>does not need to be processed
 * by the driver</b>. Instead, the raw registers can be processed by the Mali
 * Tools software on the host PC.
 */
struct gpu_raw_gpu_props {
	u64 shader_present;
	u64 tiler_present;
	u64 l2_present;
	u64 stack_present;

	u32 l2_features;
	u32 suspend_size; /* API 8.2+ */
	u32 mem_features;
	u32 mmu_features;

	u32 as_present;

	u32 js_present;
	u32 js_features[GPU_MAX_JOB_SLOTS];
	u32 tiler_features;
	u32 texture_features[3];

	u32 gpu_id;

	u32 thread_max_threads;
	u32 thread_max_workgroup_size;
	u32 thread_max_barrier_size;
	u32 thread_features;

	/*
	 * Note: This is the _selected_ coherency mode rather than the
	 * available modes as exposed in the coherency_features register.
	 */
	u32 coherency_mode;
};

/**
 * Return structure for _mali_base_get_gpu_props().
 *
 * NOTE: the raw_props member in this data structure contains the register
 * values from which the values of the other members are derived. The derived
 * members exist to allow for efficient access and/or to shield the details
 * of the register layout.
 */
typedef struct mali_base_gpu_props {
	struct mali_base_gpu_core_props core_props;
	struct mali_base_gpu_l2_cache_props l2_props;
	u64 unused_1; /* keep for backwards compatibility */
	struct mali_base_gpu_tiler_props tiler_props;
	struct mali_base_gpu_thread_props thread_props;

	/** This member is large, likely to be 128 bytes */
	struct gpu_raw_gpu_props raw_props;

	/** This must be the last member of the structure */
	struct mali_base_gpu_coherent_group_info coherency_info;
} base_gpu_props;

/** @} end group base_user_api_gpuprops_dyn */

/** @} end group base_user_api_gpuprops */

/**
 * @addtogroup base_user_api_core User-side Base core APIs
 * @{
 */

/**
 * \enum base_context_create_flags
 *
 * Flags to pass to ::base_context_init.
 * Flags can be ORed together to enable multiple features.
 *
 * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
 * not collide with them.
 */
enum base_context_create_flags {
	/** No flags set */
	BASE_CONTEXT_CREATE_FLAG_NONE = 0,

	/** Base context is embedded in a cctx object (flag used for CINSTR software counter macros) */
	BASE_CONTEXT_CCTX_EMBEDDED = (1u << 0),

	/** Base context is a 'System Monitor' context for Hardware counters.
	 *
	 * One important side effect of this is that job submission is disabled. */
	BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1)
};

/**
 * Bitpattern describing the ::base_context_create_flags that can be passed to base_context_init()
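 *
 * For illustration (sketch only), user-side code might reject unsupported
 * creation flags with:
 * @code
 * if (flags & ~BASE_CONTEXT_CREATE_ALLOWED_FLAGS)
 *         return invalid_argument_error; // hypothetical error value
 * @endcode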
 */
#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \
	(((u32)BASE_CONTEXT_CCTX_EMBEDDED) | \
	  ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED))

/**
 * Bitpattern describing the ::base_context_create_flags that can be passed to the kernel
 */
#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \
	((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)

/*
 * Private flags used on the base context
 *
 * These start at bit 31, and run down to zero.
 *
 * They share the same space as @ref base_context_create_flags, and so must
 * not collide with them.
 */
/** Private flag tracking whether job descriptor dumping is disabled */
#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED ((u32)(1u << 31))

/** @} end group base_user_api_core */

/** @} end group base_user_api */

/**
 * @addtogroup base_plat_config_gpuprops Base Platform Config GPU Properties
 * @{
 *
 * C Pre-processor macros are exposed here to do with Platform
 * Config.
 *
 * These include:
 * - GPU Properties that are constant on a particular Midgard Family
 * Implementation e.g. Maximum samples per pixel on Mali-T600.
 * - General platform config for the GPU, such as the GPU major and minor
 * revision.
 */

/** @} end group base_plat_config_gpuprops */

/**
 * @addtogroup base_api Base APIs
 * @{
 */

/**
 * @brief The payload for a replay job. This must be in GPU memory.
 */
typedef struct base_jd_replay_payload {
	/**
	 * Pointer to the first entry in the base_jd_replay_jc list. These
	 * will be replayed in @b reverse order (so that extra ones can be added
	 * to the head in future soft jobs without affecting this soft job).
	 */
	u64 tiler_jc_list;

	/**
	 * Pointer to the fragment job chain.
	 */
	u64 fragment_jc;

	/**
	 * Pointer to the tiler heap free FBD field to be modified.
	 */
	u64 tiler_heap_free;

	/**
	 * Hierarchy mask for the replayed fragment jobs. May be zero.
	 */
	u16 fragment_hierarchy_mask;

	/**
	 * Hierarchy mask for the replayed tiler jobs. May be zero.
	 */
	u16 tiler_hierarchy_mask;

	/**
	 * Default weight to be used for hierarchy levels not in the original
	 * mask.
	 */
	u32 hierarchy_default_weight;

	/**
	 * Core requirements for the tiler job chain
	 */
	base_jd_core_req tiler_core_req;

	/**
	 * Core requirements for the fragment job chain
	 */
	base_jd_core_req fragment_core_req;
} base_jd_replay_payload;

#ifdef BASE_LEGACY_UK10_2_SUPPORT
typedef struct base_jd_replay_payload_uk10_2 {
	u64 tiler_jc_list;
	u64 fragment_jc;
	u64 tiler_heap_free;
	u16 fragment_hierarchy_mask;
	u16 tiler_hierarchy_mask;
	u32 hierarchy_default_weight;
	u16 tiler_core_req;
	u16 fragment_core_req;
	u8 padding[4];
} base_jd_replay_payload_uk10_2;
#endif /* BASE_LEGACY_UK10_2_SUPPORT */

/**
 * @brief An entry in the linked list of job chains to be replayed. This must
 *        be in GPU memory.
 */
typedef struct base_jd_replay_jc {
	/**
	 * Pointer to the next entry in the list. A value of NULL indicates the
	 * end of the list.
	 */
	u64 next;

	/**
	 * Pointer to the job chain.
	 */
	u64 jc;

} base_jd_replay_jc;
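
/*
 * For illustration (sketch only; gpu_va_of() is a hypothetical helper that
 * returns the GPU virtual address of a CPU-mapped entry): a new job chain
 * can be pushed onto the head of the replay list without disturbing the
 * entries already queued for this soft job.
 *
 *   new_entry->next = payload->tiler_jc_list;
 *   new_entry->jc   = chain_gpu_va;
 *   payload->tiler_jc_list = gpu_va_of(new_entry);
 */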

/* Maximum number of jobs allowed in a fragment chain in the payload of a
 * replay job */
#define BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT 256

/** @} end group base_api */

typedef struct base_profiling_controls {
	u32 profiling_controls[FBDUMP_CONTROL_MAX];
} base_profiling_controls;

/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
 * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) */
#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)

/* Indicate that job dumping is enabled. This could affect certain timers
 * to account for the performance impact. */
#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)

#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
		BASE_TLSTREAM_JOB_DUMPING_ENABLED)
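
/*
 * For illustration (sketch only): a setup path can validate caller-supplied
 * timeline stream flags against the mask above before use.
 *
 *   if (flags & ~BASE_TLSTREAM_FLAGS_MASK)
 *           return invalid_argument_error; // hypothetical error value
 */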

#endif				/* _BASE_KERNEL_H_ */