/*
 * © Copyright 2017-2018 Alyssa Rosenzweig
 * © Copyright 2017-2018 Connor Abbott
 * © Copyright 2017-2018 Lyude Paul
 * © Copyright 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef __PANFROST_JOB_H__
#define __PANFROST_JOB_H__

#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uint64_t mali_ptr;

/* Compressed per-pixel formats. Each of these formats expands to one to four
 * floating-point or integer numbers, as defined by the OpenGL specification.
 * There are various places in OpenGL where the user can specify a compressed
 * format in memory, which all use the same 8-bit enum in the various
 * descriptors, although different hardware units support different formats.
 */

/* The top 3 bits specify how the bits of each component are interpreted. */

/* e.g. ETC2_RGB8 */
#define MALI_FORMAT_COMPRESSED (0 << 5)

/* e.g. R11F_G11F_B10F */
#define MALI_FORMAT_SPECIAL (2 << 5)

/* signed normalized, e.g. RGBA8_SNORM */
#define MALI_FORMAT_SNORM (3 << 5)

/* e.g. RGBA8UI */
#define MALI_FORMAT_UINT (4 << 5)

/* e.g. RGBA8 and RGBA32F */
#define MALI_FORMAT_UNORM (5 << 5)

/* e.g. RGBA8I and RGBA16F */
#define MALI_FORMAT_SINT (6 << 5)

/* These formats seem to largely duplicate the others. They're used at least
 * for Bifrost framebuffer output.
 */
#define MALI_FORMAT_SPECIAL2 (7 << 5)
#define MALI_EXTRACT_TYPE(fmt) ((fmt) & 0xe0)

/* If the high 3 bits are 3 to 6, these two bits say how many components
 * there are.
 */
#define MALI_NR_CHANNELS(n) ((n - 1) << 3)
#define MALI_EXTRACT_CHANNELS(fmt) ((((fmt) >> 3) & 3) + 1)

/* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
 * bits mean.
 */

#define MALI_CHANNEL_4 2

#define MALI_CHANNEL_8 3

#define MALI_CHANNEL_16 4

#define MALI_CHANNEL_32 5

/* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
 * MALI_FORMAT_UNORM, it means a 32-bit float.
 */
#define MALI_CHANNEL_FLOAT 7
#define MALI_EXTRACT_BITS(fmt) (fmt & 0x7)

#define MALI_EXTRACT_INDEX(pixfmt) (((pixfmt) >> 12) & 0xFF)
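
/* As a worked example of the bit layout above: an unsigned-normalized format
 * with four 8-bit channels would be assembled from these macros as follows.
 * The name MALI_EXAMPLE_RGBA8_UNORM is purely illustrative (not a verified
 * hardware enum value); it only demonstrates how the fields compose and
 * decompose.
 */

#define MALI_EXAMPLE_RGBA8_UNORM \
        (MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8)

/* Round-tripping through the extraction macros:
 *
 *   MALI_EXTRACT_TYPE(MALI_EXAMPLE_RGBA8_UNORM)     == MALI_FORMAT_UNORM
 *   MALI_EXTRACT_CHANNELS(MALI_EXAMPLE_RGBA8_UNORM) == 4
 *   MALI_EXTRACT_BITS(MALI_EXAMPLE_RGBA8_UNORM)     == MALI_CHANNEL_8
 */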

/* The raw Midgard blend payload can either be an equation or a shader
 * address, depending on the context */

/*
 * Mali Attributes
 *
 * This structure lets the attribute unit compute the address of an attribute
 * given the vertex and instance ID. Unfortunately, the way this works is
 * rather complicated when instancing is enabled.
 *
 * To explain this, first we need to explain how compute and vertex threads are
 * dispatched. This is a guess (although a pretty firm guess!) since the
 * details are mostly hidden from the driver, except for attribute instancing.
 * When a quad is dispatched, it receives a single, linear index. However, we
 * need to translate that index into a (vertex id, instance id) pair, or a
 * (local id x, local id y, local id z) triple for compute shaders (although
 * vertex shaders and compute shaders are handled almost identically).
 * Focusing on vertex shaders, one option would be to do:
 *
 * vertex_id = linear_id % num_vertices
 * instance_id = linear_id / num_vertices
 *
 * but this involves a costly division and modulus by an arbitrary number.
 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
 * num_instances threads instead of num_vertices * num_instances, which results
 * in some "extra" threads with vertex_id >= num_vertices, which we have to
 * discard. The more we pad num_vertices, the more "wasted" threads we
 * dispatch, but the division is potentially easier.
 *
 * One straightforward choice is to pad num_vertices to the next power of two,
 * which means that the division and modulus are just simple bit shifts and
 * masking. But the actual algorithm is a bit more complicated. The thread
 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
 * to dividing by a power of two. This is possibly using the technique
 * described in patent US20170010862A1. As a result, padded_num_vertices can be
 * 1, 3, 5, 7, or 9 times a power of two. This results in fewer wasted
 * threads, since we need less padding.
 *
 * padded_num_vertices is picked by the hardware. The driver just specifies the
 * actual number of vertices. At least for Mali G71, the first few cases are
 * given by:
 *
 * num_vertices | padded_num_vertices
 *         3    | 4
 *       4-7    | 8
 *      8-11    | 12 (3 * 4)
 *     12-15    | 16
 *     16-19    | 20 (5 * 4)
 *
 * Note that padded_num_vertices is a multiple of four (presumably because
 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
 * at least one more than num_vertices, which seems like a quirk of the
 * hardware. For larger num_vertices, the hardware uses the following
 * algorithm: using the binary representation of num_vertices, we look at the
 * most significant set bit as well as the following 3 bits. Let n be the
 * number of bits after those 4 bits. Then we set padded_num_vertices according
 * to the following table:
 *
 * high bits | padded_num_vertices
 *      1000 | 9 * 2^n
 *      1001 | 5 * 2^(n+1)
 *      101x | 3 * 2^(n+2)
 *      110x | 7 * 2^(n+1)
 *      111x | 2^(n+4)
 *
 * For example, if num_vertices = 70 is passed to glDraw(), its binary
 * representation is 1000110, so n = 3 and the high bits are 1000, and
 * therefore padded_num_vertices = 9 * 2^3 = 72.
 *
 * The attribute unit works in terms of the original linear_id. If
 * num_instances = 1, then they are the same, and everything is simple.
 * However, with instancing things get more complicated. There are four
 * possible modes, two of which we can group together:
 *
 * 1. Use the linear_id directly. Only used when there is no instancing.
 *
 * 2. Use the linear_id modulo a constant. This is used for per-vertex
 * attributes with instancing enabled by making the constant equal
 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
 * The shift field specifies the power of two, while the extra_flags field
 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
 * shift = 3. Note that we must exactly follow the hardware algorithm used to
 * get padded_num_vertices in order to correctly implement per-vertex
 * attributes.
 *
 * 3. Divide the linear_id by a constant. In order to correctly implement
 * instance divisors, we have to divide linear_id by padded_num_vertices times
 * the user-specified divisor. So first we compute padded_num_vertices, again
 * following the exact same algorithm that the hardware uses, then multiply it
 * by the GL-level divisor to get the hardware-level divisor. This case is
 * further divided into two more cases. If the hardware-level divisor is a
 * power of two, then we just need to shift. The shift amount is specified by
 * the shift field, so that the hardware-level divisor is just 2^shift.
 *
 * If it isn't a power of two, then we have to divide by an arbitrary integer.
 * For that, we use the well-known technique of multiplying by an approximation
 * of the inverse. The driver must compute the magic multiplier and shift
 * amount, and then the hardware does the multiplication and shift. The
 * hardware and driver also use the "round-down" optimization as described in
 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
 * presumably this simplifies the hardware multiplier a little. The hardware
 * first multiplies linear_id by the multiplier and takes the high 32 bits,
 * then applies the round-down correction if extra_flags = 1, then finally
 * shifts right by the shift field.
 *
 * There are some differences between ridiculousfish's algorithm and the Mali
 * hardware algorithm, which means that the reference code from ridiculousfish
 * doesn't always produce the right constants. Mali does not use the pre-shift
 * optimization, since that would make a hardware implementation slower (it
 * would have to always do the pre-shift, multiply, and post-shift operations).
 * It also forces the multiplier to be at least 2^31, which means that the
 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
 * given the divisor d, the algorithm the driver must follow is (a code sketch
 * of these steps follows after this comment):
 *
 * 1. Set shift = floor(log2(d)).
 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
 * magic_divisor = m - 1 and extra_flags = 1.
 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
 */
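
/* The helpers below are an illustrative sketch of the algorithms described in
 * the comment above, not a verified driver implementation. The function names
 * are hypothetical, GCC/Clang __builtin_clz/__builtin_ctz are assumed, and the
 * cutoff of 20 separating the small-count table from the general bit-pattern
 * rule is inferred from the table rather than documented.
 */

static inline u32
pan_example_padded_vertex_count(u32 num_vertices)
{
        if (num_vertices < 20) {
                /* Small counts: the smallest multiple of 4 strictly greater
                 * than num_vertices, matching the first table above */
                return (num_vertices + 4) & ~3u;
        }

        /* n = number of bits below the top 4 bits of num_vertices */
        unsigned n = (31 - __builtin_clz(num_vertices)) - 3;

        switch (num_vertices >> n) {          /* the top 4 bits */
        case 0x8:           /* 1000 */ return 9u << n;
        case 0x9:           /* 1001 */ return 5u << (n + 1);
        case 0xA: case 0xB: /* 101x */ return 3u << (n + 2);
        case 0xC: case 0xD: /* 110x */ return 7u << (n + 1);
        default:            /* 111x */ return 1u << (n + 4);
        }
}

/* Mode 2: recover (shift, extra_flags) from padded_num_vertices, which equals
 * (2 * extra_flags + 1) << shift. For num_vertices = 70, padded is 72 = 9 *
 * 2^3, giving shift = 3 and extra_flags = 4 as in the example above. */
static inline void
pan_example_modulus_fields(u32 padded, unsigned *shift, unsigned *extra_flags)
{
        *shift = __builtin_ctz(padded);
        *extra_flags = (padded >> *shift) >> 1;
}

/* Mode 3, non-power-of-two case: compute the magic multiplier following steps
 * 1-4 above. The divisor d must not be a power of two (that case uses the
 * plain shift mode instead). The returned multiplier still has its high bit
 * set; the driver would write it with that bit cleared, since the hardware
 * implies it. */
static inline u32
pan_example_magic_divisor(u32 d, unsigned *shift, unsigned *extra_flags)
{
        /* 1. shift = floor(log2(d)) */
        unsigned s = 31 - __builtin_clz(d);

        /* 2. m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d */
        u64 big = 1ull << (s + 32);
        u64 m = (big + d - 1) / d;
        u64 e = big % d;

        *shift = s;

        if (e <= (1ull << s)) {
                /* 3. Round-down correction needed */
                *extra_flags = 1;
                return (u32) (m - 1);
        }

        /* 4. No correction needed */
        *extra_flags = 0;
        return (u32) m;
}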

/* Purposeful off-by-one in width, height fields. For example, a (64, 64)
 * texture is stored as (63, 63) in these fields. This adjusts for that.
 * There's an identical pattern in the framebuffer descriptor. Even vertex
 * count fields work this way, hence the generic name -- integral fields that
 * are strictly positive generally need this adjustment. */

#define MALI_POSITIVE(dim) (dim - 1)

/* 8192x8192 */
#define MAX_MIP_LEVELS (13)

/* Cubemap bloats everything up */
#define MAX_CUBE_FACES (6)

/* For each pointer, there is an address and optionally also a stride */
#define MAX_ELEMENTS (2)

/* Used for lod encoding. Thanks @urjaman for pointing out these routines can
 * be cleaned up a lot. */

#define DECODE_FIXED_16(x) ((float) (x / 256.0))

static inline int16_t
FIXED_16(float x, bool allow_negative)
{
        /* Clamp inputs, accounting for float error */
        float max_lod = (32.0 - (1.0 / 512.0));
        float min_lod = allow_negative ? -max_lod : 0.0;

        x = ((x > max_lod) ? max_lod : ((x < min_lod) ? min_lod : x));

        return (int) (x * 256.0);
}
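
/* Illustrative round-trip of the 8.8 fixed-point LOD encoding above (values
 * computed from the definitions here, not taken from hardware):
 *
 *   FIXED_16(2.5f, false)   == 640 (0x0280)
 *   DECODE_FIXED_16(640)    == 2.5f
 *   FIXED_16(-1.0f, false)  == 0      (clamped, negatives disallowed)
 *   FIXED_16(-1.0f, true)   == -256   (negatives allowed)
 */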

#endif /* __PANFROST_JOB_H__ */