/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

/**
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *      - For 32-bit float formats, 32-bit floats.
 *      - For other floats, 16-bit floats.
 *      - For 32-bit ints, 32-bit ints.
 *      - For 8-bit ints, 8-bit ints.
 *      - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"

/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                        large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                        large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}

enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Pure integers can be loaded via EXT_framebuffer_fetch and should be
         * handled as a raw load with a size conversion (it's cheap). Likewise,
         * since float framebuffers are internally implemented as raw (i.e.
         * integer) framebuffers with blend shaders to go back and forth, they
         * should be s/w as well */

        if (util_format_is_pure_integer(desc->format) || util_format_is_float(desc->format))
                return PAN_FORMAT_SOFTWARE;

        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}

enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}

/* Convenience method */

static enum pan_format_class
pan_format_class(const struct util_format_description *desc, unsigned quirks, bool is_store)
{
        if (is_store)
                return pan_format_class_store(desc, quirks);
        else
                return pan_format_class_load(desc, quirks);
}

/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication.
 *
 * Pure 32 formats (R32F ... RGBA32F) are 32 unpacked, so just need to
 * replicate to fill */

static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % v->num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}

/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % v->num_components),
                        nir_channel(b, v, (c + 1) % v->num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}

/* And likewise for x8. pan_fill_4 fills a 4-channel vector with an n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */

static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % v->num_components);

        return nir_vec(b, q, 4);
}

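/* pan_extend zero-extends an n-channel vector to N channels (N <= 4),
 * padding the added channels with integer zero */
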
static nir_ssa_def *
pan_extend(nir_builder *b, nir_ssa_def *v, unsigned N)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);
        assert(N <= 4);

        for (unsigned j = 0; j < v->num_components; ++j)
                q[j] = nir_channel(b, v, j);

        for (unsigned j = v->num_components; j < N; ++j)
                q[j] = nir_imm_int(b, 0);

        return nir_vec(b, q, N);
}

static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}

/* UNORM 8 is unpacked to f16 vec4. We could directly use the un/pack_unorm_4x8
 * ops provided we replicate appropriately, but for packing we'd rather stay in
 * 8/16-bit whereas the NIR op forces 32-bit, so we do it manually */

static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                        pan_fill_4(b, v)), nir_imm_float16(b, 255.0))))));
}

static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2fmp(b, unpacked);
}

/* UNORM 4 is also unpacked to f16, which prevents us from using the shared
 * unpack which strongly assumes fp32. However, on the tilebuffer it is
 * actually packed as:
 *
 *      [AAAA] [0000] [BBBB] [0000] [GGGG] [0000] [RRRR] [0000]
 *
 * In other words, each component is spaced out to its own byte, with the
 * value stored in the top nibble. So pack as:
 *
 *      pack_32_4x8(f2u8_rte(v * 15.0) << 4)
 */

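/* Shared helpers for the small unorm formats (4444, 5551, 565): each channel
 * is saturated, scaled, rounded and shifted so its bits sit at the top of its
 * own byte, then the bytes are packed into a replicated 32-bit word. The
 * per-channel scales and shifts are supplied by the wrappers below; unpacking
 * reverses the process */
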
static nir_ssa_def *
pan_pack_unorm_small(nir_builder *b, nir_ssa_def *v,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *f = nir_fmul(b, nir_fsat(b, pan_fill_4(b, v)), scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}

static nir_ssa_def *
pan_unpack_unorm_small(nir_builder *b, nir_ssa_def *pack,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *channels = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        nir_ssa_def *raw = nir_ushr(b, nir_i2imp(b, channels), shifts);
        return nir_fmul(b, nir_u2f16(b, raw), scales);
}

static nir_ssa_def *
pan_pack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 15.0, 15.0, 15.0, 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}

static nir_ssa_def *
pan_unpack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0),
                        nir_imm_ivec4(b, 4, 4, 4, 4));
}

/* UNORM RGB5_A1 and RGB565 are similar */

static nir_ssa_def *
pan_pack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 31.0, 31.0, 31.0, 1.0),
                        nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_unpack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 31.0, 1.0 / 31.0, 1.0),
                        nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_pack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 31.0, 63.0, 31.0, 0.0),
                        nir_imm_ivec4(b, 3, 2, 3, 0));
}

static nir_ssa_def *
pan_unpack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                        nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 63.0, 1.0 / 31.0, 0.0),
                        nir_imm_ivec4(b, 3, 2, 3, 0));
}

/* RGB10_A2 is packed in the tilebuffer with the bottom 3 bytes holding the
 * top 8 bits of R/G/B and the top byte holding the low 2 bits of each of
 * R/G/B/A. As imirkin pointed out, this means free conversion to RGBX8 */

static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4_16(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_f2f32(b, nir_fmul(b, nir_fsat(b, v), scale))));

        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                 nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_unorm_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *p = nir_channel(b, packed, 0);
        nir_ssa_def *bytes = nir_unpack_32_4x8(b, p);
        nir_ssa_def *ubytes = nir_i2imp(b, bytes);

        nir_ssa_def *shifts = nir_ushr(b, pan_replicate_4(b, nir_channel(b, ubytes, 3)),
                        nir_imm_ivec4(b, 0, 2, 4, 6));
        nir_ssa_def *precision = nir_iand(b, shifts,
                        nir_i2imp(b, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3)));

        nir_ssa_def *top_rgb = nir_ishl(b, nir_channels(b, ubytes, 0x7), nir_imm_int(b, 2));
        top_rgb = nir_ior(b, nir_channels(b, precision, 0x7), top_rgb);

        nir_ssa_def *chans [4] = {
                nir_channel(b, top_rgb, 0),
                nir_channel(b, top_rgb, 1),
                nir_channel(b, top_rgb, 2),
                nir_channel(b, precision, 3)
        };

        nir_ssa_def *scale = nir_imm_vec4(b, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 3.0);
        return nir_f2fmp(b, nir_fmul(b, nir_u2f32(b, nir_vec(b, chans, 4)), scale));
}

/* On the other hand, the pure int RGB10_A2 is identical to the spec */

static nir_ssa_def *
pan_pack_uint_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *shift = nir_ishl(b, nir_u2u32(b, v),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *p = nir_ior(b,
                        nir_ior(b, nir_channel(b, shift, 0), nir_channel(b, shift, 1)),
                        nir_ior(b, nir_channel(b, shift, 2), nir_channel(b, shift, 3)));

        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_uint_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *chan = nir_channel(b, packed, 0);

        nir_ssa_def *shift = nir_ushr(b, pan_replicate_4(b, chan),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *mask = nir_iand(b, shift,
                        nir_imm_ivec4(b, 0x3ff, 0x3ff, 0x3ff, 0x3));

        return nir_i2imp(b, mask);
}

/* NIR means we can *finally* catch a break */

static nir_ssa_def *
pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_format_pack_11f11f10f(b,
                                nir_f2f32(b, v)));
}

static nir_ssa_def *
pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
        nir_ssa_def *f16 = nir_f2fmp(b, f32);

        /* Extend to vec4 with alpha */
        nir_ssa_def *components[4] = {
                nir_channel(b, f16, 0),
                nir_channel(b, f16, 1),
                nir_channel(b, f16, 2),
                nir_imm_float16(b, 1.0)
        };

        return nir_vec(b, components, 4);
}

/* Wrapper around sRGB conversion */

static nir_ssa_def *
pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
{
        nir_ssa_def *rgb = nir_channels(b, linear, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *srgb = nir_f2fmp(b,
                        nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, srgb, 0),
                nir_channel(b, srgb, 1),
                nir_channel(b, srgb, 2),
                nir_channel(b, linear, 3),
        };

        return nir_vec(b, comp, 4);
}

static nir_ssa_def *
pan_srgb_to_linear(nir_builder *b, nir_ssa_def *srgb)
{
        nir_ssa_def *rgb = nir_channels(b, srgb, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *linear = nir_f2fmp(b,
                        nir_format_srgb_to_linear(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, linear, 0),
                nir_channel(b, linear, 1),
                nir_channel(b, linear, 2),
                nir_channel(b, srgb, 3),
        };

        return nir_vec(b, comp, 4);
}

/* Generic dispatches for un/pack regardless of format */

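/* Identifies 4-bit unorm formats, which take the packed-nibble un/pack path
 * above */
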
static bool
pan_is_unorm4(const struct util_format_description *desc)
{
        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
        case PIPE_FORMAT_R4G4B4A4_UNORM:
                return true;
        default:
                return false;
        }
}

static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (pan_is_unorm4(desc))
                return pan_unpack_unorm_4(b, packed);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_unpack_unorm_5551(b, packed);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_unpack_unorm_565(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_unpack_unorm_1010102(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_unpack_uint_1010102(b, packed);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_unpack_r11g11b10(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_linear_to_srgb(b, unpacked);

        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (pan_is_unorm4(desc))
                return pan_pack_unorm_4(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked) :
                                pan_pack_pure_16(b, unpacked);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_pack_unorm_5551(b, unpacked);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm_565(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_pack_uint_1010102(b, unpacked);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_pack_r11g11b10(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

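/* Lowers a blend shader store: packs the value for the framebuffer format and
 * emits a store_raw_output_pan of the packed vec4 */
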
static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_intrinsic_instr *new =
                nir_intrinsic_instr_create(shader, nir_intrinsic_store_raw_output_pan);
        new->src[0] = nir_src_for_ssa(packed);
        new->num_components = 4;
        nir_builder_instr_insert(b, &new->instr);
}

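/* Selects the sample to read back: a fixed sample index for blend shaders, or
 * the current sample ID for per-sample framebuffer fetch */
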
static nir_ssa_def *
pan_sample_id(nir_builder *b, int sample)
{
        return (sample >= 0) ? nir_imm_int(b, sample) : nir_load_sample_id(b);
}

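/* Lowers a framebuffer fetch: emits a raw load, unpacks it in software,
 * decodes sRGB if needed, and converts to the bit size and component count
 * expected by the original load intrinsic */
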
static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned base, int sample, unsigned quirks)
{
        nir_intrinsic_instr *new = nir_intrinsic_instr_create(shader,
                       nir_intrinsic_load_raw_output_pan);
        new->num_components = 4;
        new->src[0] = nir_src_for_ssa(pan_sample_id(b, sample));

        nir_intrinsic_set_base(new, base);

        nir_ssa_dest_init(&new->instr, &new->dest, 4, 32, NULL);
        nir_builder_instr_insert(b, &new->instr);

        /* Convert the raw value */
        nir_ssa_def *packed = &new->dest.ssa;
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_srgb_to_linear(b, unpacked);

        /* Convert to the size of the load intrinsic.
         *
         * We can assume that the type will match with the framebuffer format:
         *
         * Page 170 of the PDF of the OpenGL ES 3.0.6 spec says:
         *
         * If [UNORM or SNORM, convert to fixed-point]; otherwise no type
         * conversion is applied. If the values written by the fragment shader
         * do not match the format(s) of the corresponding color buffer(s),
         * the result is undefined.
         */

        unsigned bits = nir_dest_bit_size(intr->dest);

        nir_alu_type src_type;
        if (desc->channel[0].pure_integer) {
                if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)
                        src_type = nir_type_int;
                else
                        src_type = nir_type_uint;
        } else {
                src_type = nir_type_float;
        }

        unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);
        unpacked = pan_extend(b, unpacked, nir_dest_num_components(intr->dest));

        nir_src rewritten = nir_src_for_ssa(unpacked);
        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, rewritten, &intr->instr);
}

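/* Pass entry point: walks the fragment shader for render target accesses
 * (loads, plus stores when compiling a blend shader) whose formats need
 * software conversion, and replaces them with raw tilebuffer access plus the
 * un/pack code above. Returns true if anything was lowered. */
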
bool
pan_lower_framebuffer(nir_shader *shader, const enum pipe_format *rt_fmts,
                      bool is_blend, unsigned quirks)
{
        if (shader->info.stage != MESA_SHADER_FRAGMENT)
                return false;

        bool progress = false;

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || (is_store && is_blend)))
                                        continue;

                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.mode != nir_var_shader_out)
                                        continue;

                                unsigned base = var->data.driver_location;

                                unsigned rt;
                                if (var->data.location == FRAG_RESULT_COLOR)
                                        rt = 0;
                                else if (var->data.location >= FRAG_RESULT_DATA0)
                                        rt = var->data.location - FRAG_RESULT_DATA0;
                                else
                                        continue;

                                if (rt_fmts[rt] == PIPE_FORMAT_NONE)
                                        continue;

                                const struct util_format_description *desc =
                                   util_format_description(rt_fmts[rt]);

                                enum pan_format_class fmt_class =
                                        pan_format_class(desc, quirks, is_store);

                                /* Don't lower */
                                if (fmt_class == PAN_FORMAT_NATIVE)
                                        continue;

                                /* EXT_shader_framebuffer_fetch requires
                                 * per-sample loads.
                                 * MSAA blend shaders are not yet handled, so
                                 * for now always load sample 0. */
                                int sample = is_blend ? 0 : -1;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, base, sample, quirks);
                                }

                                nir_instr_remove(instr);

                                progress = true;
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }

        return progress;
}