/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

/**
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *      - For 32-bit float formats or >8-bit UNORM, 32-bit floats.
 *      - For other floats, 16-bit floats.
 *      - For 32-bit ints, 32-bit ints.
 *      - For 8-bit ints, 8-bit ints.
 *      - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly.
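 *
 * For example, under these rules RGBA8 UNORM unpacks to 16-bit floats,
 * RGB10_A2 UNORM (a >8-bit UNORM format) unpacks to 32-bit floats, and
 * RGBA8UI stays as 8-bit unsigned integers.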
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"

/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool large_norm = (desc->channel[c].size > 8);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large_norm ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                        large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                        large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}

static enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Pure integers can be loaded via EXT_framebuffer_fetch and should be
         * handled as a raw load with a size conversion (it's cheap). Likewise,
         * since float framebuffers are internally implemented as raw (i.e.
         * integer) framebuffers with blend shaders to go back and forth, they
         * should be s/w as well */

        if (util_format_is_pure_integer(desc->format) || util_format_is_float(desc->format))
                return PAN_FORMAT_SOFTWARE;

        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}

static enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}

/* Convenience method */

static enum pan_format_class
pan_format_class(const struct util_format_description *desc, unsigned quirks, bool is_store)
{
        if (is_store)
                return pan_format_class_store(desc, quirks);
        else
                return pan_format_class_load(desc, quirks);
}

/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication. */

static nir_ssa_def *
pan_replicate(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}

/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */
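
/* A worked example (derived from pan_pack_pure_16 below): for a two-component
 * RG16UI pixel (r, g), pan_replicate yields (r, g, r, g); both the lower and
 * upper pair pack to the same 32-bit word (g << 16) | r, which then fills all
 * four components of the result. */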

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *v4 = pan_replicate(b, v, num_components);

        nir_ssa_def *lo = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 0));
        nir_ssa_def *hi = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 2));

        return nir_vec4(b, lo, hi, lo, hi);
}

static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        return nir_pad_vec4(b, nir_vec(b, unpacked, num_components));
}

static nir_ssa_def *
pan_pack_reorder(nir_builder *b,
                 const struct util_format_description *desc,
                 nir_ssa_def *v)
{
        unsigned swizzle[4] = { 0, 1, 2, 3 };

        for (unsigned i = 0; i < v->num_components; i++) {
                if (desc->swizzle[i] <= PIPE_SWIZZLE_W)
                        swizzle[i] = desc->swizzle[i];
        }

        return nir_swizzle(b, v, swizzle, v->num_components);
}

static nir_ssa_def *
pan_unpack_reorder(nir_builder *b,
                   const struct util_format_description *desc,
                   nir_ssa_def *v)
{
        unsigned swizzle[4] = { 0, 1, 2, 3 };

        for (unsigned i = 0; i < v->num_components; i++) {
                if (desc->swizzle[i] <= PIPE_SWIZZLE_W)
                        swizzle[desc->swizzle[i]] = i;
        }

        return nir_swizzle(b, v, swizzle, v->num_components);
}

static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        return nir_vec4(b, v, v, v, v);
}

static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_replicate(b, v, num_components)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}

/* For <= 8-bits per channel, [U,S]NORM formats are packed like [U,S]NORM 8,
 * with zeroes spacing out each component as needed */
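
/* A worked example (derived from pan_pack_norm below): for RGBA4 UNORM,
 * x = y = z = w = 4, so the scales are (15, 15, 15, 15) and the shifts are
 * (4, 4, 4, 4); each channel is rounded to a 4-bit value and shifted into the
 * top nibble of its byte before the four bytes are packed into one 32-bit
 * word. */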

static nir_ssa_def *
pan_pack_norm(nir_builder *b, nir_ssa_def *v,
              unsigned x, unsigned y, unsigned z, unsigned w,
              bool is_signed)
{
        /* If a channel has N bits, 1.0 is encoded as 2^N - 1 for UNORMs and
         * 2^(N-1) - 1 for SNORMs */
        nir_ssa_def *scales =
                is_signed ?
                nir_imm_vec4_16(b,
                                (1 << (x - 1)) - 1, (1 << (y - 1)) - 1,
                                (1 << (z - 1)) - 1, (1 << (w - 1)) - 1) :
                nir_imm_vec4_16(b,
                                (1 << x) - 1, (1 << y) - 1,
                                (1 << z) - 1, (1 << w) - 1);

        /* If a channel has N bits, we pad out to the byte by (8 - N) bits */
        nir_ssa_def *shifts = nir_imm_ivec4(b, 8 - x, 8 - y, 8 - z, 8 - w);

        nir_ssa_def *clamped =
                is_signed ?
                nir_fsat_signed_mali(b, nir_pad_vec4(b, v)) :
                nir_fsat(b, nir_pad_vec4(b, v));

        nir_ssa_def *f = nir_fmul(b, clamped, scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}

static nir_ssa_def *
pan_pack_unorm(nir_builder *b, nir_ssa_def *v,
               unsigned x, unsigned y, unsigned z, unsigned w)
{
        return pan_pack_norm(b, v, x, y, z, w, false);
}

static nir_ssa_def *
pan_pack_snorm(nir_builder *b, nir_ssa_def *v,
               unsigned x, unsigned y, unsigned z, unsigned w)
{
        return pan_pack_norm(b, v, x, y, z, w, true);
}

/* RGB10_A2 is packed in the tilebuffer as the bottom 3 bytes being the top
 * 8-bits of RGB and the top byte being RGBA as 2-bits packed. As imirkin
 * pointed out, this means free conversion to RGBX8 */
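
/* Concretely (as implemented by pan_pack_unorm_1010102 below): bytes 0-2 of
 * the packed word hold the top 8 bits of R, G and B, while byte 3 holds the
 * low 2 bits of R, G and B in bit pairs [25:24], [27:26] and [29:28], with
 * the 2-bit alpha in bits [31:30]. */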

static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b, v), scale)));

        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                 nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}

/* On the other hand, the pure int RGB10_A2 is identical to the spec */

static nir_ssa_def *
pan_pack_int_1010102(nir_builder *b, nir_ssa_def *v, bool is_signed)
{
        v = nir_u2u32(b, v);

        /* Clamp the values */
        if (is_signed) {
                v = nir_imin(b, v, nir_imm_ivec4(b, 511, 511, 511, 1));
                v = nir_imax(b, v, nir_imm_ivec4(b, -512, -512, -512, -2));
        } else {
                v = nir_umin(b, v, nir_imm_ivec4(b, 1023, 1023, 1023, 3));
        }

        v = nir_ishl(b, v, nir_imm_ivec4(b, 0, 10, 20, 30));
        v = nir_ior(b,
                    nir_ior(b, nir_channel(b, v, 0), nir_channel(b, v, 1)),
                    nir_ior(b, nir_channel(b, v, 2), nir_channel(b, v, 3)));

        return pan_replicate_4(b, v);
}

static nir_ssa_def *
pan_unpack_int_1010102(nir_builder *b, nir_ssa_def *packed, bool is_signed)
{
        nir_ssa_def *v = pan_replicate_4(b, nir_channel(b, packed, 0));

        /* Left shift all components so the sign bit is on the MSB, and
         * can be extended by ishr(). The ishl()+[u,i]shr() combination
         * sets all unused bits to 0 without requiring a mask.
         */
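        /* For example (following the packed layout used by pan_pack_int_1010102
         * above): the 10-bit R field occupies bits [9:0] of the packed word, so
         * shifting it left by 22 moves it to bits [31:22], and the shift right
         * by 22 brings it back to [9:0] with sign or zero extension. */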
        v = nir_ishl(b, v, nir_imm_ivec4(b, 22, 12, 2, 0));

        if (is_signed)
                v = nir_ishr(b, v, nir_imm_ivec4(b, 22, 22, 22, 30));
        else
                v = nir_ushr(b, v, nir_imm_ivec4(b, 22, 22, 22, 30));

        return nir_i2i16(b, v);
}

/* NIR means we can *finally* catch a break */

static nir_ssa_def *
pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_format_pack_11f11f10f(b,
                                nir_f2f32(b, v)));
}

static nir_ssa_def *
pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
        nir_ssa_def *f16 = nir_f2fmp(b, f32);

        /* Extend to vec4 with alpha */
        nir_ssa_def *components[4] = {
                nir_channel(b, f16, 0),
                nir_channel(b, f16, 1),
                nir_channel(b, f16, 2),
                nir_imm_float16(b, 1.0)
        };

        return nir_vec(b, components, 4);
}

/* Wrapper around sRGB conversion */

static nir_ssa_def *
pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
{
        nir_ssa_def *rgb = nir_channels(b, linear, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *srgb = nir_f2fmp(b,
                        nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, srgb, 0),
                nir_channel(b, srgb, 1),
                nir_channel(b, srgb, 2),
                nir_channel(b, linear, 3),
        };

        return nir_vec(b, comp, 4);
}

/* Generic dispatches for un/pack regardless of format */

static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_R10G10B10A2_UINT:
        case PIPE_FORMAT_B10G10R10A2_UINT:
                return pan_unpack_int_1010102(b, packed, false);
        case PIPE_FORMAT_R10G10B10A2_SINT:
        case PIPE_FORMAT_B10G10R10A2_SINT:
                return pan_unpack_int_1010102(b, packed, true);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_unpack_r11g11b10(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_linear_to_srgb(b, unpacked);

        if (util_format_is_unorm8(desc))
                return pan_pack_unorm(b, unpacked, 8, 8, 8, 8);

        if (util_format_is_snorm8(desc->format))
                return pan_pack_snorm(b, unpacked, 8, 8, 8, 8);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ?
                                pan_replicate(b, unpacked, desc->nr_channels) :
                                pan_pack_pure_16(b, unpacked, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
        case PIPE_FORMAT_R4G4B4A4_UNORM:
                return pan_pack_unorm(b, unpacked, 4, 4, 4, 4);
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_pack_unorm(b, unpacked, 5, 6, 5, 1);
        case PIPE_FORMAT_R5G6B5_UNORM:
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm(b, unpacked, 5, 6, 5, 0);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
        case PIPE_FORMAT_B10G10R10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
        case PIPE_FORMAT_B10G10R10A2_UINT:
                return pan_pack_int_1010102(b, unpacked, false);
        case PIPE_FORMAT_R10G10B10A2_SINT:
        case PIPE_FORMAT_B10G10R10A2_SINT:
                return pan_pack_int_1010102(b, unpacked, true);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_pack_r11g11b10(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                bool reorder_comps,
                unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);

        /* Re-order the components */
        if (reorder_comps)
                unpacked = pan_pack_reorder(b, desc, unpacked);

        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_store_raw_output_pan(b, packed);
}

static nir_ssa_def *
pan_sample_id(nir_builder *b, int sample)
{
        return (sample >= 0) ? nir_imm_int(b, sample) : nir_load_sample_id(b);
}

static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                bool reorder_comps,
                unsigned base, int sample, unsigned quirks)
{
        nir_ssa_def *packed =
                nir_load_raw_output_pan(b, 4, 32, pan_sample_id(b, sample),
                                        .base = base);

        /* Convert the raw value */
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        /* Convert to the size of the load intrinsic.
         *
         * We can assume that the type will match with the framebuffer format:
         *
         * Page 170 of the PDF of the OpenGL ES 3.0.6 spec says:
         *
         * If [UNORM or SNORM, convert to fixed-point]; otherwise no type
         * conversion is applied. If the values written by the fragment shader
         * do not match the format(s) of the corresponding color buffer(s),
         * the result is undefined.
         */

        unsigned bits = nir_dest_bit_size(intr->dest);

        nir_alu_type src_type = nir_alu_type_get_base_type(
                        pan_unpacked_type_for_format(desc));

        unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);
        unpacked = nir_pad_vector(b, unpacked, nir_dest_num_components(intr->dest));

        /* Reorder the components */
        if (reorder_comps)
                unpacked = pan_unpack_reorder(b, desc, unpacked);

        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, unpacked, &intr->instr);
}

bool
pan_lower_framebuffer(nir_shader *shader, const enum pipe_format *rt_fmts,
                      uint8_t raw_fmt_mask, bool is_blend, unsigned quirks)
{
        if (shader->info.stage != MESA_SHADER_FRAGMENT)
                return false;

        bool progress = false;

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || (is_store && is_blend)))
                                        continue;

                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.mode != nir_var_shader_out)
                                        continue;

                                if (var->data.location < FRAG_RESULT_DATA0)
                                        continue;

                                unsigned base = var->data.driver_location;
                                unsigned rt = var->data.location - FRAG_RESULT_DATA0;

                                if (rt_fmts[rt] == PIPE_FORMAT_NONE)
                                        continue;

                                const struct util_format_description *desc =
                                   util_format_description(rt_fmts[rt]);

                                enum pan_format_class fmt_class =
                                        pan_format_class(desc, quirks, is_store);

                                /* Don't lower */
                                if (fmt_class == PAN_FORMAT_NATIVE)
                                        continue;

                                /* EXT_shader_framebuffer_fetch requires
                                 * per-sample loads.
                                 * MSAA blend shaders are not yet handled, so
                                 * for now always load sample 0. */
                                int sample = is_blend ? 0 : -1;
                                bool reorder_comps = raw_fmt_mask & BITFIELD_BIT(rt);

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, reorder_comps, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, reorder_comps, base, sample, quirks);
                                }

                                nir_instr_remove(instr);

                                progress = true;
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }

        return progress;
}