/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

/**
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 *      - For 32-bit float formats or >8-bit UNORM, 32-bit floats.
 *      - For other floats, 16-bit floats.
 *      - For 32-bit ints, 32-bit ints.
 *      - For 8-bit ints, 8-bit ints.
 *      - For other ints, 16-bit ints.
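 *
 *      For example, under these rules RGBA8 UNORM unpacks to 16-bit floats,
 *      RGB10_A2 UNORM to 32-bit floats, and RGBA16UI to 16-bit uints.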
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"

/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool large_norm = (desc->channel[c].size > 8);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large_norm ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                        large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                        large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}

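/* A format can be handled natively when the hardware offers a typed load or
 * typed unpack for it (categories 1 and 2 above), so no lowering is needed.
 * Stores are always lowered here, as is everything on hardware with broken
 * special loads; pure integer/float formats and R11G11B10 (which has an
 * unpack but no typed load) also take the software path. */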
static bool
pan_is_format_native(const struct util_format_description *desc, bool broken_ld_special, bool is_store)
{
        if (is_store || broken_ld_special)
                return false;

        if (util_format_is_pure_integer(desc->format) || util_format_is_float(desc->format))
                return false;

        /* Some formats are missing as typed but have unpacks */
        if (desc->format == PIPE_FORMAT_R11G11B10_FLOAT)
                return false;

        return true;
}

/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication. */

static nir_ssa_def *
pan_replicate(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}

/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *v4 = pan_replicate(b, v, num_components);

        nir_ssa_def *lo = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 0));
        nir_ssa_def *hi = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 2));

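        /* Replicate the two packed 32-bit words to fill the i32vec4 store */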
        return nir_vec4(b, lo, hi, lo, hi);
}

static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        return nir_pad_vec4(b, nir_vec(b, unpacked, num_components));
}

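/* When a render target is accessed in a raw layout (see raw_fmt_mask below),
 * components appear in the format's channel order rather than RGBA. These
 * helpers convert between the two: pan_pack_reorder applies the format
 * swizzle before a pack, and pan_unpack_reorder applies the inverse swizzle
 * after an unpack. */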
static nir_ssa_def *
pan_pack_reorder(nir_builder *b,
                 const struct util_format_description *desc,
                 nir_ssa_def *v)
{
        unsigned swizzle[4] = { 0, 1, 2, 3 };

        for (unsigned i = 0; i < v->num_components; i++) {
                if (desc->swizzle[i] <= PIPE_SWIZZLE_W)
                        swizzle[i] = desc->swizzle[i];
        }

        return nir_swizzle(b, v, swizzle, v->num_components);
}

static nir_ssa_def *
pan_unpack_reorder(nir_builder *b,
                   const struct util_format_description *desc,
                   nir_ssa_def *v)
{
        unsigned swizzle[4] = { 0, 1, 2, 3 };

        for (unsigned i = 0; i < v->num_components; i++) {
                if (desc->swizzle[i] <= PIPE_SWIZZLE_W)
                        swizzle[desc->swizzle[i]] = i;
        }

        return nir_swizzle(b, v, swizzle, v->num_components);
}

static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        return nir_vec4(b, v, v, v, v);
}

static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_replicate(b, v, num_components)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}

/* For <= 8-bits per channel, [U,S]NORM formats are packed like [U,S]NORM 8,
 * with zeroes spacing out each component as needed */

static nir_ssa_def *
pan_pack_norm(nir_builder *b, nir_ssa_def *v,
              unsigned x, unsigned y, unsigned z, unsigned w,
              bool is_signed)
{
        /* If a channel has N bits, 1.0 is encoded as 2^N - 1 for UNORMs and
         * 2^(N-1) - 1 for SNORMs */
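        /* e.g. a 5-bit UNORM channel scales by 31, and a 5-bit SNORM by 15 */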
        nir_ssa_def *scales =
                is_signed ?
                nir_imm_vec4_16(b,
                                (1 << (x - 1)) - 1, (1 << (y - 1)) - 1,
                                (1 << (z - 1)) - 1, (1 << (w - 1)) - 1) :
                nir_imm_vec4_16(b,
                                (1 << x) - 1, (1 << y) - 1,
                                (1 << z) - 1, (1 << w) - 1);

        /* If a channel has N bits, we pad out to the byte by (8 - N) bits */
        nir_ssa_def *shifts = nir_imm_ivec4(b, 8 - x, 8 - y, 8 - z, 8 - w);

        nir_ssa_def *clamped =
                is_signed ?
                nir_fsat_signed_mali(b, nir_pad_vec4(b, v)) :
                nir_fsat(b, nir_pad_vec4(b, v));

        nir_ssa_def *f = nir_fmul(b, clamped, scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}

static nir_ssa_def *
pan_pack_unorm(nir_builder *b, nir_ssa_def *v,
               unsigned x, unsigned y, unsigned z, unsigned w)
{
        return pan_pack_norm(b, v, x, y, z, w, false);
}

static nir_ssa_def *
pan_pack_snorm(nir_builder *b, nir_ssa_def *v,
               unsigned x, unsigned y, unsigned z, unsigned w)
{
        return pan_pack_norm(b, v, x, y, z, w, true);
}

/* RGB10_A2 is packed in the tilebuffer as the bottom 3 bytes being the top
 * 8-bits of RGB and the top byte being RGBA as 2-bits packed. As imirkin
 * pointed out, this means free conversion to RGBX8 */

static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b, v), scale)));

        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                 nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}

/* On the other hand, the pure int RGB10_A2 is identical to the spec */

static nir_ssa_def *
pan_pack_int_1010102(nir_builder *b, nir_ssa_def *v, bool is_signed)
{
        v = nir_u2u32(b, v);

        /* Clamp the values */
        if (is_signed) {
                v = nir_imin(b, v, nir_imm_ivec4(b, 511, 511, 511, 1));
                v = nir_imax(b, v, nir_imm_ivec4(b, -512, -512, -512, -2));
        } else {
                v = nir_umin(b, v, nir_imm_ivec4(b, 1023, 1023, 1023, 3));
        }

        v = nir_ishl(b, v, nir_imm_ivec4(b, 0, 10, 20, 30));
        v = nir_ior(b,
                    nir_ior(b, nir_channel(b, v, 0), nir_channel(b, v, 1)),
                    nir_ior(b, nir_channel(b, v, 2), nir_channel(b, v, 3)));

        return pan_replicate_4(b, v);
}

static nir_ssa_def *
pan_unpack_int_1010102(nir_builder *b, nir_ssa_def *packed, bool is_signed)
{
        nir_ssa_def *v = pan_replicate_4(b, nir_channel(b, packed, 0));

        /* Left shift all components so the sign bit is on the MSB, and
         * can be extended by ishr(). The ishl()+[u,i]shr() combination
         * sets all unused bits to 0 without requiring a mask.
         */
        v = nir_ishl(b, v, nir_imm_ivec4(b, 22, 12, 2, 0));

        if (is_signed)
                v = nir_ishr(b, v, nir_imm_ivec4(b, 22, 22, 22, 30));
        else
                v = nir_ushr(b, v, nir_imm_ivec4(b, 22, 22, 22, 30));

        return nir_i2i16(b, v);
}

/* NIR means we can *finally* catch a break */

static nir_ssa_def *
pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_format_pack_11f11f10f(b,
                                nir_f2f32(b, v)));
}

static nir_ssa_def *
pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
        nir_ssa_def *f16 = nir_f2fmp(b, f32);

        /* Extend to vec4 with alpha */
        nir_ssa_def *components[4] = {
                nir_channel(b, f16, 0),
                nir_channel(b, f16, 1),
                nir_channel(b, f16, 2),
                nir_imm_float16(b, 1.0)
        };

        return nir_vec(b, components, 4);
}

/* Wrapper around sRGB conversion */

static nir_ssa_def *
pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
{
        nir_ssa_def *rgb = nir_channels(b, linear, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *srgb = nir_f2fmp(b,
                        nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, srgb, 0),
                nir_channel(b, srgb, 1),
                nir_channel(b, srgb, 2),
                nir_channel(b, linear, 3),
        };

        return nir_vec(b, comp, 4);
}

/* Generic dispatches for un/pack regardless of format */

static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
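        /* Pure array formats (x8, x16, x32 per channel) are handled
         * generically by channel size; packed formats are special-cased
         * below */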
        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_R10G10B10A2_UINT:
        case PIPE_FORMAT_B10G10R10A2_UINT:
                return pan_unpack_int_1010102(b, packed, false);
        case PIPE_FORMAT_R10G10B10A2_SINT:
        case PIPE_FORMAT_B10G10R10A2_SINT:
                return pan_unpack_int_1010102(b, packed, true);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_unpack_r11g11b10(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_linear_to_srgb(b, unpacked);

        if (util_format_is_unorm8(desc))
                return pan_pack_unorm(b, unpacked, 8, 8, 8, 8);

        if (util_format_is_snorm8(desc->format))
                return pan_pack_snorm(b, unpacked, 8, 8, 8, 8);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ?
                                pan_replicate(b, unpacked, desc->nr_channels) :
                                pan_pack_pure_16(b, unpacked, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
        case PIPE_FORMAT_R4G4B4A4_UNORM:
                return pan_pack_unorm(b, unpacked, 4, 4, 4, 4);
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_pack_unorm(b, unpacked, 5, 6, 5, 1);
        case PIPE_FORMAT_R5G6B5_UNORM:
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm(b, unpacked, 5, 6, 5, 0);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
        case PIPE_FORMAT_B10G10R10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
        case PIPE_FORMAT_B10G10R10A2_UINT:
                return pan_pack_int_1010102(b, unpacked, false);
        case PIPE_FORMAT_R10G10B10A2_SINT:
        case PIPE_FORMAT_B10G10R10A2_SINT:
                return pan_pack_int_1010102(b, unpacked, true);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_pack_r11g11b10(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                bool reorder_comps)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked =
                nir_ssa_for_src(b, intr->src[1], intr->num_components);
        unpacked = nir_pad_vec4(b, unpacked);

        /* Re-order the components */
        if (reorder_comps)
                unpacked = pan_pack_reorder(b, desc, unpacked);

        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_store_raw_output_pan(b, packed);
}

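/* Pick the sample to read: a fixed sample index when the caller pins one
 * (blend shaders read sample 0 below), or the dynamic sample ID for
 * per-sample framebuffer fetch. */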
static nir_ssa_def *
pan_sample_id(nir_builder *b, int sample)
{
        return (sample >= 0) ? nir_imm_int(b, sample) : nir_load_sample_id(b);
}

static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                bool reorder_comps,
                unsigned base, int sample)
{
        nir_ssa_def *packed =
                nir_load_raw_output_pan(b, 4, 32, pan_sample_id(b, sample),
                                        .base = base);

        /* Convert the raw value */
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        /* Convert to the size of the load intrinsic.
         *
         * We can assume that the type will match with the framebuffer format:
         *
         * Page 170 of the PDF of the OpenGL ES 3.0.6 spec says:
         *
         * If [UNORM or SNORM, convert to fixed-point]; otherwise no type
         * conversion is applied. If the values written by the fragment shader
         * do not match the format(s) of the corresponding color buffer(s),
         * the result is undefined.
         */

        unsigned bits = nir_dest_bit_size(intr->dest);

        nir_alu_type src_type = nir_alu_type_get_base_type(
                        pan_unpacked_type_for_format(desc));

        unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);
        unpacked = nir_resize_vector(b, unpacked, intr->dest.ssa.num_components);

        /* Reorder the components */
        if (reorder_comps)
                unpacked = pan_unpack_reorder(b, desc, unpacked);

        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, unpacked, &intr->instr);
}

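/* Entry point, run over fragment (and blend) shaders whose render targets
 * need software conversion. A sketch of how a driver might invoke it, with
 * the surrounding variable names assumed rather than taken from a real call
 * site:
 *
 *    NIR_PASS(progress, nir, pan_lower_framebuffer, rt_formats,
 *             raw_fmt_mask, is_blend_shader, !has_typed_special_loads);
 *
 * Only color outputs (FRAG_RESULT_DATA0 and up) are rewritten; depth and
 * stencil outputs are left alone. */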
bool
pan_lower_framebuffer(nir_shader *shader, const enum pipe_format *rt_fmts,
                      uint8_t raw_fmt_mask, bool is_blend, bool broken_ld_special)
{
        if (shader->info.stage != MESA_SHADER_FRAGMENT)
                return false;

        bool progress = false;

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || (is_store && is_blend)))
                                        continue;

                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.mode != nir_var_shader_out)
                                        continue;

                                if (var->data.location < FRAG_RESULT_DATA0)
                                        continue;

                                unsigned base = var->data.driver_location;
                                unsigned rt = var->data.location - FRAG_RESULT_DATA0;

                                if (rt_fmts[rt] == PIPE_FORMAT_NONE)
                                        continue;

                                const struct util_format_description *desc =
                                   util_format_description(rt_fmts[rt]);

                                /* Don't lower */
                                if (pan_is_format_native(desc, broken_ld_special, is_store))
                                        continue;

                                /* EXT_shader_framebuffer_fetch requires
                                 * per-sample loads.
                                 * MSAA blend shaders are not yet handled, so
                                 * for now always load sample 0. */
                                int sample = is_blend ? 0 : -1;
                                bool reorder_comps = raw_fmt_mask & BITFIELD_BIT(rt);

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, reorder_comps);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, reorder_comps, base, sample);
                                }

                                nir_instr_remove(instr);

                                progress = true;
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }

        return progress;
}