/*
 * Copyright © Microsoft Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_format_convert.h"

#include "pipe/p_state.h"
#include "util/format/u_format.h"

#include "d3d12_compiler.h"
#include "d3d12_nir_passes.h"

static nir_ssa_def *
convert_value(nir_builder *b, nir_ssa_def *value,
              const struct util_format_description *from_desc,
              const struct util_format_description *to_desc)
{
   if (from_desc->format == to_desc->format)
      return value;

   assert(value->num_components == 4);
   /* No support for 16- or 64-bit data in the shader for image loads/stores */
   assert(value->bit_size == 32);
   /* The overall format sizes must match */
   assert(from_desc->block.bits == to_desc->block.bits);
   assert(from_desc->nr_channels <= 4 && to_desc->nr_channels <= 4);

   const unsigned rgba1010102_bits[] = { 10, 10, 10, 2 };

   /* First, construct a "tightly packed" vector of the input values. For unorm/snorm,
    * convert from the float we're given back into the original bits (this only happens
    * when storing). For packed formats that don't fall on a nice bit size, convert/pack
    * them into 32-bit values. Otherwise, just produce a vecNx4 where N is the source
    * bit size.
    */
   nir_ssa_def *src_as_vec;
   if (from_desc->format == PIPE_FORMAT_R10G10B10A2_UINT ||
       from_desc->format == PIPE_FORMAT_R10G10B10A2_UNORM) {
      if (from_desc->format == PIPE_FORMAT_R10G10B10A2_UNORM)
         value = nir_format_float_to_unorm(b, value, rgba1010102_bits);
      nir_ssa_def *channels[4];
      for (unsigned i = 0; i < 4; ++i)
         channels[i] = nir_channel(b, value, i);

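      /* OR each channel into its field: r in bits 0-9, g in 10-19, b in 20-29,
       * a in 30-31.
       */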
      src_as_vec = channels[0];
      src_as_vec = nir_mask_shift_or(b, src_as_vec, channels[1], (1 << 10) - 1, 10);
      src_as_vec = nir_mask_shift_or(b, src_as_vec, channels[2], (1 << 10) - 1, 20);
      src_as_vec = nir_mask_shift_or(b, src_as_vec, channels[3], (1 << 2) - 1, 30);
   } else if (from_desc->format == PIPE_FORMAT_R11G11B10_FLOAT) {
      src_as_vec = nir_format_pack_11f11f10f(b, value);
   } else if (from_desc->is_unorm) {
      if (from_desc->channel[0].size == 8)
         src_as_vec = nir_pack_unorm_4x8(b, value);
      else {
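         /* 16-bit unorm: pack xy into one 32-bit word and zw into a second. */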
         nir_ssa_def *packed_channels[2];
         packed_channels[0] = nir_pack_unorm_2x16(b, nir_channels(b, value, 0x3));
         packed_channels[1] = nir_pack_unorm_2x16(b, nir_channels(b, value, 0x3 << 2));
         src_as_vec = nir_vec(b, packed_channels, 2);
      }
   } else if (from_desc->is_snorm) {
      if (from_desc->channel[0].size == 8)
         src_as_vec = nir_pack_snorm_4x8(b, value);
      else {
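         /* As above, but signed: pack xy and zw into two 32-bit words. */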
         nir_ssa_def *packed_channels[2];
         packed_channels[0] = nir_pack_snorm_2x16(b, nir_channels(b, value, 0x3));
         packed_channels[1] = nir_pack_snorm_2x16(b, nir_channels(b, value, 0x3 << 2));
         src_as_vec = nir_vec(b, packed_channels, 2);
      }
   } else if (util_format_is_float(from_desc->format)) {
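      /* Not a packed special case: a plain vecNx4 narrowing is enough. */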
      src_as_vec = nir_f2fN(b, value, from_desc->channel[0].size);
   } else if (util_format_is_pure_sint(from_desc->format)) {
      src_as_vec = nir_i2iN(b, value, from_desc->channel[0].size);
   } else {
      src_as_vec = nir_u2uN(b, value, from_desc->channel[0].size);
   }

   /* Now that we have the tightly packed bits, we can use nir_extract_bits to get them
    * into a vector of differently-sized components. For producing packed formats, get a
    * 32-bit value and extract the bits manually. For unorm/snorm, get one or two 32-bit
    * values and extract the channels using helpers. Otherwise, get a format-sized dest
    * vector and use a cast to expand it back to 32-bit.
    *
    * Pay extra attention to the case where a missing alpha channel must be filled in
    * with 1.
    */
   if (to_desc->format == PIPE_FORMAT_R10G10B10A2_UINT ||
       to_desc->format == PIPE_FORMAT_R10G10B10A2_UNORM) {
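      /* Grab the packed dword and split it back into its 10/10/10/2 fields. */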
      nir_ssa_def *u32 = nir_extract_bits(b, &src_as_vec, 1, 0, 1, 32);
      nir_ssa_def *channels[4] = {
         nir_iand(b, u32, nir_imm_int(b, (1 << 10) - 1)),
         nir_iand(b, nir_ushr(b, u32, nir_imm_int(b, 10)), nir_imm_int(b, (1 << 10) - 1)),
         nir_iand(b, nir_ushr(b, u32, nir_imm_int(b, 20)), nir_imm_int(b, (1 << 10) - 1)),
         nir_ushr(b, u32, nir_imm_int(b, 30))
      };
      nir_ssa_def *vec = nir_vec(b, channels, 4);
      if (to_desc->format == PIPE_FORMAT_R10G10B10A2_UNORM)
         vec = nir_format_unorm_to_float(b, vec, rgba1010102_bits);
      return vec;
   } else if (to_desc->format == PIPE_FORMAT_R11G11B10_FLOAT) {
      nir_ssa_def *u32 = nir_extract_bits(b, &src_as_vec, 1, 0, 1, 32);
      nir_ssa_def *vec3 = nir_format_unpack_11f11f10f(b, u32);
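      /* The unpack gives rgb only; expand to a vec4 with alpha = 1. */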
      return nir_vec4(b, nir_channel(b, vec3, 0),
                      nir_channel(b, vec3, 1),
                      nir_channel(b, vec3, 2),
                      nir_imm_float(b, 1.0f));
   } else if (to_desc->is_unorm || to_desc->is_snorm) {
      nir_ssa_def *dest_packed = nir_extract_bits(b, &src_as_vec, 1, 0,
         DIV_ROUND_UP(to_desc->nr_channels * to_desc->channel[0].size, 32), 32);
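      /* dest_packed now holds the raw bits as one or two 32-bit words. */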
      if (to_desc->is_unorm) {
         if (to_desc->channel[0].size == 8) {
            nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, dest_packed, 0));
            if (to_desc->nr_channels < 4)
               unpacked = nir_vector_insert_imm(b, unpacked, nir_imm_float(b, 1.0f), 3);
            return unpacked;
         }
         nir_ssa_def *vec2s[2] = {
            nir_unpack_unorm_2x16(b, nir_channel(b, dest_packed, 0)),
            to_desc->nr_channels > 2 ?
               nir_unpack_unorm_2x16(b, nir_channel(b, dest_packed, 1)) :
               nir_vec2(b, nir_imm_float(b, 0.0f), nir_imm_float(b, 1.0f))
         };
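         /* For a single-channel format, the top half of the word holds bits
          * from other source channels; force y to 0.
          */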
         if (to_desc->nr_channels == 1)
            vec2s[0] = nir_vector_insert_imm(b, vec2s[0], nir_imm_float(b, 0.0f), 1);
         return nir_vec4(b, nir_channel(b, vec2s[0], 0),
                         nir_channel(b, vec2s[0], 1),
                         nir_channel(b, vec2s[1], 0),
                         nir_channel(b, vec2s[1], 1));
      } else {
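         /* Snorm: identical structure to the unorm path, with signed unpacking. */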
         if (to_desc->channel[0].size == 8) {
            nir_ssa_def *unpacked = nir_unpack_snorm_4x8(b, nir_channel(b, dest_packed, 0));
            if (to_desc->nr_channels < 4)
               unpacked = nir_vector_insert_imm(b, unpacked, nir_imm_float(b, 1.0f), 3);
            return unpacked;
         }
         nir_ssa_def *vec2s[2] = {
            nir_unpack_snorm_2x16(b, nir_channel(b, dest_packed, 0)),
            to_desc->nr_channels > 2 ?
               nir_unpack_snorm_2x16(b, nir_channel(b, dest_packed, 1)) :
               nir_vec2(b, nir_imm_float(b, 0.0f), nir_imm_float(b, 1.0f))
         };
         if (to_desc->nr_channels == 1)
            vec2s[0] = nir_vector_insert_imm(b, vec2s[0], nir_imm_float(b, 0.0f), 1);
         return nir_vec4(b, nir_channel(b, vec2s[0], 0),
                         nir_channel(b, vec2s[0], 1),
                         nir_channel(b, vec2s[1], 0),
                         nir_channel(b, vec2s[1], 1));
      }
   } else {
      nir_ssa_def *dest_packed = nir_extract_bits(b, &src_as_vec, 1, 0,
         to_desc->nr_channels, to_desc->channel[0].size);
      nir_ssa_def *final_channels[4];
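      /* Fill in channels the dest format lacks: 0 for rgb, 1 for alpha. */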
      for (unsigned i = 0; i < 4; ++i) {
         if (i >= dest_packed->num_components)
            final_channels[i] = util_format_is_float(to_desc->format) ?
               nir_imm_floatN_t(b, i == 3 ? 1.0f : 0.0f, to_desc->channel[0].size) :
               nir_imm_intN_t(b, i == 3 ? 1 : 0, to_desc->channel[0].size);
         else
            final_channels[i] = nir_channel(b, dest_packed, i);
      }
      nir_ssa_def *final_vec = nir_vec(b, final_channels, 4);
      if (util_format_is_float(to_desc->format))
         return nir_f2f32(b, final_vec);
      else if (util_format_is_pure_sint(to_desc->format))
         return nir_i2i32(b, final_vec);
      else
         return nir_u2u32(b, final_vec);
   }
}

static bool
lower_image_cast_instr(nir_builder *b, nir_instr *instr, void *_data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic != nir_intrinsic_image_deref_load &&
       intr->intrinsic != nir_intrinsic_image_deref_store)
      return false;

   const struct d3d12_image_format_conversion_info *info = _data;
   nir_variable *image = nir_deref_instr_get_variable(nir_src_as_deref(intr->src[0]));
   assert(image);

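   /* The conversion table is indexed by the image's driver_location. */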
   enum pipe_format emulation_format = info[image->data.driver_location].emulated_format;
   if (emulation_format == PIPE_FORMAT_NONE)
      return false;

   enum pipe_format real_format = info[image->data.driver_location].view_format;
   assert(real_format != emulation_format);

   nir_ssa_def *value;
   const struct util_format_description *from_desc, *to_desc;
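   /* Loads produce data in the emulated format and must convert it to the
    * app-visible format after the intrinsic; stores must convert the app's
    * data into the emulated format before it.
    */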
   if (intr->intrinsic == nir_intrinsic_image_deref_load) {
      b->cursor = nir_after_instr(instr);
      value = &intr->dest.ssa;
      from_desc = util_format_description(emulation_format);
      to_desc = util_format_description(real_format);
   } else {
      b->cursor = nir_before_instr(instr);
      value = intr->src[3].ssa;
      from_desc = util_format_description(real_format);
      to_desc = util_format_description(emulation_format);
   }

   nir_ssa_def *new_value = convert_value(b, value, from_desc, to_desc);

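   /* The intrinsic's declared src/dest type must now match the emulated
    * format's base type.
    */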
   nir_alu_type alu_type = util_format_is_pure_uint(emulation_format) ?
      nir_type_uint : (util_format_is_pure_sint(emulation_format) ?
         nir_type_int : nir_type_float);

   if (intr->intrinsic == nir_intrinsic_image_deref_load) {
      nir_ssa_def_rewrite_uses_after(value, new_value, new_value->parent_instr);
      nir_intrinsic_set_dest_type(intr, alu_type);
   } else {
      nir_instr_rewrite_src_ssa(instr, &intr->src[3], new_value);
      nir_intrinsic_set_src_type(intr, alu_type);
   }
   nir_intrinsic_set_format(intr, emulation_format);
   return true;
}

/* Given a shader that does image loads/stores expecting the format embedded in the
 * intrinsic, for each image whose entry in info has an emulated format other than
 * PIPE_FORMAT_NONE, replace the image's format with the emulated one and convert the
 * data being loaded/stored to/from the app's expected format.
 */
bool
d3d12_lower_image_casts(nir_shader *s, struct d3d12_image_format_conversion_info *info)
{
   bool progress = nir_shader_instructions_pass(s, lower_image_cast_instr,
      nir_metadata_block_index | nir_metadata_dominance, info);

   if (progress) {
      nir_foreach_image_variable(var, s) {
         if (info[var->data.driver_location].emulated_format != PIPE_FORMAT_NONE) {
            var->data.image.format = info[var->data.driver_location].emulated_format;
         }
      }
   }

   return progress;
}
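
/* A minimal usage sketch (hypothetical caller, for illustration only): a
 * driver that needs to emulate an R11G11B10_FLOAT image view with R32_UINT
 * could fill out the conversion table like this before compiling:
 *
 *    struct d3d12_image_format_conversion_info info[PIPE_MAX_SHADER_IMAGES] = { 0 };
 *    info[0].view_format = PIPE_FORMAT_R11G11B10_FLOAT;
 *    info[0].emulated_format = PIPE_FORMAT_R32_UINT;
 *    d3d12_lower_image_casts(shader_nir, info);
 *
 * Entries left as PIPE_FORMAT_NONE are skipped, so images that don't need
 * emulation can share the same table.
 */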