1 /*
2 * Copyright © Microsoft Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "dxil_nir_lower_int_samplers.h"
25 #include "nir_builder.h"
26 #include "nir_builtin_builder.h"
27
28 static bool
lower_sample_to_txf_for_integer_tex_filter(const nir_instr * instr,UNUSED const void * _options)29 lower_sample_to_txf_for_integer_tex_filter(const nir_instr *instr,
30 UNUSED const void *_options)
31 {
32 if (instr->type != nir_instr_type_tex)
33 return false;
34
35 nir_tex_instr *tex = nir_instr_as_tex(instr);
36 if (tex->op != nir_texop_tex &&
37 tex->op != nir_texop_txb &&
38 tex->op != nir_texop_txl &&
39 tex->op != nir_texop_txd)
40 return false;
41
42 return (tex->dest_type & (nir_type_int | nir_type_uint));
43 }
44
/* Emit a nir_texop_lod query that returns the LOD the hardware would select
 * for this sampling instruction. Only the x channel of the query result is
 * meaningful in DirectX, so a scalar is returned. */
static nir_ssa_def *
dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
{
   nir_tex_instr *tql;

   /* Count the sources of 'tex' that the LOD query has to inherit
    * (coordinate plus texture/sampler bindings and offsets). */
   unsigned num_srcs = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_coord ||
          tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_sampler_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_sampler_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle ||
          tex->src[i].src_type == nir_tex_src_sampler_handle)
         num_srcs++;
   }

   tql = nir_tex_instr_create(b->shader, num_srcs);
   tql->op = nir_texop_lod;
   /* The LOD query only takes the non-array part of the coordinate. */
   unsigned coord_components = tex->coord_components;
   if (tex->is_array)
      --coord_components;

   tql->coord_components = coord_components;
   tql->sampler_dim = tex->sampler_dim;
   tql->is_shadow = tex->is_shadow;
   tql->is_new_style_shadow = tex->is_new_style_shadow;
   tql->texture_index = tex->texture_index;
   tql->sampler_index = tex->sampler_index;
   tql->dest_type = nir_type_float32;

   /* The coordinate needs special handling because we might have
    * to strip the array index. Don't clutter the code with an additional
    * check for is_array though, in the worst case we create an additional
    * move that the optimization will remove later again. */
   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   nir_ssa_def *ssa_src = nir_channels(b, tex->src[coord_index].src.ssa,
                                       (1 << coord_components) - 1);
   nir_src src = nir_src_for_ssa(ssa_src);
   nir_src_copy(&tql->src[0].src, &src);
   tql->src[0].src_type = nir_tex_src_coord;

   /* Copy the remaining texture/sampler sources after the coordinate. */
   unsigned idx = 1;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_sampler_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_sampler_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle ||
          tex->src[i].src_type == nir_tex_src_sampler_handle) {
         nir_src_copy(&tql->src[idx].src, &tex->src[i].src);
         tql->src[idx].src_type = tex->src[i].src_type;
         idx++;
      }
   }

   /* nir_texop_lod produces a vec2 (clamped and unclamped LOD). */
   nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32, NULL);
   nir_builder_instr_insert(b, &tql->instr);

   /* DirectX LOD only has a value in x channel */
   return nir_channel(b, &tql->dest.ssa, 0);
}
107
/* Result of applying a wrap mode to one coordinate component. */
typedef struct {
   nir_ssa_def *coords;           /* the wrapped coordinate (float, texel space) */
   nir_ssa_def *use_border_color; /* boolean: lookup must return the border color */
} wrap_result_t;

/* State gathered while lowering one sampling instruction. */
typedef struct {
   nir_ssa_def *lod;      /* selected mip level (int) */
   nir_ssa_def *size;     /* size of the selected level (float vector) */
   int ncoord_comp;       /* number of non-array coordinate components */
   wrap_result_t wrap[3]; /* per-component wrap results */
} wrap_lower_param_t;
119
120 static void
wrap_clamp_to_edge(nir_builder * b,wrap_result_t * wrap_params,nir_ssa_def * size)121 wrap_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
122 {
123 /* clamp(coord, 0, size - 1) */
124 wrap_params->coords = nir_fmin(b, nir_fsub(b, size, nir_imm_float(b, 1.0f)),
125 nir_fmax(b, wrap_params->coords, nir_imm_float(b, 0.0f)));
126 }
127
128 static void
wrap_repeat(nir_builder * b,wrap_result_t * wrap_params,nir_ssa_def * size)129 wrap_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
130 {
131 /* mod(coord, size)
132 * This instruction must be exact, otherwise certain sizes result in
133 * incorrect sampling */
134 wrap_params->coords = nir_fmod(b, wrap_params->coords, size);
135 nir_instr_as_alu(wrap_params->coords->parent_instr)->exact = true;
136 }
137
138 static nir_ssa_def *
mirror(nir_builder * b,nir_ssa_def * coord)139 mirror(nir_builder *b, nir_ssa_def *coord)
140 {
141 /* coord if >= 0, otherwise -(1 + coord) */
142 return nir_bcsel(b, nir_fge(b, coord, nir_imm_float(b, 0.0f)), coord,
143 nir_fneg(b, nir_fadd(b, nir_imm_float(b, 1.0f), coord)));
144 }
145
146 static void
wrap_mirror_repeat(nir_builder * b,wrap_result_t * wrap_params,nir_ssa_def * size)147 wrap_mirror_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
148 {
149 /* (size − 1) − mirror(mod(coord, 2 * size) − size) */
150 nir_ssa_def *coord_mod2size = nir_fmod(b, wrap_params->coords, nir_fmul(b, nir_imm_float(b, 2.0f), size));
151 nir_instr_as_alu(coord_mod2size->parent_instr)->exact = true;
152 nir_ssa_def *a = nir_fsub(b, coord_mod2size, size);
153 wrap_params->coords = nir_fsub(b, nir_fsub(b, size, nir_imm_float(b, 1.0f)), mirror(b, a));
154 }
155
156 static void
wrap_mirror_clamp_to_edge(nir_builder * b,wrap_result_t * wrap_params,nir_ssa_def * size)157 wrap_mirror_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
158 {
159 /* clamp(mirror(coord), 0, size - 1) */
160 wrap_params->coords = nir_fmin(b, nir_fsub(b, size, nir_imm_float(b, 1.0f)),
161 nir_fmax(b, mirror(b, wrap_params->coords), nir_imm_float(b, 0.0f)));
162 }
163
164 static void
wrap_clamp(nir_builder * b,wrap_result_t * wrap_params,nir_ssa_def * size)165 wrap_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
166 {
167 nir_ssa_def *is_low = nir_flt(b, wrap_params->coords, nir_imm_float(b, 0.0));
168 nir_ssa_def *is_high = nir_fge(b, wrap_params->coords, size);
169 wrap_params->use_border_color = nir_ior(b, is_low, is_high);
170 }
171
172 static void
wrap_mirror_clamp(nir_builder * b,wrap_result_t * wrap_params,nir_ssa_def * size)173 wrap_mirror_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
174 {
175 /* We have to take care of the boundaries */
176 nir_ssa_def *is_low = nir_flt(b, wrap_params->coords, nir_fmul(b, size, nir_imm_float(b, -1.0)));
177 nir_ssa_def *is_high = nir_flt(b, nir_fmul(b, size, nir_imm_float(b, 2.0)), wrap_params->coords);
178 wrap_params->use_border_color = nir_ior(b, is_low, is_high);
179
180 /* Within the boundaries this acts like mirror_repeat */
181 wrap_mirror_repeat(b, wrap_params, size);
182
183 }
184
185 static wrap_result_t
wrap_coords(nir_builder * b,nir_ssa_def * coords,enum pipe_tex_wrap wrap,nir_ssa_def * size)186 wrap_coords(nir_builder *b, nir_ssa_def *coords, enum pipe_tex_wrap wrap,
187 nir_ssa_def *size)
188 {
189 wrap_result_t result = {coords, nir_imm_false(b)};
190
191 switch (wrap) {
192 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
193 wrap_clamp_to_edge(b, &result, size);
194 break;
195 case PIPE_TEX_WRAP_REPEAT:
196 wrap_repeat(b, &result, size);
197 break;
198 case PIPE_TEX_WRAP_MIRROR_REPEAT:
199 wrap_mirror_repeat(b, &result, size);
200 break;
201 case PIPE_TEX_WRAP_MIRROR_CLAMP:
202 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
203 wrap_mirror_clamp_to_edge(b, &result, size);
204 break;
205 case PIPE_TEX_WRAP_CLAMP:
206 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
207 wrap_clamp(b, &result, size);
208 break;
209 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
210 wrap_mirror_clamp(b, &result, size);
211 break;
212 }
213 return result;
214 }
215
216 static nir_ssa_def *
load_bordercolor(nir_builder * b,nir_tex_instr * tex,dxil_wrap_sampler_state * active_state,const dxil_texture_swizzle_state * tex_swizzle)217 load_bordercolor(nir_builder *b, nir_tex_instr *tex, dxil_wrap_sampler_state *active_state,
218 const dxil_texture_swizzle_state *tex_swizzle)
219 {
220 nir_const_value const_value[4] = {{0}};
221 int ndest_comp = nir_dest_num_components(tex->dest);
222
223 unsigned swizzle[4] = {
224 tex_swizzle->swizzle_r,
225 tex_swizzle->swizzle_g,
226 tex_swizzle->swizzle_b,
227 tex_swizzle->swizzle_a
228 };
229
230 for (int i = 0; i < ndest_comp; ++i) {
231 switch (swizzle[i]) {
232 case PIPE_SWIZZLE_0:
233 const_value[i].f32 = 0;
234 break;
235 case PIPE_SWIZZLE_1:
236 const_value[i].i32 = 1;
237 break;
238 case PIPE_SWIZZLE_X:
239 case PIPE_SWIZZLE_Y:
240 case PIPE_SWIZZLE_Z:
241 case PIPE_SWIZZLE_W:
242 const_value[i].f32 = active_state->border_color[swizzle[i]];
243 break;
244 default:
245 unreachable("Unexpected swizzle value");
246 }
247 }
248
249 return nir_build_imm(b, ndest_comp, 32, const_value);
250 }
251
252 static nir_tex_instr *
create_txf_from_tex(nir_builder * b,nir_tex_instr * tex)253 create_txf_from_tex(nir_builder *b, nir_tex_instr *tex)
254 {
255 nir_tex_instr *txf;
256
257 unsigned num_srcs = 0;
258 for (unsigned i = 0; i < tex->num_srcs; i++) {
259 if (tex->src[i].src_type == nir_tex_src_texture_deref ||
260 tex->src[i].src_type == nir_tex_src_texture_offset ||
261 tex->src[i].src_type == nir_tex_src_texture_handle)
262 num_srcs++;
263 }
264
265 txf = nir_tex_instr_create(b->shader, num_srcs);
266 txf->op = nir_texop_txf;
267 txf->coord_components = tex->coord_components;
268 txf->sampler_dim = tex->sampler_dim;
269 txf->is_array = tex->is_array;
270 txf->is_shadow = tex->is_shadow;
271 txf->is_new_style_shadow = tex->is_new_style_shadow;
272 txf->texture_index = tex->texture_index;
273 txf->sampler_index = tex->sampler_index;
274 txf->dest_type = tex->dest_type;
275
276 unsigned idx = 0;
277 for (unsigned i = 0; i < tex->num_srcs; i++) {
278 if (tex->src[i].src_type == nir_tex_src_texture_deref ||
279 tex->src[i].src_type == nir_tex_src_texture_offset ||
280 tex->src[i].src_type == nir_tex_src_texture_handle) {
281 nir_src_copy(&txf->src[idx].src, &tex->src[i].src);
282 txf->src[idx].src_type = tex->src[i].src_type;
283 idx++;
284 }
285 }
286
287 nir_ssa_dest_init(&txf->instr, &txf->dest,
288 nir_tex_instr_dest_size(txf), 32, NULL);
289 nir_builder_instr_insert(b, &txf->instr);
290
291 return txf;
292 }
293
294 static nir_ssa_def *
load_texel(nir_builder * b,nir_tex_instr * tex,wrap_lower_param_t * params)295 load_texel(nir_builder *b, nir_tex_instr *tex, wrap_lower_param_t *params)
296 {
297 nir_ssa_def *texcoord = NULL;
298
299 /* Put coordinates back together */
300 switch (tex->coord_components) {
301 case 1:
302 texcoord = params->wrap[0].coords;
303 break;
304 case 2:
305 texcoord = nir_vec2(b, params->wrap[0].coords, params->wrap[1].coords);
306 break;
307 case 3:
308 texcoord = nir_vec3(b, params->wrap[0].coords, params->wrap[1].coords, params->wrap[2].coords);
309 break;
310 default:
311 ;
312 }
313
314 texcoord = nir_f2i32(b, texcoord);
315
316 nir_tex_instr *load = create_txf_from_tex(b, tex);
317 nir_tex_instr_add_src(load, nir_tex_src_lod, nir_src_for_ssa(params->lod));
318 nir_tex_instr_add_src(load, nir_tex_src_coord, nir_src_for_ssa(texcoord));
319 b->cursor = nir_after_instr(&load->instr);
320 return &load->dest.ssa;
321 }
322
/* Inputs for evaluating the active LOD of a texture lookup. */
typedef struct {
   dxil_wrap_sampler_state *aws; /* active wrap/sampler state */
   float max_bias;               /* implementation MAX_TEXTURE_LOD_BIAS */
   nir_ssa_def *size;            /* level-0 texture size (int vector) */
   int ncoord_comp;              /* number of non-array coordinate components */
} lod_params;
329
330 static nir_ssa_def *
evalute_active_lod(nir_builder * b,nir_tex_instr * tex,lod_params * params)331 evalute_active_lod(nir_builder *b, nir_tex_instr *tex, lod_params *params)
332 {
333 static nir_ssa_def *lod = NULL;
334
335 /* Later we use min_lod for clamping the LOD to a legal value */
336 float min_lod = MAX2(params->aws->min_lod, 0.0f);
337
338 /* Evaluate the LOD to be used for the texel fetch */
339 if (unlikely(tex->op == nir_texop_txl)) {
340 int lod_index = nir_tex_instr_src_index(tex, nir_tex_src_lod);
341 /* if we have an explicite LOD, take it */
342 lod = tex->src[lod_index].src.ssa;
343 } else if (unlikely(tex->op == nir_texop_txd)) {
344 int ddx_index = nir_tex_instr_src_index(tex, nir_tex_src_ddx);
345 int ddy_index = nir_tex_instr_src_index(tex, nir_tex_src_ddy);
346 assert(ddx_index >= 0 && ddy_index >= 0);
347
348 nir_ssa_def *grad = nir_fmax(b,
349 tex->src[ddx_index].src.ssa,
350 tex->src[ddy_index].src.ssa);
351
352 nir_ssa_def *r = nir_fmul(b, grad, nir_i2f32(b, params->size));
353 nir_ssa_def *rho = nir_channel(b, r, 0);
354 for (int i = 1; i < params->ncoord_comp; ++i)
355 rho = nir_fmax(b, rho, nir_channel(b, r, i));
356 lod = nir_flog2(b, rho);
357 } else if (b->shader->info.stage == MESA_SHADER_FRAGMENT){
358 lod = dx_get_texture_lod(b, tex);
359 } else {
360 /* Only fragment shaders provide the gradient information to evaluate a LOD,
361 * so force 0 otherwise */
362 lod = nir_imm_float(b, 0.0);
363 }
364
365 /* Evaluate bias according to OpenGL (4.6 (Compatibility Profile) October 22, 2019),
366 * sec. 8.14.1, eq. (8.9)
367 *
368 * lod' = lambda + CLAMP(bias_texobj + bias_texunit + bias_shader)
369 *
370 * bias_texobj is the value of TEXTURE_LOD_BIAS for the bound texture object. ...
371 * bias_textunt is the value of TEXTURE_LOD_BIAS for the current texture unit, ...
372 * bias shader is the value of the optional bias parameter in the texture
373 * lookup functions available to fragment shaders. ... The sum of these values
374 * is clamped to the range [−bias_max, bias_max] where bias_max is the value
375 * of the implementation defined constant MAX_TEXTURE_LOD_BIAS.
376 * In core contexts the value bias_texunit is dropped from above equation.
377 *
378 * Gallium provides the value lod_bias as the sum of bias_texobj and bias_texunit
379 * in compatibility contexts and as bias_texobj in core contexts, hence the
380 * implementation here is the same in both cases.
381 */
382 nir_ssa_def *lod_bias = nir_imm_float(b, params->aws->lod_bias);
383
384 if (unlikely(tex->op == nir_texop_txb)) {
385 int bias_index = nir_tex_instr_src_index(tex, nir_tex_src_bias);
386 lod_bias = nir_fadd(b, lod_bias, tex->src[bias_index].src.ssa);
387 }
388
389 lod = nir_fadd(b, lod, nir_fclamp(b, lod_bias,
390 nir_imm_float(b, -params->max_bias),
391 nir_imm_float(b, params->max_bias)));
392
393 /* Clamp lod according to ibid. eq. (8.10) */
394 lod = nir_fmax(b, lod, nir_imm_float(b, min_lod));
395
396 /* If the max lod is > max_bias = log2(max_texture_size), the lod will be clamped
397 * by the number of levels, no need to clamp it againt the max_lod first. */
398 if (params->aws->max_lod <= params->max_bias)
399 lod = nir_fmin(b, lod, nir_imm_float(b, params->aws->max_lod));
400
401 /* Pick nearest LOD */
402 lod = nir_f2i32(b, nir_fround_even(b, lod));
403
404 /* cap actual lod by number of available levels */
405 return nir_imin(b, lod, nir_imm_int(b, params->aws->last_level));
406 }
407
/* Options passed through nir_shader_lower_instructions to the callbacks. */
typedef struct {
   dxil_wrap_sampler_state *wrap_states;     /* per-sampler wrap state, indexed by sampler_index */
   dxil_texture_swizzle_state *tex_swizzles; /* optional per-sampler swizzles; may be NULL */
   float max_bias;                           /* implementation MAX_TEXTURE_LOD_BIAS */
} sampler_states;
413
414
415 static nir_ssa_def *
lower_sample_to_txf_for_integer_tex_impl(nir_builder * b,nir_instr * instr,void * options)416 lower_sample_to_txf_for_integer_tex_impl(nir_builder *b, nir_instr *instr,
417 void *options)
418 {
419 sampler_states *states = (sampler_states *)options;
420 wrap_lower_param_t params = {0};
421
422 nir_tex_instr *tex = nir_instr_as_tex(instr);
423 dxil_wrap_sampler_state *active_wrap_state = &states->wrap_states[tex->sampler_index];
424
425 b->cursor = nir_before_instr(instr);
426
427 int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
428 nir_ssa_def *old_coord = tex->src[coord_index].src.ssa;
429 params.ncoord_comp = tex->coord_components;
430 if (tex->is_array)
431 params.ncoord_comp -= 1;
432
433 /* This helper to get the texture size always uses LOD 0, and DirectX doesn't support
434 * giving another LOD when querying the texture size */
435 nir_ssa_def *size0 = nir_get_texture_size(b, tex);
436
437 params.lod = nir_imm_int(b, 0);
438
439 if (active_wrap_state->last_level > 0) {
440 lod_params p = {
441 .aws = active_wrap_state,
442 .max_bias = states->max_bias,
443 .size = size0,
444 .ncoord_comp = params.ncoord_comp
445 };
446 params.lod = evalute_active_lod(b, tex, &p);
447
448 /* Evaluate actual level size*/
449 params.size = nir_i2f32(b, nir_imax(b, nir_ishr(b, size0, params.lod),
450 nir_imm_int(b, 1)));
451 } else {
452 params.size = nir_i2f32(b, size0);
453 }
454
455 nir_ssa_def *new_coord = old_coord;
456 if (!active_wrap_state->is_nonnormalized_coords) {
457 /* Evaluate the integer lookup coordinates for the requested LOD, don't touch the
458 * array index */
459 if (!tex->is_array) {
460 new_coord = nir_fmul(b, params.size, old_coord);
461 } else {
462 nir_ssa_def *array_index = nir_channel(b, old_coord, params.ncoord_comp);
463 int mask = (1 << params.ncoord_comp) - 1;
464 nir_ssa_def *coord = nir_fmul(b, nir_channels(b, params.size, mask),
465 nir_channels(b, old_coord, mask));
466 switch (params.ncoord_comp) {
467 case 1:
468 new_coord = nir_vec2(b, coord, array_index);
469 break;
470 case 2:
471 new_coord = nir_vec3(b, nir_channel(b, coord, 0),
472 nir_channel(b, coord, 1),
473 array_index);
474 break;
475 default:
476 unreachable("unsupported number of non-array coordinates");
477 }
478 }
479 }
480
481 nir_ssa_def *coord_help[3];
482 for (int i = 0; i < params.ncoord_comp; ++i)
483 coord_help[i] = nir_ffloor(b, nir_channel(b, new_coord, i));
484
485 // Note: array index needs to be rounded to nearest before clamp rather than floored
486 if (tex->is_array)
487 coord_help[params.ncoord_comp] = nir_fround_even(b, nir_channel(b, new_coord, params.ncoord_comp));
488
489 /* Correct the texture coordinates for the offsets. */
490 int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
491 if (offset_index >= 0) {
492 nir_ssa_def *offset = tex->src[offset_index].src.ssa;
493 for (int i = 0; i < params.ncoord_comp; ++i)
494 coord_help[i] = nir_fadd(b, coord_help[i], nir_i2f32(b, nir_channel(b, offset, i)));
495 }
496
497 nir_ssa_def *use_border_color = nir_imm_false(b);
498
499 if (!active_wrap_state->skip_boundary_conditions) {
500
501 for (int i = 0; i < params.ncoord_comp; ++i) {
502 params.wrap[i] = wrap_coords(b, coord_help[i], active_wrap_state->wrap[i], nir_channel(b, params.size, i));
503 use_border_color = nir_ior(b, use_border_color, params.wrap[i].use_border_color);
504 }
505
506 if (tex->is_array)
507 params.wrap[params.ncoord_comp] =
508 wrap_coords(b, coord_help[params.ncoord_comp],
509 PIPE_TEX_WRAP_CLAMP_TO_EDGE,
510 nir_i2f32(b, nir_channel(b, size0, params.ncoord_comp)));
511 } else {
512 /* When we emulate a cube map by using a texture array, the coordinates are always
513 * in range, and we don't have to take care of boundary conditions */
514 for (unsigned i = 0; i < 3; ++i) {
515 params.wrap[i].coords = coord_help[i];
516 params.wrap[i].use_border_color = nir_imm_false(b);
517 }
518 }
519
520 const dxil_texture_swizzle_state one2one = {
521 PIPE_SWIZZLE_X, PIPE_SWIZZLE_Y, PIPE_SWIZZLE_Z, PIPE_SWIZZLE_W
522 };
523
524 nir_if *border_if = nir_push_if(b, use_border_color);
525 const dxil_texture_swizzle_state *swizzle = states->tex_swizzles ?
526 &states->tex_swizzles[tex->sampler_index]:
527 &one2one;
528
529 nir_ssa_def *border_color = load_bordercolor(b, tex, active_wrap_state, swizzle);
530 nir_if *border_else = nir_push_else(b, border_if);
531 nir_ssa_def *sampler_color = load_texel(b, tex, ¶ms);
532 nir_pop_if(b, border_else);
533
534 return nir_if_phi(b, border_color, sampler_color);
535 }
536
/* Sampling from integer textures is not allowed in DirectX, so we have
 * to use texel fetches. For this we have to scale the coordinates
 * to be integer based, evaluate the LOD the texel fetch has to be
 * applied on, and take care of the boundary conditions.
 */
542 bool
dxil_lower_sample_to_txf_for_integer_tex(nir_shader * s,dxil_wrap_sampler_state * wrap_states,dxil_texture_swizzle_state * tex_swizzles,float max_bias)543 dxil_lower_sample_to_txf_for_integer_tex(nir_shader *s,
544 dxil_wrap_sampler_state *wrap_states,
545 dxil_texture_swizzle_state *tex_swizzles,
546 float max_bias)
547 {
548 sampler_states states = {wrap_states, tex_swizzles, max_bias};
549
550 bool result =
551 nir_shader_lower_instructions(s,
552 lower_sample_to_txf_for_integer_tex_filter,
553 lower_sample_to_txf_for_integer_tex_impl,
554 &states);
555 return result;
556 }
557