/*
 * Copyright © Microsoft Corporation
 * Copyright © 2022 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nir_builder.h"
#include "nir_builtin_builder.h"

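/*
 * zink_lower_cubemap_to_array: lowers cube-map texturing to 2D-array
 * texturing for the samplers selected by the caller's bitmask (going by the
 * parameter name, samplers that need non-seamless cube behavior).  The cube
 * direction vector becomes a face index plus normalized 2D coordinates, the
 * sampler variable types are rewritten to 2D arrays, and tg4/txs get special
 * handling below.
 */

/* Rewrite a cube sampler type into the equivalent 2D-array sampler type;
 * non-cube types are returned unchanged. */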
static const struct glsl_type *
make_2darray_sampler_from_cubemap(const struct glsl_type *type)
{
   return  glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE ?
            glsl_sampler_type(
               GLSL_SAMPLER_DIM_2D,
               false, true,
               glsl_get_sampler_result_type(type)) : type;
}

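/* Same as above, but also handles arrays of cube samplers by rebuilding the
 * array type around the rewritten element type. */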
static const struct glsl_type *
make_2darray_from_cubemap_with_array(const struct glsl_type *type)
{
   if (glsl_type_is_array(type)) {
      const struct glsl_type *new_type = glsl_without_array(type);
      return new_type != type ? glsl_array_type(make_2darray_from_cubemap_with_array(glsl_without_array(type)),
                                                glsl_get_length(type), 0) : type;
   }
   return make_2darray_sampler_from_cubemap(type);
}

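/* Instruction filter for nir_shader_lower_instructions(): only cube texture
 * ops whose sampler index is set in the caller-provided bitmask are lowered,
 * and only the sampling/query ops handled further down. */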
static bool
lower_cubemap_to_array_filter(const nir_instr *instr, const void *mask)
{
   const uint32_t *nonseamless_cube_mask = mask;
   if (instr->type == nir_instr_type_tex) {
      nir_tex_instr *tex = nir_instr_as_tex(instr);

      if (tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE)
         return false;

      switch (tex->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_txd:
      case nir_texop_txl:
      case nir_texop_txs:
      case nir_texop_lod:
      case nir_texop_tg4:
         break;
      default:
         return false;
      }
      return (BITFIELD_BIT(tex->sampler_index) & (*nonseamless_cube_mask)) != 0;
   }

   return false;
}

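/* Cube direction vector broken out into its components (rx/ry/rz), their
 * absolute values (arx/ary/arz), and the pre-scaled array index for cube
 * arrays (NULL for non-array lookups). */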
typedef struct {
   nir_ssa_def *rx;
   nir_ssa_def *ry;
   nir_ssa_def *rz;
   nir_ssa_def *arx;
   nir_ssa_def *ary;
   nir_ssa_def *arz;
   nir_ssa_def *array;
} coord_t;


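/* The three helpers below handle one major axis each: they compute the face
 * index (+X/-X = 0/1, +Y/-Y = 2/3, +Z/-Z = 4/5) and project the remaining
 * two components into [0, 1] face coordinates as 0.5 * (+-c / |major|) + 0.5,
 * folding the pre-scaled array slice into the face index for cube arrays. */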
/* This is taken from sp_tex_sample:convert_cube */
static nir_ssa_def *
evaluate_face_x(nir_builder *b, coord_t *coord)
{
   nir_ssa_def *sign = nir_fsign(b, coord->rx);
   nir_ssa_def *positive = nir_fge(b, coord->rx, nir_imm_float(b, 0.0));
   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arx);

   nir_ssa_def *x = nir_fadd(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), nir_imm_float(b, 0.5));
   nir_ssa_def *y = nir_fadd(b, nir_fmul(b, ima, coord->ry), nir_imm_float(b, 0.5));
   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 0.0), nir_imm_float(b, 1.0));

   if (coord->array)
      face = nir_fadd(b, face, coord->array);

   return nir_vec3(b, x, y, face);
}

static nir_ssa_def *
evaluate_face_y(nir_builder *b, coord_t *coord)
{
   nir_ssa_def *sign = nir_fsign(b, coord->ry);
   nir_ssa_def *positive = nir_fge(b, coord->ry, nir_imm_float(b, 0.0));
   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, 0.5), coord->ary);

   nir_ssa_def *x = nir_fadd(b, nir_fmul(b, ima, coord->rx), nir_imm_float(b, 0.5));
   nir_ssa_def *y = nir_fadd(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), nir_imm_float(b, 0.5));
   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 2.0), nir_imm_float(b, 3.0));

   if (coord->array)
      face = nir_fadd(b, face, coord->array);

   return nir_vec3(b, x, y, face);
}

static nir_ssa_def *
evaluate_face_z(nir_builder *b, coord_t *coord)
{
   nir_ssa_def *sign = nir_fsign(b, coord->rz);
   nir_ssa_def *positive = nir_fge(b, coord->rz, nir_imm_float(b, 0.0));
   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arz);

   nir_ssa_def *x = nir_fadd(b, nir_fmul(b, nir_fmul(b, sign, ima), nir_fneg(b, coord->rx)), nir_imm_float(b, 0.5));
   nir_ssa_def *y = nir_fadd(b, nir_fmul(b, ima, coord->ry), nir_imm_float(b, 0.5));
   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 4.0), nir_imm_float(b, 5.0));

   if (coord->array)
      face = nir_fadd(b, face, coord->array);

   return nir_vec3(b, x, y, face);
}

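/* Clone the cube tex instruction as a 2D-array instruction with the given
 * op, replacing the coordinate source with the computed (x, y, face) vector
 * and trimming any source that is wider than the new op expects.  The
 * comparator source is dropped when emitting txf, since txf takes no
 * comparator. */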
static nir_ssa_def *
create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coord, nir_texop op)
{
   nir_tex_instr *array_tex;

   unsigned num_srcs = tex->num_srcs;
   if (op == nir_texop_txf && nir_tex_instr_src_index(tex, nir_tex_src_comparator) != -1)
      num_srcs--;
   array_tex = nir_tex_instr_create(b->shader, num_srcs);
   array_tex->op = op;
   array_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   array_tex->is_array = true;
   array_tex->is_shadow = tex->is_shadow;
   array_tex->is_sparse = tex->is_sparse;
   array_tex->is_new_style_shadow = tex->is_new_style_shadow;
   array_tex->texture_index = tex->texture_index;
   array_tex->sampler_index = tex->sampler_index;
   array_tex->dest_type = tex->dest_type;
   array_tex->coord_components = 3;

   nir_src coord_src = nir_src_for_ssa(coord);
   unsigned s = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (op == nir_texop_txf && tex->src[i].src_type == nir_tex_src_comparator)
         continue;
      nir_src *psrc = (tex->src[i].src_type == nir_tex_src_coord) ?
                         &coord_src : &tex->src[i].src;

      array_tex->src[s].src_type = tex->src[i].src_type;
      if (psrc->ssa->num_components != nir_tex_instr_src_size(array_tex, s)) {
         nir_ssa_def *c = nir_channels(b, psrc->ssa, BITFIELD_MASK(nir_tex_instr_src_size(array_tex, s)));
         array_tex->src[s].src = nir_src_for_ssa(c);
      } else
         nir_src_copy(&array_tex->src[s].src, psrc);
      s++;
   }

   nir_ssa_dest_init(&array_tex->instr, &array_tex->dest,
                     nir_tex_instr_dest_size(array_tex), nir_dest_bit_size(tex->dest), NULL);
   nir_builder_instr_insert(b, &array_tex->instr);
   return &array_tex->dest.ssa;
}

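/* Given integer texel coordinates and a face index, remap a texel that fell
 * off the face in exactly one direction onto the neighboring face, using
 * per-direction lookup tables (one entry per source face), so that gathers
 * emulated with txf pick up texels from the adjacent face at cube edges.
 * Texels that fall off in both directions (corners) are left untouched. */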
static nir_ssa_def *
handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *face, nir_ssa_def *array_slice_cube_base, nir_ssa_def *tex_size)
{
   enum cube_remap
   {
      cube_remap_zero = 0,
      cube_remap_x,
      cube_remap_y,
      cube_remap_tex_size,
      cube_remap_tex_size_minus_x,
      cube_remap_tex_size_minus_y,

      cube_remap_size,
   };

   struct cube_remap_table
   {
      enum cube_remap remap_x;
      enum cube_remap remap_y;
      uint32_t        remap_face;
   };

   static const struct cube_remap_table cube_remap_neg_x[6] =
   {
       {cube_remap_tex_size,         cube_remap_y,         4},
       {cube_remap_tex_size,         cube_remap_y,         5},
       {cube_remap_y,                cube_remap_zero,      1},
       {cube_remap_tex_size_minus_y, cube_remap_tex_size,  1},
       {cube_remap_tex_size,         cube_remap_y,         1},
       {cube_remap_tex_size,         cube_remap_y,         0},
   };

   static const struct cube_remap_table cube_remap_pos_x[6] =
   {
       {cube_remap_zero,             cube_remap_y,         5},
       {cube_remap_zero,             cube_remap_y,         4},
       {cube_remap_tex_size_minus_y, cube_remap_zero,      0},
       {cube_remap_y,                cube_remap_tex_size,  0},
       {cube_remap_zero,             cube_remap_y,         0},
       {cube_remap_zero,             cube_remap_y,         1},
   };

   static const struct cube_remap_table cube_remap_neg_y[6] =
   {
       {cube_remap_tex_size,         cube_remap_tex_size_minus_x, 2},
       {cube_remap_zero,             cube_remap_x,                2},
       {cube_remap_tex_size_minus_x, cube_remap_zero,             5},
       {cube_remap_x,                cube_remap_tex_size,         4},
       {cube_remap_x,                cube_remap_tex_size,         2},
       {cube_remap_tex_size_minus_x, cube_remap_zero,             2},
   };

   static const struct cube_remap_table cube_remap_pos_y[6] =
   {
       {cube_remap_tex_size,         cube_remap_x,                   3},
       {cube_remap_zero,             cube_remap_tex_size_minus_x,    3},
       {cube_remap_x,                cube_remap_zero,                4},
       {cube_remap_tex_size_minus_x, cube_remap_tex_size,            5},
       {cube_remap_x,                cube_remap_zero,                3},
       {cube_remap_tex_size_minus_x, cube_remap_tex_size,            3},
   };

   static const struct cube_remap_table* remap_tables[4] = {
      cube_remap_neg_x,
      cube_remap_pos_x,
      cube_remap_neg_y,
      cube_remap_pos_y
   };

   nir_ssa_def *zero = nir_imm_int(b, 0);

   /* Doesn't matter since the texture is square */
   tex_size = nir_channel(b, tex_size, 0);

   nir_ssa_def *x_on = nir_iand(b, nir_ige(b, x, zero), nir_ige(b, tex_size, x));
   nir_ssa_def *y_on = nir_iand(b, nir_ige(b, y, zero), nir_ige(b, tex_size, y));
   nir_ssa_def *one_on = nir_ixor(b, x_on, y_on);

   /* If the sample did not fall off the face in either dimension, then set output = input */
   nir_ssa_def *x_result = x;
   nir_ssa_def *y_result = y;
   nir_ssa_def *face_result = face;

   /* otherwise, if the sample fell off the face in either the X or the Y direction, remap to the new face */
   nir_ssa_def *remap_predicates[4] =
   {
      nir_iand(b, one_on, nir_ilt(b, x, zero)),
      nir_iand(b, one_on, nir_ilt(b, tex_size, x)),
      nir_iand(b, one_on, nir_ilt(b, y, zero)),
      nir_iand(b, one_on, nir_ilt(b, tex_size, y)),
   };

   nir_ssa_def *remap_array[cube_remap_size];

   remap_array[cube_remap_zero] = zero;
   remap_array[cube_remap_x] = x;
   remap_array[cube_remap_y] = y;
   remap_array[cube_remap_tex_size] = tex_size;
   remap_array[cube_remap_tex_size_minus_x] = nir_isub(b, tex_size, x);
   remap_array[cube_remap_tex_size_minus_y] = nir_isub(b, tex_size, y);

   /* For each possible way the sample could have fallen off */
   for (unsigned i = 0; i < 4; i++) {
      const struct cube_remap_table* remap_table = remap_tables[i];

      /* For each possible original face */
      for (unsigned j = 0; j < 6; j++) {
         nir_ssa_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq(b, face, nir_imm_int(b, j)));

         x_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_x], x_result);
         y_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_y], y_result);
         face_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_face], face_result);
      }
   }

   return nir_vec3(b, x_result, y_result, nir_iadd(b, face_result, array_slice_cube_base));
}

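/* Emulate tg4 (textureGather) on the lowered 2D array: scale the face
 * coordinates to texel space, compute the four texels surrounding the sample
 * point, remap any texel that crosses a face edge, then fetch each one with
 * txf and return the gathered component of all four. */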
static nir_ssa_def *
handle_cube_gather(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coord)
{
   tex->is_array = true;
   nir_ssa_def *tex_size = nir_get_texture_size(b, tex);

   /* nir_get_texture_size puts the cursor before the tex op */
   b->cursor = nir_after_instr(coord->parent_instr);

   nir_ssa_def *const_05 = nir_imm_float(b, 0.5f);
   nir_ssa_def *texel_coords = nir_fmul(b, nir_channels(b, coord, 3),
      nir_i2f32(b, nir_channels(b, tex_size, 3)));

   nir_ssa_def *x_orig = nir_channel(b, texel_coords, 0);
   nir_ssa_def *y_orig = nir_channel(b, texel_coords, 1);

   nir_ssa_def *x_pos = nir_f2i32(b, nir_fadd(b, x_orig, const_05));
   nir_ssa_def *x_neg = nir_f2i32(b, nir_fsub(b, x_orig, const_05));
   nir_ssa_def *y_pos = nir_f2i32(b, nir_fadd(b, y_orig, const_05));
   nir_ssa_def *y_neg = nir_f2i32(b, nir_fsub(b, y_orig, const_05));
   nir_ssa_def *coords[4][2] = {
      { x_neg, y_pos },
      { x_pos, y_pos },
      { x_pos, y_neg },
      { x_neg, y_neg },
   };

   nir_ssa_def *array_slice_2d = nir_f2i32(b, nir_channel(b, coord, 2));
   nir_ssa_def *face = nir_imod(b, array_slice_2d, nir_imm_int(b, 6));
   nir_ssa_def *array_slice_cube_base = nir_isub(b, array_slice_2d, face);

   nir_ssa_def *channels[4];
   for (unsigned i = 0; i < 4; ++i) {
      nir_ssa_def *final_coord = handle_cube_edge(b, coords[i][0], coords[i][1], face, array_slice_cube_base, tex_size);
      nir_ssa_def *sampled_val = create_array_tex_from_cube_tex(b, tex, final_coord, nir_texop_txf);
      channels[i] = nir_channel(b, sampled_val, tex->component);
   }

   return nir_vec(b, channels, 4);
}

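/* Convert a cube direction vector (plus optional array index) into the
 * (x, y, face) coordinate used by the 2D-array lookup.  The face is chosen
 * by comparing the absolute values of the components; the three cases are
 * emitted as nested if/else blocks joined with phis. */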
static nir_ssa_def *
lower_cube_coords(nir_builder *b, nir_ssa_def *coord, bool is_array)
{
   coord_t coords;
   coords.rx = nir_channel(b, coord, 0);
   coords.ry = nir_channel(b, coord, 1);
   coords.rz = nir_channel(b, coord, 2);
   coords.arx = nir_fabs(b, coords.rx);
   coords.ary = nir_fabs(b, coords.ry);
   coords.arz = nir_fabs(b, coords.rz);
   coords.array = NULL;
   if (is_array)
      coords.array = nir_fmul(b, nir_channel(b, coord, 3), nir_imm_float(b, 6.0f));

   nir_ssa_def *use_face_x = nir_iand(b,
                                      nir_fge(b, coords.arx, coords.ary),
                                      nir_fge(b, coords.arx, coords.arz));

   nir_if *use_face_x_if = nir_push_if(b, use_face_x);
   nir_ssa_def *face_x_coord = evaluate_face_x(b, &coords);
   nir_if *use_face_x_else = nir_push_else(b, use_face_x_if);

   nir_ssa_def *use_face_y = nir_iand(b,
                                      nir_fge(b, coords.ary, coords.arx),
                                      nir_fge(b, coords.ary, coords.arz));

   nir_if *use_face_y_if = nir_push_if(b, use_face_y);
   nir_ssa_def *face_y_coord = evaluate_face_y(b, &coords);
   nir_if *use_face_y_else = nir_push_else(b, use_face_y_if);

   nir_ssa_def *face_z_coord = evaluate_face_z(b, &coords);

   nir_pop_if(b, use_face_y_else);
   nir_ssa_def *face_y_or_z_coord = nir_if_phi(b, face_y_coord, face_z_coord);
   nir_pop_if(b, use_face_x_else);

   /* This contains in xy the normalized sample coordinates, and in z the face index */
   nir_ssa_def *coord_and_face = nir_if_phi(b, face_x_coord, face_y_or_z_coord);

   return coord_and_face;
}

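/* Find the uniform sampler variable backing tex->texture_index (matching
 * driver_location, including variables whose array range covers the index)
 * and rewrite its type from cube to 2D array. */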
static void
rewrite_cube_var_type(nir_builder *b, nir_tex_instr *tex)
{
   unsigned index = tex->texture_index;
   nir_variable *sampler = NULL;
   int highest = -1;
   nir_foreach_variable_with_modes(var, b->shader, nir_var_uniform) {
      if (!glsl_type_is_sampler(glsl_without_array(var->type)))
         continue;
      unsigned size = glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
      if (var->data.driver_location == index ||
          (var->data.driver_location < index && var->data.driver_location + size > index)) {
         sampler = var;
         break;
      }
      /* handle array sampler access: use the next-closest sampler */
      if (var->data.driver_location > highest && var->data.driver_location < index) {
         highest = var->data.driver_location;
         sampler = var;
      }
   }
   assert(sampler);
   sampler->type = make_2darray_from_cubemap_with_array(sampler->type);
}

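/* tex and txb are converted to txl up front, presumably because implicit LOD
 * selection on the lowered 2D-array coordinates would not match the original
 * cube derivatives; the LOD is queried explicitly and the bias folded in. */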
/* txb(s, coord, bias) = txl(s, coord, lod(s, coord).y + bias) */
/* tex(s, coord) = txl(s, coord, lod(s, coord).x) */
static nir_tex_instr *
lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);
   int bias_idx = nir_tex_instr_src_index(tex, nir_tex_src_bias);
   unsigned num_srcs = bias_idx >= 0 ? tex->num_srcs : tex->num_srcs + 1;
   nir_tex_instr *txl = nir_tex_instr_create(b->shader, num_srcs);

   txl->op = nir_texop_txl;
   txl->sampler_dim = tex->sampler_dim;
   txl->dest_type = tex->dest_type;
   txl->coord_components = tex->coord_components;
   txl->texture_index = tex->texture_index;
   txl->sampler_index = tex->sampler_index;
   txl->is_array = tex->is_array;
   txl->is_shadow = tex->is_shadow;
   txl->is_sparse = tex->is_sparse;
   txl->is_new_style_shadow = tex->is_new_style_shadow;

   unsigned s = 0;
   for (int i = 0; i < tex->num_srcs; i++) {
      if (i == bias_idx)
         continue;
      nir_src_copy(&txl->src[s].src, &tex->src[i].src);
      txl->src[s].src_type = tex->src[i].src_type;
      s++;
   }
   nir_ssa_def *lod = nir_get_texture_lod(b, tex);

   if (bias_idx >= 0)
      lod = nir_fadd(b, lod, nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
   lod = nir_fadd_imm(b, lod, -1.0);
   txl->src[s].src = nir_src_for_ssa(lod);
   txl->src[s].src_type = nir_tex_src_lod;

   b->cursor = nir_before_instr(&tex->instr);
   nir_ssa_dest_init(&txl->instr, &txl->dest, nir_dest_num_components(tex->dest),
                     nir_dest_bit_size(tex->dest), NULL);
   nir_builder_instr_insert(b, &txl->instr);
   nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
   return txl;
}

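/* Lower a single cube sampling instruction: convert implicit-LOD ops to txl
 * first, translate the direction vector to (x, y, face), rewrite the sampler
 * variable type, and emit either the gather emulation or a plain 2D-array
 * version of the original op. */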
static nir_ssa_def *
lower_cube_sample(nir_builder *b, nir_tex_instr *tex)
{
   if (!tex->is_shadow && (tex->op == nir_texop_txb || tex->op == nir_texop_tex)) {
      tex = lower_tex_to_txl(b, tex);
   }

   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   assert(coord_index >= 0);

   /* Evaluate the face and the xy coordinates for a 2D tex op */
   nir_ssa_def *coord = tex->src[coord_index].src.ssa;
   nir_ssa_def *coord_and_face = lower_cube_coords(b, coord, tex->is_array);

   rewrite_cube_var_type(b, tex);

   if (tex->op == nir_texop_tg4 && !tex->is_shadow)
      return handle_cube_gather(b, tex, coord_and_face);
   else
      return create_array_tex_from_cube_tex(b, tex, coord_and_face, tex->op);
}

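/* Lower txs (textureSize): the lowered texture reports 6 * layers in its
 * third component, so divide that back down to the cube-array size and trim
 * the result to the number of components the original query returned. */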
static nir_ssa_def *
lower_cube_txs(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   rewrite_cube_var_type(b, tex);
   unsigned num_components = tex->dest.ssa.num_components;
   /* force max components to unbreak textureSize().xy */
   tex->dest.ssa.num_components = 3;
   tex->is_array = true;
   nir_ssa_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
   nir_ssa_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
   nir_ssa_def *size = nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0),
                                   nir_channel(b, &tex->dest.ssa, 1),
                                   cube_array_dim);
   return nir_channels(b, size, BITFIELD_MASK(num_components));
}

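/* Dispatch per texture op; anything not admitted by the filter above is a
 * bug, hence the unreachable(). */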
static nir_ssa_def *
lower_cubemap_to_array_tex(nir_builder *b, nir_tex_instr *tex)
{
   switch (tex->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txd:
   case nir_texop_txl:
   case nir_texop_lod:
   case nir_texop_tg4:
      return lower_cube_sample(b, tex);
   case nir_texop_txs:
      return lower_cube_txs(b, tex);
   default:
      unreachable("Unsupported cube map texture operation");
   }
}

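/* Instruction callback for nir_shader_lower_instructions(); only tex
 * instructions make it past the filter. */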
static nir_ssa_def *
lower_cubemap_to_array_impl(nir_builder *b, nir_instr *instr,
                            UNUSED void *_options)
{
   if (instr->type == nir_instr_type_tex)
      return lower_cubemap_to_array_tex(b, nir_instr_as_tex(instr));
   return NULL;
}

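/* Entry point: nonseamless_cube_mask is a bitmask of sampler indices whose
 * cube lookups should be rewritten to 2D-array lookups. */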
bool
zink_lower_cubemap_to_array(nir_shader *s, uint32_t nonseamless_cube_mask);
bool
zink_lower_cubemap_to_array(nir_shader *s, uint32_t nonseamless_cube_mask)
{
   return nir_shader_lower_instructions(s,
                                        lower_cubemap_to_array_filter,
                                        lower_cubemap_to_array_impl,
                                        &nonseamless_cube_mask);
}