/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "nir.h"
#include "nir_builder.h"

/*
 * Lowers the combined pack/unpack ALU ops, e.g.:
 *
 * pack_64_2x32(foo)   -> pack_64_2x32_split(foo.x, foo.y)
 * unpack_64_2x32(foo) -> vec2(unpack_64_2x32_split_x(foo),
 *                             unpack_64_2x32_split_y(foo))
 *
 * and likewise for pack_64_4x16, pack_32_2x16, and pack_32_4x8 and their
 * unpack counterparts.
 */

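/* pack_64_2x32: combine a 2x32-bit vector into one 64-bit scalar.  In
 * scalar terms (illustrative): dst = (uint64_t)src.y << 32 | src.x.
 */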
static nir_def *
lower_pack_64_from_32(nir_builder *b, nir_def *src)
{
   return nir_pack_64_2x32_split(b, nir_channel(b, src, 0),
                                 nir_channel(b, src, 1));
}

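/* unpack_64_2x32: the inverse; x is the low 32-bit half, y the high. */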
static nir_def *
lower_unpack_64_to_32(nir_builder *b, nir_def *src)
{
   return nir_vec2(b, nir_unpack_64_2x32_split_x(b, src),
                   nir_unpack_64_2x32_split_y(b, src));
}

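/* pack_32_2x16: same idea one size down; roughly
 * dst = (uint32_t)src.y << 16 | src.x.
 */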
static nir_def *
lower_pack_32_from_16(nir_builder *b, nir_def *src)
{
   return nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
                                 nir_channel(b, src, 1));
}

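/* unpack_32_2x16: split a 32-bit scalar into its low (x) and high (y)
 * 16-bit halves.
 */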
static nir_def *
lower_unpack_32_to_16(nir_builder *b, nir_def *src)
{
   return nir_vec2(b, nir_unpack_32_2x16_split_x(b, src),
                   nir_unpack_32_2x16_split_y(b, src));
}

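/* pack_64_4x16: lowered in two stages: pack the 16-bit channels
 * pairwise into two 32-bit words, then pack those words into the final
 * 64-bit value.
 */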
static nir_def *
lower_pack_64_from_16(nir_builder *b, nir_def *src)
{
   nir_def *xy = nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
                                        nir_channel(b, src, 1));

   nir_def *zw = nir_pack_32_2x16_split(b, nir_channel(b, src, 2),
                                        nir_channel(b, src, 3));

   return nir_pack_64_2x32_split(b, xy, zw);
}

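/* unpack_64_4x16: the mirror image of the two-stage pack above: split
 * into 32-bit halves first, then each half into 16-bit channels.
 */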
static nir_def *
lower_unpack_64_to_16(nir_builder *b, nir_def *src)
{
   nir_def *xy = nir_unpack_64_2x32_split_x(b, src);
   nir_def *zw = nir_unpack_64_2x32_split_y(b, src);

   return nir_vec4(b, nir_unpack_32_2x16_split_x(b, xy),
                   nir_unpack_32_2x16_split_y(b, xy),
                   nir_unpack_32_2x16_split_x(b, zw),
                   nir_unpack_32_2x16_split_y(b, zw));
}

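/* pack_32_4x8: use the dedicated split opcode when the backend supports
 * it; otherwise widen each byte to 32 bits and assemble the word with
 * shifts and ORs: dst = b0 | b1 << 8 | b2 << 16 | b3 << 24.
 */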
static nir_def *
lower_pack_32_from_8(nir_builder *b, nir_def *src)
{
   if (b->shader->options->has_pack_32_4x8) {
      return nir_pack_32_4x8_split(b,
                                   nir_channel(b, src, 0),
                                   nir_channel(b, src, 1),
                                   nir_channel(b, src, 2),
                                   nir_channel(b, src, 3));
   } else {
      nir_def *src32 = nir_u2u32(b, src);

      return nir_ior(b,
                     nir_ior(b, nir_channel(b, src32, 0),
                             nir_ishl_imm(b, nir_channel(b, src32, 1), 8)),
                     nir_ior(b, nir_ishl_imm(b, nir_channel(b, src32, 2), 16),
                             nir_ishl_imm(b, nir_channel(b, src32, 3), 24)));
   }
}

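/* unpack_32_4x8: pull out each byte, either with plain right-shifts or
 * via extract_u8, which backends without lower_extract_byte can map to
 * dedicated byte-access instructions.
 */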
static nir_def *
lower_unpack_32_to_8(nir_builder *b, nir_def *src)
{
   /* Some drivers call nir_lower_pack after the last time nir_opt_algebraic
    * is called. To prevent issues there, don't generate byte extraction
    * instructions when the lowering flag is set.
    */
   if (b->shader->options->lower_extract_byte) {
      return nir_vec4(b, nir_u2u8(b, src),
                      nir_u2u8(b, nir_ushr_imm(b, src, 8)),
                      nir_u2u8(b, nir_ushr_imm(b, src, 16)),
                      nir_u2u8(b, nir_ushr_imm(b, src, 24)));
   } else {
      return nir_vec4(b, nir_u2u8(b, nir_extract_u8_imm(b, src, 0)),
                      nir_u2u8(b, nir_extract_u8_imm(b, src, 1)),
                      nir_u2u8(b, nir_extract_u8_imm(b, src, 2)),
                      nir_u2u8(b, nir_extract_u8_imm(b, src, 3)));
   }
}

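/* Callback for nir_shader_instructions_pass: returns true (progress)
 * after replacing one of the combined pack/unpack ALU ops with the
 * equivalent lowered sequence built just before it.
 */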
static bool
lower_pack_instr(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu_instr = (nir_alu_instr *)instr;

   if (alu_instr->op != nir_op_pack_64_2x32 &&
       alu_instr->op != nir_op_unpack_64_2x32 &&
       alu_instr->op != nir_op_pack_64_4x16 &&
       alu_instr->op != nir_op_unpack_64_4x16 &&
       alu_instr->op != nir_op_pack_32_2x16 &&
       alu_instr->op != nir_op_unpack_32_2x16 &&
       alu_instr->op != nir_op_pack_32_4x8 &&
       alu_instr->op != nir_op_unpack_32_4x8)
      return false;

   b->cursor = nir_before_instr(&alu_instr->instr);

   nir_def *src = nir_ssa_for_alu_src(b, alu_instr, 0);
   nir_def *dest;

   switch (alu_instr->op) {
   case nir_op_pack_64_2x32:
      dest = lower_pack_64_from_32(b, src);
      break;
   case nir_op_unpack_64_2x32:
      dest = lower_unpack_64_to_32(b, src);
      break;
   case nir_op_pack_64_4x16:
      dest = lower_pack_64_from_16(b, src);
      break;
   case nir_op_unpack_64_4x16:
      dest = lower_unpack_64_to_16(b, src);
      break;
   case nir_op_pack_32_2x16:
      dest = lower_pack_32_from_16(b, src);
      break;
   case nir_op_unpack_32_2x16:
      dest = lower_unpack_32_to_16(b, src);
      break;
   case nir_op_pack_32_4x8:
      dest = lower_pack_32_from_8(b, src);
      break;
   case nir_op_unpack_32_4x8:
      dest = lower_unpack_32_to_8(b, src);
      break;
   default:
      unreachable("Impossible opcode");
   }
   nir_def_rewrite_uses(&alu_instr->def, dest);
   nir_instr_remove(&alu_instr->instr);

   return true;
}

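/* Pass entry point.  Block indices and dominance are declared preserved,
 * which seems safe here since the lowering only adds straight-line ALU
 * code in place of the removed instruction.
 */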
bool
nir_lower_pack(nir_shader *shader)
{
   return nir_shader_instructions_pass(shader, lower_pack_instr,
                                       nir_metadata_block_index | nir_metadata_dominance, NULL);
}