/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "tgsi/tgsi_text.h"
#include "tgsi/tgsi_ureg.h"

void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type, unsigned num_layers)
{
   unsigned vs_blit_property;
   void **vs;

   switch (type) {
   case UTIL_BLITTER_ATTRIB_NONE:
      vs = num_layers > 1 ? &sctx->vs_blit_pos_layered : &sctx->vs_blit_pos;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS;
      break;
   case UTIL_BLITTER_ATTRIB_COLOR:
      vs = num_layers > 1 ? &sctx->vs_blit_color_layered : &sctx->vs_blit_color;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
      break;
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
      assert(num_layers == 1);
      vs = &sctx->vs_blit_texcoord;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
      break;
   default:
      assert(0);
      return NULL;
   }
   if (*vs)
      return *vs;

   struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   /* Tell the shader to load VS inputs from SGPRs: */
   ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS_AMD, vs_blit_property);
   ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);

   /* This is just a pass-through shader with 1-3 MOV instructions. */
   ureg_MOV(ureg, ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0), ureg_DECL_vs_input(ureg, 0));

   if (type != UTIL_BLITTER_ATTRIB_NONE) {
      ureg_MOV(ureg, ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0), ureg_DECL_vs_input(ureg, 1));
   }

   if (num_layers > 1) {
      struct ureg_src instance_id = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
      struct ureg_dst layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

      ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
               ureg_scalar(instance_id, TGSI_SWIZZLE_X));
   }
   ureg_END(ureg);

   *vs = ureg_create_shader_and_destroy(ureg, &sctx->b);
   return *vs;
}
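
/* Usage note (illustrative; the call sites live outside this file): the
 * returned shader is cached in si_context (vs_blit_pos, vs_blit_color, ...),
 * e.g.
 *
 *    void *vs = si_get_blitter_vs(sctx, UTIL_BLITTER_ATTRIB_COLOR, 1);
 *
 * so callers bind it but must not delete it; it is destroyed together with
 * the context.
 */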

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
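/* Note: the default levels are assumed to arrive through the state tracker
 * (pipe_context::set_tess_state, i.e. GL's glPatchParameterfv defaults) and
 * show up here as the TESS_DEFAULT_{OUTER,INNER}_LEVEL system values; the
 * exact plumbing is outside this file.
 */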
void *si_create_fixed_func_tcs(struct si_context *sctx)
{
   struct ureg_src outer, inner;
   struct ureg_dst tessouter, tessinner;
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

   if (!ureg)
      return NULL;

   outer = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL, 0);
   inner = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL, 0);

   tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
   tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

   ureg_MOV(ureg, tessouter, outer);
   ureg_MOV(ureg, tessinner, inner);
   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, &sctx->b);
}

/* Create a compute shader implementing clear_buffer or copy_buffer. */
void *si_create_dma_compute_shader(struct pipe_context *ctx, unsigned num_dwords_per_thread,
                                   bool dst_stream_cache_policy, bool is_copy)
{
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   assert(util_is_power_of_two_nonzero(num_dwords_per_thread));

   unsigned store_qualifier = TGSI_MEMORY_COHERENT | TGSI_MEMORY_RESTRICT;
   if (dst_stream_cache_policy)
      store_qualifier |= TGSI_MEMORY_STREAM_CACHE_POLICY;

   /* Don't cache loads, because there is no reuse. */
   unsigned load_qualifier = store_qualifier | TGSI_MEMORY_STREAM_CACHE_POLICY;

   unsigned num_mem_ops = MAX2(1, num_dwords_per_thread / 4);
   unsigned *inst_dwords = alloca(num_mem_ops * sizeof(unsigned));

   for (unsigned i = 0; i < num_mem_ops; i++) {
      if (i * 4 < num_dwords_per_thread)
         inst_dwords[i] = MIN2(4, num_dwords_per_thread - i * 4);
   }

   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, sscreen->compute_wave_size);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   struct ureg_src value;
   if (!is_copy) {
      ureg_property(ureg, TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD, inst_dwords[0]);
      value = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_CS_USER_DATA_AMD, 0);
   }

   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst store_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   struct ureg_dst load_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   struct ureg_dst dstbuf = ureg_dst(ureg_DECL_buffer(ureg, 0, false));
   struct ureg_src srcbuf;
   struct ureg_src *values = NULL;

   if (is_copy) {
      srcbuf = ureg_DECL_buffer(ureg, 1, false);
      values = malloc(num_mem_ops * sizeof(struct ureg_src));
   }

   /* If there are multiple stores, the first store writes into 0*wavesize+tid,
    * the 2nd store writes into 1*wavesize+tid, the 3rd store writes into 2*wavesize+tid, etc.
    */
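   /* Worked example (illustrative): with compute_wave_size = 64,
    * num_mem_ops = 2 and inst_dwords[0] = 4, thread tid = 3 in block blk = 1
    * gets store_addr = 1 * (64 * 2) + 3 = 131 store units below, which the
    * following UMUL turns into byte offset 131 * 16 = 2096.
    */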
   ureg_UMAD(ureg, store_addr, blk, ureg_imm1u(ureg, sscreen->compute_wave_size * num_mem_ops),
             tid);
   /* Convert from a "store size unit" into bytes. */
   ureg_UMUL(ureg, store_addr, ureg_src(store_addr), ureg_imm1u(ureg, 4 * inst_dwords[0]));
   ureg_MOV(ureg, load_addr, ureg_src(store_addr));

   /* Distance between a load and a store for latency hiding. */
   unsigned load_store_distance = is_copy ? 8 : 0;

   for (unsigned i = 0; i < num_mem_ops + load_store_distance; i++) {
      int d = i - load_store_distance;

      if (is_copy && i < num_mem_ops) {
         if (i) {
            ureg_UADD(ureg, load_addr, ureg_src(load_addr),
                      ureg_imm1u(ureg, 4 * inst_dwords[i] * sscreen->compute_wave_size));
         }

         values[i] = ureg_src(ureg_DECL_temporary(ureg));
         struct ureg_dst dst =
            ureg_writemask(ureg_dst(values[i]), u_bit_consecutive(0, inst_dwords[i]));
         struct ureg_src srcs[] = {srcbuf, ureg_src(load_addr)};
         ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dst, 1, srcs, 2, load_qualifier,
                          TGSI_TEXTURE_BUFFER, 0);
      }

      if (d >= 0) {
         if (d) {
            ureg_UADD(ureg, store_addr, ureg_src(store_addr),
                      ureg_imm1u(ureg, 4 * inst_dwords[d] * sscreen->compute_wave_size));
         }

         struct ureg_dst dst = ureg_writemask(dstbuf, u_bit_consecutive(0, inst_dwords[d]));
         struct ureg_src srcs[] = {ureg_src(store_addr), is_copy ? values[d] : value};
         ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst, 1, srcs, 2, store_qualifier,
                          TGSI_TEXTURE_BUFFER, 0);
      }
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   ureg_free_tokens(state.prog);

   free(values);
   return cs;
}
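
/* Illustrative dispatch sketch (assumed caller, not part of this file): each
 * thread handles num_dwords_per_thread dwords, so a caller would launch
 * roughly
 *
 *    struct pipe_grid_info info = {0};
 *    info.block[0] = sscreen->compute_wave_size;
 *    info.block[1] = info.block[2] = 1;
 *    info.grid[0] = DIV_ROUND_UP(size_in_dwords,
 *                                num_dwords_per_thread * info.block[0]);
 *    info.grid[1] = info.grid[2] = 1;
 *    ctx->launch_grid(ctx, &info);
 *
 * For the clear path (!is_copy), the clear value is passed through the
 * CS_USER_DATA_AMD SGPRs declared above rather than a constant buffer.
 */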

/* Create a compute shader that copies DCC from one buffer to another
 * where each DCC buffer has a different layout.
 *
 * image[0]: offset remap table (pairs of <src_offset, dst_offset>),
 *           2 pairs are read
 * image[1]: DCC source buffer, typed r8_uint
 * image[2]: DCC destination buffer, typed r8_uint
 */
void *si_create_dcc_retile_cs(struct pipe_context *ctx)
{
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 64);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   /* Compute the global thread ID (in idx). */
   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst idx = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   ureg_UMAD(ureg, idx, blk, ureg_imm1u(ureg, 64), tid);

   /* Load 2 pairs of offsets for DCC load & store. */
   struct ureg_src map = ureg_DECL_image(ureg, 0, TGSI_TEXTURE_BUFFER, 0, false, false);
   struct ureg_dst offsets = ureg_DECL_temporary(ureg);
   struct ureg_src map_load_args[] = {map, ureg_src(idx)};

   ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &offsets, 1, map_load_args, 2, TGSI_MEMORY_RESTRICT,
                    TGSI_TEXTURE_BUFFER, 0);

   struct ureg_src dcc_src = ureg_DECL_image(ureg, 1, TGSI_TEXTURE_BUFFER, 0, false, false);
   struct ureg_dst dcc_dst =
      ureg_dst(ureg_DECL_image(ureg, 2, TGSI_TEXTURE_BUFFER, 0, true, false));
   struct ureg_dst dcc_value[2];

   /* Copy DCC values:
    *   dst[offsets.y] = src[offsets.x];
    *   dst[offsets.w] = src[offsets.z];
    */
   for (unsigned i = 0; i < 2; i++) {
      dcc_value[i] = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);

      struct ureg_src load_args[] = {dcc_src,
                                     ureg_scalar(ureg_src(offsets), TGSI_SWIZZLE_X + i * 2)};
      ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dcc_value[i], 1, load_args, 2, TGSI_MEMORY_RESTRICT,
                       TGSI_TEXTURE_BUFFER, 0);
   }

   dcc_dst = ureg_writemask(dcc_dst, TGSI_WRITEMASK_X);

   for (unsigned i = 0; i < 2; i++) {
      struct ureg_src store_args[] = {ureg_scalar(ureg_src(offsets), TGSI_SWIZZLE_Y + i * 2),
                                      ureg_src(dcc_value[i])};
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dcc_dst, 1, store_args, 2, TGSI_MEMORY_RESTRICT,
                       TGSI_TEXTURE_BUFFER, 0);
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   ureg_free_tokens(state.prog);
   return cs;
}

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
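/* Example CONST[0].w values built from the bit field above (illustrative
 * only; the real values are chosen by the query code):
 *   1 | 8  = 9  : read the previous summary and convert the result to 0/1
 *   4 | 64 = 68 : write the "result available" dword and a full 64-bit result
 */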
void *si_create_query_result_cs(struct si_context *sctx)
{
   /* TEMP[0].xy = accumulated result so far
    * TEMP[0].z = result not available
    *
    * TEMP[1].x = current result index
    * TEMP[1].y = current pair index
    */
   static const char text_tmpl[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL BUFFER[0]\n"
      "DCL BUFFER[1]\n"
      "DCL BUFFER[2]\n"
      "DCL CONST[0][0..1]\n"
      "DCL TEMP[0..5]\n"
      "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
      "IMM[1] UINT32 {1, 2, 4, 8}\n"
      "IMM[2] UINT32 {16, 32, 64, 128}\n"
      "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
      "IMM[4] UINT32 {256, 0, 0, 0}\n"

      "AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
      "UIF TEMP[5]\n"
      /* Check result availability. */
      "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
      "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
      "MOV TEMP[1], TEMP[0].zzzz\n"
      "NOT TEMP[0].z, TEMP[0].zzzz\n"

      /* Load result if available. */
      "UIF TEMP[1]\n"
      "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
      "ENDIF\n"
      "ELSE\n"
      /* Load previously accumulated result if requested. */
      "MOV TEMP[0], IMM[0].xxxx\n"
      "AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
      "UIF TEMP[4]\n"
      "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
      "ENDIF\n"

      "MOV TEMP[1].x, IMM[0].xxxx\n"
      "BGNLOOP\n"
      /* Break if accumulated result so far is not available. */
      "UIF TEMP[0].zzzz\n"
      "BRK\n"
      "ENDIF\n"

      /* Break if result_index >= result_count. */
      "USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
      "UIF TEMP[5]\n"
      "BRK\n"
      "ENDIF\n"

      /* Load fence and check result availability */
      "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
      "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
      "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
      "NOT TEMP[0].z, TEMP[0].zzzz\n"
      "UIF TEMP[0].zzzz\n"
      "BRK\n"
      "ENDIF\n"

      "MOV TEMP[1].y, IMM[0].xxxx\n"
      "BGNLOOP\n"
      /* Load start and end. */
      "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
      "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
      "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

      "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
      "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

      "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

      "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
      "UIF TEMP[5].zzzz\n"
      /* Load second start/end half-pair and
       * take the difference
       */
      "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
      "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
      "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

      "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
      "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
      "ENDIF\n"

      "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

      /* Increment pair index */
      "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
      "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
      "UIF TEMP[5]\n"
      "BRK\n"
      "ENDIF\n"
      "ENDLOOP\n"

      /* Increment result index */
      "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
      "ENDLOOP\n"
      "ENDIF\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
      "UIF TEMP[4]\n"
      /* Store accumulated data for chaining. */
      "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
      "ELSE\n"
      "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
      "UIF TEMP[4]\n"
      /* Store result availability. */
      "NOT TEMP[0].z, TEMP[0]\n"
      "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
      "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
      "UIF TEMP[4]\n"
      "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
      "ENDIF\n"
      "ELSE\n"
      /* Store result if it is available. */
      "NOT TEMP[4], TEMP[0].zzzz\n"
      "UIF TEMP[4]\n"
      /* Apply timestamp conversion */
      "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
      "UIF TEMP[4]\n"
      "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
      "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
      "ENDIF\n"

      /* Convert to boolean */
      "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
      "UIF TEMP[4]\n"
      "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
      "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
      "MOV TEMP[0].y, IMM[0].xxxx\n"
      "ENDIF\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
      "UIF TEMP[4]\n"
      "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
      "ELSE\n"
      /* Clamping */
      "UIF TEMP[0].yyyy\n"
      "MOV TEMP[0].x, IMM[0].wwww\n"
      "ENDIF\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
      "UIF TEMP[4]\n"
      "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
      "ENDIF\n"

      "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
      "ENDIF\n"
      "ENDIF\n"
      "ENDIF\n"
      "ENDIF\n"

      "END\n";

   char text[sizeof(text_tmpl) + 32];
   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {};

   /* Hard code the frequency into the shader so that the backend can
    * use the full range of optimizations for divide-by-constant.
    */
   snprintf(text, sizeof(text), text_tmpl, sctx->screen->info.clock_crystal_freq);

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return sctx->b.create_compute_state(&sctx->b, &state);
}

/* Create a compute shader implementing copy_image.
 * Luckily, this works with all texture targets except 1D_ARRAY.
 */
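/* 1D_ARRAY needs the separate shader below because its array layer is
 * addressed through the second coordinate component (.y) instead of .z,
 * which is why that variant packs CONST[0][0].xz into its coordinates.
 */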
void *si_create_copy_image_compute_shader(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL SV[2], BLOCK_SIZE\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..4], LOCAL\n"

      "MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
      "UMAD TEMP[1].xyz, SV[1].xyzz, SV[2].xyzz, SV[0].xyzz\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "LOAD TEMP[3], IMAGE[0], TEMP[2].xyzx, 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "MOV TEMP[4].xyz, CONST[0][1].xyzw\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[4].xyzx\n"
      "STORE IMAGE[1], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..4], LOCAL\n"
      "IMM[0] UINT32 {64, 1, 0, 0}\n"
      "MOV TEMP[0].xy, CONST[0][0].xzzw\n"
      "UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "LOAD TEMP[3], IMAGE[0], TEMP[2].xyzx, 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "MOV TEMP[4].xy, CONST[0][1].xzzw\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[4].xyzx\n"
      "STORE IMAGE[1], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* Create a compute shader implementing DCC decompression via a blit.
 * This is a trivial copy_image shader except that it has a variable block
 * size and a barrier.
 */
void *si_create_dcc_decompress_cs(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL SV[2], BLOCK_SIZE\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL TEMP[0..1]\n"

      "UMAD TEMP[0].xyz, SV[1].xyzz, SV[2].xyzz, SV[0].xyzz\n"
      "LOAD TEMP[1], IMAGE[0], TEMP[0].xyzz, 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      /* Wait for the whole threadgroup (= DCC block) to load texels before
       * overwriting them, because overwriting any pixel within a DCC block
       * can break compression for the whole block.
       */
      "BARRIER\n"
      "STORE IMAGE[1], TEMP[0].xyzz, TEMP[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

void *si_clear_render_target_shader(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 8\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 8\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..3], LOCAL\n"
      "IMM[0] UINT32 {8, 1, 0, 0}\n"
      "MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
      "UMAD TEMP[1].xyz, SV[1].xyzz, IMM[0].xxyy, SV[0].xyzz\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
      "STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* TODO: Didn't really test 1D_ARRAY */
void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..3], LOCAL\n"
      "IMM[0] UINT32 {64, 1, 0, 0}\n"
      "MOV TEMP[0].xy, CONST[0][0].xzzw\n"
      "UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
      "STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

void *si_clear_12bytes_buffer_shader(struct pipe_context *ctx)
{
   static const char text[] = "COMP\n"
                              "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
                              "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
                              "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
                              "DCL SV[0], THREAD_ID\n"
                              "DCL SV[1], BLOCK_ID\n"
                              "DCL BUFFER[0]\n"
                              "DCL CONST[0][0..0]\n" // 0:xyzw
                              "DCL TEMP[0..0]\n"
                              "IMM[0] UINT32 {64, 1, 12, 0}\n"
                              "UMAD TEMP[0].x, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
                              "UMUL TEMP[0].x, TEMP[0].xyzz, IMM[0].zzzz\n" // 12 bytes
                              "STORE BUFFER[0].xyz, TEMP[0].xxxx, CONST[0][0].xyzw\n"
                              "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* Load samples from the image, and copy them to the same image. This looks like
 * a no-op, but it's not. Loads use FMASK, while stores don't, so samples are
 * reordered to match expanded FMASK.
 *
 * After the shader finishes, FMASK should be cleared to identity.
 */
void *si_create_fmask_expand_cs(struct pipe_context *ctx, unsigned num_samples, bool is_array)
{
   enum tgsi_texture_type target = is_array ? TGSI_TEXTURE_2D_ARRAY_MSAA : TGSI_TEXTURE_2D_MSAA;
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 8);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 8);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   /* Compute the image coordinates. */
   struct ureg_src image = ureg_DECL_image(ureg, 0, target, 0, true, false);
   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst coord = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_XYZW);
   ureg_UMAD(ureg, ureg_writemask(coord, TGSI_WRITEMASK_XY), ureg_swizzle(blk, 0, 1, 1, 1),
             ureg_imm2u(ureg, 8, 8), ureg_swizzle(tid, 0, 1, 1, 1));
   if (is_array) {
      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_Z), ureg_scalar(blk, TGSI_SWIZZLE_Z));
   }

   /* Load samples, resolving FMASK. */
   struct ureg_dst sample[8];
   assert(num_samples <= ARRAY_SIZE(sample));

   for (unsigned i = 0; i < num_samples; i++) {
      sample[i] = ureg_DECL_temporary(ureg);

      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_W), ureg_imm1u(ureg, i));

      struct ureg_src srcs[] = {image, ureg_src(coord)};
      ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &sample[i], 1, srcs, 2, TGSI_MEMORY_RESTRICT, target,
                       0);
   }

   /* Store samples, ignoring FMASK. */
   for (unsigned i = 0; i < num_samples; i++) {
      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_W), ureg_imm1u(ureg, i));

      struct ureg_dst dst_image = ureg_dst(image);
      struct ureg_src srcs[] = {ureg_src(coord), ureg_src(sample[i])};
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst_image, 1, srcs, 2, TGSI_MEMORY_RESTRICT,
                       target, 0);
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   return cs;
}

/* Create the compute shader that is used to collect the results of gfx10+
 * shader queries.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * BUFFER[0] = query result buffer (layout is defined by gfx10_sh_query_buffer_mem)
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 *
 * CONST
 *  0.x = config; the low 3 bits indicate the mode:
 *          0: sum up counts
 *          1: determine result availability and write it as a boolean
 *          2: SO_OVERFLOW
 *          3: SO_ANY_OVERFLOW
 *        the remaining bits form a bitfield:
 *          8: write result as a 64-bit value
 *  0.y = offset in bytes to counts or stream for SO_OVERFLOW mode
 *  0.z = chain bit field:
 *          1: have previous summary buffer
 *          2: write next summary buffer
 *  0.w = result_count
 */
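/* Example CONST[0].x (config) values derived from the layout above
 * (illustrative only; the real values come from the query code):
 *   0     : sum up counts, 32-bit result
 *   1 | 8 : availability check written as a 64-bit boolean
 *   2     : SO_OVERFLOW for a single stream
 */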
void *gfx10_create_sh_query_result_cs(struct si_context *sctx)
{
   /* TEMP[0].x = accumulated result so far
    * TEMP[0].y = result missing
    * TEMP[0].z = whether we're in overflow mode
    */
   static const char text_tmpl[] = "COMP\n"
                                   "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
                                   "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
                                   "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
                                   "DCL BUFFER[0]\n"
                                   "DCL BUFFER[1]\n"
                                   "DCL BUFFER[2]\n"
                                   "DCL CONST[0][0..0]\n"
                                   "DCL TEMP[0..5]\n"
                                   "IMM[0] UINT32 {0, 7, 0, 4294967295}\n"
                                   "IMM[1] UINT32 {1, 2, 4, 8}\n"
                                   "IMM[2] UINT32 {16, 32, 64, 128}\n"

                                   /*
                                   acc_result = 0;
                                   acc_missing = 0;
                                   if (chain & 1) {
                                           acc_result = buffer[1][0];
                                           acc_missing = buffer[1][1];
                                   }
                                   */
                                   "MOV TEMP[0].xy, IMM[0].xxxx\n"
                                   "AND TEMP[5], CONST[0][0].zzzz, IMM[1].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "LOAD TEMP[0].xy, BUFFER[1], IMM[0].xxxx\n"
                                   "ENDIF\n"

                                   /*
                                   is_overflow (TEMP[0].z) = (config & 7) >= 2;
                                   result_remaining (TEMP[1].x) = (is_overflow && acc_result) ? 0 : result_count;
                                   base_offset (TEMP[1].y) = 0;
                                   for (;;) {
                                           if (!result_remaining)
                                                   break;
                                           result_remaining--;
                                   */
                                   "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
                                   "USGE TEMP[0].z, TEMP[5].xxxx, IMM[1].yyyy\n"

                                   "AND TEMP[5].x, TEMP[0].zzzz, TEMP[0].xxxx\n"
                                   "UCMP TEMP[1].x, TEMP[5].xxxx, IMM[0].xxxx, CONST[0][0].wwww\n"
                                   "MOV TEMP[1].y, IMM[0].xxxx\n"

                                   "BGNLOOP\n"
                                   "USEQ TEMP[5], TEMP[1].xxxx, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "BRK\n"
                                   "ENDIF\n"
                                   "UADD TEMP[1].x, TEMP[1].xxxx, IMM[0].wwww\n"

                                   /*
                                   fence = buffer[0]@(base_offset + 32);
                                   if (!fence) {
                                           acc_missing = ~0u;
                                           break;
                                   }
                                   */
                                   "UADD TEMP[5].x, TEMP[1].yyyy, IMM[2].yyyy\n"
                                   "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
                                   "USEQ TEMP[5], TEMP[5].xxxx, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "MOV TEMP[0].y, TEMP[5].xxxx\n"
                                   "BRK\n"
                                   "ENDIF\n"

                                   /*
                                   stream_offset (TEMP[2].x) = base_offset + offset;

                                   if (!(config & 7)) {
                                           acc_result += buffer[0]@stream_offset;
                                   }
                                   */
                                   "UADD TEMP[2].x, TEMP[1].yyyy, CONST[0][0].yyyy\n"

                                   "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
                                   "USEQ TEMP[5], TEMP[5].xxxx, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "LOAD TEMP[5].x, BUFFER[0], TEMP[2].xxxx\n"
                                   "UADD TEMP[0].x, TEMP[0].xxxx, TEMP[5].xxxx\n"
                                   "ENDIF\n"

                                   /*
                                   if ((config & 7) >= 2) {
                                           count (TEMP[2].y) = (config & 1) ? 4 : 1;
                                   */
                                   "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
                                   "USGE TEMP[5], TEMP[5].xxxx, IMM[1].yyyy\n"
                                   "UIF TEMP[5]\n"
                                   "AND TEMP[5].x, CONST[0][0].xxxx, IMM[1].xxxx\n"
                                   "UCMP TEMP[2].y, TEMP[5].xxxx, IMM[1].zzzz, IMM[1].xxxx\n"

                                   /*
                                   do {
                                           generated = buffer[0]@stream_offset;
                                           emitted = buffer[0]@(stream_offset + 16);
                                           if (generated != emitted) {
                                                   acc_result = 1;
                                                   result_remaining = 0;
                                                   break;
                                           }

                                           stream_offset += 4;
                                   } while (--count);
                                   */
                                   "BGNLOOP\n"
                                   "UADD TEMP[5].x, TEMP[2].xxxx, IMM[2].xxxx\n"
                                   "LOAD TEMP[4].x, BUFFER[0], TEMP[2].xxxx\n"
                                   "LOAD TEMP[4].y, BUFFER[0], TEMP[5].xxxx\n"
                                   "USNE TEMP[5], TEMP[4].xxxx, TEMP[4].yyyy\n"
                                   "UIF TEMP[5]\n"
                                   "MOV TEMP[0].x, IMM[1].xxxx\n"
                                   "MOV TEMP[1].y, IMM[0].xxxx\n"
                                   "BRK\n"
                                   "ENDIF\n"

                                   "UADD TEMP[2].y, TEMP[2].yyyy, IMM[0].wwww\n"
                                   "USEQ TEMP[5], TEMP[2].yyyy, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "BRK\n"
                                   "ENDIF\n"
                                   "UADD TEMP[2].x, TEMP[2].xxxx, IMM[1].zzzz\n"
                                   "ENDLOOP\n"
                                   "ENDIF\n"

                                   /*
                                           base_offset += 64;
                                   } // end outer loop
                                   */
                                   "UADD TEMP[1].y, TEMP[1].yyyy, IMM[2].zzzz\n"
                                   "ENDLOOP\n"

                                   /*
                                   if (chain & 2) {
                                           buffer[2][0] = acc_result;
                                           buffer[2][1] = acc_missing;
                                   } else {
                                   */
                                   "AND TEMP[5], CONST[0][0].zzzz, IMM[1].yyyy\n"
                                   "UIF TEMP[5]\n"
                                   "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0]\n"
                                   "ELSE\n"

                                   /*
                                   if ((config & 7) == 1) {
                                           acc_result = acc_missing ? 0 : 1;
                                           acc_missing = 0;
                                   }
                                   */
                                   "AND TEMP[5], CONST[0][0].xxxx, IMM[0].yyyy\n"
                                   "USEQ TEMP[5], TEMP[5].xxxx, IMM[1].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "UCMP TEMP[0].x, TEMP[0].yyyy, IMM[0].xxxx, IMM[1].xxxx\n"
                                   "MOV TEMP[0].y, IMM[0].xxxx\n"
                                   "ENDIF\n"

                                   /*
                                   if (!acc_missing) {
                                           buffer[2][0] = acc_result;
                                           if (config & 8)
                                                   buffer[2][1] = 0;
                                   }
                                   */
                                   "USEQ TEMP[5], TEMP[0].yyyy, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"

                                   "AND TEMP[5], CONST[0][0].xxxx, IMM[1].wwww\n"
                                   "UIF TEMP[5]\n"
                                   "STORE BUFFER[2].x, IMM[1].zzzz, TEMP[0].yyyy\n"
                                   "ENDIF\n"
                                   "ENDIF\n"
                                   "ENDIF\n"

                                   "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {};

   if (!tgsi_text_translate(text_tmpl, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return sctx->b.create_compute_state(&sctx->b, &state);
}