/*
 * Copyright (C) 2017-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#define GPU 600

#include "ir3_context.h"
#include "ir3_image.h"

/*
 * Handlers for instructions changed/added in a6xx:
 *
 * Starting with a6xx, isam and stib are used for SSBOs as well; stib and the
 * atomic instructions (used for both SSBO and image) use a new instruction
 * encoding compared to a4xx/a5xx.
 */
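
/*
 * The helpers below all go through the IBO ("B") variants of the cat6
 * instructions: ldib/stib for loads/stores and atomic.b.* for atomics,
 * with the SSBO/image binding first translated to an IBO index via
 * ir3_ssbo_to_ibo()/ir3_image_to_ibo().
 */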

/* src[] = { buffer_index, offset }. No const_index */
static void
emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                         struct ir3_instruction **dst)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *offset;
   struct ir3_instruction *ldib;

   offset = ir3_get_src(ctx, &intr->src[2])[0];

   ldib = ir3_LDIB(b, ir3_ssbo_to_ibo(ctx, intr->src[0]), 0, offset, 0);
   ldib->dsts[0]->wrmask = MASK(intr->num_components);
   ldib->cat6.iim_val = intr->num_components;
   ldib->cat6.d = 1;
   ldib->cat6.type = intr->def.bit_size == 16 ? TYPE_U16 : TYPE_U32;
   ldib->barrier_class = IR3_BARRIER_BUFFER_R;
   ldib->barrier_conflict = IR3_BARRIER_BUFFER_W;
   ir3_handle_bindless_cat6(ldib, intr->src[0]);
   ir3_handle_nonuniform(ldib, intr);

   ir3_split_dest(b, dst, ldib, 0, intr->num_components);
}

/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
static void
emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *stib, *val, *offset;
   unsigned wrmask = nir_intrinsic_write_mask(intr);
   unsigned ncomp = ffs(~wrmask) - 1;

   assert(wrmask == BITFIELD_MASK(intr->num_components));

   /* src0 is offset, src1 is value:
    */
   val = ir3_create_collect(b, ir3_get_src(ctx, &intr->src[0]), ncomp);
   offset = ir3_get_src(ctx, &intr->src[3])[0];

   stib = ir3_STIB(b, ir3_ssbo_to_ibo(ctx, intr->src[1]), 0, offset, 0, val, 0);
   stib->cat6.iim_val = ncomp;
   stib->cat6.d = 1;
   stib->cat6.type = intr->src[0].ssa->bit_size == 16 ? TYPE_U16 : TYPE_U32;
   stib->barrier_class = IR3_BARRIER_BUFFER_W;
   stib->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
   ir3_handle_bindless_cat6(stib, intr->src[1]);
   ir3_handle_nonuniform(stib, intr);

   array_insert(b, b->keeps, stib);
}

static struct ir3_instruction *
emit_atomic(struct ir3_block *b,
            nir_atomic_op op,
            struct ir3_instruction *ibo,
            struct ir3_instruction *src0,
            struct ir3_instruction *src1)
{
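   /* Note: signed and unsigned min/max use the same opcode; the
    * signedness is conveyed via cat6.type, which the caller sets on the
    * returned instruction.
    */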
   switch (op) {
   case nir_atomic_op_iadd:
      return ir3_ATOMIC_B_ADD(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_imin:
      return ir3_ATOMIC_B_MIN(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_umin:
      return ir3_ATOMIC_B_MIN(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_imax:
      return ir3_ATOMIC_B_MAX(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_umax:
      return ir3_ATOMIC_B_MAX(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_iand:
      return ir3_ATOMIC_B_AND(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_ior:
      return ir3_ATOMIC_B_OR(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_ixor:
      return ir3_ATOMIC_B_XOR(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_xchg:
      return ir3_ATOMIC_B_XCHG(b, ibo, 0, src0, 0, src1, 0);
   case nir_atomic_op_cmpxchg:
      return ir3_ATOMIC_B_CMPXCHG(b, ibo, 0, src0, 0, src1, 0);
   default:
      unreachable("boo");
   }
}

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap that takes 4. These
 * sources represent:
 *
 *    0: The SSBO buffer index.
 *    1: The offset into the SSBO buffer of the variable that the atomic
 *       operation will operate on.
 *    2: The data parameter to the atomic function (i.e. the value to add
 *       in, etc).
 *    3: For CompSwap only: the second data parameter.
 */
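
/*
 * For example, an untyped atomic add behaves roughly like the C below
 * (a sketch only; the hardware instruction performs the read-modify-write
 * atomically, and nir has already converted the offset to bytes):
 *
 *    uint32_t ssbo_atomic_add(uint32_t *buf, uint32_t offset, uint32_t data)
 *    {
 *       uint32_t old = buf[offset / 4];
 *       buf[offset / 4] = old + data;
 *       return old;
 *    }
 */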
static struct ir3_instruction *
emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *atomic, *ibo, *src0, *src1, *data, *dummy;
   nir_atomic_op op = nir_intrinsic_atomic_op(intr);
   type_t type = nir_atomic_op_type(op) == nir_type_int ? TYPE_S32 : TYPE_U32;

   ibo = ir3_ssbo_to_ibo(ctx, intr->src[0]);

   data = ir3_get_src(ctx, &intr->src[2])[0];

   /* So this gets a bit creative:
    *
    *    src0    - vecN offset/coords
    *    src1.x  - is actually destination register
    *    src1.y  - is 'data' except for cmpxchg where src1.y is 'compare'
    *    src1.z  - is 'data' for cmpxchg
    *
    * Combining the src and dest kinda doesn't work out so well with how
    * scheduling and RA work. So we create a dummy src2 which is tied to the
    * destination in RA (i.e. must be allocated to the same vec2/vec3
    * register) and then immediately extract the first component.
    *
    * Note that nir already multiplies the offset by four.
    */
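   /* So for, e.g., an add, the emitted sequence looks roughly like
    * (a sketch, not exact ir3 syntax):
    *
    *    dummy  = mov 0
    *    src1   = collect(dummy, data)      ; dst tied to src1 in RA
    *    atomic = atomic.b.add(ibo, offset, src1)
    *    result = split(atomic, comp=0)
    */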
   dummy = create_immed(b, 0);

   if (op == nir_atomic_op_cmpxchg) {
      src0 = ir3_get_src(ctx, &intr->src[4])[0];
      struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0];
      src1 = ir3_collect(b, dummy, compare, data);
   } else {
      src0 = ir3_get_src(ctx, &intr->src[3])[0];
      src1 = ir3_collect(b, dummy, data);
   }

   atomic = emit_atomic(b, op, ibo, src0, src1);
   atomic->cat6.iim_val = 1;
   atomic->cat6.d = 1;
   atomic->cat6.type = type;
   atomic->barrier_class = IR3_BARRIER_BUFFER_W;
   atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
   ir3_handle_bindless_cat6(atomic, intr->src[0]);

   /* even if nothing consumes the result, we can't DCE the instruction: */
   array_insert(b, b->keeps, atomic);

   atomic->dsts[0]->wrmask = src1->dsts[0]->wrmask;
   ir3_reg_tie(atomic->dsts[0], atomic->srcs[2]);
   ir3_handle_nonuniform(atomic, intr);
   struct ir3_instruction *split;
   ir3_split_dest(b, &split, atomic, 0, 1);
   return split;
}

/* src[] = { deref, coord, sample_index }. const_index[] = {} */
static void
emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                          struct ir3_instruction **dst)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *ldib;
   struct ir3_instruction *const *coords = ir3_get_src(ctx, &intr->src[1]);
   unsigned ncoords = ir3_get_image_coords(intr, NULL);

   ldib = ir3_LDIB(b, ir3_image_to_ibo(ctx, intr->src[0]), 0,
                   ir3_create_collect(b, coords, ncoords), 0);
   ldib->dsts[0]->wrmask = MASK(intr->num_components);
   ldib->cat6.iim_val = intr->num_components;
   ldib->cat6.d = ncoords;
   ldib->cat6.type = ir3_get_type_for_image_intrinsic(intr);
   ldib->cat6.typed = true;
   ldib->barrier_class = IR3_BARRIER_IMAGE_R;
   ldib->barrier_conflict = IR3_BARRIER_IMAGE_W;
   ir3_handle_bindless_cat6(ldib, intr->src[0]);
   ir3_handle_nonuniform(ldib, intr);

   ir3_split_dest(b, dst, ldib, 0, intr->num_components);
}

/* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
static void
emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *stib;
   struct ir3_instruction *const *value = ir3_get_src(ctx, &intr->src[3]);
   struct ir3_instruction *const *coords = ir3_get_src(ctx, &intr->src[1]);
   unsigned ncoords = ir3_get_image_coords(intr, NULL);
   enum pipe_format format = nir_intrinsic_format(intr);
   unsigned ncomp = ir3_get_num_components_for_image_format(format);

   /* src0 is offset, src1 is value:
    */
   stib = ir3_STIB(b, ir3_image_to_ibo(ctx, intr->src[0]), 0,
                   ir3_create_collect(b, coords, ncoords), 0,
                   ir3_create_collect(b, value, ncomp), 0);
   stib->cat6.iim_val = ncomp;
   stib->cat6.d = ncoords;
   stib->cat6.type = ir3_get_type_for_image_intrinsic(intr);
   stib->cat6.typed = true;
   stib->barrier_class = IR3_BARRIER_IMAGE_W;
   stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
   ir3_handle_bindless_cat6(stib, intr->src[0]);
   ir3_handle_nonuniform(stib, intr);

   array_insert(b, b->keeps, stib);
}

/* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
static struct ir3_instruction *
emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *atomic, *ibo, *src0, *src1, *dummy;
   struct ir3_instruction *const *coords = ir3_get_src(ctx, &intr->src[1]);
   struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[3])[0];
   unsigned ncoords = ir3_get_image_coords(intr, NULL);
   nir_atomic_op op = nir_intrinsic_atomic_op(intr);

   ibo = ir3_image_to_ibo(ctx, intr->src[0]);

   /* So this gets a bit creative:
    *
    *    src0    - vecN offset/coords
    *    src1.x  - is actually destination register
    *    src1.y  - is 'value' except for cmpxchg where src1.y is 'compare'
    *    src1.z  - is 'value' for cmpxchg
    *
    * Combining the src and dest kinda doesn't work out so well with how
    * scheduling and RA work. So we create a dummy src2 which is tied to the
    * destination in RA (i.e. must be allocated to the same vec2/vec3
    * register) and then immediately extract the first component.
    */
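   /* (Same dummy/tied-destination trick as emit_intrinsic_atomic_ssbo()
    * above, just with image coords in src0 instead of a buffer offset.)
    */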
   dummy = create_immed(b, 0);
   src0 = ir3_create_collect(b, coords, ncoords);

   if (op == nir_atomic_op_cmpxchg) {
      struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[4])[0];
      src1 = ir3_collect(b, dummy, compare, value);
   } else {
      src1 = ir3_collect(b, dummy, value);
   }

   atomic = emit_atomic(b, op, ibo, src0, src1);
   atomic->cat6.iim_val = 1;
   atomic->cat6.d = ncoords;
   atomic->cat6.type = ir3_get_type_for_image_intrinsic(intr);
   atomic->cat6.typed = true;
   atomic->barrier_class = IR3_BARRIER_IMAGE_W;
   atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
   ir3_handle_bindless_cat6(atomic, intr->src[0]);

   /* even if nothing consumes the result, we can't DCE the instruction: */
   array_insert(b, b->keeps, atomic);

   atomic->dsts[0]->wrmask = src1->dsts[0]->wrmask;
   ir3_reg_tie(atomic->dsts[0], atomic->srcs[2]);
   ir3_handle_nonuniform(atomic, intr);
   struct ir3_instruction *split;
   ir3_split_dest(b, &split, atomic, 0, 1);
   return split;
}

static void
emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                          struct ir3_instruction **dst)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *ibo = ir3_image_to_ibo(ctx, intr->src[0]);
   struct ir3_instruction *resinfo = ir3_RESINFO(b, ibo, 0);
   resinfo->cat6.iim_val = 1;
   resinfo->cat6.d = intr->num_components;
   resinfo->cat6.type = TYPE_U32;
   resinfo->cat6.typed = false;
   /* resinfo has no writemask and always writes out 3 components: */
   compile_assert(ctx, intr->num_components <= 3);
   resinfo->dsts[0]->wrmask = MASK(3);
   ir3_handle_bindless_cat6(resinfo, intr->src[0]);
   ir3_handle_nonuniform(resinfo, intr);

   ir3_split_dest(b, dst, resinfo, 0, intr->num_components);
}

static void
emit_intrinsic_load_global_ir3(struct ir3_context *ctx,
                               nir_intrinsic_instr *intr,
                               struct ir3_instruction **dst)
{
   struct ir3_block *b = ctx->block;
   unsigned dest_components = nir_intrinsic_dest_components(intr);
   struct ir3_instruction *addr, *offset;

   addr = ir3_collect(b, ir3_get_src(ctx, &intr->src[0])[0],
                      ir3_get_src(ctx, &intr->src[0])[1]);

   struct ir3_instruction *load;

   unsigned shift = ctx->compiler->gen >= 7 ? 2 : 0;
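   /* ldg can only fold a small constant offset into its immediate field,
    * so larger or non-constant offsets fall back to ldg.a with the offset
    * in a register.  On gen7+ the offset is shifted (see below), which
    * shrinks the range of constants that still fit.
    */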
   bool const_offset_in_bounds =
      nir_src_is_const(intr->src[1]) &&
      nir_src_as_int(intr->src[1]) < ((1 << 10) >> shift) &&
      nir_src_as_int(intr->src[1]) > -((1 << 10) >> shift);

   if (const_offset_in_bounds) {
      load = ir3_LDG(b, addr, 0,
                     create_immed(b, nir_src_as_int(intr->src[1]) << shift),
                     0, create_immed(b, dest_components), 0);
   } else {
      offset = ir3_get_src(ctx, &intr->src[1])[0];
      if (shift) {
         /* A7XX TODO: Move to NIR for it to be properly optimized? */
         offset = ir3_SHL_B(b, offset, 0, create_immed(b, shift), 0);
      }
      load =
         ir3_LDG_A(b, addr, 0, offset, 0, create_immed(b, 0), 0,
                   create_immed(b, 0), 0, create_immed(b, dest_components), 0);
   }

   load->cat6.type = type_uint_size(intr->def.bit_size);
   load->dsts[0]->wrmask = MASK(dest_components);

   load->barrier_class = IR3_BARRIER_BUFFER_R;
   load->barrier_conflict = IR3_BARRIER_BUFFER_W;

   ir3_split_dest(b, dst, load, 0, dest_components);
}

static void
emit_intrinsic_store_global_ir3(struct ir3_context *ctx,
                                nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *value, *addr, *offset;
   unsigned ncomp = nir_intrinsic_src_components(intr, 0);

   addr = ir3_collect(b, ir3_get_src(ctx, &intr->src[1])[0],
                      ir3_get_src(ctx, &intr->src[1])[1]);

   value = ir3_create_collect(b, ir3_get_src(ctx, &intr->src[0]), ncomp);

   struct ir3_instruction *stg;

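   /* Like ldg, stg can only fold sufficiently small constant offsets into
    * its immediate field; otherwise fall back to stg.a with the offset in
    * a register (shifted on gen7+).
    */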
   bool const_offset_in_bounds = nir_src_is_const(intr->src[2]) &&
                                 nir_src_as_int(intr->src[2]) < (1 << 10) &&
                                 nir_src_as_int(intr->src[2]) > -(1 << 10);

   if (const_offset_in_bounds) {
      stg = ir3_STG(b, addr, 0,
                    create_immed(b, nir_src_as_int(intr->src[2])), 0,
                    value, 0,
                    create_immed(b, ncomp), 0);
   } else {
      offset = ir3_get_src(ctx, &intr->src[2])[0];
      if (ctx->compiler->gen >= 7) {
         /* A7XX TODO: Move to NIR for it to be properly optimized? */
         offset = ir3_SHL_B(b, offset, 0, create_immed(b, 2), 0);
      }
      stg =
         ir3_STG_A(b, addr, 0, offset, 0, create_immed(b, 0), 0,
                   create_immed(b, 0), 0, value, 0, create_immed(b, ncomp), 0);
   }

   stg->cat6.type = type_uint_size(intr->src[0].ssa->bit_size);
   stg->cat6.iim_val = 1;

   array_insert(b, b->keeps, stg);

   stg->barrier_class = IR3_BARRIER_BUFFER_W;
   stg->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
}

static struct ir3_instruction *
emit_intrinsic_atomic_global(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *addr, *atomic, *src1;
   struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[1])[0];
   nir_atomic_op op = nir_intrinsic_atomic_op(intr);
   type_t type = nir_atomic_op_type(op) == nir_type_int ? TYPE_S32 : TYPE_U32;

   addr = ir3_collect(b, ir3_get_src(ctx, &intr->src[0])[0],
                      ir3_get_src(ctx, &intr->src[0])[1]);

   if (op == nir_atomic_op_cmpxchg) {
      struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[2])[0];
      src1 = ir3_collect(b, compare, value);
   } else {
      src1 = value;
   }

   switch (op) {
   case nir_atomic_op_iadd:
      atomic = ir3_ATOMIC_G_ADD(b, addr, 0, src1, 0);
      break;
   case nir_atomic_op_imin:
      atomic = ir3_ATOMIC_G_MIN(b, addr, 0, src1, 0);
      type = TYPE_S32;
      break;
   case nir_atomic_op_umin:
      atomic = ir3_ATOMIC_G_MIN(b, addr, 0, src1, 0);
      break;
   case nir_atomic_op_imax:
      atomic = ir3_ATOMIC_G_MAX(b, addr, 0, src1, 0);
      type = TYPE_S32;
      break;
   case nir_atomic_op_umax:
      atomic = ir3_ATOMIC_G_MAX(b, addr, 0, src1, 0);
      break;
   case nir_atomic_op_iand:
      atomic = ir3_ATOMIC_G_AND(b, addr, 0, src1, 0);
      break;
   case nir_atomic_op_ior:
      atomic = ir3_ATOMIC_G_OR(b, addr, 0, src1, 0);
      break;
   case nir_atomic_op_ixor:
      atomic = ir3_ATOMIC_G_XOR(b, addr, 0, src1, 0);
      break;
   case nir_atomic_op_xchg:
      atomic = ir3_ATOMIC_G_XCHG(b, addr, 0, src1, 0);
      break;
   case nir_atomic_op_cmpxchg:
      atomic = ir3_ATOMIC_G_CMPXCHG(b, addr, 0, src1, 0);
      break;
   default:
      unreachable("Unknown global atomic op");
   }

   atomic->cat6.iim_val = 1;
   atomic->cat6.d = 1;
   atomic->cat6.type = type;
   atomic->barrier_class = IR3_BARRIER_BUFFER_W;
   atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;

   /* even if nothing consumes the result, we can't DCE the instruction: */
   array_insert(b, b->keeps, atomic);

   return atomic;
}

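/* Entry points wired up via ir3_context_funcs for a6xx and later: */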
const struct ir3_context_funcs ir3_a6xx_funcs = {
   .emit_intrinsic_load_ssbo = emit_intrinsic_load_ssbo,
   .emit_intrinsic_store_ssbo = emit_intrinsic_store_ssbo,
   .emit_intrinsic_atomic_ssbo = emit_intrinsic_atomic_ssbo,
   .emit_intrinsic_load_image = emit_intrinsic_load_image,
   .emit_intrinsic_store_image = emit_intrinsic_store_image,
   .emit_intrinsic_atomic_image = emit_intrinsic_atomic_image,
   .emit_intrinsic_image_size = emit_intrinsic_image_size,
   .emit_intrinsic_load_global_ir3 = emit_intrinsic_load_global_ir3,
   .emit_intrinsic_store_global_ir3 = emit_intrinsic_store_global_ir3,
   .emit_intrinsic_atomic_global = emit_intrinsic_atomic_global,
};