/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

/**
 * This header file defines all the available intrinsics in one place.  It
 * expands to a list of macros of the form:
 *
 * INTRINSIC(name, num_srcs, src_components, has_dest, dest_components,
 *           num_variables, num_indices, idx0, idx1, idx2, flags)
 *
 * which should correspond one-to-one with the nir_intrinsic_info structure.
 * It is included in both ir.h to create the nir_intrinsic enum (with members
 * of the form nir_intrinsic_(name)) and in opcodes.c to create
 * nir_intrinsic_infos, which is a const array of nir_intrinsic_info
 * structures for each intrinsic.
 */

#define ARR(...) { __VA_ARGS__ }
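/*
 * For illustration, a consumer of this list might define INTRINSIC before
 * including this file roughly as follows.  This is only a sketch of the
 * X-macro pattern; the real definitions in ir.h and opcodes.c differ in
 * detail, and the enumerator and type names below are illustrative only:
 *
 *    #define INTRINSIC(name, num_srcs, src_components, has_dest,          \
 *                      dest_components, num_variables, num_indices,       \
 *                      idx0, idx1, idx2, flags)                           \
 *       nir_intrinsic_##name,
 *    #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
 *
 *    typedef enum {
 *    #include "nir_intrinsics.h"
 *       nir_num_intrinsics = nir_last_intrinsic + 1
 *    } nir_intrinsic_op;
 */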
INTRINSIC(nop, 0, ARR(0), false, 0, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)

INTRINSIC(load_var, 0, ARR(0), true, 0, 1, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(store_var, 1, ARR(0), false, 0, 1, 1, WRMASK, xx, xx, 0)
INTRINSIC(copy_var, 0, ARR(0), false, 0, 2, 0, xx, xx, xx, 0)

/*
 * Interpolation of input.  The interp_var_at* intrinsics are similar to the
 * load_var intrinsic acting on a shader input except that they interpolate
 * the input differently.  The at_sample and at_offset intrinsics take an
 * additional source that is an integer sample id or a vec2 position offset
 * respectively.
 */

INTRINSIC(interp_var_at_centroid, 0, ARR(0), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_sample, 1, ARR(1), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(interp_var_at_offset, 1, ARR(2), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/*
 * Ask the driver for the size of a given buffer.  It takes the buffer index
 * as its source.
 */
INTRINSIC(get_buffer_size, 1, ARR(1), true, 1, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/*
 * A barrier is an intrinsic with no inputs or outputs that, in general,
 * cannot be moved around or optimized away.
 */
#define BARRIER(name) INTRINSIC(name, 0, ARR(0), false, 0, 0, 0, xx, xx, xx, 0)

BARRIER(barrier)
BARRIER(discard)

/*
 * Memory barrier with semantics analogous to the memoryBarrier() GLSL
 * intrinsic.
 */
BARRIER(memory_barrier)

/*
 * Shader clock intrinsic with semantics analogous to the clock2x32ARB()
 * GLSL intrinsic.  The latter can be used as a code motion barrier, which
 * is currently not feasible with NIR.
 */
INTRINSIC(shader_clock, 0, ARR(0), true, 2, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)

/*
 * Shader ballot intrinsics with semantics analogous to the
 *
 *    ballotARB()
 *    readInvocationARB()
 *    readFirstInvocationARB()
 *
 * GLSL functions from ARB_shader_ballot.
 */
INTRINSIC(ballot, 1, ARR(1), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(read_invocation, 2, ARR(0, 1), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(read_first_invocation, 1, ARR(0), true, 0, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)

/*
 * Memory barrier with semantics analogous to the compute shader
 * groupMemoryBarrier(), memoryBarrierAtomicCounter(), memoryBarrierBuffer(),
 * memoryBarrierImage() and memoryBarrierShared() GLSL intrinsics.
 */
BARRIER(group_memory_barrier)
BARRIER(memory_barrier_atomic_counter)
BARRIER(memory_barrier_buffer)
BARRIER(memory_barrier_image)
BARRIER(memory_barrier_shared)

/** A conditional discard, with a single boolean source. */
INTRINSIC(discard_if, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)

/** ARB_shader_group_vote intrinsics */
INTRINSIC(vote_any, 1, ARR(1), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(vote_all, 1, ARR(1), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(vote_eq, 1, ARR(1), true, 1, 0, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)

/**
 * Basic Geometry Shader intrinsics.
 *
 * emit_vertex implements GLSL's EmitStreamVertex() built-in.  It takes a
 * single index, which is the stream ID to write to.
 *
 * end_primitive implements GLSL's EndPrimitive() built-in.
 */
INTRINSIC(emit_vertex, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(end_primitive, 0, ARR(0), false, 0, 0, 1, STREAM_ID, xx, xx, 0)

/**
 * Geometry Shader intrinsics with a vertex count.
 *
 * Alternatively, drivers may implement these intrinsics and use
 * nir_lower_gs_intrinsics() to convert from the basic intrinsics.
 *
 * These maintain a count of the number of vertices emitted, as an additional
 * unsigned integer source.
 */
INTRINSIC(emit_vertex_with_counter, 1, ARR(1), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(end_primitive_with_counter, 1, ARR(1), false, 0, 0, 1, STREAM_ID, xx, xx, 0)
INTRINSIC(set_vertex_count, 1, ARR(1), false, 0, 0, 0, xx, xx, xx, 0)
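/*
 * As a rough illustration of the counter-carrying variants above, a lowering
 * pass such as nir_lower_gs_intrinsics() conceptually rewrites each basic
 * emit_vertex into something like the following pseudo-code (the exact guard
 * and names are illustrative only):
 *
 *    if (count < max_vertices) {
 *       emit_vertex_with_counter(count) (stream_id)
 *       count = count + 1
 *    }
 *
 * with the final count written back via set_vertex_count at the end of the
 * shader.
 */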
/*
 * Atomic counters
 *
 * The *_var variants take an atomic_uint nir_variable, while the other,
 * lowered, variants take a constant buffer index and register offset.
 */

#define ATOMIC(name, flags) \
   INTRINSIC(name##_var, 0, ARR(0), true, 1, 1, 0, xx, xx, xx, flags) \
   INTRINSIC(name, 1, ARR(1), true, 1, 0, 1, BASE, xx, xx, flags)
#define ATOMIC2(name) \
   INTRINSIC(name##_var, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0) \
   INTRINSIC(name, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
#define ATOMIC3(name) \
   INTRINSIC(name##_var, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0) \
   INTRINSIC(name, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)

ATOMIC(atomic_counter_inc, 0)
ATOMIC(atomic_counter_dec, 0)
ATOMIC(atomic_counter_read, NIR_INTRINSIC_CAN_ELIMINATE)
ATOMIC2(atomic_counter_add)
ATOMIC2(atomic_counter_min)
ATOMIC2(atomic_counter_max)
ATOMIC2(atomic_counter_and)
ATOMIC2(atomic_counter_or)
ATOMIC2(atomic_counter_xor)
ATOMIC2(atomic_counter_exchange)
ATOMIC3(atomic_counter_comp_swap)

/*
 * Image load, store and atomic intrinsics.
 *
 * All image intrinsics take an image target passed as a nir_variable.  Image
 * variables contain a number of memory and layout qualifiers that influence
 * the semantics of the intrinsic.
 *
 * All image intrinsics take a four-coordinate vector and a sample index as
 * their first two sources, determining the location within the image that
 * will be accessed by the intrinsic.  Components not applicable to the image
 * target in use are undefined.  Image store takes an additional
 * four-component argument with the value to be written, and image atomic
 * operations take either one or two additional scalar arguments with the
 * same meaning as in the ARB_shader_image_load_store specification.
 */
INTRINSIC(image_load, 2, ARR(4, 1), true, 4, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE)
INTRINSIC(image_store, 3, ARR(4, 1, 4), false, 0, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_add, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_min, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_max, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_and, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_or, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_xor, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_exchange, 3, ARR(4, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_atomic_comp_swap, 4, ARR(4, 1, 1, 1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(image_size, 0, ARR(0), true, 0, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(image_samples, 0, ARR(0), true, 1, 1, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
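/*
 * For example, a GLSL call such as imageStore(img, ivec2(x, y), data) on a
 * 2D image could be represented roughly as the following sketch (not actual
 * NIR syntax; unused coordinate components are undefined and the sample
 * index is 0 for non-multisampled images):
 *
 *    image_store(img) (coord = vec4(x, y, undef, undef), sample = 0,
 *                      value = data)
 */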
/*
 * Vulkan descriptor set intrinsics
 *
 * The Vulkan API uses a different binding model from GL.  In the Vulkan
 * API, all external resources are represented by a tuple:
 *
 * (descriptor set, binding, array index)
 *
 * where the array index is the only thing allowed to be indirect.  The
 * vulkan_resource_index intrinsic takes the descriptor set and binding as
 * its two indices and the array index as its source.
 *
 * The intended usage is that the shader will call vulkan_resource_index to
 * get an index and then pass that as the buffer index to ubo/ssbo calls.
 *
 * The vulkan_resource_reindex intrinsic takes a resource index in src0
 * (the result of a vulkan_resource_index or vulkan_resource_reindex) which
 * corresponds to the tuple (set, binding, index) and computes an index
 * corresponding to the tuple (set, binding, index + src1).
 */
INTRINSIC(vulkan_resource_index, 1, ARR(1), true, 1, 0, 2,
          DESC_SET, BINDING, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
INTRINSIC(vulkan_resource_reindex, 2, ARR(1, 1), true, 1, 0, 0, xx, xx, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
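/*
 * For example, a load from a UBO declared with set=0, binding=2 and a
 * dynamic array index could look roughly like this (a sketch, not actual
 * NIR syntax):
 *
 *    index = vulkan_resource_index(array_index) (desc_set = 0, binding = 2)
 *    value = load_ubo(index, byte_offset)
 */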
/*
 * variable atomic intrinsics
 *
 * All of these variable atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new value
 * to memory, and return the original value read.
 *
 * All operations take 1 source except CompSwap, which takes 2.  These
 * sources represent:
 *
 * 0: The data parameter to the atomic function (i.e. the value to add
 *    in var_atomic_add, etc).
 * 1: For CompSwap only: the second data parameter.
 *
 * All operations take 1 variable deref.
 */
INTRINSIC(var_atomic_add, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_imin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_umin, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_imax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_umax, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_and, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_or, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_xor, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_exchange, 1, ARR(1), true, 1, 1, 0, xx, xx, xx, 0)
INTRINSIC(var_atomic_comp_swap, 2, ARR(1, 1), true, 1, 1, 0, xx, xx, xx, 0)

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap, which takes 4.  These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
INTRINSIC(ssbo_atomic_add, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_imin, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_umin, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_imax, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_umax, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_and, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_or, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_xor, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_exchange, 3, ARR(1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)
INTRINSIC(ssbo_atomic_comp_swap, 4, ARR(1, 1, 1, 1), true, 1, 0, 0, xx, xx, xx, 0)

/*
 * CS shared variable atomic intrinsics
 *
 * All of the shared variable atomic memory operations read a value from
 * memory, compute a new value using one of the operations below, write the
 * new value to memory, and return the original value read.
 *
 * All operations take 2 sources except CompSwap, which takes 3.  These
 * sources represent:
 *
 * 0: The offset into the shared variable storage region that the atomic
 *    operation will operate on.
 * 1: The data parameter to the atomic function (i.e. the value to add
 *    in shared_atomic_add, etc).
 * 2: For CompSwap only: the second data parameter.
 */
INTRINSIC(shared_atomic_add, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_imin, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_umin, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_imax, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_umax, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_and, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_or, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_xor, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_exchange, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
INTRINSIC(shared_atomic_comp_swap, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
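/*
 * As a concrete illustration (a sketch only, not actual NIR syntax), a GLSL
 * atomicAdd(shared_counter, 1u) in a compute shader could be lowered to
 * something like:
 *
 *    old = shared_atomic_add(byte_offset, 1) (base = 0)
 *
 * where the BASE index and the offset source together locate shared_counter
 * within the shared storage region.
 */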
/* Used by nir_builder.h to generate loader helpers for the system values.
 */
#ifndef DEFINE_SYSTEM_VALUE
#define DEFINE_SYSTEM_VALUE(name)
#endif

#define SYSTEM_VALUE(name, components, num_indices, idx0, idx1, idx2) \
   DEFINE_SYSTEM_VALUE(name) \
   INTRINSIC(load_##name, 0, ARR(0), true, components, 0, num_indices, \
             idx0, idx1, idx2, \
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

SYSTEM_VALUE(frag_coord, 4, 0, xx, xx, xx)
SYSTEM_VALUE(front_face, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(vertex_id_zero_base, 1, 0, xx, xx, xx)
SYSTEM_VALUE(base_vertex, 1, 0, xx, xx, xx)
SYSTEM_VALUE(instance_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(base_instance, 1, 0, xx, xx, xx)
SYSTEM_VALUE(draw_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(sample_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(sample_pos, 2, 0, xx, xx, xx)
SYSTEM_VALUE(sample_mask_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(primitive_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(invocation_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(tess_coord, 3, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_outer, 4, 0, xx, xx, xx)
SYSTEM_VALUE(tess_level_inner, 2, 0, xx, xx, xx)
SYSTEM_VALUE(patch_vertices_in, 1, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(local_invocation_index, 1, 0, xx, xx, xx)
SYSTEM_VALUE(work_group_id, 3, 0, xx, xx, xx)
SYSTEM_VALUE(user_clip_plane, 4, 1, UCP_ID, xx, xx)
SYSTEM_VALUE(num_work_groups, 3, 0, xx, xx, xx)
SYSTEM_VALUE(helper_invocation, 1, 0, xx, xx, xx)
SYSTEM_VALUE(alpha_ref_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(layer_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(view_index, 1, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_size, 1, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_invocation, 1, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_eq_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_ge_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_gt_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_le_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_lt_mask, 0, 0, xx, xx, xx)
SYSTEM_VALUE(subgroup_id, 1, 0, xx, xx, xx)
SYSTEM_VALUE(local_group_size, 3, 0, xx, xx, xx)

/* Blend constant color values.  Float values are clamped. */
SYSTEM_VALUE(blend_const_color_r_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_g_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_b_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_a_float, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_rgba8888_unorm, 1, 0, xx, xx, xx)
SYSTEM_VALUE(blend_const_color_aaaa8888_unorm, 1, 0, xx, xx, xx)
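/*
 * To illustrate the DEFINE_SYSTEM_VALUE hook above, a builder header could
 * define it before including this file to emit one load helper per system
 * value.  This is only a sketch of the idea; the helper name and emission
 * call below are illustrative rather than the actual nir_builder.h
 * definitions:
 *
 *    #define DEFINE_SYSTEM_VALUE(name)                                    \
 *       static inline nir_ssa_def *                                       \
 *       nir_load_##name(nir_builder *build)                               \
 *       {                                                                 \
 *          return nir_load_system_value(build,                            \
 *                                       nir_intrinsic_load_##name, 0);    \
 *       }
 */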
/**
 * Barycentric coordinate intrinsics.
 *
 * These set up the barycentric coordinates for a particular interpolation.
 * The first three are for the simple cases: pixel, centroid, or per-sample
 * (at gl_SampleID).  The next two handle interpolating at a specified
 * sample location, or interpolating with a vec2 offset.
 *
 * The interp_mode index should be one of the INTERP_MODE_SMOOTH or
 * INTERP_MODE_NOPERSPECTIVE enum values.
 *
 * The vec2 value produced by these intrinsics is intended for use as the
 * barycoord source of a load_interpolated_input intrinsic.
 */

#define BARYCENTRIC(name, sources, source_components) \
   INTRINSIC(load_barycentric_##name, sources, ARR(source_components), \
             true, 2, 0, 1, INTERP_MODE, xx, xx, \
             NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/* no sources.  const_index[] = { interp_mode } */
BARYCENTRIC(pixel, 0, 0)
BARYCENTRIC(centroid, 0, 0)
BARYCENTRIC(sample, 0, 0)
/* src[] = { sample_id }.  const_index[] = { interp_mode } */
BARYCENTRIC(at_sample, 1, 1)
/* src[] = { offset.xy }.  const_index[] = { interp_mode } */
BARYCENTRIC(at_offset, 1, 2)

/*
 * Load operations pull data from some piece of GPU memory.  All load
 * operations operate in terms of offsets into some piece of theoretical
 * memory.  Loads from externally visible memory (UBO and SSBO) simply take a
 * byte offset as a source.  Loads from opaque memory (uniforms, inputs,
 * etc.) take a base+offset pair where the base (const_index[0]) gives the
 * location of the start of the variable being loaded and the offset source
 * is an offset into that variable.
 *
 * Uniform load operations have a second "range" index that specifies the
 * range (starting at base) of the data from which we are loading.  If
 * const_index[1] == 0, then the range is unknown.
 *
 * Some load operations such as UBO/SSBO load and per_vertex loads take an
 * additional source to specify which UBO/SSBO/vertex to load from.
 *
 * The exact address type depends on the lowering pass that generates the
 * load/store intrinsics.  Typically, this is vec4 units for things such as
 * varying slots and float units for fragment shader inputs.  UBO and SSBO
 * offsets are always in bytes.
 */

#define LOAD(name, srcs, num_indices, idx0, idx1, idx2, flags) \
   INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, num_indices, idx0, idx1, idx2, flags)

/* src[] = { offset }. const_index[] = { base, range } */
LOAD(uniform, 1, 2, BASE, RANGE, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { buffer_index, offset }. No const_index */
LOAD(ubo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { offset }. const_index[] = { base, component } */
LOAD(input, 1, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { vertex, offset }. const_index[] = { base, component } */
LOAD(per_vertex_input, 2, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/* src[] = { barycoord, offset }. const_index[] = { base, component } */
INTRINSIC(load_interpolated_input, 2, ARR(2, 1), true, 0, 0,
          2, BASE, COMPONENT, xx,
          NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)

/* src[] = { buffer_index, offset }. No const_index */
LOAD(ssbo, 2, 0, xx, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base, component } */
LOAD(output, 1, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { vertex, offset }. const_index[] = { base, component } */
LOAD(per_vertex_output, 2, 2, BASE, COMPONENT, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base } */
LOAD(shared, 1, 1, BASE, xx, xx, NIR_INTRINSIC_CAN_ELIMINATE)
/* src[] = { offset }. const_index[] = { base, range } */
LOAD(push_constant, 1, 2, BASE, RANGE, xx,
     NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
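/*
 * Putting the barycentric and load intrinsics together, a GLSL
 * interpolateAtOffset(color_in, off) in a fragment shader could look roughly
 * like this pseudo-IR (a sketch; the names and the base/component values of
 * 0 are illustrative only):
 *
 *    bary  = load_barycentric_at_offset(off) (INTERP_MODE_SMOOTH)
 *    value = load_interpolated_input(bary, offset_src) (base = 0, component = 0)
 *
 * where offset_src is the offset into the input variable being read.
 */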
/*
 * Stores work the same way as loads, except now the first source is the
 * value to store and the second (and possibly third) source specifies where
 * to store the value.  SSBO and shared memory stores also take a write mask
 * in their const_index array; see the per-intrinsic comments below for the
 * exact layout.
 */

#define STORE(name, srcs, num_indices, idx0, idx1, idx2, flags) \
   INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, num_indices, idx0, idx1, idx2, flags)

/* src[] = { value, offset }. const_index[] = { base, write_mask, component } */
STORE(output, 2, 3, BASE, WRMASK, COMPONENT, 0)
/* src[] = { value, vertex, offset }.
 * const_index[] = { base, write_mask, component }
 */
STORE(per_vertex_output, 3, 3, BASE, WRMASK, COMPONENT, 0)
/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
STORE(ssbo, 3, 1, WRMASK, xx, xx, 0)
/* src[] = { value, offset }. const_index[] = { base, write_mask } */
STORE(shared, 2, 2, BASE, WRMASK, xx, 0)

LAST_INTRINSIC(store_shared)

#undef DEFINE_SYSTEM_VALUE
#undef INTRINSIC
#undef LAST_INTRINSIC