/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 */

#ifndef TU_SHADER_H
#define TU_SHADER_H

#include "tu_common.h"
#include "tu_cs.h"
#include "tu_suballoc.h"
#include "tu_descriptor_set.h"

struct tu_inline_ubo
{
   /* Push the data at BINDLESS_BASE[base] + offset */
   unsigned base;
   unsigned offset;

   /* If true, push the base address instead */
   bool push_address;

   /* Push it to this location in the const file, in vec4s */
   unsigned const_offset_vec4;

   /* How much to push */
   unsigned size_vec4;
};

/* The meaning of the range depends on "type". If it's
 * IR3_PUSH_CONSTS_PER_STAGE, then it's the range used by this shader. If
 * it's IR3_PUSH_CONSTS_SHARED then it's the overall range as provided by
 * the pipeline layout and must match between shaders where it's non-zero.
 */
struct tu_push_constant_range
{
   uint32_t lo;
   uint32_t dwords;
   enum ir3_push_consts_type type;
};

struct tu_const_state
{
   struct tu_push_constant_range push_consts;
   uint32_t dynamic_offset_loc;
   unsigned num_inline_ubos;
   struct tu_inline_ubo ubos[MAX_INLINE_UBOS];

   struct ir3_driver_ubo fdm_ubo;
   struct ir3_driver_ubo dynamic_offsets_ubo;
   struct ir3_driver_ubo inline_uniforms_ubo;
};
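
/* Illustrative sketch only, not part of the driver API: assuming "lo" and
 * "dwords" are both expressed in 32-bit dwords, a Vulkan push constant range
 * given in bytes could be mapped onto a per-stage tu_push_constant_range
 * roughly as below. The helper name and the byte-to-dword assumption are
 * hypothetical.
 */
static inline struct tu_push_constant_range
tu_example_push_constant_range(uint32_t offset_bytes, uint32_t size_bytes)
{
   struct tu_push_constant_range range;
   range.lo = offset_bytes / 4;    /* assumed: start of the range in dwords */
   range.dwords = size_bytes / 4;  /* assumed: size of the range in dwords */
   range.type = IR3_PUSH_CONSTS_PER_STAGE;
   return range;
}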

struct tu_shader
{
   struct vk_pipeline_cache_object base;

   const struct ir3_shader_variant *variant;
   const struct ir3_shader_variant *safe_const_variant;

   struct tu_suballoc_bo bo;
   struct tu_cs cs;
   struct tu_bo *pvtmem_bo;

   struct tu_draw_state state;
   struct tu_draw_state safe_const_state;
   struct tu_draw_state binning_state;

   struct tu_const_state const_state;
   uint32_t view_mask;
   uint8_t active_desc_sets;

   /* The dynamic buffer descriptor size for descriptor sets that we know
    * about. This is used when linking to piece together the sizes and from
    * there calculate the offsets. It's -1 if we don't know because the
    * descriptor set layout is NULL.
    */
   int dynamic_descriptor_sizes[MAX_SETS];

   union {
      struct {
         unsigned patch_type;
         enum a6xx_tess_output tess_output_upper_left, tess_output_lower_left;
         enum a6xx_tess_spacing tess_spacing;
      } tes;

      struct {
         bool per_samp;
         bool has_fdm;

         struct {
            uint32_t status;
            bool force_late_z;
         } lrz;
      } fs;
   };
};

struct tu_shader_key {
   unsigned multiview_mask;
   bool force_sample_interp;
   bool fragment_density_map;
   uint8_t unscaled_input_fragcoord;
   enum ir3_wavesize_option api_wavesize, real_wavesize;
};

extern const struct vk_pipeline_cache_object_ops tu_shader_ops;

bool
tu_nir_lower_multiview(nir_shader *nir, uint32_t mask, struct tu_device *dev);

nir_shader *
tu_spirv_to_nir(struct tu_device *dev,
                void *mem_ctx,
                const VkPipelineShaderStageCreateInfo *stage_info,
                gl_shader_stage stage);

void
tu6_emit_xs(struct tu_cs *cs,
            gl_shader_stage stage,
            const struct ir3_shader_variant *xs,
            const struct tu_pvtmem_config *pvtmem,
            uint64_t binary_iova);

template <chip CHIP>
void
tu6_emit_vs(struct tu_cs *cs, const struct ir3_shader_variant *vs,
            uint32_t view_mask);

template <chip CHIP>
void
tu6_emit_hs(struct tu_cs *cs, const struct ir3_shader_variant *hs);

template <chip CHIP>
void
tu6_emit_ds(struct tu_cs *cs, const struct ir3_shader_variant *ds);

template <chip CHIP>
void
tu6_emit_gs(struct tu_cs *cs, const struct ir3_shader_variant *gs);

template <chip CHIP>
void
tu6_emit_fs(struct tu_cs *cs, const struct ir3_shader_variant *fs);

VkResult
tu_shader_create(struct tu_device *dev,
                 struct tu_shader **shader_out,
                 nir_shader *nir,
                 const struct tu_shader_key *key,
                 const struct ir3_shader_key *ir3_key,
                 const void *key_data,
                 size_t key_size,
                 struct tu_pipeline_layout *layout,
                 bool executable_info);

void
tu_shader_key_subgroup_size(struct tu_shader_key *key,
                            bool allow_varying_subgroup_size,
                            bool require_full_subgroups,
                            const VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *subgroup_info,
                            struct tu_device *dev);

VkResult
tu_compile_shaders(struct tu_device *device,
                   const VkPipelineShaderStageCreateInfo **stage_infos,
                   nir_shader **nir,
                   const struct tu_shader_key *keys,
                   struct tu_pipeline_layout *layout,
                   const unsigned char *pipeline_sha1,
                   struct tu_shader **shaders,
                   char **nir_initial_disasm,
                   void *nir_initial_disasm_mem_ctx,
                   nir_shader **nir_out,
                   VkPipelineCreationFeedback *stage_feedbacks);

VkResult
tu_init_empty_shaders(struct tu_device *device);

void
tu_destroy_empty_shaders(struct tu_device *device);

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader);

#endif /* TU_SHADER_H */
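
/* Illustrative usage sketch (assumptions only, not the driver's actual call
 * sequence): a hypothetical caller that already has a nir_shader, an
 * ir3_shader_key, a pipeline layout, and a cache key might drive the
 * declarations above roughly like this:
 *
 *    struct tu_shader_key key = {};
 *    tu_shader_key_subgroup_size(&key, allow_varying_subgroup_size,
 *                                require_full_subgroups, subgroup_info, dev);
 *
 *    struct tu_shader *shader;
 *    VkResult result = tu_shader_create(dev, &shader, nir, &key, &ir3_key,
 *                                       cache_key, sizeof(cache_key), layout,
 *                                       false);
 *    if (result == VK_SUCCESS)
 *       tu_shader_destroy(dev, shader);
 */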