/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#include "elk_shader.h"

#ifdef __cplusplus
#include "elk_ir_vec4.h"
#include "elk_ir_performance.h"
#include "elk_vec4_builder.h"
#include "elk_vec4_live_variables.h"
#endif

#include "compiler/glsl/ir.h"
#include "compiler/nir/nir.h"


#ifdef __cplusplus
extern "C" {
#endif

const unsigned *
elk_vec4_generate_assembly(const struct elk_compiler *compiler,
                           const struct elk_compile_params *params,
                           const nir_shader *nir,
                           struct elk_vue_prog_data *prog_data,
                           const struct elk_cfg_t *cfg,
                           const elk::performance &perf,
                           bool debug_enabled);

#ifdef __cplusplus
} /* extern "C" */

namespace elk {
/**
 * The vertex shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
 * fixed-function) into VS IR.
 */
class vec4_visitor : public elk_backend_shader
{
public:
   vec4_visitor(const struct elk_compiler *compiler,
                const struct elk_compile_params *params,
                const struct elk_sampler_prog_key_data *key,
                struct elk_vue_prog_data *prog_data,
                const nir_shader *shader,
                bool no_spills,
                bool debug_enabled);

   dst_reg dst_null_f()
   {
      return dst_reg(elk_null_reg());
   }

   dst_reg dst_null_df()
   {
      return dst_reg(retype(elk_null_reg(), ELK_REGISTER_TYPE_DF));
   }

   dst_reg dst_null_d()
   {
      return dst_reg(retype(elk_null_reg(), ELK_REGISTER_TYPE_D));
   }

   dst_reg dst_null_ud()
   {
      return dst_reg(retype(elk_null_reg(), ELK_REGISTER_TYPE_UD));
   }

   const struct elk_sampler_prog_key_data * const key_tex;
   struct elk_vue_prog_data * const prog_data;
   char *fail_msg;
   bool failed;

   /**
    * GLSL IR currently being processed, which is associated with our
    * driver IR instructions for debugging purposes.
    */
   const void *base_ir;
   const char *current_annotation;

   int first_non_payload_grf;
   unsigned ubo_push_start[4];
   unsigned push_length;
   unsigned int max_grf;
   elk_analysis<elk::vec4_live_variables, elk_backend_shader> live_analysis;
   elk_analysis<elk::performance, vec4_visitor> performance_analysis;

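   /* Illustrative note, not part of the declarations: the elk_analysis
    * wrappers above cache their result, so a pass is expected to fetch
    * liveness on demand (assuming the elk_analysis template exposes
    * require(), as its brw ancestor does), e.g.
    *
    *    const vec4_live_variables &live = live_analysis.require();
    *
    * and to report IR changes through invalidate_analysis() below so a
    * stale result is recomputed on the next require().
    */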
   /* Regs for vertex results.  Generated at ir_variable visiting time
    * for the ir->locations used.
    */
   dst_reg output_reg[VARYING_SLOT_TESS_MAX][4];
   unsigned output_num_components[VARYING_SLOT_TESS_MAX][4];
   const char *output_reg_annotation[VARYING_SLOT_TESS_MAX];
   int uniforms;

   bool run();
   void fail(const char *msg, ...);

   int setup_uniforms(int payload_reg);

   bool reg_allocate_trivial();
   bool reg_allocate();
   void evaluate_spill_costs(float *spill_costs, bool *no_spill);
   int choose_spill_reg(struct ra_graph *g);
   void spill_reg(unsigned spill_reg);
   void move_grf_array_access_to_scratch();
   void split_uniform_registers();
   void setup_push_ranges();
   virtual void invalidate_analysis(elk::analysis_dependency_class c);
   void split_virtual_grfs();
   bool opt_vector_float();
   bool opt_reduce_swizzle();
   bool dead_code_eliminate();
   bool opt_cmod_propagation();
   bool opt_copy_propagation(bool do_constant_prop = true);
   bool opt_cse_local(elk_bblock_t *block, const vec4_live_variables &live);
   bool opt_cse();
   bool opt_algebraic();
   bool opt_register_coalesce();
   bool eliminate_find_live_channel();
   bool is_dep_ctrl_unsafe(const vec4_instruction *inst);
   void opt_set_dependency_control();
   void opt_schedule_instructions();
   void convert_to_hw_regs();
   void fixup_3src_null_dest();

   bool is_supported_64bit_region(vec4_instruction *inst, unsigned arg);
   bool lower_simd_width();
   bool scalarize_df();
   bool lower_64bit_mad_to_mul_add();
   void apply_logical_swizzle(struct elk_reg *hw_reg,
                              vec4_instruction *inst, int arg);

   vec4_instruction *emit(vec4_instruction *inst);

   vec4_instruction *emit(enum elk_opcode opcode);
   vec4_instruction *emit(enum elk_opcode opcode, const dst_reg &dst);
   vec4_instruction *emit(enum elk_opcode opcode, const dst_reg &dst,
                          const src_reg &src0);
   vec4_instruction *emit(enum elk_opcode opcode, const dst_reg &dst,
                          const src_reg &src0, const src_reg &src1);
   vec4_instruction *emit(enum elk_opcode opcode, const dst_reg &dst,
                          const src_reg &src0, const src_reg &src1,
                          const src_reg &src2);

   vec4_instruction *emit_before(elk_bblock_t *block,
                                 vec4_instruction *inst,
                                 vec4_instruction *new_inst);

#define EMIT1(op) vec4_instruction *op(const dst_reg &, const src_reg &);
#define EMIT2(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &);
#define EMIT3(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &, const src_reg &);
   EMIT1(MOV)
   EMIT1(NOT)
   EMIT1(RNDD)
   EMIT1(RNDE)
   EMIT1(RNDZ)
   EMIT1(FRC)
   EMIT1(F32TO16)
   EMIT1(F16TO32)
   EMIT2(ADD)
   EMIT2(MUL)
   EMIT2(MACH)
   EMIT2(MAC)
   EMIT2(AND)
   EMIT2(OR)
   EMIT2(XOR)
   EMIT2(DP3)
   EMIT2(DP4)
   EMIT2(DPH)
   EMIT2(SHL)
   EMIT2(SHR)
   EMIT2(ASR)
   vec4_instruction *CMP(dst_reg dst, src_reg src0, src_reg src1,
                         enum elk_conditional_mod condition);
   vec4_instruction *IF(src_reg src0, src_reg src1,
                        enum elk_conditional_mod condition);
   vec4_instruction *IF(enum elk_predicate predicate);
   EMIT1(SCRATCH_READ)
   EMIT2(SCRATCH_WRITE)
   EMIT3(LRP)
   EMIT1(BFREV)
   EMIT3(BFE)
   EMIT2(BFI1)
   EMIT3(BFI2)
   EMIT1(FBH)
   EMIT1(FBL)
   EMIT1(CBIT)
   EMIT1(LZD)
   EMIT3(MAD)
   EMIT2(ADDC)
   EMIT2(SUBB)
   EMIT1(DIM)

#undef EMIT1
#undef EMIT2
#undef EMIT3

   vec4_instruction *emit_minmax(enum elk_conditional_mod conditionalmod, dst_reg dst,
                                 src_reg src0, src_reg src1);

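   /* Usage sketch, illustrative only: the EMIT1/2/3 factories above
    * declare builders that construct a vec4_instruction without
    * appending it, so callers typically wrap them in emit(), e.g.
    *
    *    emit(ADD(dst, src0, src1));
    *    emit(CMP(dst_null_d(), a, b, ELK_CONDITIONAL_GE));
    *
    * with dst, src0, src1, a and b being caller-built dst_reg/src_reg
    * values, and ELK_CONDITIONAL_GE one of the conditional modifiers
    * from the elk register API.
    */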
   /**
    * Copy any live channel from \p src to the first channel of the
    * result.
    */
   src_reg emit_uniformize(const src_reg &src);

   /** Fix all float operands of a 3-source instruction. */
   void fix_float_operands(src_reg op[3], nir_alu_instr *instr);

   src_reg fix_3src_operand(const src_reg &src);

   vec4_instruction *emit_math(enum elk_opcode opcode, const dst_reg &dst, const src_reg &src0,
                               const src_reg &src1 = src_reg());

   src_reg fix_math_operand(const src_reg &src);

   void emit_pack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_unorm_4x8(const dst_reg &dst, src_reg src0);
   void emit_unpack_snorm_4x8(const dst_reg &dst, src_reg src0);
   void emit_pack_unorm_4x8(const dst_reg &dst, const src_reg &src0);
   void emit_pack_snorm_4x8(const dst_reg &dst, const src_reg &src0);

   src_reg emit_mcs_fetch(const glsl_type *coordinate_type, src_reg coordinate,
                          src_reg surface);

   void emit_ndc_computation();
   void emit_psiz_and_flags(dst_reg reg);
   vec4_instruction *emit_generic_urb_slot(dst_reg reg, int varying, int comp);
   virtual void emit_urb_slot(dst_reg reg, int varying);

   src_reg get_scratch_offset(elk_bblock_t *block, vec4_instruction *inst,
                              src_reg *reladdr, int reg_offset);
   void emit_scratch_read(elk_bblock_t *block, vec4_instruction *inst,
                          dst_reg dst,
                          src_reg orig_src,
                          int base_offset);
   void emit_scratch_write(elk_bblock_t *block, vec4_instruction *inst,
                           int base_offset);
   void emit_pull_constant_load_reg(dst_reg dst,
                                    src_reg surf_index,
                                    src_reg offset,
                                    elk_bblock_t *before_block,
                                    vec4_instruction *before_inst);
   src_reg emit_resolve_reladdr(int scratch_loc[], elk_bblock_t *block,
                                vec4_instruction *inst, src_reg src);

   void resolve_ud_negate(src_reg *reg);

   void emit_shader_float_controls_execution_mode();

   bool lower_minmax();

   src_reg get_timestamp();

   virtual void dump_instruction_to_file(const elk_backend_instruction *inst, FILE *file) const;

   bool optimize_predicate(nir_alu_instr *instr, enum elk_predicate *predicate);

   void emit_conversion_from_double(dst_reg dst, src_reg src);
   void emit_conversion_to_double(dst_reg dst, src_reg src);

   vec4_instruction *shuffle_64bit_data(dst_reg dst, src_reg src,
                                        bool for_write,
                                        bool for_scratch = false,
                                        elk_bblock_t *block = NULL,
                                        vec4_instruction *ref = NULL);

   virtual void emit_nir_code();
   virtual void nir_setup_uniforms();
   virtual void nir_emit_impl(nir_function_impl *impl);
   virtual void nir_emit_cf_list(exec_list *list);
   virtual void nir_emit_if(nir_if *if_stmt);
   virtual void nir_emit_loop(nir_loop *loop);
   virtual void nir_emit_block(nir_block *block);
   virtual void nir_emit_instr(nir_instr *instr);
   virtual void nir_emit_load_const(nir_load_const_instr *instr);
   src_reg get_nir_ssbo_intrinsic_index(nir_intrinsic_instr *instr);
   virtual void nir_emit_intrinsic(nir_intrinsic_instr *instr);
   virtual void nir_emit_alu(nir_alu_instr *instr);
   virtual void nir_emit_jump(nir_jump_instr *instr);
   virtual void nir_emit_texture(nir_tex_instr *instr);
   virtual void nir_emit_undef(nir_undef_instr *instr);
   virtual void nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr);

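   /* Dispatch sketch, illustrative only (the real switch lives in the
    * NIR translation .cpp, not in this header): nir_emit_instr() fans
    * out on instr->type to the virtual handlers above, roughly
    *
    *    case nir_instr_type_alu:
    *       nir_emit_alu(nir_instr_as_alu(instr));
    *       break;
    *    case nir_instr_type_intrinsic:
    *       nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
    *       break;
    */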
   dst_reg get_nir_def(const nir_def &def, enum elk_reg_type type);
   dst_reg get_nir_def(const nir_def &def, nir_alu_type type);
   dst_reg get_nir_def(const nir_def &def);
   src_reg get_nir_src(const nir_src &src, enum elk_reg_type type,
                       unsigned num_components = 4);
   src_reg get_nir_src(const nir_src &src, nir_alu_type type,
                       unsigned num_components = 4);
   src_reg get_nir_src(const nir_src &src,
                       unsigned num_components = 4);
   src_reg get_nir_src_imm(const nir_src &src);
   src_reg get_indirect_offset(nir_intrinsic_instr *instr);

   dst_reg *nir_ssa_values;

protected:
   void emit_vertex();
   void setup_payload_interference(struct ra_graph *g, int first_payload_node,
                                   int reg_node_count);
   virtual void setup_payload() = 0;
   virtual void emit_prolog() = 0;
   virtual void emit_thread_end() = 0;
   virtual void emit_urb_write_header(int mrf) = 0;
   virtual vec4_instruction *emit_urb_write_opcode(bool complete) = 0;
   virtual void gs_emit_vertex(int stream_id);
   virtual void gs_end_primitive();

private:
   /**
    * If true, then register allocation should fail instead of spilling.
    */
   const bool no_spills;

   unsigned last_scratch; /**< measured in 32-byte (register size) units */
};

} /* namespace elk */
#endif /* __cplusplus */