/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file ir_optimization.h
 *
 * Prototypes for optimization passes to be called by the compiler and drivers.
 */

#ifndef GLSL_IR_OPTIMIZATION_H
#define GLSL_IR_OPTIMIZATION_H

/* Operations for lower_instructions(); see the illustrative usage note at the
 * end of this header. */
#define SUB_TO_ADD_NEG         0x01
#define FDIV_TO_MUL_RCP        0x02
#define EXP_TO_EXP2            0x04
#define POW_TO_EXP2            0x08
#define LOG_TO_LOG2            0x10
#define MOD_TO_FLOOR           0x20
#define INT_DIV_TO_MUL_RCP     0x40
#define LDEXP_TO_ARITH         0x80
#define CARRY_TO_ARITH         0x100
#define BORROW_TO_ARITH        0x200
#define SAT_TO_CLAMP           0x400
#define DOPS_TO_DFRAC          0x800
#define DFREXP_DLDEXP_TO_ARITH 0x1000
#define BIT_COUNT_TO_MATH      0x02000
#define EXTRACT_TO_SHIFTS      0x04000
#define INSERT_TO_SHIFTS       0x08000
#define REVERSE_TO_SHIFTS      0x10000
#define FIND_LSB_TO_FLOAT_CAST 0x20000
#define FIND_MSB_TO_FLOAT_CAST 0x40000
#define IMUL_HIGH_TO_MUL       0x80000
#define DDIV_TO_MUL_RCP        0x100000
#define DIV_TO_MUL_RCP         (FDIV_TO_MUL_RCP | DDIV_TO_MUL_RCP)
#define SQRT_TO_ABS_SQRT       0x200000

/* Operations for lower_64bit_integer_instructions() */
#define MUL64  (1U << 0)
#define SIGN64 (1U << 1)
#define DIV64  (1U << 2)
#define MOD64  (1U << 3)

/**
 * \see class lower_packing_builtins_visitor
 */
enum lower_packing_builtins_op {
   LOWER_PACK_UNPACK_NONE    = 0x0000,

   LOWER_PACK_SNORM_2x16     = 0x0001,
   LOWER_UNPACK_SNORM_2x16   = 0x0002,

   LOWER_PACK_UNORM_2x16     = 0x0004,
   LOWER_UNPACK_UNORM_2x16   = 0x0008,

   LOWER_PACK_HALF_2x16      = 0x0010,
   LOWER_UNPACK_HALF_2x16    = 0x0020,

   LOWER_PACK_SNORM_4x8      = 0x0040,
   LOWER_UNPACK_SNORM_4x8    = 0x0080,

   LOWER_PACK_UNORM_4x8      = 0x0100,
   LOWER_UNPACK_UNORM_4x8    = 0x0200,

   LOWER_PACK_USE_BFI        = 0x0400,
   LOWER_PACK_USE_BFE        = 0x0800,
};

bool do_common_optimization(exec_list *ir, bool linked,
                            bool uniform_locations_assigned,
                            const struct gl_shader_compiler_options *options,
                            bool native_integers);

bool ir_constant_fold(ir_rvalue **rvalue);

bool do_rebalance_tree(exec_list *instructions);
bool do_algebraic(exec_list *instructions, bool native_integers,
                  const struct gl_shader_compiler_options *options);
bool opt_conditional_discard(exec_list *instructions);
bool do_constant_folding(exec_list *instructions);
bool do_constant_variable(exec_list *instructions);
bool do_constant_variable_unlinked(exec_list *instructions);
bool do_copy_propagation(exec_list *instructions);
bool do_copy_propagation_elements(exec_list *instructions);
bool do_constant_propagation(exec_list *instructions);
void do_dead_builtin_varyings(struct gl_context *ctx,
                              gl_linked_shader *producer,
                              gl_linked_shader *consumer,
                              unsigned num_tfeedback_decls,
                              class tfeedback_decl *tfeedback_decls);
bool do_dead_code(exec_list *instructions, bool uniform_locations_assigned);
bool do_dead_code_local(exec_list *instructions);
bool do_dead_code_unlinked(exec_list *instructions);
bool do_dead_functions(exec_list *instructions);
bool opt_flip_matrices(exec_list *instructions);
bool do_function_inlining(exec_list *instructions);
bool do_lower_jumps(exec_list *instructions, bool pull_out_jumps = true,
                    bool lower_sub_return = true,
                    bool lower_main_return = false,
                    bool lower_continue = false,
                    bool lower_break = false);
bool do_lower_texture_projection(exec_list *instructions);
bool do_if_simplification(exec_list *instructions);
bool opt_flatten_nested_if_blocks(exec_list *instructions);
bool do_discard_simplification(exec_list *instructions);
bool lower_if_to_cond_assign(gl_shader_stage stage, exec_list *instructions,
                             unsigned max_depth = 0,
                             unsigned min_branch_cost = 0);
bool do_mat_op_to_vec(exec_list *instructions);
bool do_minmax_prune(exec_list *instructions);
bool do_structure_splitting(exec_list *instructions);
bool optimize_swizzles(exec_list *instructions);
bool do_vectorize(exec_list *instructions);
bool do_tree_grafting(exec_list *instructions);
bool do_vec_index_to_cond_assign(exec_list *instructions);
bool do_vec_index_to_swizzle(exec_list *instructions);
bool lower_discard(exec_list *instructions);
void lower_discard_flow(exec_list *instructions);
bool lower_instructions(exec_list *instructions, unsigned what_to_lower);
bool lower_noise(exec_list *instructions);
bool lower_variable_index_to_cond_assign(gl_shader_stage stage,
                                         exec_list *instructions,
                                         bool lower_input, bool lower_output,
                                         bool lower_temp, bool lower_uniform);
bool lower_quadop_vector(exec_list *instructions, bool dont_lower_swz);
bool lower_const_arrays_to_uniforms(exec_list *instructions, unsigned stage);
bool lower_clip_cull_distance(struct gl_shader_program *prog,
                              gl_linked_shader *shader);
void lower_output_reads(unsigned stage, exec_list *instructions);
bool lower_packing_builtins(exec_list *instructions, int op_mask);
void lower_shared_reference(struct gl_context *ctx,
                            struct gl_shader_program *prog,
                            struct gl_linked_shader *shader);
void lower_ubo_reference(struct gl_linked_shader *shader,
                         bool clamp_block_indices,
                         bool use_std430_as_default);
void lower_packed_varyings(void *mem_ctx,
                           unsigned locations_used,
                           const uint8_t *components,
                           ir_variable_mode mode,
                           unsigned gs_input_vertices,
                           gl_linked_shader *shader,
                           bool disable_varying_packing, bool xfb_enabled);
bool lower_vector_insert(exec_list *instructions,
                         bool lower_nonconstant_index);
bool lower_vector_derefs(gl_linked_shader *shader);
void lower_named_interface_blocks(void *mem_ctx, gl_linked_shader *shader);
bool optimize_redundant_jumps(exec_list *instructions);
bool optimize_split_arrays(exec_list *instructions, bool linked);
bool lower_offset_arrays(exec_list *instructions);
void optimize_dead_builtin_variables(exec_list *instructions,
                                     enum ir_variable_mode other);
bool lower_tess_level(gl_linked_shader *shader);

bool lower_vertex_id(gl_linked_shader *shader);
bool lower_cs_derived(gl_linked_shader *shader);
bool lower_blend_equation_advanced(gl_linked_shader *shader);

bool lower_subroutine(exec_list *instructions,
                      struct _mesa_glsl_parse_state *state);
void propagate_invariance(exec_list *instructions);

namespace ir_builder { class ir_factory; };

ir_variable *compare_index_block(ir_builder::ir_factory &body,
                                 ir_variable *index,
                                 unsigned base, unsigned components);

bool lower_64bit_integer_instructions(exec_list *instructions,
                                      unsigned what_to_lower);

#endif /* GLSL_IR_OPTIMIZATION_H */
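
/*
 * Illustrative usage note (an assumption about a typical caller, not part of
 * the declarations above): drivers build a bitmask from the lowering flags
 * and pass it to the matching pass, e.g.
 *
 *    unsigned what_to_lower = SUB_TO_ADD_NEG | LOG_TO_LOG2 | MOD_TO_FLOOR;
 *    bool progress = lower_instructions(instructions, what_to_lower);
 *
 * where `instructions` is the shader's exec_list and the return value reports
 * whether anything was lowered.  lower_64bit_integer_instructions() takes its
 * mask from MUL64/SIGN64/DIV64/MOD64, and lower_packing_builtins() takes the
 * lower_packing_builtins_op values, in the same way.  The particular flag
 * combination shown here is hypothetical; real callers choose flags based on
 * what the target hardware supports natively.
 */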