/external/mesa3d/src/intel/compiler/ |
D | brw_fs_lower_regioning.cpp |
    41    return type_sz(inst->dst.type) == 1 &&   in is_byte_raw_mov()
    69    return inst->dst.stride * type_sz(inst->dst.type);   in required_dst_byte_stride()
    70    } else if (type_sz(inst->dst.type) < get_exec_type_size(inst) &&   in required_dst_byte_stride()
    78    unsigned max_stride = inst->dst.stride * type_sz(inst->dst.type);   in required_dst_byte_stride()
    79    unsigned min_size = type_sz(inst->dst.type);   in required_dst_byte_stride()
    80    unsigned max_size = type_sz(inst->dst.type);   in required_dst_byte_stride()
    84    const unsigned size = type_sz(inst->src[i].type);   in required_dst_byte_stride()
    151   const unsigned dst_byte_stride = inst->dst.stride * type_sz(inst->dst.type);   in has_invalid_src_region()
    153   type_sz(inst->src[i].type);   in has_invalid_src_region()
    176   const unsigned dst_byte_stride = inst->dst.stride * type_sz(inst->dst.type);   in has_invalid_dst_region()
    [all …]
|
D | brw_ir_fs.h |
    118   return byte_offset(reg, delta * reg.stride * type_sz(reg.type));   in horiz_offset()
    125   return byte_offset(reg, delta * stride * type_sz(reg.type));   in horiz_offset()
    199   return (MAX2(1, stride) - 1) * type_sz(r.type);   in reg_padding()
    294   assert((i + 1) * type_sz(type) <= type_sz(reg.type));   in subscript()
    300   const int delta = util_logbase2(type_sz(reg.type)) -   in subscript()
    301   util_logbase2(type_sz(type));   in subscript()
    309   reg.stride *= type_sz(reg.type) / type_sz(type);   in subscript()
    312   return byte_offset(retype(reg, type), i * type_sz(type));   in subscript()
    478   if (type_sz(t) > type_sz(exec_type))   in get_exec_type()
    480   else if (type_sz(t) == type_sz(exec_type) &&   in get_exec_type()
    [all …]
|
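Most of the brw_ir_fs.h hits above are byte-offset bookkeeping: a channel delta is turned into a byte offset by scaling it with the region's stride and the element size returned by type_sz(), and subscript() rescales the stride when a register is reinterpreted at a narrower type. A minimal standalone sketch of that arithmetic follows; the helper names are illustrative, not the actual Mesa definitions.

    /* Illustrative only: mirrors the delta * stride * type_sz() pattern in
     * horiz_offset() and the stride rescaling done by subscript(). */
    static unsigned
    channel_delta_to_byte_offset(unsigned delta, unsigned stride,
                                 unsigned elem_bytes)
    {
       /* elem_bytes plays the role of type_sz(reg.type). */
       return delta * stride * elem_bytes;
    }

    static unsigned
    rescaled_stride(unsigned old_stride, unsigned old_bytes, unsigned new_bytes)
    {
       /* Viewing e.g. a 4-byte element as 1-byte subwords multiplies the
        * element stride by the size ratio, as in
        * reg.stride *= type_sz(reg.type) / type_sz(type). */
       return old_stride * (old_bytes / new_bytes);
    }

For example, with a 4-byte element type and stride 2, advancing by one channel moves 8 bytes, which matches the horiz_offset() expression at line 118.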
D | brw_ir_vec4.h |
    111   return byte_offset(reg, num_components * type_sz(reg.type) * delta);   in offset()
    117   return byte_offset(reg, delta * type_sz(reg.type));   in horiz_offset()
    192   return byte_offset(reg, num_components * type_sz(reg.type) * delta);   in offset()
    201   return byte_offset(reg, delta * type_sz(reg.type));   in horiz_offset()
    447   if (type_sz(t) > type_sz(exec_type))   in get_exec_type()
    449   else if (type_sz(t) == type_sz(exec_type) &&   in get_exec_type()
    469   return type_sz(get_exec_type(inst));   in get_exec_type_size()
|
D | brw_fs_copy_propagation.cpp |
    381   !(type_sz(inst->src[arg].type) * stride ==   in can_take_stride()
    382   type_sz(inst->dst.type) * inst->dst.stride ||   in can_take_stride()
    397   if (type_sz(inst->src[arg].type) > 4)   in can_take_stride()
    555   if (type_sz(entry->dst.type) < type_sz(inst->src[arg].type))   in try_copy_propagate()
    573   type_sz(inst->src[arg].type)) % type_sz(entry->src.type) != 0)   in try_copy_propagate()
    585   type_sz(entry->dst.type) != type_sz(inst->src[arg].type)))   in try_copy_propagate()
    624   const unsigned reg_width = REG_SIZE / (type_sz(inst->src[arg].type) *   in try_copy_propagate()
    650   const unsigned component = rel_offset / type_sz(entry->dst.type);   in try_copy_propagate()
    651   const unsigned suboffset = rel_offset % type_sz(entry->dst.type);   in try_copy_propagate()
    657   component * entry_stride * type_sz(entry->src.type) + suboffset);   in try_copy_propagate()
    [all …]
|
D | brw_eu_validate.c |
    674   (type_sz(dst_type) == 1 || type_sz(src0_type) == 1)) {   in is_byte_conversion()
    679   (type_sz(dst_type) == 1 || type_sz(src1_type) == 1);   in is_byte_conversion()
    780   ERROR_IF(type_sz(dst_type) == 1 &&   in general_restrictions_based_on_operand_types()
    781   (type_sz(src0_type) == 8 ||   in general_restrictions_based_on_operand_types()
    782   (num_sources > 1 && type_sz(src1_type) == 8)),   in general_restrictions_based_on_operand_types()
    785   ERROR_IF(type_sz(dst_type) == 8 &&   in general_restrictions_based_on_operand_types()
    786   (type_sz(src0_type) == 1 ||   in general_restrictions_based_on_operand_types()
    787   (num_sources > 1 && type_sz(src1_type) == 1)),   in general_restrictions_based_on_operand_types()
    808   (type_sz(src0_type) == 8 ||   in general_restrictions_based_on_operand_types()
    809   (num_sources > 1 && type_sz(src1_type) == 8)),   in general_restrictions_based_on_operand_types()
    [all …]
|
D | brw_vec4.cpp |
    231    return 4 * type_sz(src[arg].type);   in size_read()
    234    return exec_size * type_sz(src[arg].type);   in size_read()
    426    type_sz(inst->src[0].type) < 8 &&   in opt_vector_float()
    678    assert(type_sz(inst->src[i].type) % 4 == 0);   in pack_uniform_registers()
    679    int channel_size = type_sz(inst->src[i].type) / 4;   in pack_uniform_registers()
    714    int channel_size = type_sz(inst->src[0].type) / 4;   in pack_uniform_registers()
    983    const glsl_type *temp_type = type_sz(inst->src[i].type) == 8 ?   in move_push_constants_to_pull_constants()
    1009   #define IS_64BIT(reg) (reg.file != BAD_FILE && type_sz(reg.type) == 8)   in is_dep_ctrl_unsafe()
    1340   if (type_sz(inst->src[0].type) != type_sz(scan_inst->src[0].type))   in opt_register_coalesce()
    1362   type_sz(scan_inst->dst.type)) > 8 ||   in opt_register_coalesce()
    [all …]
|
D | brw_fs_generator.cpp |
    78    const unsigned reg_width = REG_SIZE / (reg->stride * type_sz(reg->type));   in brw_reg_from_fs_reg()
    98    assert(reg->stride * type_sz(reg->type) <= REG_SIZE);   in brw_reg_from_fs_reg()
    129   if (type_sz(reg->type) == 8) {   in brw_reg_from_fs_reg()
    143   type_sz(inst->dst.type) < 8) {   in brw_reg_from_fs_reg()
    175   type_sz(reg->type) == 8 &&   in brw_reg_from_fs_reg()
    471   if (type_sz(reg.type) > 4 && !devinfo->has_64bit_float) {   in generate_mov_indirect()
    550   if (type_sz(reg.type) > 4 &&   in generate_mov_indirect()
    605   assert(devinfo->gen >= 8 || devinfo->is_haswell || type_sz(src.type) <= 4);   in generate_shuffle()
    615   (devinfo->gen <= 7 || type_sz(src.type) > 4) ?   in generate_shuffle()
    642   assert(type_sz(group_idx.type) <= 4);   in generate_shuffle()
    [all …]
|
D | brw_vec4_live_variables.h |
    116   const unsigned csize = DIV_ROUND_UP(type_sz(reg.type), 4);
    130   const unsigned csize = DIV_ROUND_UP(type_sz(reg.type), 4);
|
D | brw_vec4_copy_propagation.cpp |
    148   if (type_sz(value.type) == 8 || type_sz(inst->src[arg].type) == 8)   in try_constant_propagate()
    334   type_sz(value.type) == 4)   in try_copy_propagate()
    341   if (type_sz(value.type) != type_sz(inst->src[arg].type))   in try_copy_propagate()
|
D | brw_ir_performance.cpp |
    142    sx = DIV_ROUND_UP(inst->exec_size * type_sz(tx), REG_SIZE);   in instruction_info()
    148    !brw_reg_type_is_floating_point(tx) && type_sz(tx) == 4 &&   in instruction_info()
    149    type_sz(inst->src[0].type) == type_sz(inst->src[1].type))   in instruction_info()
    165    sx = DIV_ROUND_UP(inst->exec_size * type_sz(tx), REG_SIZE);   in instruction_info()
    171    !brw_reg_type_is_floating_point(tx) && type_sz(tx) == 4 &&   in instruction_info()
    172    type_sz(inst->src[0].type) == type_sz(inst->src[1].type))   in instruction_info()
    362    if (type_sz(info.tx) > 4)   in instruction_desc()
    385    if (type_sz(info.tx) > 4)   in instruction_desc()
    435    if (type_sz(info.tx) > 4)   in instruction_desc()
    1264   const unsigned offset = (inst->group + i) * type_sz(tx) *   in accum_reg_of_channel()
|
D | brw_vec4_reg_allocate.cpp |
    377   return type_sz(type) == 8 ? 2.25f : 1.0f;   in spill_cost_for_type()
    419   if (type_sz(inst->src[i].type) == 8 && inst->exec_size != 8)   in evaluate_spill_costs()
    426   unsigned type_size = type_sz(inst->src[i].type);   in evaluate_spill_costs()
    446   if (type_sz(inst->dst.type) == 8 && inst->exec_size != 8)   in evaluate_spill_costs()
    452   unsigned type_size = type_sz(inst->dst.type);   in evaluate_spill_costs()
|
D | brw_fs_nir.cpp |
    893    if (type_sz(op[0].type) == 2) {   in emit_fsign()
    913    } else if (type_sz(op[0].type) == 4) {   in emit_fsign()
    1089   assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */   in nir_emit_alu()
    1130   assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */   in nir_emit_alu()
    1135   assert(type_sz(result.type) < 8); /* brw_nir_lower_conversions */   in nir_emit_alu()
    1167   assert(type_sz(result.type) < 8); /* brw_nir_lower_conversions */   in nir_emit_alu()
    1427   const uint32_t bit_size = type_sz(op[0].type) * 8;   in nir_emit_alu()
    2415   assert(type_sz(dst.type) == 4);   in emit_gs_input_load()
    2859   assert(type_sz(dst.type) == 4);   in nir_emit_tcs_intrinsic()
    3906   nir_const_value value = nir_alu_binop_identity(op, type_sz(type) * 8);   in brw_nir_reduction_op_identity()
    [all …]
|
D | brw_reg.h |
    315    type_sz(unsigned type)   in type_sz() function
    431    reg.subnr = subnr * type_sz(type);   in brw_reg()
    596    return byte_offset(reg, delta * type_sz(reg.type));   in suboffset()
    1028   unsigned scale = type_sz(reg.type) / type_sz(type);   in subscript()
|
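The brw_reg.h hit at line 315 is the definition site of type_sz() itself: it maps a BRW_REGISTER_TYPE_* value to the element size in bytes that all of the stride and offset math above relies on. An abridged sketch of that mapping (not the verbatim Mesa source, and omitting some type cases) looks like this:

    /* Abridged, illustrative sketch of the type_sz() mapping: each hardware
     * register type has a fixed element size in bytes. */
    static inline unsigned
    type_sz_sketch(unsigned type)
    {
       switch (type) {
       case BRW_REGISTER_TYPE_Q:   /* 64-bit int */
       case BRW_REGISTER_TYPE_UQ:
       case BRW_REGISTER_TYPE_DF:  /* double float */
          return 8;
       case BRW_REGISTER_TYPE_D:   /* 32-bit int */
       case BRW_REGISTER_TYPE_UD:
       case BRW_REGISTER_TYPE_F:   /* float */
          return 4;
       case BRW_REGISTER_TYPE_W:   /* 16-bit int */
       case BRW_REGISTER_TYPE_UW:
       case BRW_REGISTER_TYPE_HF:  /* half float */
          return 2;
       case BRW_REGISTER_TYPE_B:   /* 8-bit int */
       case BRW_REGISTER_TYPE_UB:
          return 1;
       default:
          return 0;  /* the real helper treats unknown types as an error */
       }
    }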
D | brw_fs_builder.h |
    205   DIV_ROUND_UP(n * type_sz(type) * dispatch_width(),
    448   if (dispatch_width() * type_sz(tmp.type) > 2 * REG_SIZE) {   in emit_scan()
    470   if (type_sz(tmp.type) <= 4) {   in emit_scan()
    704   ALIGN(dispatch_width() * type_sz(src[i].type) * dst.stride,   in LOAD_PAYLOAD()
|
D | brw_fs_cmod_propagation.cpp |
    335   if (type_sz(scan_inst->dst.type) != type_sz(inst->dst.type))   in opt_cmod_propagation_local()
|
D | brw_vec4_cse.cpp |
    214   unsigned component_size = width * type_sz(entry->tmp.type);   in opt_cse_local()
    235   unsigned component_size = width * type_sz(inst->dst.type);   in opt_cse_local()
|
D | brw_shader.cpp |
    565   const unsigned size = type_sz(type);   in brw_saturate_immediate()
    749   assert(type_sz(type) > 1);   in is_zero()
    780   assert(type_sz(type) > 1);   in is_one()
    811   assert(type_sz(type) > 1);   in is_negative_one()
|
D | brw_fs.cpp |
    199    (const_offset & 0xf) / type_sz(dst.type), 1);   in VARYING_PULL_CONSTANT_LOAD()
    451    MIN2(type_sz(src[1].type), type_sz(src[2].type)) :   in can_do_source_mods()
    452    MIN2(type_sz(src[0].type), type_sz(src[1].type));   in can_do_source_mods()
    455    type_sz(exec_type) >= 4 &&   in can_do_source_mods()
    456    type_sz(exec_type) != min_type_sz)   in can_do_source_mods()
    566    return MAX2(width * stride, 1) * type_sz(type);   in component_size()
    736    (this->exec_size * type_sz(this->dst.type)) < 32 ||   in is_partial_write()
    1025   return components_read(arg) * type_sz(src[arg].type);   in size_read()
    1876   type_sz(inst->src[i].type);   in convert_attr_sources_to_hw_regs()
    2351   assert(inst->src[i].offset % type_sz(inst->src[i].type) == 0);   in assign_constant_locations()
    [all …]
|
D | brw_vec4_visitor.cpp |
    61     0 : this->exec_size * type_sz(dst.type));   in vec4_instruction()
    1360   if (type_sz(inst->dst.type) < 8) {   in get_scratch_offset()
    1393   if (type_sz(orig_src.type) < 8) {   in emit_scratch_read()
    1429   bool is_64bit = type_sz(inst->dst.type) == 8;   in emit_scratch_write()
    1516   dst_reg temp = dst_reg(this, type_sz(src.type) == 8 ?   in emit_resolve_reladdr()
    1625   bool is_64bit = type_sz(orig_src.type) == 8;   in emit_pull_constant_load()
    1627   assert(type_sz(temp.type) == 8);   in emit_pull_constant_load()
|
D | brw_fs_sel_peephole.cpp |
    207   if (src1.file == IMM && type_sz(src1.type) == 8) {   in opt_peephole_sel()
|
D | brw_vec4_builder.h |
    178   n * DIV_ROUND_UP(type_sz(type), 4))),
    316   inst->size_written = inst->exec_size * type_sz(inst->dst.type);   in emit()
|
D | brw_vec4_generator.cpp |
    1929   assert(type_sz(src[0].type) == 8);   in generate_code()
    1930   assert(type_sz(dst.type) == 8);   in generate_code()
    1969   assert(type_sz(src[0].type) == 4);   in generate_code()
    1970   assert(type_sz(dst.type) == 8);   in generate_code()
    1985   assert(type_sz(src[0].type) == 8);   in generate_code()
    1986   assert(type_sz(dst.type) == 4);   in generate_code()
    2008   assert(type_sz(src[0].type) == 4);   in generate_code()
    2009   assert(type_sz(dst.type) == 8);   in generate_code()
|
D | brw_fs_combine_constants.cpp |
    393   uint8_t size = type_sz(type);   in opt_combine_constants()
    479   if (reg.offset + type_sz(imm_reg.type) * width > REG_SIZE) {   in opt_combine_constants()
|
/external/igt-gpu-tools/assembler/ |
D | brw_eu_debug.c |
    83   hwreg.subnr / type_sz(hwreg.type),   in brw_print_reg()
|
D | brw_reg.h |
    148   type_sz(unsigned type)   in type_sz() function
    201   reg.subnr = subnr * type_sz(type);   in brw_reg()
    317   reg.subnr += delta * type_sz(reg.type);   in suboffset()
|