/third_party/libffi/src/tile/
tile.S
    32  #define REG_SIZE FFI_SIZEOF_ARG   (macro definition)
    41  #define LINKAGE_SIZE (2 * REG_SIZE)
    97  addi TMP, sp, -REG_SIZE
    101  REG_SIZE + REG_SIZE + LINKAGE_SIZE + 7
    107  addi TMP2, sp, -(REG_SIZE * 2)
    112  .cfi_offset r52, -REG_SIZE
    121  addi TMP, sp, REG_SIZE
    140  addi INCOMING_STACK_ARGS, INCOMING_STACK_ARGS, REG_SIZE
    145  addi OUTGOING_STACK_ARGS, OUTGOING_STACK_ARGS, REG_SIZE
    146  addi STACK_ARG_BYTES, STACK_ARG_BYTES, -REG_SIZE
    [all …]

/third_party/mesa3d/src/intel/compiler/
brw_fs_register_coalesce.cpp
    58  dst.offset += (i < inst->header_size ? REG_SIZE :   in is_nop_mov()
    235  const int offset = inst->src[0].offset / REG_SIZE;   in register_coalesce()
    246  for (unsigned i = 0; i < MAX2(inst->size_written / REG_SIZE, 1); i++)   in register_coalesce()
    247  dst_reg_offset[offset + i] = inst->dst.offset / REG_SIZE + i;   in register_coalesce()
    307  scan_inst->dst.offset = scan_inst->dst.offset % REG_SIZE +   in register_coalesce()
    308  dst_reg_offset[scan_inst->dst.offset / REG_SIZE] * REG_SIZE;   in register_coalesce()
    315  scan_inst->src[j].offset = scan_inst->src[j].offset % REG_SIZE +   in register_coalesce()
    316  dst_reg_offset[scan_inst->src[j].offset / REG_SIZE] * REG_SIZE;   in register_coalesce()

brw_ir_vec4.h
    79  reg->nr += suboffset / REG_SIZE;   in add_byte_offset()
    80  reg->offset = suboffset % REG_SIZE;   in add_byte_offset()
    87  reg->nr += suboffset / REG_SIZE;   in add_byte_offset()
    88  reg->subnr = suboffset % REG_SIZE;   in add_byte_offset()
    235  (r.file == UNIFORM ? 16 : REG_SIZE) + r.offset +   in reg_offset()
    255  t1.offset += 4 * REG_SIZE;   in regions_overlap()
    421  return DIV_ROUND_UP(reg_offset(inst->dst) % REG_SIZE + inst->size_written,   in regs_written()
    422  REG_SIZE);   in regs_written()
    435  inst->src[i].file == UNIFORM || inst->src[i].file == IMM ? 16 : REG_SIZE;   in regs_read()

brw_vec4_copy_propagation.cpp
    49  inst->dst.offset % REG_SIZE == 0 &&   in is_direct_copy()
    76  return regions_overlap(*src, REG_SIZE, inst->dst, inst->size_written) &&   in is_channel_updated()
    325  if (inst->size_written > REG_SIZE && is_uniform(value))   in try_copy_propagate()
    346  if (inst->src[arg].offset % REG_SIZE || value.offset % REG_SIZE)   in try_copy_propagate()
    498  if (inst->size_read(i) != REG_SIZE ||   in opt_copy_propagation()
    499  inst->src[i].offset % REG_SIZE)   in opt_copy_propagation()
    503  inst->src[i].offset / REG_SIZE);   in opt_copy_propagation()
    515  alloc.offsets[inst->dst.nr] + inst->dst.offset / REG_SIZE;   in opt_copy_propagation()

brw_ir_fs.h
    85  reg.nr += suboffset / REG_SIZE;   in byte_offset()
    86  reg.offset = suboffset % REG_SIZE;   in byte_offset()
    92  reg.nr += suboffset / REG_SIZE;   in byte_offset()
    93  reg.subnr = suboffset % REG_SIZE;   in byte_offset()
    184  (r.file == UNIFORM ? 4 : REG_SIZE) + r.offset +   in reg_offset()
    217  regions_overlap(byte_offset(t, 4 * REG_SIZE), dr / 2, s, ds);   in regions_overlap()
    448  return DIV_ROUND_UP(reg_offset(inst->dst) % REG_SIZE +   in regs_written()
    451  REG_SIZE);   in regs_written()
    466  const unsigned reg_size = inst->src[i].file == UNIFORM ? 4 : REG_SIZE;   in regs_read()
    687  alloc.sizes[inst->src[0].nr] * REG_SIZE == inst->size_written;   in is_coalescing_payload()

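The regs_written()/regs_read() hits in brw_ir_vec4.h and brw_ir_fs.h all compute the same quantity: how many whole REG_SIZE registers an access touches once its intra-register byte offset is accounted for. Below is a minimal standalone sketch of that rounding; toy_reg and regs_touched are made-up names for illustration, not the real fs_reg/fs_inst types.

    #include <cassert>

    // Illustrative only -- the real IR classes carry much more state.
    static constexpr unsigned REG_SIZE = 8 * 4;   // one 32-byte GRF

    struct toy_reg {
       unsigned nr;       // register number
       unsigned offset;   // byte offset within that register, < REG_SIZE
    };

    // Same shape as the regs_written()/regs_read() expressions above:
    // round the access up to whole registers, counting the leading
    // misalignment (offset % REG_SIZE) against the total.
    static unsigned regs_touched(const toy_reg &r, unsigned size_bytes)
    {
       return (r.offset % REG_SIZE + size_bytes + REG_SIZE - 1) / REG_SIZE;
    }

    int main()
    {
       toy_reg dst = {4, 8};
       assert(regs_touched(dst, 32) == 2);   // 32 unaligned bytes straddle two GRFs
       assert(regs_touched(dst, 24) == 1);   // but 24 bytes still fit in the first one
       return 0;
    }
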
brw_fs_reg_allocate.cpp
    40  reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;   in assign_reg()
    41  reg->offset %= REG_SIZE;   in assign_reg()
    753  REG_SIZE;   in emit_unspill()
    783  unspill_inst->size_written = reg_size * REG_SIZE;   in emit_unspill()
    791  } else if (devinfo->ver >= 7 && spill_offset < (1 << 12) * REG_SIZE) {   in emit_unspill()
    809  dst.offset += reg_size * REG_SIZE;   in emit_unspill()
    810  spill_offset += reg_size * REG_SIZE;   in emit_unspill()
    822  REG_SIZE;   in emit_spill()
    870  src.offset += reg_size * REG_SIZE;   in emit_spill()
    871  spill_offset += reg_size * REG_SIZE;   in emit_spill()
    [all …]

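The emit_spill()/emit_unspill() hits step through a multi-register value in whole-GRF strides, bumping both the register-file offset and the scratch byte offset by reg_size * REG_SIZE per message. The sketch below only mirrors that stride arithmetic; the function name, chunk size, and printed layout are hypothetical, not the driver's actual message-splitting logic.

    #include <algorithm>
    #include <cstdio>

    static constexpr unsigned REG_SIZE = 8 * 4;   // 32-byte GRF

    // Hypothetical sketch: move `count` registers to scratch memory in
    // message-sized chunks, advancing the payload offset and the scratch
    // offset in lockstep, REG_SIZE bytes per register.
    static void spill_registers(unsigned count, unsigned spill_offset,
                                unsigned max_regs_per_message)
    {
       unsigned src_offset = 0;                    // byte offset into the payload
       while (count > 0) {
          const unsigned reg_size = std::min(count, max_regs_per_message);
          std::printf("spill %u GRFs: payload byte %u -> scratch byte %u\n",
                      reg_size, src_offset, spill_offset);
          src_offset   += reg_size * REG_SIZE;     // cf. src.offset += reg_size * REG_SIZE
          spill_offset += reg_size * REG_SIZE;     // cf. spill_offset += reg_size * REG_SIZE
          count        -= reg_size;
       }
    }

    int main()
    {
       spill_registers(5, 4 * REG_SIZE, 2);   // e.g. a 5-GRF value, 2 GRFs per message
       return 0;
    }
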
brw_vec4.cpp
    201  return size_written > REG_SIZE;   in has_source_and_destination_hazard()
    214  return mlen * REG_SIZE;   in size_read()
    218  return mlen * REG_SIZE;   in size_read()
    797  int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;   in opt_set_dependency_control()
    816  int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;   in opt_set_dependency_control()
    1288  inst->dst.offset / REG_SIZE != 0) {   in split_virtual_grfs()
    1290  inst->dst.offset / REG_SIZE - 1);   in split_virtual_grfs()
    1291  inst->dst.offset %= REG_SIZE;   in split_virtual_grfs()
    1295  inst->src[i].offset / REG_SIZE != 0) {   in split_virtual_grfs()
    1297  inst->src[i].offset / REG_SIZE - 1);   in split_virtual_grfs()
    [all …]

brw_fs.cpp
    648  this->dst.offset % REG_SIZE != 0);   in is_partial_write()
    885  return mlen * REG_SIZE;   in size_read()
    887  return ex_mlen * REG_SIZE;   in size_read()
    895  return src[0].file == BAD_FILE ? 0 : 2 * REG_SIZE;   in size_read()
    897  return mlen * REG_SIZE;   in size_read()
    905  return mlen * REG_SIZE;   in size_read()
    916  return mlen * REG_SIZE;   in size_read()
    926  return REG_SIZE;   in size_read()
    931  return REG_SIZE;   in size_read()
    942  return mlen * REG_SIZE;   in size_read()
    [all …]

brw_fs_validate.cpp
    61  fsv_assert(inst->dst.offset / REG_SIZE + regs_written(inst) <=   in validate()
    67  fsv_assert(inst->src[i].offset / REG_SIZE + regs_read(inst, i) <=   in validate()

brw_fs_lower_regioning.cpp
    114  if (reg_offset(inst->src[i]) % REG_SIZE !=   in required_dst_byte_offset()
    115  reg_offset(inst->dst) % REG_SIZE)   in required_dst_byte_offset()
    119  return reg_offset(inst->dst) % REG_SIZE;   in required_dst_byte_offset()
    226  reg_offset(inst->src[i]) % REG_SIZE > 0 &&   in has_invalid_src_region()
    234  const unsigned dst_byte_offset = reg_offset(inst->dst) % REG_SIZE;   in has_invalid_src_region()
    235  const unsigned src_byte_offset = reg_offset(inst->src[i]) % REG_SIZE;   in has_invalid_src_region()
    255  const unsigned dst_byte_offset = reg_offset(inst->dst) % REG_SIZE;   in has_invalid_dst_region()

brw_ir_performance.cpp
    125  td(inst->dst.type), sd(DIV_ROUND_UP(inst->size_written, REG_SIZE)),   in instruction_info()
    134  ss = DIV_ROUND_UP(inst->size_read(2), REG_SIZE) +   in instruction_info()
    135  DIV_ROUND_UP(inst->size_read(3), REG_SIZE);   in instruction_info()
    138  ss = MAX2(ss, DIV_ROUND_UP(inst->size_read(i), REG_SIZE));   in instruction_info()
    142  sx = DIV_ROUND_UP(inst->exec_size * type_sz(tx), REG_SIZE);   in instruction_info()
    156  td(inst->dst.type), sd(DIV_ROUND_UP(inst->size_written, REG_SIZE)),   in instruction_info()
    162  ss = MAX2(ss, DIV_ROUND_UP(inst->size_read(i), REG_SIZE));   in instruction_info()
    165  sx = DIV_ROUND_UP(inst->exec_size * type_sz(tx), REG_SIZE);   in instruction_info()
    1242  const unsigned i = r.nr + r.offset / REG_SIZE + delta;   in reg_dependency_id()
    1253  r.nr + r.offset / REG_SIZE + delta;   in reg_dependency_id()
    [all …]

brw_fs_generator.cpp
    79  const unsigned reg_width = REG_SIZE / (reg->stride * type_sz(reg->type));   in brw_reg_from_fs_reg()
    99  assert(reg->stride * type_sz(reg->type) <= REG_SIZE);   in brw_reg_from_fs_reg()
    330  const unsigned rlen = dst_is_null ? 0 : inst->size_written / REG_SIZE;   in generate_send()
    445  assert(inst->size_written % REG_SIZE == 0);   in generate_fb_read()
    451  inst->header_size, inst->size_written / REG_SIZE,   in generate_fb_read()
    466  unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;   in generate_mov_indirect()
    471  reg.nr = imm_byte_offset / REG_SIZE;   in generate_mov_indirect()
    472  reg.subnr = imm_byte_offset % REG_SIZE;   in generate_mov_indirect()
    661  uint32_t src_start_offset = src.nr * REG_SIZE + src.subnr;   in generate_shuffle()
    996  assert(inst->size_written % REG_SIZE == 0);   in generate_tex()
    [all …]

brw_fs_saturate_propagation.cpp
    129  scan_inst->src[i].offset / REG_SIZE ==   in opt_saturate_propagation_local()
    130  inst->src[0].offset / REG_SIZE) {   in opt_saturate_propagation_local()

brw_vec4_reg_allocate.cpp
    36  reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;   in assign()
    37  reg->offset %= REG_SIZE;   in assign()
    373  inst->src[i].offset >= REG_SIZE)   in evaluate_spill_costs()
    401  if (inst->dst.reladdr || inst->dst.offset >= REG_SIZE)   in evaluate_spill_costs()

brw_vec4_visitor.cpp
    1118  assert(orig_src.offset % REG_SIZE == 0);   in emit_scratch_read()
    1119  int reg_offset = base_offset + orig_src.offset / REG_SIZE;   in emit_scratch_read()
    1131  SCRATCH_READ(byte_offset(shuffled_float, REG_SIZE), index);   in emit_scratch_read()
    1147  assert(inst->dst.offset % REG_SIZE == 0);   in emit_scratch_write()
    1148  int reg_offset = base_offset + inst->dst.offset / REG_SIZE;   in emit_scratch_write()
    1208  SCRATCH_WRITE(dst, byte_offset(shuffled_float, REG_SIZE), index);   in emit_scratch_write()
    1219  inst->dst.offset %= REG_SIZE;   in emit_scratch_write()
    1250  src.offset %= REG_SIZE;   in emit_resolve_reladdr()

brw_fs_bank_conflicts.cpp
    499  return r.nr + r.offset / REG_SIZE;   in reg_of()
    501  return reg_offset(r) / REG_SIZE;   in reg_of()
    662  REG_SIZE);   in shader_conflict_weight_matrix()
    902  r.offset = r.offset % REG_SIZE;   in transform()

brw_reg.h
    208  #define REG_SIZE (8*4)   (macro definition)
    579  unsigned newoffset = reg.nr * REG_SIZE + reg.subnr + bytes;   in byte_offset()
    580  reg.nr = newoffset / REG_SIZE;   in byte_offset()
    581  reg.subnr = newoffset % REG_SIZE;   in byte_offset()
    759  return brw_imm_uw(reg.nr * REG_SIZE + reg.subnr);   in brw_address()

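brw_reg.h is where this REG_SIZE is actually defined: (8*4), i.e. eight 32-bit dwords, the 32-byte width of one GRF. The byte_offset()/brw_address() hits then flatten a (nr, subnr) pair into a byte address and split it back. Below is a small round-trip sketch under that assumption; hw_reg and the helper names are made up for illustration and are not the real brw_reg API.

    #include <cassert>

    static constexpr unsigned REG_SIZE = 8 * 4;   // 8 dwords x 4 bytes = 32 bytes per GRF

    struct hw_reg {
       unsigned nr;      // GRF number
       unsigned subnr;   // byte offset within the GRF, < REG_SIZE
    };

    // Flatten to a byte address (what the brw_address() hit returns) ...
    static unsigned flat_address(hw_reg r)
    {
       return r.nr * REG_SIZE + r.subnr;
    }

    // ... and fold a byte advance back into (nr, subnr), as in byte_offset().
    static hw_reg advance(hw_reg r, unsigned bytes)
    {
       const unsigned newoffset = r.nr * REG_SIZE + r.subnr + bytes;
       return {newoffset / REG_SIZE, newoffset % REG_SIZE};
    }

    int main()
    {
       hw_reg r = {2, 16};
       assert(flat_address(r) == 80);      // 2 * 32 + 16
       const hw_reg s = advance(r, 24);    // crosses into the next GRF
       assert(s.nr == 3 && s.subnr == 8);
       return 0;
    }
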
brw_fs_builder.h
    206  REG_SIZE)),
    514  if (dispatch_width() * type_sz(tmp.type) > 2 * REG_SIZE) {   in emit_scan()
    772  inst->size_written = header_size * REG_SIZE;   in LOAD_PAYLOAD()
    787  inst->size_written = shader->alloc.sizes[dst.nr] * REG_SIZE;   in UNDEF()

brw_fs_live_variables.h
    97  return var_from_vgrf[reg.nr] + reg.offset / REG_SIZE;   in var_from_reg()

brw_fs_scoreboard.cpp
    745  const unsigned reg = (r.file == VGRF ? r.nr + r.offset / REG_SIZE :   in dep()
    746  reg_offset(r) / REG_SIZE);   in dep()
    1015  const fs_reg r = byte_offset(inst->src[i], REG_SIZE * j);   in update_inst_scoreboard()
    1042  sb.set(byte_offset(inst->dst, REG_SIZE * j), wr_dep);   in update_inst_scoreboard()
    1137  sb.get(byte_offset(inst->src[i], REG_SIZE * j))));   in gather_inst_dependencies()
    1167  sb.get(byte_offset(inst->dst, REG_SIZE * j))));   in gather_inst_dependencies()

/third_party/lzma/Asm/x86/
7zCrcOpt.asm
    17  crc_OFFS equ (REG_SIZE * 5)
    18  data_OFFS equ (REG_SIZE + crc_OFFS)
    19  size_OFFS equ (REG_SIZE + data_OFFS)
    21  size_OFFS equ (REG_SIZE * 5)
    23  table_OFFS equ (REG_SIZE + size_OFFS)

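All of the lzma assembly hits follow the same convention: the first stack-passed parameter sits at a fixed multiple of REG_SIZE above the stack pointer (past the return address and whatever the prologue saves), and each subsequent *_OFFS equate is the previous one plus REG_SIZE. The sketch below only renders that chaining in C++ for readability; the base multiplier, parameter order, and which arguments actually spill to the stack depend on the calling-convention branch, so the concrete numbers are illustrative, assuming a 64-bit build where REG_SIZE is 8.

    #include <cstdio>

    // Assumed: 64-bit build, so one stack slot is 8 bytes. The (REG_SIZE * 5)
    // base and the parameter order are illustrative only; the real equates
    // pick different bases per ABI branch.
    static constexpr unsigned REG_SIZE   = 8;
    static constexpr unsigned crc_OFFS   = REG_SIZE * 5;          // first stack parameter
    static constexpr unsigned data_OFFS  = REG_SIZE + crc_OFFS;   // each next one is one slot higher
    static constexpr unsigned size_OFFS  = REG_SIZE + data_OFFS;
    static constexpr unsigned table_OFFS = REG_SIZE + size_OFFS;

    int main()
    {
       std::printf("crc=%u data=%u size=%u table=%u\n",
                   crc_OFFS, data_OFFS, size_OFFS, table_OFFS);   // prints 40 48 56 64
       return 0;
    }
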
LzFindOpt.asm
    88  maxLen_OFFS equ (REG_SIZE * (6 + 1))
    90  cutValue_OFFS equ (REG_SIZE * (8 + 1 + 4))
    91  d_OFFS equ (REG_SIZE + cutValue_OFFS)
    92  maxLen_OFFS equ (REG_SIZE + d_OFFS)
    94  hash_OFFS equ (REG_SIZE + maxLen_OFFS)
    95  limit_OFFS equ (REG_SIZE + hash_OFFS)
    96  size_OFFS equ (REG_SIZE + limit_OFFS)
    97  cycPos_OFFS equ (REG_SIZE + size_OFFS)
    98  cycSize_OFFS equ (REG_SIZE + cycPos_OFFS)
    99  posRes_OFFS equ (REG_SIZE + cycSize_OFFS)

AesOpt.asm
    67  stack_param_offset equ (REG_SIZE * (1 + num_regs_push))
    79  data_OFFS equ (REG_SIZE + aes_OFFS)
    80  size_OFFS equ (REG_SIZE + data_OFFS)

XzCrc64Opt.asm
    111  crc_OFFS equ (REG_SIZE * 5)
    121  size_OFFS equ (REG_SIZE + data_OFFS)
    122  table_OFFS equ (REG_SIZE + size_OFFS)

Sha256Opt.asm
    116  PARAM_OFFSET equ (REG_SIZE * (1 + NUM_PUSH_REGS))
    119  mov rData, [r4 + PARAM_OFFSET + REG_SIZE * 1]
    120  mov rNum, [r4 + PARAM_OFFSET + REG_SIZE * 2]
