/external/mesa3d/src/gallium/drivers/radeonsi/
cik_sdma.c, in si_sdma_v4_copy_texture():
     61  struct si_texture *sdst = (struct si_texture *)dst;   (local)
     63  unsigned bpp = sdst->surface.bpe;
     64  uint64_t dst_address = sdst->buffer.gpu_address + sdst->surface.u.gfx9.surf_offset;
     66  unsigned dst_pitch = sdst->surface.u.gfx9.surf_pitch;
     68  uint64_t dst_slice_pitch = ((uint64_t)sdst->surface.u.gfx9.surf_slice_size) / bpp;
     80  assert(sdst->surface.u.gfx9.surf_offset + dst_slice_pitch * bpp * (dstz + src_box->depth) <=
     81  sdst->buffer.buf->size);
     85  if (!si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty, dstz, ssrc, src_level, src_box))
     88  dstx /= sdst->surface.blk_w;
     89  dsty /= sdst->surface.blk_h;
    [all …]
si_dma_cs.c, in si_sdma_clear_buffer():
     70  struct si_resource *sdst = si_resource(dst);   (local)
     86  util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);
     88  offset += sdst->gpu_address;
     93  si_need_dma_space(sctx, ncopy * 4, sdst, NULL);
    113  si_need_dma_space(sctx, ncopy * 5, sdst, NULL);
si_dma_cs.c, in si_sdma_copy_buffer():
    134  struct si_resource *sdst = si_resource(dst);   (local)
    138  (ssrc->flags & RADEON_FLAG_ENCRYPTED) != (sdst->flags & RADEON_FLAG_ENCRYPTED)) {
    146  util_range_add(dst, &sdst->valid_buffer_range, dst_offset, dst_offset + size);
    148  dst_offset += sdst->gpu_address;
    166  si_need_dma_space(sctx, ncopy * 5, sdst, ssrc);
    [all …]
si_cp_dma.c, in si_cp_dma_clear_buffer():
    205  struct si_resource *sdst = si_resource(dst);   (local)
    206  uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;
    214  if (sdst)
    215  util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);
    218  if (sdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
    225  unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS);
    237  if (sdst && cache_policy != L2_BYPASS)
    238  sdst->TC_L2_dirty = true;
si_buffer.c, in si_replace_buffer_storage():
    310  struct si_resource *sdst = si_resource(dst);   (local)
    313  pb_reference(&sdst->buf, ssrc->buf);
    314  sdst->gpu_address = ssrc->gpu_address;
    315  sdst->b.b.bind = ssrc->b.b.bind;
    316  sdst->b.max_forced_staging_uploads = ssrc->b.max_forced_staging_uploads;
    317  sdst->max_forced_staging_uploads = ssrc->max_forced_staging_uploads;
    318  sdst->flags = ssrc->flags;
    320  assert(sdst->vram_usage == ssrc->vram_usage);
    321  assert(sdst->gart_usage == ssrc->gart_usage);
    322  assert(sdst->bo_size == ssrc->bo_size);
    [all …]
si_fence.c, in si_fence_reference():
    185  struct si_multi_fence **sdst = (struct si_multi_fence **)dst;   (local)
    188  if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) {
    189  ws->fence_reference(&(*sdst)->gfx, NULL);
    190  ws->fence_reference(&(*sdst)->sdma, NULL);
    191  tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL);
    192  si_resource_reference(&(*sdst)->fine.buf, NULL);
    193  FREE(*sdst);
    195  *sdst = ssrc;
si_test_dma.c, in si_test_dma():
    202  struct si_texture *sdst;   (local)
    276  sdst = (struct si_texture *)dst;
    284  array_mode_to_string(sscreen, &sdst->surface), tsrc.width0, tsrc.height0,
    294  si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4, SI_COHERENCY_SHADER, false);
    325  if (!ssrc->surface.is_linear && !sdst->surface.is_linear && rand() & 1) {
    351  if (ssrc->surface.is_linear && !sdst->surface.is_linear && rand() % 4 == 0) {
si_clear.c, in si_clear_render_target():
    690  struct si_texture *sdst = (struct si_texture *)dst->texture;   (local)
    692  if (dst->texture->nr_samples <= 1 && !vi_dcc_enabled(sdst, dst->u.tex.level)) {
si_blit.c, in si_resource_copy_region():
    856  struct si_texture *sdst = (struct si_texture *)dst;   (local)
    873  !vi_dcc_enabled(sdst, dst_level) &&
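Across the radeonsi hits above, `sdst` is simply the local name for the destination object once the generic Gallium pointer (pipe_resource, texture, or fence) has been downcast to the driver-private si_resource/si_texture/si_multi_fence type, usually paired with `ssrc` for the source. Below is a minimal sketch of that pattern, assuming stubbed stand-in types rather than the real Mesa headers; si_resource_cast and the field names here are illustrative only.

// Minimal sketch (not the real Mesa types): the radeonsi structs embed their
// Gallium base object as the first member, which is what makes the downcast
// to the driver-private type work in practice.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct pipe_resource { int width0; };          // stand-in for the Gallium base object

struct si_resource {
   pipe_resource b;                            // base object comes first
   uint64_t gpu_address;
   uint64_t bo_size;
};

// Hypothetical stand-in for the si_resource(dst) helper seen in the hits above.
static si_resource *si_resource_cast(pipe_resource *res) {
   return reinterpret_cast<si_resource *>(res);
}

// Shaped after si_replace_buffer_storage(): copy bookkeeping from src to dst.
static void replace_buffer_storage(pipe_resource *dst, pipe_resource *src) {
   si_resource *sdst = si_resource_cast(dst);
   si_resource *ssrc = si_resource_cast(src);
   sdst->gpu_address = ssrc->gpu_address;
   assert(sdst->bo_size == ssrc->bo_size);
}

int main() {
   si_resource a{{256}, 0x1000, 4096};
   si_resource b{{256}, 0x2000, 4096};
   replace_buffer_storage(&a.b, &b.b);
   std::printf("dst gpu_address = 0x%llx\n",
               static_cast<unsigned long long>(a.gpu_address));
}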
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/
SOPInstructions.td:
     72  bits<7> sdst;
     77  let Inst{22-16} = !if(ps.has_sdst, sdst, ?);
     82  opName, (outs SReg_32:$sdst), (ins SSrc_b32:$src0),
     83  "$sdst, $src0", pattern
    100  opName, (outs SReg_64:$sdst), (ins SSrc_b64:$src0),
    101  "$sdst, $src0", pattern
    106  opName, (outs SReg_32:$sdst), (ins SSrc_b64:$src0),
    107  "$sdst, $src0", pattern
    112  opName, (outs SReg_64:$sdst), (ins SSrc_b32:$src0),
    113  "$sdst, $src0", pattern
    [all …]
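In these TableGen formats, `sdst` is declared as a 7-bit scalar-destination field (the hit at line 72) and, for SOP-style scalar encodings, is placed at bits 22-16 of the 32-bit instruction word (line 77). A small C++ sketch of just that field placement follows; the surrounding opcode and source fields, and the register-number-to-field mapping, are assumptions left out of the sketch.

// Sketch of the bit placement implied by `let Inst{22-16} = !if(ps.has_sdst, sdst, ?);`
#include <cstdint>
#include <cstdio>

static uint32_t place_sdst(uint32_t inst, uint32_t sdst, bool has_sdst) {
   inst &= ~(0x7Fu << 16);            // clear Inst{22-16}
   if (has_sdst)                      // mirrors the !if(ps.has_sdst, ...) guard
      inst |= (sdst & 0x7Fu) << 16;   // 7-bit scalar destination register field
   return inst;
}

int main() {
   // Example field value 5 (the exact register-to-field numbering is an
   // assumption for illustration, not taken from the listing above).
   std::printf("0x%08X\n", place_sdst(0, 5, true));
}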
SMInstructions.td:
     61  bits<7> sdst;
    112  (outs dstClass:$sdst),
    114  " $sdst, $sbase, $offset$glc", []> {
    122  (outs dstClass:$sdst),
    124  " $sdst, $sbase, $offset$glc", []> {
    158  opName, (outs SReg_64_XEXEC:$sdst), (ins),
    159  " $sdst", [(set i64:$sdst, (node))]> {
    207  !if(isRet, (outs dataClass:$sdst), (outs)),
    211  !if(isRet, " $sdst", " $sdata") # ", $sbase, $offset" # !if(isRet, " glc", ""),
    218  let Constraints = !if(isRet, "$sdst = $sdata", "");
    [all …]
VOPCInstructions.td:
     60  let Outs64 = (outs VOPDstS64:$sdst);
    117  // This class is used only with VOPC instructions. Use $sdst for out operand
    128  (inst p.DstRC:$sdst),
    131  (inst p.DstRC:$sdst, p.Src0RC32:$src0),
    134  (inst p.DstRC:$sdst, p.Src0RC32:$src0, p.Src1RC32:$src1),
    154  [(set i1:$sdst,
    161  [(set i1:$sdst, (setcc P.Src0VT:$src0, P.Src1VT:$src1, cond))]);
    537  let Asm64 = "$sdst, $src0_modifiers, $src1";
    551  [(set i1:$sdst,
    691  // Differs from VOP3e by destination name (sdst) as VOPC doesn't have vector dst
    [all …]
VOPInstructions.td:
    247  bits<7> sdst;
    251  let Inst{14-8} = sdst;
    394  bits<8> sdst; // {vcc_sdst{0}, sdst{6-0}}
    396  let Inst{46-40} = !if(P.EmitDst, sdst{6-0}, 0);
    397  let Inst{47} = !if(P.EmitDst, sdst{7}, 0);
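The hits at lines 394-397 show the SDWA (GFX9) variant, where `sdst` widens to 8 bits and is split across the 64-bit encoding: the low 7 bits go to Inst{46-40} and bit 7 to Inst{47}, both gated on P.EmitDst. A sketch of that split follows; the meaning of bit 7 beyond the comment shown above is not assumed here.

// Sketch of the split implied by lines 396-397 above.
#include <cstdint>
#include <cstdio>

static uint64_t place_sdwa_sdst(uint64_t inst, uint8_t sdst, bool emit_dst) {
   inst &= ~((0x7FULL << 40) | (1ULL << 47));                // clear Inst{47-40}
   if (emit_dst) {                                           // !if(P.EmitDst, ..., 0)
      inst |= static_cast<uint64_t>(sdst & 0x7F) << 40;      // Inst{46-40} = sdst{6-0}
      inst |= static_cast<uint64_t>((sdst >> 7) & 1) << 47;  // Inst{47}    = sdst{7}
   }
   return inst;
}

int main() {
   std::printf("0x%016llX\n",
               static_cast<unsigned long long>(place_sdwa_sdst(0, 0x85, true)));
}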
SIPeepholeSDWA.cpp, in isConvertibleToSDWA():
    876  const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
    886  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
SIPeepholeSDWA.cpp, in convertToSDWA():
    930  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    932  AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    935  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
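These C++ hits use LLVM's named-operand helpers to check whether an instruction (or its SDWA counterpart) actually has an `sdst` operand before rewriting it, with -1 meaning "absent". The mock below only illustrates that lookup idiom: the real getNamedOperand/getNamedOperandIdx live in the AMDGPU backend and are backed by tables generated from the .td files above, while the types and values here are simplified stand-ins, not the LLVM API.

// Self-contained mock of the "does this opcode have an sdst operand?" check.
#include <array>
#include <cstdio>

enum class OpName { src0, src1, vdst, sdst };

struct FakeInstrDesc {
   std::array<int, 4> operand_index;   // -1 means the opcode has no such operand
};

// Mirrors the shape of AMDGPU::getNamedOperandIdx(Opcode, OpName).
static int getNamedOperandIdx(const FakeInstrDesc &desc, OpName name) {
   return desc.operand_index[static_cast<int>(name)];
}

int main() {
   FakeInstrDesc with_carry_out{{0, 1, 2, 3}};   // has an sdst (e.g. carry) operand
   FakeInstrDesc plain_move{{0, -1, 1, -1}};     // no sdst operand
   for (const auto *d : {&with_carry_out, &plain_move})
      std::printf("sdst idx = %d\n", getNamedOperandIdx(*d, OpName::sdst));
}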
SIInstrInfo.td:
   1470  (outs DstRCSDWA:$sdst),
   1479  string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
   1493  string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
   1556  "$sdst",
   1558  ""); // use $sdst for VOPC
   1601  "$sdst", // VOPC
VOP3Instructions.td:
    194  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
    195  let Asm64 = " $vdst, $sdst, $src0, $src1, $src2";
    214  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
    215  let Asm64 = " $vdst, $sdst, $src0, $src1, $src2$clamp";
VOP2Instructions.td:
    252  let Asm64 = "$vdst, $sdst, $src0, $src1";
    257  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
    270  let Asm64 = "$vdst, $sdst, $src0, $src1, $src2";
    275  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
SIInstructions.td:
    125  def EXIT_WWM : SPseudoInstSI <(outs SReg_64:$sdst), (ins SReg_64:$src0)> {
    158  (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
    162  (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
    168  def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins),
    169  [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
SILoadStoreOptimizer.cpp, in mergeSBufferLoadImmPair():
    666  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
    667  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);
SIShrinkInstructions.cpp, in runOnMachineFunction():
    465  AMDGPU::OpName::sdst);
/external/icu/icu4j/main/classes/localespi/src/com/ibm/icu/impl/javaspi/util/
TimeZoneNameProviderICU.java, in getDisplayName():
     35  String sdst = tznames.getDisplayName(canonicalID, NameType.SHORT_DAYLIGHT, date);   (local)
     37  if (lstd != null && ldst != null && sstd != null && sdst != null) {
     43  dispName = daylight ? sdst : sstd;
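In this ICU hit, `sdst` holds the short daylight-saving display name (alongside lstd, ldst, sstd), and the provider picks one based on the requested style and daylight flag (the hit at line 43: dispName = daylight ? sdst : sstd). A tiny illustrative sketch of that selection follows, written in C++ rather than the original Java and using no real ICU calls; the variable names only mirror the Java code.

// Pick a time-zone display name: short vs. long style, standard vs. daylight.
#include <cstdio>
#include <string>

static std::string displayName(bool daylight, bool shortStyle,
                               const std::string &lstd, const std::string &ldst,
                               const std::string &sstd, const std::string &sdst) {
   if (shortStyle)
      return daylight ? sdst : sstd;   // mirrors the selection in the listing above
   return daylight ? ldst : lstd;
}

int main() {
   std::printf("%s\n", displayName(true, true,
                                   "Pacific Standard Time", "Pacific Daylight Time",
                                   "PST", "PDT").c_str());
}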
/external/llvm/lib/Target/AMDGPU/
SIInstrFormats.td:
    182  bits<7> sdst;
    187  let Inst{22-16} = sdst;
    192  bits<7> sdst;
    198  let Inst{22-16} = sdst;
    214  bits <7> sdst;
    218  let Inst{22-16} = sdst;
    224  bits <7> sdst = 0;
    229  let Inst{22-16} = sdst;
    245  bits<7> sdst;
    250  let Inst{21-15} = sdst;
    [all …]
VIInstrFormats.td:
     94  bits<7> sdst;
     98  let Inst{12-6} = sdst;
    147  // Differs from VOP3e by destination name (sdst) as VOPC doesn't have vector dst
    149  bits<8> sdst;
    151  let Inst{7-0} = sdst;
    162  bits<7> sdst;
    167  let Inst{14-8} = sdst;
SIInstrInfo.td:
    756  op, opName, (outs SReg_32:$sdst), (ins SSrc_32:$src0),
    757  opName#" $sdst, $src0", pattern
    761  op, opName, (outs SReg_64:$sdst), (ins SSrc_64:$src0),
    762  opName#" $sdst, $src0", pattern
    767  def "" : SOP1_Pseudo <opName, (outs SReg_64:$sdst), (ins), pattern>;
    769  def _si : SOP1_Real_si <op, opName, (outs SReg_64:$sdst), (ins),
    770  opName#" $sdst"> {
    774  def _vi : SOP1_Real_vi <op, opName, (outs SReg_64:$sdst), (ins),
    775  opName#" $sdst"> {
    786  let sdst = 0;
    [all …]
SIInstructions.td:
     79  (outs SReg_64:$sdst), ?, " $sdst", [(set i64:$sdst, (int_amdgcn_s_memtime))]
    104  [(set i32:$sdst, (not i32:$src0))]
    108  [(set i64:$sdst, (not i64:$src0))]
    116  [(set i32:$sdst, (bitreverse i32:$src0))]
    124  [(set i32:$sdst, (ctpop i32:$src0))]
    132  [(set i32:$sdst, (cttz_zero_undef i32:$src0))]
    137  [(set i32:$sdst, (AMDGPUffbh_u32 i32:$src0))]
    142  [(set i32:$sdst, (int_AMDGPU_flbit_i32 i32:$src0))]
    146  [(set i32:$sdst, (sext_inreg i32:$src0, i8))]
    149  [(set i32:$sdst, (sext_inreg i32:$src0, i16))]
    [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/Disassembler/
AMDGPUDisassembler.cpp, in convertSDWAInst():
    271  if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
    275  int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    279  AMDGPU::OpName::sdst);