/third_party/flatbuffers/swift/Sources/FlatBuffers/ |
D | Constants.swift |
    30    public typealias SOffset = Int32    typealias
    33    public let FlatBufferMaxSize = UInt32.max << ((MemoryLayout<SOffset>.size * 8 - 1) - 1)
|
D | FlatBufferBuilder.swift |
    168   let vTableOffset = push(element: SOffset(0))    in endTable()
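In both Swift hits SOffset is FlatBuffers' signed 32-bit offset type: Constants.swift derives the maximum buffer size from its width, and the endTable() hit pushes a zero SOffset as a placeholder for the table's offset back to its vtable, which gets filled in once the vtable has been written (or deduplicated against an identical one). A minimal C++ sketch of that placeholder-then-patch pattern, with a made-up MiniBuilder type standing in for the real FlatBufferBuilder (layout and naming are illustrative only, not the library's implementation):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Illustrative stand-in for the "push(element: SOffset(0)) now, patch later"
    // step seen in FlatBufferBuilder.endTable(). soffset_t mirrors the Swift
    // typealias SOffset = Int32.
    using soffset_t = int32_t;

    struct MiniBuilder {
      std::vector<uint8_t> buf;

      // Reserve a zeroed SOffset slot and remember where it lives.
      size_t pushPlaceholder() {
        size_t pos = buf.size();
        soffset_t zero = 0;
        buf.resize(buf.size() + sizeof(zero));
        std::memcpy(buf.data() + pos, &zero, sizeof(zero));
        return pos;
      }

      // Once the vtable location is known, overwrite the placeholder with the
      // real table-to-vtable offset.
      void patch(size_t pos, soffset_t value) {
        std::memcpy(buf.data() + pos, &value, sizeof(value));
      }
    };

    int main() {
      MiniBuilder b;
      size_t tableStart = b.pushPlaceholder();  // endTable(): reserve the slot
      // ... vtable is written (or an identical one is reused) here ...
      b.patch(tableStart, 16);                  // hypothetical vtable offset
      return 0;
    }

The offset is signed because a shared vtable may sit either before or after the table that refers to it, which is why the Swift port defines SOffset as Int32 rather than UInt32.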
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | AMDGPUISelDAGToDAG.cpp |
    209   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
    213   SDValue &SOffset, SDValue &Offset, SDValue &GLC,
    217   SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
    221   SDValue &SOffset, SDValue &ImmOffset) const;
    226   bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
    1339  SDValue &VAddr, SDValue &SOffset,    in SelectMUBUF() argument
    1362  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);    in SelectMUBUF()
    1425  SOffset =    in SelectMUBUF()
    1434  SDValue &VAddr, SDValue &SOffset,    in SelectMUBUFAddr64() argument
    1444  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,    in SelectMUBUFAddr64()
    [all …]
|
D | SIRegisterInfo.cpp |
    634   unsigned SOffset = ScratchOffsetReg;    in buildSpillLoadStore() local
    653   SOffset = AMDGPU::NoRegister;    in buildSpillLoadStore()
    663   SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);    in buildSpillLoadStore()
    665   if (SOffset == AMDGPU::NoRegister) {    in buildSpillLoadStore()
    673   SOffset = ScratchOffsetReg;    in buildSpillLoadStore()
    679   BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)    in buildSpillLoadStore()
    718   .addReg(SOffset, SOffsetRegState)    in buildSpillLoadStore()
|
D | GCNHazardRecognizer.cpp |
    689   const MachineOperand *SOffset =    in createsVALUHazard() local
    694   (!SOffset || !SOffset->isReg()))    in createsVALUHazard()
|
D | AMDGPUInstructionSelector.cpp |
    988   Register SOffset = MI.getOperand(4).getReg();    in selectStoreIntrinsic() local
    1011  .addUse(SOffset)    in selectStoreIntrinsic()
    2165  Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()    in selectMUBUFScratchOffen() local
    2178  MIB.addReg(SOffset);    in selectMUBUFScratchOffen()
|
D | SIInstrInfo.cpp |
    326   const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);    in getMemOperandWithOffset() local
    327   if (SOffset && SOffset->isReg()) {    in getMemOperandWithOffset()
    342   BaseOp = SOffset;    in getMemOperandWithOffset()
    355   if (SOffset) // soffset can be an inline immediate.    in getMemOperandWithOffset()
    356   Offset += SOffset->getImm();    in getMemOperandWithOffset()
    4756  MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);    in legalizeOperands() local
    4771  .add(*SOffset)    in legalizeOperands()
    4802  .add(*SOffset)    in legalizeOperands()
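The getMemOperandWithOffset() hits show the two shapes the soffset operand can take: a register, in which case it is reported as the base operand (line 342), or an inline immediate, in which case it is folded into the numeric offset (lines 355-356). A condensed sketch of that branching, using a hypothetical Operand struct in place of llvm::MachineOperand:

    #include <cstdint>

    // Hypothetical simplification of the register-vs-immediate handling of the
    // soffset operand in SIInstrInfo::getMemOperandWithOffset(). Operand is a
    // stand-in for llvm::MachineOperand, not LLVM's API.
    struct Operand {
      bool IsReg;
      unsigned Reg;   // valid when IsReg
      int64_t Imm;    // valid when !IsReg
    };

    struct MemOperandInfo {
      const Operand *Base = nullptr;
      int64_t Offset = 0;
    };

    MemOperandInfo classifySOffset(const Operand *SOffset, int64_t ImmOffset) {
      MemOperandInfo Info;
      Info.Offset = ImmOffset;
      if (SOffset && SOffset->IsReg)
        Info.Base = SOffset;          // a register soffset acts as the base
      else if (SOffset)
        Info.Offset += SOffset->Imm;  // an inline-immediate soffset folds into the offset
      return Info;
    }

    int main() {
      Operand RegSOffset{true, 5, 0};
      MemOperandInfo MOI = classifySOffset(&RegSOffset, 32);
      return MOI.Base != nullptr ? 0 : 1;  // register case: base is the soffset
    }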
|
D | SIISelLowering.cpp |
    6116  SDValue SOffset,    in getBufferOffsetForMMO() argument
    6120  if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) ||    in getBufferOffsetForMMO()
    6130  cast<ConstantSDNode>(SOffset)->getSExtValue() +    in getBufferOffsetForMMO()
    7191  uint32_t SOffset, ImmOffset;    in setBufferOffsets() local
    7192  if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, Align)) {    in setBufferOffsets()
    7194  Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);    in setBufferOffsets()
    7196  return SOffset + ImmOffset;    in setBufferOffsets()
    7202  uint32_t SOffset, ImmOffset;    in setBufferOffsets() local
    7204  if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,    in setBufferOffsets()
    7207  Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);    in setBufferOffsets()
|
D | AMDGPURegisterBankInfo.cpp |
    1381  Register SOffset = MI.getOperand(4).getReg();    in selectStoreIntrinsic() local
    1419  .addUse(SOffset)    in selectStoreIntrinsic()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/MCTargetDesc/ |
D | HexagonMCCodeEmitter.cpp |
    735   unsigned SOffset = 0;    in getMachineOpValue() local
    750   ++SOffset;    in getMachineOpValue()
    776   : SOffset;    in getMachineOpValue()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARC/ |
D | ARCISelLowering.cpp |
    293   SDValue SOffset = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);    in LowerCall() local
    295   ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), StackPtr, SOffset);    in LowerCall()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/Utils/ |
D | AMDGPUBaseInfo.h |
    660   bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
|
D | AMDGPUBaseInfo.cpp |
    1269  bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,    in splitMUBUFOffset() argument
    1304  SOffset = Overflow;    in splitMUBUFOffset()
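splitMUBUFOffset() is the helper that the setBufferOffsets() hits in SIISelLowering.cpp call: it splits a constant buffer offset into the part that fits the MUBUF instruction's unsigned immediate field and an overflow part that has to live in SOffset (the line 1304 hit is exactly that overflow assignment). A rough sketch of the idea, assuming a 12-bit immediate field and ignoring the subtarget- and alignment-dependent checks the real helper performs:

    #include <cstdint>

    // Hypothetical simplification of AMDGPU::splitMUBUFOffset(): a constant
    // offset that does not fit the instruction's immediate field is split into
    // ImmOffset (encoded directly) plus SOffset (materialized in a scalar
    // register). The real helper also enforces the subtarget's immediate range
    // and alignment rules, and can return false when no legal split exists.
    constexpr uint32_t kMaxImm = 4095;  // assumed 12-bit immediate field

    bool splitBufferOffsetSketch(uint32_t Imm, uint32_t &SOffset,
                                 uint32_t &ImmOffset) {
      if (Imm <= kMaxImm) {        // everything fits in the immediate field
        SOffset = 0;
        ImmOffset = Imm;
        return true;
      }
      ImmOffset = Imm & kMaxImm;   // low bits stay in the immediate
      SOffset = Imm - ImmOffset;   // the overflow goes to the scalar offset
      return true;
    }

    int main() {
      uint32_t SOffset = 0, ImmOffset = 0;
      splitBufferOffsetSketch(0x1234, SOffset, ImmOffset);
      return SOffset + ImmOffset == 0x1234 ? 0 : 1;  // parts recombine to the original
    }

As the SIISelLowering.cpp hits show, setBufferOffsets() then emits the two parts as separate SDValues (Offsets[1] carries SOffset), and getBufferOffsetForMMO() only folds them back together when both are constants.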
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64InstrInfo.cpp |
    3430  StackOffset &SOffset,    in isAArch64FrameOffsetLegal() argument
    3474  IsMulVL ? (SOffset.getScalableBytes()) : (SOffset.getBytes());    in isAArch64FrameOffsetLegal()
    3511  SOffset = StackOffset(Offset, MVT::nxv1i8) +    in isAArch64FrameOffsetLegal()
    3512  StackOffset(SOffset.getBytes(), MVT::i8);    in isAArch64FrameOffsetLegal()
    3514  SOffset = StackOffset(Offset, MVT::i8) +    in isAArch64FrameOffsetLegal()
    3515  StackOffset(SOffset.getScalableBytes(), MVT::nxv1i8);    in isAArch64FrameOffsetLegal()
    3517  (SOffset ? 0 : AArch64FrameOffsetIsLegal);    in isAArch64FrameOffsetLegal()
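In the AArch64 hits SOffset is a StackOffset, which tracks an ordinary byte component and a scalable (SVE, vector-length-dependent) byte component side by side; the lines above suggest isAArch64FrameOffsetLegal() folds what the instruction can encode, writes the remainder back into SOffset, and the final `SOffset ? 0 : AArch64FrameOffsetIsLegal` test asks whether anything was left over. A toy analogue of that two-component bookkeeping (not LLVM's actual StackOffset class):

    #include <cstdint>

    // Toy stand-in for the fixed/scalable split that AArch64's StackOffset
    // keeps: SVE frame offsets scale with the runtime vector length, so they
    // cannot be merged into plain byte offsets and are carried separately.
    struct ToyStackOffset {
      int64_t Bytes = 0;          // fixed part (cf. SOffset.getBytes())
      int64_t ScalableBytes = 0;  // VL-scaled part (cf. SOffset.getScalableBytes())

      ToyStackOffset operator+(const ToyStackOffset &RHS) const {
        return {Bytes + RHS.Bytes, ScalableBytes + RHS.ScalableBytes};
      }
      explicit operator bool() const {  // "is there an unencoded remainder?"
        return Bytes != 0 || ScalableBytes != 0;
      }
    };

    int main() {
      ToyStackOffset SOffset{64, 0};               // 64 fixed bytes to fold
      SOffset = SOffset + ToyStackOffset{-64, 0};  // instruction encoded all of it
      return SOffset ? 1 : 0;                      // empty remainder => offset is legal
    }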
|