| /external/ComputeLibrary/src/runtime/NEON/functions/ |
| D | NEElementwiseOperations.cpp |

    struct NEElementwiseMax::Impl   // struct in arm_compute::NEElementwiseMax
    {
        const ITensor *src_0{ nullptr };
        const ITensor *src_1{ nullptr };
        ITensor *dst{ nullptr };
        std::unique_ptr<cpu::CpuElementwiseMax> op{ nullptr };
    };

    struct NEElementwiseMin::Impl   // struct in arm_compute::NEElementwiseMin
    {
        const ITensor *src_0{ nullptr };
        const ITensor *src_1{ nullptr };
        ITensor *dst{ nullptr };
        std::unique_ptr<cpu::CpuElementwiseMin> op{ nullptr };
    };
    [all …]
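Every function in this directory follows the same Pimpl shape: the public NEON function owns an Impl holding the user-facing tensors plus a stateless cpu:: operator, and run() simply repackages those pointers into an ITensorPack for the operator. A minimal sketch of that dispatch, consistent with the fields above (the run() body itself is not part of this listing):

    void NEElementwiseMax::run()
    {
        // Rebuild the pack on every call; the operator holds only the
        // configuration, never tensor state.
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC_0, _impl->src_0);
        pack.add_tensor(TensorType::ACL_SRC_1, _impl->src_1);
        pack.add_tensor(TensorType::ACL_DST, _impl->dst);
        _impl->op->run(pack);
    }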
|
| D | NEDepthwiseConvolutionLayer.cpp |

    struct NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::Impl
    {
        ITensor *src{ nullptr };           // SRC_0
        ITensor *dst{ nullptr };           // DST_0
        const ITensor *weights{ nullptr };
        const ITensor *biases{ nullptr };
        Tensor permuted_input{};           // INT_0
        Tensor permuted_weights{};         // INT_1
        Tensor permuted_output{};          // INT_2
        Tensor workspace{};                // INT_3
        Tensor packed_weights{};           // INT_4
        [all …]
|
| D | NEWinogradConvolutionLayer.cpp |

    struct NEWinogradConvolutionLayer::Impl   // struct in arm_compute::NEWinogradConvolutionLayer
    {
        MemoryGroup memory_group{};
        std::unique_ptr<cpu::CpuWinogradConv2d> op{ nullptr };
        ITensorPack run_pack{};
        ITensorPack prep_pack{};
        WorkspaceData<Tensor> workspace{};
        experimental::MemoryRequirements aux_mem_req{};
        const ITensor *original_weights{ nullptr };
        bool is_prepared{ false };
        bool is_activationlayer_enabled{ false };
        [all …]
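The workspace/aux_mem_req pair is how operator-internal scratch memory surfaces at the function layer: after configuring the operator, the function queries its memory requirements and materializes them as tensors owned by the memory group, appending them to the packs. A hedged sketch of that configure() tail, assuming the library's manage_workspace helper and that src/weights/biases/dst are configure()'s parameters:

    // End of a typical configure(): hoist the operator's scratch
    // requirements into function-owned, memory-group-managed tensors.
    _impl->aux_mem_req = _impl->op->workspace();
    _impl->run_pack    = { { TensorType::ACL_SRC_0, src },
                           { TensorType::ACL_SRC_1, weights },
                           { TensorType::ACL_SRC_2, biases },
                           { TensorType::ACL_DST, dst } };
    _impl->prep_pack   = { { TensorType::ACL_SRC_1, weights },
                           { TensorType::ACL_SRC_2, biases } };
    _impl->workspace   = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group,
                                                  _impl->run_pack, _impl->prep_pack);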
|
| D | NEGEMMLowpMatrixMultiplyCore.cpp |

    struct NEGEMMLowpMatrixMultiplyCore::Impl   // struct in arm_compute::NEGEMMLowpMatrixMultiplyCore
    {
        const ITensor *b{ nullptr };
        std::unique_ptr<cpu::CpuGemmLowpMatrixMultiplyCore> op{ nullptr };
        ITensorPack run_pack{};
        ITensorPack prep_pack{};
        MemoryGroup memory_group{};
        IWeightsManager *weights_manager{ nullptr };
        MemoryRequirements aux_mem_req{};
        WorkspaceData<Tensor> workspace_tensors{};
        bool is_prepared{ false };
    };
|
| D | NEPoolingLayer.cpp |

    struct NEPoolingLayer::Impl   // struct in arm_compute::NEPoolingLayer
    {
        ITensor *src{ nullptr };
        ITensor *dst{ nullptr };
        ITensor *indices{ nullptr };
        std::unique_ptr<cpu::CpuPool2d> op{ nullptr };
        MemoryGroup memory_group{};
        ITensorPack run_pack{};
        WorkspaceData<Tensor> workspace_tensors{};
    };
|
| D | NEGEMMConv2d.cpp |

    struct NEGEMMConv2d::Impl   // struct in arm_compute::NEGEMMConv2d
    {
        const ITensor *weights{ nullptr };
        std::unique_ptr<OperatorType> op{ nullptr };
        ITensorPack run_pack{};
        ITensorPack prep_pack{};
        WorkspaceData<Tensor> workspace{};
        MemoryGroup memory_group{};
        bool is_prepared{ false };
        experimental::MemoryRequirements aux_mem_req{};
    };
|
| D | NESoftmaxLayer.cpp |

    template <bool IS_LOG>
    struct NESoftmaxLayerGeneric<IS_LOG>::Impl   // struct in arm_compute::NESoftmaxLayerGeneric
    {
        const ITensor *src{ nullptr };
        ITensor *dst{ nullptr };
        Tensor max{ nullptr };
        std::unique_ptr<cpu::CpuSoftmaxGeneric<IS_LOG>> op{ nullptr };
        MemoryGroup memory_group{};
        ITensorPack run_pack{};
        WorkspaceData<Tensor> workspace_tensors{};
    };
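Impl here belongs to a class template, so the out-of-line definitions in the .cpp only exist for specializations that are explicitly instantiated. The file is expected to close with the two instantiations the library ships (NESoftmaxLayer and NELogSoftmaxLayer alias them in the header):

    // Explicit instantiations: the only two variants built into the library.
    template class NESoftmaxLayerGeneric<false>; // softmax
    template class NESoftmaxLayerGeneric<true>;  // log-softmax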
|
| D | NEPixelWiseMultiplication.cpp |

    struct NEPixelWiseMultiplication::Impl   // struct in arm_compute::NEPixelWiseMultiplication
    {
        const ITensor *src_0{ nullptr };
        const ITensor *src_1{ nullptr };
        ITensor *dst{ nullptr };
        std::unique_ptr<cpu::CpuMul> op{ nullptr };
    };

    struct NEComplexPixelWiseMultiplication::Impl   // struct in arm_compute::NEComplexPixelWiseMultiplication
    {
        ITensor *src_0{ nullptr };
        ITensor *src_1{ nullptr };
        ITensor *dst{ nullptr };
        std::unique_ptr<cpu::CpuComplexMul> op{ nullptr };
    };
|
| D | NEPooling3dLayer.cpp |

    struct NEPooling3dLayer::Impl   // struct in arm_compute::NEPooling3dLayer
    {
        const ITensor *src{ nullptr };
        ITensor *dst{ nullptr };
        std::unique_ptr<cpu::CpuPool3d> op{ nullptr };
        MemoryGroup memory_group{};
        ITensorPack run_pack{};
        WorkspaceData<Tensor> workspace_tensors{};
    };
|
| D | NEGEMM.cpp |

    struct NEGEMM::Impl   // struct in arm_compute::NEGEMM
    {
        MemoryGroup memory_group{};
        IWeightsManager *weights_manager{ nullptr };
        std::unique_ptr<cpu::CpuGemm> op{ nullptr };
        const ITensor *original_b{ nullptr };
        bool is_prepared{ false };
        ITensorPack run_pack{};
        ITensorPack prep_pack{};
        WorkspaceData<Tensor> workspace{};
        experimental::MemoryRequirements aux_mem_req{};
    };
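original_b together with is_prepared implements one-shot weight preparation: the first run (or an explicit prepare()) lets the operator pack/reshape the B matrix via prep_pack, after which the original weights may be released. A sketch of the usual shape of this logic, not a verbatim copy of NEGEMM.cpp:

    void NEGEMM::prepare()
    {
        if(!_impl->is_prepared)
        {
            _impl->op->prepare(_impl->prep_pack); // pack/reshape B once
            _impl->is_prepared = true;
        }
    }

    void NEGEMM::run()
    {
        prepare();
        // Acquire the auxiliary workspace for the duration of this call.
        MemoryGroupResourceScope scope_mg(_impl->memory_group);
        _impl->op->run(_impl->run_pack);
    }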
|
| D | NEFullyConnectedLayer.cpp |

    struct NEFullyConnectedLayer::Impl   // struct in arm_compute::NEFullyConnectedLayer
    {
        MemoryGroup memory_group{};
        IWeightsManager *weights_manager{ nullptr };
        std::unique_ptr<cpu::CpuFullyConnected> op{ nullptr };
        const ITensor *original_weights{ nullptr };
        ITensorPack run_pack{};
        WorkspaceData<Tensor> workspace{};
        experimental::MemoryRequirements aux_mem_req{};
        bool is_prepared{ false };
    };
|
| D | NEGEMMLowpOutputStage.cpp |

    struct NEGEMMLowpOutputStage::Impl   // struct in arm_compute::NEGEMMLowpOutputStage
    {
        const ITensor *src{ nullptr };
        const ITensor *bias{ nullptr };
        ITensor *dst{ nullptr };
        ITensorPack run_pack{};
        std::unique_ptr<cpu::CpuGemmLowpOutputStage> op{ nullptr };
    };
|
| D | NEGEMMConvolutionLayer.cpp |

    struct NEGEMMConvolutionLayer::Impl   // struct in arm_compute::NEGEMMConvolutionLayer
    {
        const ITensor *weights{ nullptr };
        std::unique_ptr<cpu::CpuGemmConv2d> op{ nullptr };
        ITensorPack run_pack{};
        MemoryGroup memory_group{};
        IWeightsManager *weights_manager{ nullptr };
        MemoryRequirements aux_mem_req{};
        WorkspaceData<Tensor> workspace_tensors{};
        bool is_prepared{ false };
    };
|
| D | NEConvolutionLayer.cpp |

    struct NEConvolutionLayer::Impl   // struct in arm_compute::NEConvolutionLayer
    {
        MemoryGroup memory_group{};
        std::shared_ptr<IMemoryManager> memory_manager{};
        std::unique_ptr<cpu::ICpuOperator> op{ nullptr };
        ITensorPack run_pack{};
        ITensorPack prep_pack{};
        WorkspaceData<Tensor> workspace{};
        experimental::MemoryRequirements aux_mem_req{};
        std::unique_ptr<IFunction> func{ nullptr };
    };
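Unlike its siblings, NEConvolutionLayer keeps both an operator (op) and a legacy function (func): configure() selects a convolution algorithm (GEMM, Winograd, direct, FFT), and only the paths already ported to the operator API populate op, while the rest fall back to an IFunction. run() then just branches; a sketch of that dispatch, not the actual file:

    void NEConvolutionLayer::run()
    {
        prepare();
        MemoryGroupResourceScope scope_mg(_impl->memory_group);

        if(_impl->func != nullptr)
        {
            _impl->func->run();              // legacy IFunction-based path
        }
        else
        {
            _impl->op->run(_impl->run_pack); // operator-based path
        }
    }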
|
| /external/ComputeLibrary/src/runtime/CL/functions/ |
| D | CLElementwiseUnaryLayer.cpp |

    struct CLRsqrtLayer::Impl   // struct in arm_compute::CLRsqrtLayer
    {
        const ICLTensor *src{ nullptr };
        ICLTensor *dst{ nullptr };
        std::unique_ptr<opencl::ClRsqrt> op{ nullptr };
    };

    struct CLExpLayer::Impl   // struct in arm_compute::CLExpLayer
    {
        const ICLTensor *src{ nullptr };
        ICLTensor *dst{ nullptr };
        std::unique_ptr<opencl::ClExp> op{ nullptr };
    };

    struct CLNegLayer::Impl   // struct in arm_compute::CLNegLayer
    {
        const ICLTensor *src{ nullptr };
        [all …]
|
| D | CLElementwiseOperations.cpp |

    struct CLArithmeticAddition::Impl   // struct in arm_compute::CLArithmeticAddition
    {
        const ICLTensor *src_0{ nullptr };
        const ICLTensor *src_1{ nullptr };
        ICLTensor *dst{ nullptr };
        std::unique_ptr<opencl::ClAdd> op{ nullptr };
    };

    struct CLArithmeticSubtraction::Impl   // struct in arm_compute::CLArithmeticSubtraction
    {
        const ICLTensor *src_0{ nullptr };
        const ICLTensor *src_1{ nullptr };
        ICLTensor *dst{ nullptr };
        std::unique_ptr<opencl::ClSub> op{ nullptr };
    };
    [all …]
|
| D | CLWinogradConvolutionLayer.cpp |

    struct CLWinogradConvolutionLayer::Impl   // struct in arm_compute::CLWinogradConvolutionLayer
    {
        const ICLTensor *src{ nullptr };
        const ICLTensor *weights{ nullptr };
        const ICLTensor *biases{ nullptr };
        ICLTensor *dst{ nullptr };
        std::unique_ptr<opencl::ClWinogradConv2d> op{ nullptr };
        ITensorPack run_pack{};
        MemoryGroup memory_group{};
        WorkspaceData<CLTensor> workspace_tensors{};
        bool is_prepared{ false };
    };
|
| D | CLFullyConnectedLayer.cpp |

    struct CLFullyConnectedLayer::Impl   // struct in arm_compute::CLFullyConnectedLayer
    {
        MemoryGroup memory_group{};
        IWeightsManager *weights_manager{ nullptr };
        std::unique_ptr<opencl::ClFullyConnected> op{ nullptr };
        const ITensor *original_weights{ nullptr };
        ITensorPack run_pack{};
        WorkspaceData<CLTensor> workspace{};
        experimental::MemoryRequirements aux_mem_req{};
        bool is_prepared{ false };
    };
|
| D | CLGEMM.cpp |

    struct CLGEMM::Impl   // struct in arm_compute::CLGEMM
    {
        const ICLTensor *b{ nullptr };
        std::unique_ptr<OperatorType> op{ nullptr };
        MemoryGroup memory_group{};
        IWeightsManager *weights_manager{ nullptr };
        ITensorPack run_pack{};
        ITensorPack prep_pack{};
        MemoryRequirements aux_mem_req{};
        WorkspaceData<CLTensor> workspace_tensors{};
        bool is_prepared{ false };
    };
|
| D | CLGEMMLowpMatrixMultiplyCore.cpp |

    struct CLGEMMLowpMatrixMultiplyCore::Impl   // struct in arm_compute::CLGEMMLowpMatrixMultiplyCore
    {
        const ICLTensor *b{ nullptr };
        std::unique_ptr<OperatorType> op{ nullptr };
        MemoryGroup memory_group{};
        ITensorPack run_pack{};
        MemoryRequirements aux_mem_req{};
        WorkspaceData<CLTensor> workspace_tensors{};
        bool is_prepared{ false };
    };
|
| D | CLPixelWiseMultiplication.cpp |

    struct CLPixelWiseMultiplication::Impl   // struct in arm_compute::CLPixelWiseMultiplication
    {
        const ICLTensor *src_0{ nullptr };
        const ICLTensor *src_1{ nullptr };
        ICLTensor *dst{ nullptr };
        std::unique_ptr<opencl::ClMul> op{ nullptr };
    };

    struct CLComplexPixelWiseMultiplication::Impl   // struct in arm_compute::CLComplexPixelWiseMultiplication
    {
        ICLTensor *src_0{ nullptr };
        ICLTensor *src_1{ nullptr };
        ICLTensor *dst{ nullptr };
        std::unique_ptr<opencl::ClComplexMul> op{ nullptr };
    };
|
| /external/cronet/base/allocator/dispatcher/ |
| D | dispatcher.cc |

    struct Dispatcher::Impl {   // struct in base::allocator::dispatcher::Dispatcher
        void Initialize(const internal::DispatchData& dispatch_data) { /* … */ }
        void Reset() { /* … */ }
        static void ConnectToEmitters(const internal::DispatchData& dispatch_data) { /* … */ }
        static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) { /* … */ }

        internal::DispatchData dispatch_data_;

        // The index reports two declarations of the same member; the source
        // presumably selects one with preprocessor conditionals, since
        // ATOMIC_FLAG_INIT is unnecessary (and deprecated) from C++20 on.
        std::atomic_flag is_initialized_check_flag_ = ATOMIC_FLAG_INIT;
        std::atomic_flag is_initialized_check_flag_;
    };
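is_initialized_check_flag_ exists to catch double initialization: std::atomic_flag::test_and_set() atomically sets the flag and returns its previous value, so only the first caller sees false. A sketch of that guard, assuming a Chromium-style DCHECK (the real bodies are elided in this listing):

    void Initialize(const internal::DispatchData& dispatch_data) {
      // Only the first call may proceed; a second Initialize() is a bug.
      DCHECK(!is_initialized_check_flag_.test_and_set());
      dispatch_data_ = dispatch_data;
      ConnectToEmitters(dispatch_data_);
    }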
|
| /external/ComputeLibrary/src/runtime/CPP/ |
| D | CPPScheduler.cpp |

    struct CPPScheduler::Impl final   // struct in arm_compute::CPPScheduler
    {
        constexpr static unsigned int m_default_wake_fanout = 4;
        enum class Mode { /* … */ };
        enum class ModeToggle { /* … */ };

        explicit Impl(unsigned int thread_hint) { /* … */ }

        void set_num_threads(unsigned int num_threads, unsigned int thread_hint) { /* … */ }
        void set_num_threads_with_affinity(unsigned int num_threads, unsigned int thread_hint, BindFunc func) { /* … */ }
        void auto_switch_mode(unsigned int num_threads_to_use) { /* … */ }
        void set_linear_mode() { /* … */ }
        void set_fanout_mode(unsigned int wake_fanout, unsigned int num_threads_to_use) { /* … */ }
        [all …]
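Mode/ModeToggle select how sleeping workers are woken: linearly (one thread wakes all the others) or through a fanout tree in which every woken thread wakes up to m_default_wake_fanout = 4 more, keeping wake-up latency logarithmic on many-core machines. A hedged sketch of what auto_switch_mode plausibly does; the threshold value is an assumption:

    void auto_switch_mode(unsigned int num_threads_to_use)
    {
        // Assumed policy: small teams are cheapest to wake linearly; larger
        // teams benefit from the O(log n) fanout tree.
        if(num_threads_to_use > 8) // hypothetical cut-over point
        {
            set_fanout_mode(m_default_wake_fanout, num_threads_to_use);
        }
        else
        {
            set_linear_mode();
        }
    }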
|
| /external/skia/src/sksl/ |
| D | SkSLModuleLoader.cpp |

    struct ModuleLoader::Impl {   // struct in SkSL::ModuleLoader
        SkMutex fMutex;
        const BuiltinTypes fBuiltinTypes;
        ModifiersPool fCoreModifiers;

        std::unique_ptr<const Module> fRootModule;
        std::unique_ptr<const Module> fSharedModule;   // [Root] + Public intrinsics
        std::unique_ptr<const Module> fGPUModule;      // [Shared] + Non-public intrinsics
        std::unique_ptr<const Module> fVertexModule;   // [GPU] + Vertex stage decls
        std::unique_ptr<const Module> fFragmentModule; // [GPU] + Fragment stage decls
        std::unique_ptr<const Module> fComputeModule;  // [GPU] + Compute stage decls
        [all …]
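The unique_ptr-per-module layout plus SkMutex suggests lazy, thread-safe loading: each module is compiled on first use under the lock, after its parent in the chain ([Root] → [Shared] → [GPU] → per-stage). A speculative sketch of that accessor shape; fImpl and compileModule are hypothetical names, not SkSL API:

    const Module* ModuleLoader::loadGPUModule(SkSL::Compiler* compiler) {
        SkAutoMutexExclusive lock(fImpl.fMutex);  // fImpl: assumed handle to Impl
        if (!fImpl.fGPUModule) {
            // Build on first use, chaining off the already-loaded parent.
            fImpl.fGPUModule = this->compileModule(compiler, fImpl.fSharedModule.get());
        }
        return fImpl.fGPUModule.get();
    }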
|
| /external/armnn/src/armnnUtils/ |
| D | LeakChecking.cpp |

    struct ScopedLeakChecker::Impl   // struct in armnnUtils::ScopedLeakChecker
    {
        HeapLeakChecker m_LeakChecker;

        Impl(const std::string& name) { /* … */ }
    };

    struct ScopedDisableLeakChecking::Impl   // struct in armnnUtils::ScopedDisableLeakChecking
    {
        HeapLeakChecker::Disabler m_Disabler;
    };
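Both wrappers are thin RAII shells over gperftools: HeapLeakChecker snapshots the heap for a named scope and can verify nothing leaked since, while HeapLeakChecker::Disabler suppresses checking for allocations made while it is alive. A sketch of how the public class likely forwards to its Impl; the m_Impl member name is an assumption:

    ScopedLeakChecker::ScopedLeakChecker(const std::string& name)
        : m_Impl(std::make_unique<Impl>(name)) {}

    ScopedLeakChecker::~ScopedLeakChecker() = default; // Impl's dtor runs here

    bool ScopedLeakChecker::NoLeaks()
    {
        // gperftools compares the current heap against the snapshot taken
        // when m_LeakChecker was constructed.
        return m_Impl->m_LeakChecker.NoLeaks();
    }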
|