/external/ComputeLibrary/src/gpu/cl/operators/ |
D | ClSoftmax.cpp |
    135  auto src = tensors.get_const_tensor(TensorType::ACL_SRC);  in run()
    148  pack.add_const_tensor(TensorType::ACL_SRC, src);  in run()
    157  sum_pack.add_const_tensor(TensorType::ACL_SRC, permuted_src.get());  in run()
    162  sum_pack.add_const_tensor(TensorType::ACL_SRC, src);  in run()
    169  norm_pack.add_const_tensor(TensorType::ACL_SRC, tmp.get());  in run()
    178  pack.add_const_tensor(TensorType::ACL_SRC, permuted_dst.get());  in run()
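These ClSoftmax::run() matches all follow the same repacking pattern: the operator pulls its input back out of the ITensorPack it was called with, then registers it (or an intermediate tensor) as ACL_SRC in a fresh pack for each internal kernel. A minimal sketch of that pattern, reusing the sum/norm pack names from the matches; the header paths, the tmp/dst parameters and the ACL_DST bindings are assumptions, not copied from the file:

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/experimental/Types.h"

    using namespace arm_compute;

    // Sketch of the repacking seen in ClSoftmax::run(): the caller's pack is
    // unpacked, and per-kernel packs are rebuilt around ACL_SRC / ACL_DST.
    void run_sketch(ITensorPack &tensors, ITensor *tmp, ITensor *dst)
    {
        // Operator-level input handed in by the caller (cf. line 135).
        auto src = tensors.get_const_tensor(TensorType::ACL_SRC);

        // First internal kernel reads the original input (cf. lines 148/162).
        ITensorPack sum_pack;
        sum_pack.add_const_tensor(TensorType::ACL_SRC, src);
        sum_pack.add_tensor(TensorType::ACL_DST, tmp);

        // A later kernel reads the intermediate result instead (cf. line 169).
        ITensorPack norm_pack;
        norm_pack.add_const_tensor(TensorType::ACL_SRC, tmp);
        norm_pack.add_tensor(TensorType::ACL_DST, dst);

        // Each pack would then be passed to its kernel's launch call (omitted here).
    }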
|
D | ClGemmLowpOutputStage.cpp |
    93  const ITensor *src = tensors.get_const_tensor(ACL_SRC);  in run()
    97  ITensorPack pack{ { ACL_SRC, src }, { ACL_BIAS, bias }, { ACL_DST, dst } };  in run()
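Line 97 shows the other common construction style: the whole pack built in one aggregate initializer, keyed by role (ACL_SRC, ACL_BIAS, ACL_DST). A hedged sketch of how such a pack is then launched; the kernel parameter and the CLScheduler::get().enqueue_op() call are assumptions about the surrounding code, not taken from the matches above:

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/experimental/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"

    using namespace arm_compute;

    // Producer-side sketch around ClGemmLowpOutputStage.cpp:97: bundle the
    // tensors by role and hand the pack to the kernel. The enqueue_op()
    // launch path is assumed here, not shown in the matches.
    void run_output_stage_sketch(ICLKernel &kernel, const ITensor *src, const ITensor *bias, ITensor *dst)
    {
        ITensorPack pack{ { ACL_SRC, src }, { ACL_BIAS, bias }, { ACL_DST, dst } };
        CLScheduler::get().enqueue_op(kernel, pack, /* flush = */ true);
    }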
|
D | ClGemmLowpMatrixMultiplyCore.cpp |
    686  { TensorType::ACL_SRC, _convert_to_qasymm8 ? rhs_qasymm8.get() : b },  in run()
    698  { TensorType::ACL_SRC, _convert_to_qasymm8 ? rhs_qasymm8.get() : b },  in run()
    709  { TensorType::ACL_SRC, matrix_a },  in run()
    730  { TensorType::ACL_SRC, matrix_a },  in run()
    768  { TensorType::ACL_SRC, res32.get() },  in run()
    805  ITensorPack convert_to_qs8_pack = { { ACL_SRC, b }, { ACL_DST, rhs_qasymm8.get() } };  in prepare()
    815  { TensorType::ACL_SRC, _convert_to_qasymm8 ? rhs_qasymm8.get() : b },  in prepare()
    827  { TensorType::ACL_SRC, _convert_to_qasymm8 ? rhs_qasymm8.get() : b },  in prepare()
|
/external/ComputeLibrary/src/dynamic_fusion/sketch/gpu/operators/ |
D | GpuClamp.cpp |
    76  arguments.add_const_tensor(ACL_SRC, src);  in is_supported_op_helper()
    114  tensors.add_const_tensor(ACL_SRC, src);  in validate_op()
    156  arguments.add_const_tensor(ACL_SRC, src);  in create_op()
    170  tensors.add_const_tensor(ACL_SRC, src);  in create_op()
|
/external/ComputeLibrary/src/runtime/CL/functions/ |
D | CLElementwiseUnaryLayer.cpp |
    70  pack.add_tensor(TensorType::ACL_SRC, _impl->src);  in run()
    112  pack.add_tensor(TensorType::ACL_SRC, _impl->src);  in run()
    153  pack.add_tensor(TensorType::ACL_SRC, _impl->src);  in run()
    194  pack.add_tensor(TensorType::ACL_SRC, _impl->src);  in run()
    235  pack.add_tensor(TensorType::ACL_SRC, _impl->src);  in run()
    276  pack.add_tensor(TensorType::ACL_SRC, _impl->src);  in run()
    317  pack.add_tensor(TensorType::ACL_SRC, _impl->src);  in run()
|
D | CLQuantizationLayer.cpp | 68 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
|
D | CLLogicalNot.cpp | 69 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
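The runtime functions in this directory are thin facades over the corresponding operators: tensor pointers are captured in a pimpl at configure() time and a small pack is rebuilt on every run(). A sketch of that wrapper shape under stated assumptions; the IOperatorLike interface and the Impl layout are invented for illustration, and only the add_tensor(TensorType::ACL_SRC, ...) line mirrors the matches above:

    #include <memory>
    #include <utility>
    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/experimental/Types.h"

    namespace sketch
    {
    using namespace arm_compute;

    // Hypothetical operator interface with a run(ITensorPack &) entry point,
    // standing in for the real ClXxx/CpuXxx operators.
    struct IOperatorLike
    {
        virtual ~IOperatorLike() = default;
        virtual void run(ITensorPack &tensors) = 0;
    };

    // Facade in the style of CLQuantizationLayer / CLLogicalNot: tensors are
    // remembered at configure() time, the pack is rebuilt per run().
    class UnaryFunctionSketch
    {
    public:
        void configure(ITensor *src, ITensor *dst, std::unique_ptr<IOperatorLike> op)
        {
            _impl.src = src;
            _impl.dst = dst;
            _impl.op  = std::move(op);
        }
        void run()
        {
            ITensorPack pack;
            pack.add_tensor(TensorType::ACL_SRC, _impl.src); // as in the run() lines above
            pack.add_tensor(TensorType::ACL_DST, _impl.dst);
            _impl.op->run(pack);
        }

    private:
        struct Impl
        {
            ITensor                       *src{ nullptr };
            ITensor                       *dst{ nullptr };
            std::unique_ptr<IOperatorLike> op{ nullptr };
        };
        Impl _impl{};
    };
    } // namespace sketch

Because the pack holds only raw, non-owning pointers, the same configure()-once / run()-many wrapper can keep sharing the caller's tensors without copying them.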
|
/external/armnn/build-tool/scripts/ |
D | build-armnn.sh |
    19  cd "$ACL_SRC"
    105  -DARMCOMPUTE_ROOT="$ACL_SRC" \
    177  rm -rf "$ACL_SRC"
    185  cd "$ACL_SRC"
    412  if [ -d "$ACL_SRC" ] && check_if_repository "$ACL_SRC"; then
    413  …echo -e "\n***** ACL source repository already located at $ACL_SRC. Skipping cloning of ACL. *****"
|
/external/ComputeLibrary/src/cpu/operators/ |
D | CpuSoftmax.cpp |
    162  auto src = tensors.get_const_tensor(TensorType::ACL_SRC);  in run()
    176  …ITensorPack permute_in_pack = { { TensorType::ACL_SRC, src }, { TensorType::ACL_DST, input_permute…  in run()
    179  … max_pack = { { TensorType::ACL_SRC, input_permuted.get() }, { TensorType::ACL_DST, max.get() } };  in run()
    191  max_pack = { { TensorType::ACL_SRC, src }, { TensorType::ACL_DST, max.get() } };  in run()
    208  permute_out_pack.add_tensor(TensorType::ACL_SRC, output_permuted.get());  in run()
|
D | CpuGemm.cpp |
    300  ITensorPack pack{ { ACL_SRC, d }, { ACL_DST, d } };  in run()
    314  ITensorPack interleave_pack{ { ACL_SRC, a }, { ACL_DST, interleaved_a.get() } };  in run()
    320  ITensorPack transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b.get() } };  in run()
    342  ITensorPack c_add_pack{ { ACL_SRC, c }, { ACL_DST, d } };  in run()
    349  ITensorPack pack{ { ACL_SRC, d }, { ACL_DST, d } };  in run()
    369  ITensorPack transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b.get() } };  in prepare()
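The prepare() hit at line 369 is the usual one-shot weight preparation: the constant B matrix is reshaped through a throwaway pack outside the steady-state run() path. A minimal sketch of that idiom; the is_prepared flag and the templated transpose operator are assumptions standing in for the real members:

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/experimental/Types.h"

    using namespace arm_compute;

    // One-shot weight preparation in the spirit of CpuGemm::prepare()
    // (line 369): reshape the constant B matrix into 'transposed_b' once,
    // then let run() consume the reshaped copy from there on.
    template <typename TransposeOp>
    void prepare_sketch(bool &is_prepared, TransposeOp &transpose_op,
                        const ITensor *b, ITensor *transposed_b)
    {
        if(is_prepared)
        {
            return; // weights were already reshaped on an earlier call
        }
        ITensorPack transpose_pack{ { ACL_SRC, b }, { ACL_DST, transposed_b } };
        transpose_op.run(transpose_pack);
        is_prepared = true;
    }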
|
D | CpuDepthwiseConv2d.cpp |
    163  pack.add_tensor(TensorType::ACL_SRC, src);  in run()
    205  pack.add_tensor(TensorType::ACL_SRC, dst_perm);  in run()
    214  pack.add_tensor(TensorType::ACL_SRC, dst);  in run()
    253  pack.add_tensor(TensorType::ACL_SRC, weights);  in prepare()
    386  pack.add_tensor(TensorType::ACL_SRC, src);  in run()
    411  pack.add_tensor(TensorType::ACL_SRC, dst_perm);  in run()
    419  pack.add_tensor(TensorType::ACL_SRC, dst);  in run()
    435  pack.add_tensor(TensorType::ACL_SRC, weights);  in prepare()
|
D | CpuGemmLowpMatrixMultiplyCore.cpp |
    530  { TensorType::ACL_SRC, a },  in run()
    567  { TensorType::ACL_SRC, a_to_use },  in run()
    576  { TensorType::ACL_SRC, b },  in run()
    606  { TensorType::ACL_SRC, a_to_use },  in run()
    617  { TensorType::ACL_SRC, b },  in run()
    652  { TensorType::ACL_SRC, signed_output.get() },  in run()
    663  { TensorType::ACL_SRC, dst },  in run()
    688  { TensorType::ACL_SRC, original_b },  in prepare()
    701  { TensorType::ACL_SRC, original_b },  in prepare()
|
D | CpuWinogradConv2d.cpp |
    337  ITensorPack pack{ { ACL_SRC, src }, { ACL_DST, input_nhwc.get() } };  in run()
    345  …ITensorPack transform_input_pack{ { ACL_SRC, is_nchw ? input_nhwc.get() : src }, { ACL_DST, winogr…  in run()
    352  gemm_pack.add_const_tensor(ACL_SRC, winograd_input_transformed.get());  in run()
    364  ITensorPack pack{ { ACL_SRC, output_nhwc.get() }, { ACL_DST, output } };  in run()
    369  ITensorPack pack{ { ACL_SRC, output }, { ACL_DST, output } };  in run()
    382  … ITensorPack permute_tensors{ { ACL_SRC, weights }, { ACL_DST, permuted_weights.get() } };  in prepare()
|
D | CpuElementwiseUnary.cpp | 56 auto src_info = tensors.get_const_tensor(TensorType::ACL_SRC)->info(); in run()
|
/external/ComputeLibrary/src/dynamic_fusion/sketch/ |
D | ArgumentPack.h |
    174  …for(int id = static_cast<int>(TensorType::ACL_SRC); id <= static_cast<int>(TensorType::ACL_SRC_END…  in get_src_tensors()
    191  …for(int id = static_cast<int>(TensorType::ACL_SRC); id <= static_cast<int>(TensorType::ACL_SRC_END…  in get_const_src_tensors()
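ArgumentPack.h relies on the TensorType ids forming a contiguous range, so all sources can be gathered by sweeping from ACL_SRC up to ACL_SRC_END. A sketch of that sweep over a plain std::map, which is an assumption for illustration; the real ArgumentPack keeps its own element storage:

    #include <map>
    #include <vector>
    #include "arm_compute/core/experimental/Types.h"

    using namespace arm_compute;

    // Collect every tensor registered under a source slot, in the spirit of
    // ArgumentPack<T>::get_src_tensors() at line 174: the source ids form a
    // contiguous range, so a simple id sweep finds them all.
    template <typename T>
    std::vector<T *> collect_src_tensors(std::map<int, T *> &args)
    {
        std::vector<T *> srcs;
        for(int id = static_cast<int>(TensorType::ACL_SRC); id <= static_cast<int>(TensorType::ACL_SRC_END); ++id)
        {
            auto it = args.find(id);
            if(it != args.end() && it->second != nullptr)
            {
                srcs.push_back(it->second);
            }
        }
        return srcs;
    }

Because the numbered source slots sit inside that ACL_SRC..ACL_SRC_END range, the same sweep covers multi-input operators without any special-casing.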
|
/external/tensorflow/third_party/compute_library/ |
D | activation_func_correct_args.patch | 11 + ITensorPack pack{ { ACL_SRC, io }, { ACL_DST, io } };
|
/external/ComputeLibrary/src/runtime/NEON/functions/ |
D | NEQuantizationLayer.cpp | 62 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
|
D | NEDequantizationLayer.cpp | 62 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
|
D | NEFloor.cpp | 65 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
|
D | NETranspose.cpp | 67 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
|
D | NEPermute.cpp | 66 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
|
D | NECopy.cpp | 69 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
|
D | NEReshapeLayer.cpp | 69 pack.add_tensor(TensorType::ACL_SRC, _impl->src); in run()
|
D | NECast.cpp | 65 ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } }; in run()
|
/external/ComputeLibrary/src/dynamic_fusion/sketch/gpu/components/cl/ |
D | ClComponentActivation.cpp | 42 const ITensorInfo *const src = tensors.get_const_tensor(TensorType::ACL_SRC); in validate()
|