/external/ComputeLibrary/src/dynamic_fusion/sketch/gpu/operators/ |
D | GpuSoftmax.cpp |
    81   arguments_exp_sum.add_const_tensor(ACL_SRC_0, src);  in is_supported_op()
    82   arguments_exp_sum.add_const_tensor(ACL_DST_0, &sum);  in is_supported_op()
    83   arguments_exp_sum.add_const_tensor(ACL_DST_1, &logits);  in is_supported_op()
    85   arguments_norm.add_const_tensor(ACL_SRC_0, &logits);  in is_supported_op()
    86   arguments_norm.add_const_tensor(ACL_SRC_1, &sum);  in is_supported_op()
    87   arguments_norm.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in is_supported_op()
    120  tensors.add_const_tensor(ACL_SRC_0, src);  in validate_op()
    121  tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in validate_op()
    167  arguments_exp_sum.add_const_tensor(ACL_SRC_0, src);  in create_op()
    168  arguments_exp_sum.add_const_tensor(ACL_DST_0, sum);  in create_op()
    [all …]
|
D | GpuDepthwiseConv2d.cpp |
    123  arguments.add_const_tensor(ACL_SRC_0, src);  in is_supported_op_helper()
    124  arguments.add_const_tensor(ACL_SRC_1, wei);  in is_supported_op_helper()
    125  arguments.add_const_tensor(ACL_SRC_2, bia);  in is_supported_op_helper()
    126  arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);  in is_supported_op_helper()
    173  tensors.add_const_tensor(ACL_SRC_0, src);  in validate_op()
    174  tensors.add_const_tensor(ACL_SRC_1, wei);  in validate_op()
    175  tensors.add_const_tensor(ACL_SRC_2, bia);  in validate_op()
    176  tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in validate_op()
    240  arguments.add_const_tensor(ACL_SRC_0, src);  in create_op()
    241  arguments.add_const_tensor(ACL_SRC_1, wei);  in create_op()
    [all …]
|
D | GpuConv2d.cpp |
    162  arguments.add_const_tensor(ACL_SRC_0, src);  in is_supported_op_helper()
    163  arguments.add_const_tensor(ACL_SRC_1, wei);  in is_supported_op_helper()
    164  arguments.add_const_tensor(ACL_SRC_2, bia);  in is_supported_op_helper()
    165  arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);  in is_supported_op_helper()
    215  tensors.add_const_tensor(ACL_SRC_0, src);  in validate_op()
    216  tensors.add_const_tensor(ACL_SRC_1, wei);  in validate_op()
    217  tensors.add_const_tensor(ACL_SRC_2, bia);  in validate_op()
    218  tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in validate_op()
    284  arguments.add_const_tensor(ACL_SRC_0, src);  in create_op()
    285  arguments.add_const_tensor(ACL_SRC_1, wei);  in create_op()
    [all …]
|
D | GpuClamp.cpp |
    76   arguments.add_const_tensor(ACL_SRC, src);  in is_supported_op_helper()
    77   arguments.add_const_tensor(ACL_DST, dst_info_to_validate_ptr);  in is_supported_op_helper()
    114  tensors.add_const_tensor(ACL_SRC, src);  in validate_op()
    115  tensors.add_const_tensor(ACL_DST, &dst_info_to_validate);  in validate_op()
    156  arguments.add_const_tensor(ACL_SRC, src);  in create_op()
    157  arguments.add_const_tensor(ACL_DST, dst);  in create_op()
    170  tensors.add_const_tensor(ACL_SRC, src);  in create_op()
    171  tensors.add_const_tensor(ACL_DST, dst);  in create_op()
|
D | GpuCast.cpp |
    82   arguments.add_const_tensor(ACL_SRC_0, src);  in is_supported_op_helper()
    83   arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);  in is_supported_op_helper()
    120  tensors.add_const_tensor(ACL_SRC_0, src);  in validate_op()
    121  tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in validate_op()
    158  arguments.add_const_tensor(ACL_SRC_0, src);  in create_op()
    159  arguments.add_const_tensor(ACL_DST_0, dst);  in create_op()
    173  tensors.add_const_tensor(ACL_SRC_0, src);  in create_op()
    174  tensors.add_const_tensor(ACL_DST_0, dst);  in create_op()
|
D | GpuResize.cpp |
    94   arguments.add_const_tensor(ACL_SRC_0, src);  in is_supported_op_helper()
    95   arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);  in is_supported_op_helper()
    133  tensors.add_const_tensor(ACL_SRC_0, src);  in validate_op()
    134  tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in validate_op()
    171  arguments.add_const_tensor(ACL_SRC_0, src);  in create_op()
    172  arguments.add_const_tensor(ACL_DST_0, dst);  in create_op()
    186  tensors.add_const_tensor(ACL_SRC_0, src);  in create_op()
    187  tensors.add_const_tensor(ACL_DST_0, dst);  in create_op()
|
D | GpuReshape.cpp |
    65   arguments.add_const_tensor(ACL_SRC_0, src);  in is_supported_op_helper()
    66   arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);  in is_supported_op_helper()
    104  tensors.add_const_tensor(ACL_SRC_0, src);  in validate_op()
    105  tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in validate_op()
    141  arguments.add_const_tensor(ACL_SRC_0, src);  in create_op()
    142  arguments.add_const_tensor(ACL_DST_0, dst);  in create_op()
    155  tensors.add_const_tensor(ACL_SRC_0, src);  in create_op()
|
D | GpuOutput.cpp |
    80   tensors.add_const_tensor(ACL_SRC_0, src);  in validate_op()
    81   tensors.add_const_tensor(ACL_DST_0, &dst_to_validate);  in validate_op()
    117  arguments.add_const_tensor(ACL_SRC_0, src);  in create_op()
    118  arguments.add_const_tensor(ACL_DST_0, dst);  in create_op()
    132  tensors.add_const_tensor(ACL_SRC_0, src);  in create_op()
    133  tensors.add_const_tensor(ACL_DST_0, dst);  in create_op()
|
D | GpuPool2d.cpp |
    82   tensors.add_const_tensor(ACL_SRC_0, src);  in validate_op()
    83   tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in validate_op()
    125  arguments.add_const_tensor(ACL_SRC_0, src);  in is_supported_op()
    126  arguments.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in is_supported_op()
    169  arguments.add_const_tensor(ACL_SRC_0, src);  in create_op()
    170  arguments.add_const_tensor(ACL_DST_0, dst);  in create_op()
    184  tensors.add_const_tensor(ACL_SRC_0, src);  in create_op()
|
/external/ComputeLibrary/src/dynamic_fusion/sketch/gpu/operators/internal/ |
D | GpuElementwiseBinaryCommon.cpp |
    75   arguments.add_const_tensor(ACL_SRC_0, lhs);  in is_supported_op_helper()
    76   arguments.add_const_tensor(ACL_SRC_1, rhs);  in is_supported_op_helper()
    77   arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);  in is_supported_op_helper()
    129  tensors.add_const_tensor(ACL_SRC_0, lhs);  in validate_op()
    130  tensors.add_const_tensor(ACL_SRC_1, rhs);  in validate_op()
    131  tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);  in validate_op()
    170  arguments.add_const_tensor(ACL_SRC_0, lhs);  in create_op()
    171  arguments.add_const_tensor(ACL_SRC_1, rhs);  in create_op()
    172  arguments.add_const_tensor(ACL_DST_0, dst);  in create_op()
    186  tensors.add_const_tensor(ACL_SRC_0, lhs);  in create_op()
    [all …]
|
/external/ComputeLibrary/src/gpu/cl/operators/ |
D | ClSoftmax.cpp |
    148  pack.add_const_tensor(TensorType::ACL_SRC, src);  in run()
    157  sum_pack.add_const_tensor(TensorType::ACL_SRC, permuted_src.get());  in run()
    162  sum_pack.add_const_tensor(TensorType::ACL_SRC, src);  in run()
    169  norm_pack.add_const_tensor(TensorType::ACL_SRC, tmp.get());  in run()
    178  pack.add_const_tensor(TensorType::ACL_SRC, permuted_dst.get());  in run()
|
D | ClGemmConv2d.cpp |
    572  pack_mm.add_const_tensor(TensorType::ACL_SRC_0, gemm_input_to_use);  in run()
    573  pack_mm.add_const_tensor(TensorType::ACL_SRC_1, weights_reshaped.get());  in run()
    576  pack_mm.add_const_tensor(TensorType::ACL_SRC_2, biases);  in run()
    632  pack.add_const_tensor(TensorType::ACL_BIAS, biases);  in prepare()
    635  tensors.add_const_tensor(TensorType::ACL_SRC_1, weights_reshaped.get());  in prepare()
|
D | ClFullyConnected.cpp |
    436  gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);  in run()
    439  gemm_pack.add_const_tensor(ACL_SRC_1, weights.get());  in run()
    490  tensors.add_const_tensor(ACL_SRC_1, cur_weights);  in prepare()
|
D | ClWinogradConv2d.cpp |
    264  pack_mm.add_const_tensor(TensorType::ACL_SRC_0, input0.get());  in run()
    266  …is_gemm_reshaped ? pack_mm.remove_tensor(TensorType::ACL_SRC_1) : pack_mm.add_const_tensor(TensorT…  in run()
|
/external/ComputeLibrary/src/runtime/CL/functions/ |
D | CLGEMM.cpp |
    82   _impl->run_pack.add_const_tensor(ACL_SRC_0, a);  in configure()
    125  _impl->run_pack.add_const_tensor(ACL_SRC_1, _impl->b);  in prepare()
|
D | CLGEMMConvolutionLayer.cpp |
    105  …_impl->run_pack.add_const_tensor(experimental::get_post_op_arg_type(post_op_tensor_index++), *tens…  in configure()
    147  _impl->run_pack.add_const_tensor(ACL_SRC_1, _impl->weights);  in prepare()
|
D | CLGEMMLowpMatrixMultiplyCore.cpp | 85 _impl->run_pack.add_const_tensor(ACL_SRC_0, a); in configure()
|
/external/ComputeLibrary/arm_compute/core/ |
D | ITensorPack.h | 83 void add_const_tensor(int id, const ITensor *tensor);
|
/external/ComputeLibrary/src/core/ |
D | ITensorPack.cpp | 49 void ITensorPack::add_const_tensor(int id, const ITensor *tensor) in add_const_tensor() function in arm_compute::ITensorPack
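Every match above follows the same pattern: read-only inputs are bound to ACL_SRC_* slots with add_const_tensor() and the writable result to an ACL_DST_* slot before the pack is handed to an operator's run() or prepare(). The sketch below illustrates that pattern against the ITensorPack interface declared above; the tensor pointers and the commented-out operator call are hypothetical placeholders, and add_tensor() is assumed here as the non-const counterpart of add_const_tensor().

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/experimental/Types.h"

    using namespace arm_compute;

    // Sketch only: src/weights/bias/dst stand in for tensors owned by the caller.
    void run_with_pack(const ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst)
    {
        ITensorPack pack;
        pack.add_const_tensor(TensorType::ACL_SRC_0, src);     // read-only inputs go in as const
        pack.add_const_tensor(TensorType::ACL_SRC_1, weights);
        pack.add_const_tensor(TensorType::ACL_SRC_2, bias);    // may be nullptr, cf. CpuGemm.cpp:296
        pack.add_tensor(TensorType::ACL_DST_0, dst);           // destination stays mutable (assumed add_tensor())
        // op->run(pack);                                      // the pack is then passed to the operator
    }

Note that the dynamic fusion files in the first group appear to bind validation-time descriptors (e.g. &dst_info_to_validate) to the same slot ids rather than runtime tensors, since those calls happen in is_supported_op()/validate_op()/create_op().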
|
/external/ComputeLibrary/src/cpu/operators/ |
D | CpuWinogradConv2d.cpp |
    352  gemm_pack.add_const_tensor(ACL_SRC, winograd_input_transformed.get());  in run()
    353  gemm_pack.add_const_tensor(ACL_SRC_1, winograd_weights_transformed.get());  in run()
    354  gemm_pack.add_const_tensor(ACL_BIAS, nullptr);  in run()
    417  gemm_pack.add_const_tensor(ACL_SRC_1, winograd_transformed_weights.get());  in prepare()
|
D | CpuGemmLowpMatrixMultiplyCore.cpp |
    545  asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_0, a_to_use);  in run()
    546  asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_1, b);  in run()
    547  asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_2, c);  in run()
    552  asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_0, a_to_use);  in run()
    553  asm_glue_tensors.add_const_tensor(TensorType::ACL_SRC_1, b);  in run()
|
D | CpuGemm.cpp |
    296  asm_pack.add_const_tensor(ACL_SRC_2, (_reshape_b_only_on_first_run) ? c : nullptr);  in run()
    325  mm_pack.add_const_tensor(ACL_SRC_0, interleaved_a.get());  in run()
    326  mm_pack.add_const_tensor(ACL_SRC_1, transposed_b.get());  in run()
|
D | CpuFullyConnected.cpp |
    475  gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);  in run()
    478  gemm_pack.add_const_tensor(ACL_SRC_1, transformed_wei.get());  in run()
    526  gemm_pack.add_const_tensor(ACL_SRC_1, cur_weights);  in prepare()
|
/external/ComputeLibrary/tests/validation/fixtures/ |
D | WeightsReshapeFixture.h | 110 pack.add_const_tensor(arm_compute::TensorType::ACL_BIAS, &bias); in compute_target()
|
/external/ComputeLibrary/src/runtime/NEON/functions/ |
D | NEGEMMConv2d.cpp | 101 _impl->run_pack.add_const_tensor(ACL_SRC_1, _impl->weights); in prepare()
|