Home
last modified time | relevance | path

Searched defs:bn_add (Results 1 – 8 of 8) sorted by relevance

/external/ComputeLibrary/src/cpu/kernels/
DCpuAddMulAddKernel.cpp71 const ITensorInfo *bn_mul, const ITensorInfo *bn_add, in validate_arguments()
128 const ITensorInfo *bn_mul, const ITensorInfo *bn_add, in configure()
162 const ITensorInfo *bn_mul, const ITensorInfo *bn_add, in validate()
186 const ITensor *bn_add = tensors.get_const_tensor(TensorType::ACL_SRC_3); in run_op() local
/external/ComputeLibrary/src/cpu/operators/
DCpuAddMulAdd.cpp38 const ITensorInfo *bn_mul, const ITensorInfo *bn_add, in configure()
67 const ITensorInfo *bn_mul, const ITensorInfo *bn_add, in validate()
95 const ITensor *bn_add = tensors.get_const_tensor(TensorType::ACL_SRC_3); in run() local
/external/ComputeLibrary/src/runtime/NEON/functions/
DNEAddMulAdd.cpp49 void NEAddMulAdd::configure(ITensor *input1, ITensor *input2, ITensor *bn_mul, ITensor *bn_add, ITe… in configure()
72 …const ITensorInfo *bn_add, const ITensorInfo *add_output, const ITensorInfo *final_output, in validate()
/external/ComputeLibrary/src/cpu/kernels/addmuladd/generic/neon/
Dfp16.cpp43 const float16_t *bn_add, in a64_add_bn_clamp_direct_fp16_2x32()
868 …16_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add, in add_mul_add_fp16_neon()
Dfp32.cpp43 const float *bn_add, in a64_add_bn_clamp_direct_fp32_2x16()
643 …32_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add, in add_mul_add_fp32_neon()
Dqasymm8.cpp44 const float *bn_add, in a64_add_bn_clamp_direct_u8_fp32_2x16()
722 …u8_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add, in add_mul_add_u8_neon()
Dqasymm8_signed.cpp44 const float *bn_add, in a64_add_bn_clamp_direct_s8_fp32_2x16()
722 …s8_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add, in add_mul_add_s8_neon()
/external/ComputeLibrary/tests/validation/fixtures/
DAddMulAddFixture.h84 TensorType bn_add = create_tensor<TensorType>(b_shape, data_type, 1, _bn_add_qinfo); in compute_target() local