
Searched refs: input_weights (Results 1 – 19 of 19), sorted by relevance

/external/ComputeLibrary/src/core/CL/kernels/
CLFuseBatchNormalizationKernel.cpp
42 Status validate_arguments(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITens… in validate_arguments() argument
48 ARM_COMPUTE_ERROR_ON_NULLPTR(input_weights, bn_mean, bn_var); in validate_arguments()
49 ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input_weights); in validate_arguments()
50 …ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_weights, 1, DataType::F16, DataType::F3… in validate_arguments()
52 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_mean, bn_var); in validate_arguments()
58 ARM_COMPUTE_RETURN_ERROR_ON(input_weights->dimension(3) != bn_mean->dimension(0)); in validate_arguments()
62 …const size_t channel_idx = get_data_layout_dimension_index(input_weights->data_layout(), DataLayou… in validate_arguments()
63 ARM_COMPUTE_RETURN_ERROR_ON(input_weights->dimension(channel_idx) != bn_mean->dimension(0)); in validate_arguments()
70 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, input_bias); in validate_arguments()
76 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_beta); in validate_arguments()
[all …]
CLFuseBatchNormalizationKernel.h
65 …void configure(const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var, …
84 …void configure(const CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLT…
104 …static Status validate(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensor…
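These CL kernels fold a batch-normalization stage into the weights of the preceding convolution or depthwise convolution, so the normalization costs nothing at inference time. Below is a minimal sketch of the standard per-channel folding they implement, written as plain C++ over flat arrays rather than the kernel's actual OpenCL code; the layout (one contiguous weight slice per output channel) and the default epsilon are assumptions for illustration.

    // Sketch of standard batch-norm folding into convolution weights (illustrative, not ACL code).
    #include <cmath>
    #include <cstddef>

    void fuse_batch_norm(const float *w, const float *b,        // original weights / bias (b may be null)
                         const float *mean, const float *var,   // batch-norm statistics, one per channel
                         const float *gamma, const float *beta, // batch-norm scale / offset (may be null)
                         float *fused_w, float *fused_b,
                         std::size_t channels, std::size_t elems_per_channel,
                         float epsilon = 0.001f)                // assumed default
    {
        for (std::size_t c = 0; c < channels; ++c)
        {
            const float g     = gamma ? gamma[c] : 1.0f;
            const float scale = g / std::sqrt(var[c] + epsilon);
            for (std::size_t i = 0; i < elems_per_channel; ++i)
                fused_w[c * elems_per_channel + i] = w[c * elems_per_channel + i] * scale;
            const float bias_in = b ? b[c] : 0.0f;
            fused_b[c] = (bias_in - mean[c]) * scale + (beta ? beta[c] : 0.0f);
        }
    }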
/external/ComputeLibrary/src/core/NEON/kernels/
NEFuseBatchNormalizationKernel.cpp
153 Status validate_arguments(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITens… in validate_arguments() argument
159 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_weights, bn_mean, bn_var); in validate_arguments()
160 ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input_weights); in validate_arguments()
161 …ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_weights, 1, DataType::F16, DataType::F3… in validate_arguments()
163 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_mean, bn_var); in validate_arguments()
169 ARM_COMPUTE_RETURN_ERROR_ON(input_weights->dimension(3) != bn_mean->dimension(0)); in validate_arguments()
173 …const size_t channel_idx = get_data_layout_dimension_index(input_weights->data_layout(), DataLayou… in validate_arguments()
174 ARM_COMPUTE_RETURN_ERROR_ON(input_weights->dimension(channel_idx) != bn_mean->dimension(0)); in validate_arguments()
180 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, input_bias); in validate_arguments()
186 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_beta); in validate_arguments()
[all …]
NEFuseBatchNormalizationKernel.h
69 …void configure(const ITensor *input_weights, const ITensor *bn_mean, const ITensor *bn_var, ITenso…
89 …static Status validate(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensor…
110 …using FuseBatchNormFunction = void(const ITensor *input_weights, const ITensor *input_bias, ITenso…
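Both validators carry the same two branches: for convolution weights the check is against dimension(3), while for depthwise convolution it is against the channel index derived from the data layout. In other words, the batch-norm vectors must have one entry per output feature map in the first case and one per channel in the second. A stand-alone restatement of that constraint, with placeholder types that are not the library's API and an assumed NCHW-style shape order:

    // Illustration of the shape constraint the validate_arguments() functions enforce.
    #include <array>
    #include <cassert>

    enum class FuseType { Convolution, DepthwiseConvolution };

    // weights_shape: {W, H, IFM, OFM} for convolution; for depthwise the channel
    // dimension (index 2 in this sketch) must match the batch-norm vectors.
    void check_fusable(const std::array<int, 4> &weights_shape, int bn_vector_len, FuseType type)
    {
        const int channels = (type == FuseType::Convolution) ? weights_shape[3]
                                                             : weights_shape[2];
        assert(channels == bn_vector_len && "mean/var/beta/gamma need one value per fused channel");
    }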
/external/tensorflow/tensorflow/lite/kernels/
basic_rnn.cc
65 const TfLiteTensor* input_weights; in Prepare() local
67 context, GetInputSafe(context, node, kWeightsTensor, &input_weights)); in Prepare()
81 const int num_units = input_weights->dims->data[0]; in Prepare()
83 input_weights->dims->data[1]); in Prepare()
84 TF_LITE_ENSURE_EQ(context, input_weights->dims->data[0], bias->dims->data[0]); in Prepare()
90 TF_LITE_ENSURE_TYPES_EQ(context, input_weights->type, in Prepare()
107 const bool is_hybrid = IsHybridOp(input, input_weights); in Prepare()
120 input_quantized->type = input_weights->type; in Prepare()
131 hidden_state_quantized->type = input_weights->type; in Prepare()
202 const TfLiteTensor* input_weights, in EvalFloat() argument
[all …]
unidirectional_sequence_rnn.cc
66 const TfLiteTensor* input_weights; in Prepare() local
68 context, GetInputSafe(context, node, kWeightsTensor, &input_weights)); in Prepare()
87 const int num_units = input_weights->dims->data[0]; in Prepare()
89 input_weights->dims->data[1]); in Prepare()
90 TF_LITE_ENSURE_EQ(context, input_weights->dims->data[0], bias->dims->data[0]); in Prepare()
96 TF_LITE_ENSURE_TYPES_EQ(context, input_weights->type, in Prepare()
114 const bool is_hybrid = IsHybridOp(input, input_weights); in Prepare()
127 input_quantized->type = input_weights->type; in Prepare()
138 hidden_state_quantized->type = input_weights->type; in Prepare()
208 const TfLiteTensor* input_weights, in EvalFloat() argument
[all …]
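In both RNN kernels, Prepare() derives num_units from input_weights->dims->data[0], checks that the weights' second dimension matches the input feature size and that the bias has one entry per unit, and then picks the float or hybrid path depending on whether the weights are quantized while the input is float. The sketch below restates that shape contract and the float step it protects; the names and the ReLU activation are illustrative assumptions, not the kernels' code.

    // Shape contract and a single float RNN step (illustrative).
    #include <algorithm>
    #include <cstddef>

    struct RnnShapes { std::size_t batch, input_size, num_units; };

    // input:             [batch, input_size]
    // input_weights:     [num_units, input_size]   <- dims->data[0] gives num_units
    // recurrent_weights: [num_units, num_units]
    // bias:              [num_units]
    // hidden state / output: [batch, num_units]
    void rnn_step(const RnnShapes &s,
                  const float *input, const float *input_weights,
                  const float *recurrent_weights, const float *bias,
                  float *hidden_state /* in/out */, float *output)
    {
        for (std::size_t b = 0; b < s.batch; ++b)
            for (std::size_t u = 0; u < s.num_units; ++u)
            {
                float acc = bias[u];
                for (std::size_t i = 0; i < s.input_size; ++i)
                    acc += input_weights[u * s.input_size + i] * input[b * s.input_size + i];
                for (std::size_t r = 0; r < s.num_units; ++r)
                    acc += recurrent_weights[u * s.num_units + r] * hidden_state[b * s.num_units + r];
                output[b * s.num_units + u] = std::max(acc, 0.0f); // ReLU assumed for the sketch
            }
        std::copy(output, output + s.batch * s.num_units, hidden_state);
    }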
/external/ComputeLibrary/src/runtime/CL/functions/
CLFuseBatchNormalization.cpp
44 void CLFuseBatchNormalization::configure(const ICLTensor *input_weights, const ICLTensor *bn_mean, … in configure() argument
49 …configure(CLKernelLibrary::get().get_compile_context(), input_weights, bn_mean, bn_var, fused_weig… in configure()
52 …::configure(const CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLTens… in configure() argument
57 …ARM_COMPUTE_LOG_PARAMS(input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, bn_b… in configure()
58 …_fuse_bn_kernel->configure(compile_context, input_weights, bn_mean, bn_var, fused_weights, fused_b… in configure()
61 Status CLFuseBatchNormalization::validate(const ITensorInfo *input_weights, const ITensorInfo *bn_m… in validate() argument
66 …return CLFuseBatchNormalizationKernel::validate(input_weights, bn_mean, bn_var, fused_weights, fus… in validate()
CLLSTMLayerQuantized.cpp
336 …const TensorInfo input_weights(TensorShape(input_size, 4 * output_size), 1, DataType::QASYMM… in validate() local
337 …ARM_COMPUTE_RETURN_ON_ERROR(CLConcatenateLayer::validate(inputs_weights_vector, &input_weights, Wi… in validate()
351 weights_vector.emplace_back(&input_weights); in validate()
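In the quantized LSTM validator, the TensorInfo built for input_weights has shape (input_size, 4 * output_size) because the four gate weight matrices are concatenated into a single tensor, which is what the CLConcatenateLayer::validate call checks. A rough sketch of that packing with plain vectors instead of ACL tensors; the gate order is an assumption:

    // Packing four gate weight matrices (each output_size x input_size) into one
    // [4 * output_size, input_size] block, as the validated shape suggests (illustrative).
    #include <cstddef>
    #include <vector>

    std::vector<float> concat_gate_weights(const std::vector<std::vector<float>> &gates, // 4 matrices
                                           std::size_t output_size, std::size_t input_size)
    {
        std::vector<float> combined;
        combined.reserve(4 * output_size * input_size);
        for (const auto &g : gates)            // assumed order: input, forget, cell, output gate
            combined.insert(combined.end(), g.begin(), g.end());
        return combined;
    }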
/external/ComputeLibrary/src/runtime/NEON/functions/
NEFuseBatchNormalization.cpp
43 void NEFuseBatchNormalization::configure(const ITensor *input_weights, const ITensor *bn_mean, cons… in configure() argument
48 ARM_COMPUTE_LOG_PARAMS(input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, in configure()
52 …_fuse_bn_kernel->configure(input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, … in configure()
55 Status NEFuseBatchNormalization::validate(const ITensorInfo *input_weights, const ITensorInfo *bn_m… in validate() argument
60 …return NEFuseBatchNormalizationKernel::validate(input_weights, bn_mean, bn_var, fused_weights, fus… in validate()
NELSTMLayerQuantized.cpp
303 …const TensorInfo input_weights(TensorShape(input_size, 4 * output_size), 1, DataType::QASYMM… in validate() local
304 …ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate(inputs_weights_vector, &input_weights, Wi… in validate()
318 weights_vector.emplace_back(&input_weights); in validate()
/external/rnnoise/src/
rnn_reader.c
116 INPUT_ARRAY(name->input_weights, name->nb_inputs * name->nb_neurons); \ in rnnoise_model_from_file()
125 INPUT_ARRAY(name->input_weights, name->nb_inputs * name->nb_neurons * 3); \ in rnnoise_model_from_file()
145 free((void *) model->name->input_weights); \ in rnnoise_model_free()
152 free((void *) model->name->input_weights); \ in rnnoise_model_free()
rnn.h
46 const rnn_weight *input_weights; member
54 const rnn_weight *input_weights; member
rnn.c
92 sum += layer->input_weights[j*stride + i]*input[j]; in compute_dense()
125 sum += gru->input_weights[j*stride + i]*input[j]; in compute_gru()
135 sum += gru->input_weights[N + j*stride + i]*input[j]; in compute_gru()
145 sum += gru->input_weights[2*N + j*stride + i]*input[j]; in compute_gru()
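The indexing in these loops reads as: the weight connecting input j to neuron i sits at j*stride + i, so the flat array stores one stride-wide block per input. For the GRU the update, reset and candidate matrices share each block, which is why the loader above reads nb_inputs * nb_neurons * 3 values and the three accumulations start at offsets 0, N and 2*N. A scalar restatement of one dense unit (my reading of the loops, not rnnoise code):

    // One output unit of the dense/GRU accumulation above (illustrative).
    #include <cstddef>

    float dense_unit(const float *input_weights, const float *input,
                     std::size_t nb_inputs, std::size_t stride, std::size_t i /* neuron index */)
    {
        float sum = 0.0f;
        for (std::size_t j = 0; j < nb_inputs; ++j)
            sum += input_weights[j * stride + i] * input[j];
        return sum;
    }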
/external/ComputeLibrary/arm_compute/runtime/CL/functions/
CLFuseBatchNormalization.h
81 …void configure(const ICLTensor *input_weights, const ICLTensor *bn_mean, const ICLTensor *bn_var, …
100 …void configure(const CLCompileContext &compile_context, const ICLTensor *input_weights, const ICLT…
120 …static Status validate(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensor…
/external/libopus/src/
mlp.c
92 gemm_accum(output, layer->input_weights, N, M, stride, input); in compute_dense()
119 gemm_accum(z, gru->input_weights, N, M, stride, input); in compute_gru()
127 gemm_accum(r, &gru->input_weights[N], N, M, stride, input); in compute_gru()
137 gemm_accum(h, &gru->input_weights[2*N], N, M, stride, input); in compute_gru()
mlp.h
38 const opus_int8 *input_weights; member
46 const opus_int8 *input_weights; member
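libopus stores these weights as opus_int8 and drives them through gemm_accum, its small matrix-vector accumulate helper, with the same offset trick (0, N, 2*N) selecting the three GRU gate blocks. A scalar sketch of what such an accumulate does, ignoring the SIMD paths; the exact signature and the 1/128 weight scale are assumptions for illustration:

    // Scalar matrix-vector accumulate over int8 weights, column stride col_stride (illustrative).
    #include <cstdint>

    void gemm_accum_sketch(float *out, const std::int8_t *weights,
                           int rows, int cols, int col_stride, const float *x,
                           float weights_scale = 1.0f / 128.0f) // assumed scale
    {
        for (int i = 0; i < rows; ++i)
            for (int j = 0; j < cols; ++j)
                out[i] += weights_scale * weights[j * col_stride + i] * x[j];
    }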
/external/ComputeLibrary/arm_compute/runtime/NEON/functions/
NEFuseBatchNormalization.h
78 …void configure(const ITensor *input_weights, const ITensor *bn_mean, const ITensor *bn_var, ITenso…
98 …static Status validate(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensor…
/external/ComputeLibrary/tests/datasets/
LSTMLayerDataset.h
126 …void add_config(TensorShape src, TensorShape input_weights, TensorShape recurrent_weights, TensorS… in add_config() argument
130 _input_weights_shapes.emplace_back(std::move(input_weights)); in add_config()
/external/tensorflow/tensorflow/lite/toco/graph_transformations/
quantize.cc
267 const auto& input_weights = model->GetArray(op.inputs[weights_input_index]); in ChooseQuantizationForOperatorInput() local
269 !input_weights.quantization_params) { in ChooseQuantizationForOperatorInput()
276 const auto input_weights_scale = input_weights.quantization_params->scale; in ChooseQuantizationForOperatorInput()
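This TOCO pass looks up the weights' quantization scale when deciding how to quantize an op's other inputs; the common rule is that a 32-bit bias feeding the accumulator takes the product of the activation scale and the weights scale as its own scale, with zero point 0. A hedged sketch of that rule, not the pass's actual code:

    // Quantize one bias value with bias_scale = activation_scale * weights_scale (illustrative).
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    std::int32_t quantize_bias_value(float bias, float activation_scale, float weights_scale)
    {
        const double bias_scale = static_cast<double>(activation_scale) * weights_scale;
        const double q  = std::round(bias / bias_scale);
        const double lo = std::numeric_limits<std::int32_t>::min();
        const double hi = std::numeric_limits<std::int32_t>::max();
        return static_cast<std::int32_t>(std::max(lo, std::min(hi, q)));
    }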