
Searched refs:bias (Results 1 – 25 of 74) sorted by relevance


/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
fully_connected_quant8_signed.mod.py
23 bias = Parameter("b0", "TENSOR_INT32", "{3}, 0.25f, 0", [4, 8, 12]) variable
26 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
43 bias = Parameter("b0", "TENSOR_INT32", "{1}, 0.04, 0", [10]) variable
46 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
62 bias = Input("b0", "TENSOR_INT32", "{1}, 0.04, 0") variable
65 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
72 bias:
85 bias = Parameter("b0", "TENSOR_INT32", "{1}, 0.25f, 0", [4]) variable
88 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
104 bias = Input("b0", "TENSOR_INT32", "{1}, 0.25f, 0") variable
[all …]
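
Taken together, the FULLY_CONNECTED fragments above show a 1-D bias operand ("b0", TENSOR_INT32 for the quantized variants) passed as the third operation input. As a rough illustration of the computation that bias feeds into, here is a minimal plain-NumPy sketch; the function name and the example shapes are illustrative, not taken from the spec files:

import numpy as np

# Illustrative only: FULLY_CONNECTED as out = act(input . weights^T + bias),
# with bias a 1-D vector of length num_units, mirroring the "b0" operands above.
def fully_connected(inp, weights, bias, act_relu=True):
    out = inp @ weights.T + bias
    return np.maximum(out, 0.0) if act_relu else out

x = np.array([[1.0, 2.0, 3.0]])      # [batch, input_size]
w = np.array([[0.5, 0.5, 0.5]])      # [num_units, input_size]
b = np.array([4.0])                  # plays the role of the bias operand
print(fully_connected(x, w, b))      # [[7.]]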
unidirectional_sequence_rnn.mod.py
19 def test(name, input, weights, recurrent_weights, bias, hidden_state, argument
26 recurrent_weights, bias, hidden_state, activation,
33 bias: bias_data,
185 bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
211 bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
/packages/modules/NeuralNetworks/runtime/test/fuzzing/
TestRandomGraph.cpp
479 .float32 = {.bias = 1e-7f, .mse = 1e-10f, .atol = 1e-6f, .rtol = 1e-6f},
480 .float16 = {.bias = 1e-4f, .mse = 1e-8f, .atol = 1e-3f, .rtol = 1e-3f},
482 .quant8Asymm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
483 .quant8AsymmSigned = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
484 .quant8Symm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
485 .quant16Asymm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
486 .quant16Symm = {.bias = 0.1f, .mse = 0.1f, .atol = 1},
493 .float32 = {.bias = 1e-6f, .mse = 1e-8f, .atol = 1e-5f, .rtol = 1e-5f},
494 .float16 = {.bias = 1e-3f, .mse = 1e-5f, .atol = 1e-2f, .rtol = 1e-2f},
496 .quant8Asymm = {.bias = 1.2, .mse = 1.2, .atol = 2},
[all …]
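
The fields above look like per-data-type accuracy criteria for the random-graph fuzz tests. One plausible reading of how bias, mse, atol and rtol would be applied when comparing outputs is sketched below; this is an assumption for illustration, not the exact checking logic in TestRandomGraph.cpp:

import numpy as np

# Assumed semantics: "bias" bounds the mean signed error, "mse" the mean
# squared error, and atol/rtol bound the per-element absolute difference.
def within_criteria(actual, expected, bias, mse, atol, rtol=0.0):
    diff = actual - expected
    mean_bias_ok = abs(np.mean(diff)) <= bias
    mse_ok = np.mean(diff ** 2) <= mse
    elementwise_ok = np.all(np.abs(diff) <= atol + rtol * np.abs(expected))
    return mean_bias_ok and mse_ok and elementwise_ok

a = np.array([1.0000001, 2.0])
e = np.array([1.0, 2.0])
print(within_criteria(a, e, bias=1e-7, mse=1e-10, atol=1e-6, rtol=1e-6))  # True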
/packages/modules/NeuralNetworks/common/operations/
QuantizedLSTM.cpp
298 auto checkBiasShape = [&](const RunTimeOperandInfo* bias) -> bool { in prepare() argument
299 NN_RET_CHECK_EQ(NumDimensions(bias), 1); in prepare()
300 NN_RET_CHECK_EQ(SizeOfDimension(bias, 0), outputSize); in prepare()
301 NN_RET_CHECK_EQ(bias->scale, biasScale); in prepare()
302 NN_RET_CHECK_EQ(bias->zeroPoint, biasZeroPoint); in prepare()
365 void QuantizedLSTMCell::concatenateBiases(uint32_t outputSize, int32_t* bias) { in concatenateBiases() argument
366 memcpy(bias + 0 * outputSize, GetBuffer<int32_t>(inputGateBias_), sizeof(int32_t) * outputSize); in concatenateBiases()
367 memcpy(bias + 1 * outputSize, GetBuffer<int32_t>(cellGateBias_), sizeof(int32_t) * outputSize); in concatenateBiases()
368 memcpy(bias + 2 * outputSize, GetBuffer<int32_t>(forgetGateBias_), in concatenateBiases()
370 memcpy(bias + 3 * outputSize, GetBuffer<int32_t>(outputGateBias_), in concatenateBiases()
[all …]
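
The memcpy offsets above pack the four per-gate bias vectors back to back, each of length outputSize, in the order input, cell, forget, output gate. The same layout in a small NumPy sketch (illustrative, not the runtime code):

import numpy as np

# Mirrors concatenateBiases(): four gate biases laid out contiguously in the
# order inputGateBias_, cellGateBias_, forgetGateBias_, outputGateBias_.
def concatenate_biases(input_gate, cell_gate, forget_gate, output_gate):
    return np.concatenate([input_gate, cell_gate, forget_gate, output_gate])

output_size = 2
packed = concatenate_biases(np.full(output_size, 1, np.int32),
                            np.full(output_size, 2, np.int32),
                            np.full(output_size, 3, np.int32),
                            np.full(output_size, 4, np.int32))
print(packed)  # [1 1 2 2 3 3 4 4]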
LocalResponseNormalization.cpp
52 int32_t radius, float bias, float alpha, float beta, in localResponseNormFloat32Impl() argument
73 float multiplier = std::pow(bias + alpha * sum, -beta); in localResponseNormFloat32Impl()
82 bool localResponseNorm(const T* inputData, const Shape& inputShape, int32_t radius, T bias, T alpha,
87 float bias, float alpha, float beta, int32_t axis, float* outputData, in localResponseNorm() argument
96 .range = radius, .bias = bias, .alpha = alpha, .beta = beta}; in localResponseNorm()
102 return localResponseNormFloat32Impl(inputData, inputShape, radius, bias, alpha, beta, axis, in localResponseNorm()
109 _Float16 bias, _Float16 alpha, _Float16 beta, int32_t axis, in localResponseNorm() argument
116 localResponseNorm<float>(inputDataFloat32.data(), inputShape, radius, bias, alpha, beta, axis, in localResponseNorm()
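
The key line above scales each element by pow(bias + alpha * sum, -beta), where sum is the sum of squares over a window of width 2*radius+1 along the chosen axis. A small NumPy sketch of that normalization (illustrative; the runtime dispatches to optimized kernels):

import numpy as np

# Each element is multiplied by (bias + alpha * sum_of_squares_in_window) ** (-beta).
def local_response_norm(x, radius, bias, alpha, beta, axis=-1):
    x = np.asarray(x, dtype=np.float32)
    out = np.empty_like(x)
    x_m = np.moveaxis(x, axis, -1)       # views, so writes land in `out`
    out_m = np.moveaxis(out, axis, -1)
    n = x_m.shape[-1]
    for i in range(n):
        lo, hi = max(0, i - radius), min(n, i + radius + 1)
        sum_sq = np.sum(x_m[..., lo:hi] ** 2, axis=-1)
        out_m[..., i] = x_m[..., i] * (bias + alpha * sum_sq) ** (-beta)
    return out

print(local_response_norm([[1.0, 2.0, 3.0]], radius=1, bias=1.0, alpha=1.0, beta=0.5))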
FullyConnected.cpp
185 bool validateShapes(const Shape& input, const Shape& weights, const Shape& bias, in validateShapes() argument
192 NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32); in validateShapes()
194 NN_RET_CHECK(bias.type == input.type); in validateShapes()
201 NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1); in validateShapes()
205 uint32_t bias_len = getSizeOfDimension(bias, 0); in validateShapes()
286 Shape bias = context->getInputShape(kBiasTensor); in validate() local
287 if (hasKnownRank(input) && hasKnownRank(weights) && hasKnownRank(bias)) { in validate()
288 NN_RET_CHECK(validateShapes(input, weights, bias)); in validate()
298 Shape bias = context->getInputShape(kBiasTensor); in prepare() local
300 NN_RET_CHECK(validateShapes(input, weights, bias, &output)); in prepare()
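
The checks excerpted above require a rank-1 bias whose type is TENSOR_INT32 for quantized inputs and the input type otherwise. A hedged restatement of those rules in Python; the comparison of the bias length against the weights' first dimension is the usual FULLY_CONNECTED contract and is assumed rather than shown verbatim in the excerpt:

# Illustrative restatement of the bias checks in validateShapes(); assumed
# weights layout is [num_units, input_size].
def validate_fc_bias(input_type, bias_type, bias_shape, weights_shape):
    quantized = input_type in ("TENSOR_QUANT8_ASYMM", "TENSOR_QUANT8_ASYMM_SIGNED")
    if quantized and bias_type != "TENSOR_INT32":
        return False
    if not quantized and bias_type != input_type:
        return False
    if len(bias_shape) != 1:            # bias must be rank 1
        return False
    return bias_shape[0] == weights_shape[0]

print(validate_fc_bias("TENSOR_FLOAT32", "TENSOR_FLOAT32", [4], [4, 8]))  # True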
UnidirectionalSequenceRNN.cpp
72 const T* bias = context->getInputBuffer<T>(kBiasTensor); in executeTyped() local
108 RNN::RNNStep<T>(input, fixedTimeInputShape, hiddenState, bias, weights, weightsShape, in executeTyped()
157 Shape bias = context->getInputShape(kBiasTensor); in prepare() local
172 NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1); in prepare()
176 NN_RET_CHECK_EQ(numUnits, getSizeOfDimension(bias, 0)); in prepare()
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/
unidirectional_sequence_rnn.mod.py
19 def test(name, input, weights, recurrent_weights, bias, hidden_state, argument
25 recurrent_weights, bias, hidden_state, activation,
31 bias: bias_data,
147 bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
169 bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
svdf_state_float16.mod.py
27 bias = Input("bias", "TENSOR_FLOAT16", "{%d}" % (units)) variable
34 model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
56 bias: [],
svdf_float16.mod.py
29 bias = Input("bias", "TENSOR_FLOAT16", "{%d}" % (units)) variable
36 model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
59 bias: [],
rnn_float16.mod.py
26 bias = Input("bias", "TENSOR_FLOAT16", "{%d}" % (units)) variable
34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
80 bias: [
/packages/modules/NeuralNetworks/runtime/test/
TestValidateOperations.cpp
2106 ANeuralNetworksOperandType bias = {.type = inputOperandCode, in convOpTest() local
2112 bias.type = ANEURALNETWORKS_TENSOR_INT32; in convOpTest()
2113 bias.scale = 0.25f; in convOpTest()
2116 bias.type = ANEURALNETWORKS_TENSOR_INT32; in convOpTest()
2117 bias.scale = 0.25f; in convOpTest()
2120 bias.type = ANEURALNETWORKS_TENSOR_INT32; in convOpTest()
2121 bias.scale = 0.0f; in convOpTest()
2140 {input, filter, bias, padLeft, padRight, padTop, padBottom, in convOpTest()
2151 {input, filter, bias, padImplicit, strideWidth, strideHeight, activation}, {output}, in convOpTest()
2166 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight, in convOpTest()
[all …]
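
The repeated bias.type / bias.scale assignments above set up the quantized convolution cases. For illustration (the input and filter scales themselves are outside the excerpt), the usual contract is that a quantized CONV_2D bias is TENSOR_INT32 with zeroPoint 0 and scale equal to inputScale * filterScale, while per-channel-quantized filters use a bias scale of 0.0f:

# Illustrative helper; the 0.25f scale in the test is consistent with
# inputScale * filterScale, e.g. 0.5 * 0.5, but those values are assumed here.
def quantized_conv_bias_operand(input_scale, filter_scale, per_channel=False):
    return {
        "type": "TENSOR_INT32",
        "zeroPoint": 0,
        "scale": 0.0 if per_channel else input_scale * filter_scale,
    }

print(quantized_conv_bias_operand(0.5, 0.5))                    # scale 0.25
print(quantized_conv_bias_operand(0.5, 0.5, per_channel=True))  # scale 0.0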
/packages/modules/NeuralNetworks/runtime/test/specs/V1_0/
fully_connected_float_large_weights_as_inputs.mod.py
20 bias = Input("b0", "TENSOR_FLOAT32", "{1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
30 bias:
fully_connected_quant8_weights_as_inputs.mod.py
20 bias = Input("b0", "TENSOR_INT32", "{1}, 0.25f, 0") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
29 bias: [4]}
fully_connected_quant8_large_weights_as_inputs.mod.py
20 bias = Input("b0", "TENSOR_INT32", "{1}, 0.04, 0") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
30 bias:
fully_connected_float_weights_as_inputs.mod.py
20 bias = Input("b0", "TENSOR_FLOAT32", "{1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
29 bias: [4]}
rnn_state.mod.py
26 bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units)) variable
34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
80 bias: [
svdf_state.mod.py
27 bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units)) variable
34 model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
56 bias: [],
rnn.mod.py
26 bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units)) variable
34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
80 bias: [
svdf_bias_present.mod.py
29 bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units)) variable
36 model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
59 bias: [1.0, 2.0, 3.0, 4.0],
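
In the RNN and SVDF specs above, the bias is a 1-D input of length units passed after the weight operands. As a rough sketch of the role it plays in one basic RNN step (the operand order matches the Operation("RNN", ...) calls; the activation and the numbers are illustrative):

import numpy as np

# One basic RNN step: act(input . W^T + state . R^T + bias); RELU is used here
# purely for illustration -- the specs pass the activation as an operand.
def rnn_step(inp, weights, recurrent_weights, bias, hidden_state):
    out = inp @ weights.T + hidden_state @ recurrent_weights.T + bias
    return np.maximum(out, 0.0)

x = np.array([[1.0, 2.0]])     # [batch, input_size]
w = np.ones((3, 2))            # [units, input_size]
r = np.zeros((3, 3))           # [units, units]
b = np.array([0.1, 0.2, 0.3])  # the bias input, length units
h = np.zeros((1, 3))           # hidden state in
print(rnn_step(x, w, r, b, h))  # [[3.1 3.2 3.3]]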
/packages/modules/NeuralNetworks/runtime/test/specs/V1_1/
fully_connected_float_weights_as_inputs_relaxed.mod.py
20 bias = Input("b0", "TENSOR_FLOAT32", "{1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
30 bias: [4]}
fully_connected_float_large_weights_as_inputs_relaxed.mod.py
20 bias = Input("b0", "TENSOR_FLOAT32", "{1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
31 bias:
rnn_state_relaxed.mod.py
26 bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units)) variable
34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
81 bias: [
svdf_state_relaxed.mod.py
27 bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units)) variable
34 model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
57 bias: [],
rnn_relaxed.mod.py
26 bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units)) variable
34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
81 bias: [
