Searched refs:beta (Results 1 – 25 of 48) sorted by relevance

/packages/modules/NeuralNetworks/common/operations/
Softmax.cpp:53 inline bool softmaxSlowFloat32(const float* inputData, const Shape& inputShape, const float beta, in softmaxSlowFloat32() argument
73 sum += std::exp((*p - maxValue) * beta); in softmaxSlowFloat32()
78 *pOut = std::exp((*p - maxValue) * beta) / sum; in softmaxSlowFloat32()
85 bool softmaxFloat32(const float* inputData, const Shape& inputShape, const float beta, int32_t axis, in softmaxFloat32() argument
92 tflite::SoftmaxParams param = {.beta = beta}; in softmaxFloat32()
97 return softmaxSlowFloat32(inputData, inputShape, beta, axis, outputData, outputShape); in softmaxFloat32()
101 bool softmaxFloat16(const _Float16* inputData, const Shape& inputShape, const float beta, in softmaxFloat16() argument
108 softmaxFloat32(inputData_float32.data(), inputShape, beta, axis, outputData_float32.data(), in softmaxFloat16()
116 bool softmaxQuant8Impl(const T* inputData, const Shape& inputShape, const float beta, int32_t axis, in softmaxQuant8Impl() argument
203 bool softmaxQuant8(const T* inputData, const Shape& inputShape, const float beta, int32_t axis, in softmaxQuant8() argument
[all …]
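
The Softmax.cpp matches above all implement the same beta-scaled, max-subtracted softmax. As a point of reference, here is a minimal single-vector sketch of that computation (a hypothetical helper, not the NNAPI code itself, which also handles axes and shapes):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Beta-scaled softmax over one vector, using the same max-subtraction
    // trick as softmaxSlowFloat32 to keep exp() numerically stable.
    std::vector<float> softmaxWithBeta(const std::vector<float>& input, float beta) {
        const float maxValue = *std::max_element(input.begin(), input.end());
        std::vector<float> output(input.size());
        float sum = 0.0f;
        for (size_t i = 0; i < input.size(); ++i) {
            output[i] = std::exp((input[i] - maxValue) * beta);
            sum += output[i];
        }
        for (float& v : output) v /= sum;  // normalize so the outputs sum to 1
        return output;
    }
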
LocalResponseNormalization.cpp:52 int32_t radius, float bias, float alpha, float beta, in localResponseNormFloat32Impl() argument
73 float multiplier = std::pow(bias + alpha * sum, -beta); in localResponseNormFloat32Impl()
83 T beta, int32_t axis, T* outputData, const Shape& outputShape);
87 float bias, float alpha, float beta, int32_t axis, float* outputData, in localResponseNorm() argument
96 .range = radius, .bias = bias, .alpha = alpha, .beta = beta}; in localResponseNorm()
102 return localResponseNormFloat32Impl(inputData, inputShape, radius, bias, alpha, beta, axis, in localResponseNorm()
109 _Float16 bias, _Float16 alpha, _Float16 beta, int32_t axis, in localResponseNorm() argument
116 localResponseNorm<float>(inputDataFloat32.data(), inputShape, radius, bias, alpha, beta, axis, in localResponseNorm()
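
In the LocalResponseNormalization.cpp matches, beta is the exponent of the normalization denominator. A compact sketch of the across-channel case for a single pixel, with simplified edge handling (illustrative only, not the Impl above):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Local response normalization across channels:
    // out[c] = in[c] * pow(bias + alpha * (sum of squares in a +/-radius window), -beta)
    std::vector<float> lrnAcrossChannels(const std::vector<float>& channels, int radius,
                                         float bias, float alpha, float beta) {
        const int depth = static_cast<int>(channels.size());
        std::vector<float> out(depth);
        for (int c = 0; c < depth; ++c) {
            const int begin = std::max(0, c - radius);
            const int end = std::min(depth - 1, c + radius);
            float sum = 0.0f;
            for (int i = begin; i <= end; ++i) sum += channels[i] * channels[i];
            out[c] = channels[c] * std::pow(bias + alpha * sum, -beta);
        }
        return out;
    }
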
InstanceNormalization.cpp:49 inline bool instanceNormNhwc(const T* inputData, const Shape& inputShape, T gamma, T beta, in instanceNormNhwc() argument
83 outputData[ind] = (inputData[ind] - mean) * gamma / sigma + beta; in instanceNormNhwc()
92 inline bool instanceNorm(const T* inputData, const Shape& inputShape, T gamma, T beta, T epsilon, in instanceNorm() argument
98 NN_RET_CHECK(instanceNormNhwc(input.getNhwcBuffer(), input.getNhwcShape(), gamma, beta, epsilon, in instanceNorm()
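
Here beta is the offset added after normalization. A one-dimensional sketch of the formula visible in the InstanceNormalization.cpp match, assuming a non-empty input (the real code walks NHWC data per batch and channel):

    #include <cmath>
    #include <vector>

    // output[i] = (input[i] - mean) * gamma / sqrt(var + epsilon) + beta
    std::vector<float> instanceNorm1D(const std::vector<float>& x, float gamma, float beta,
                                      float epsilon) {
        const float n = static_cast<float>(x.size());
        float mean = 0.0f;
        for (float v : x) mean += v;
        mean /= n;
        float var = 0.0f;
        for (float v : x) var += (v - mean) * (v - mean);
        var /= n;
        const float sigma = std::sqrt(var + epsilon);
        std::vector<float> out(x.size());
        for (size_t i = 0; i < x.size(); ++i) {
            out[i] = (x[i] - mean) * gamma / sigma + beta;
        }
        return out;
    }
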
LogSoftmax.cpp:42 inline bool compute(const T* input, const Shape& shape, T beta, uint32_t axis, T* output) { in compute() argument
59 (input[(outer * axisSize + i) * innerSize + inner] - maxValue) * beta)); in compute()
65 (input[(outer * axisSize + i) * innerSize + inner] - maxValue) * beta - in compute()
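
The LogSoftmax.cpp matches compute the log of the beta-scaled softmax directly, which avoids the final division. A minimal sketch over one vector (hypothetical helper name):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // out[i] = (x[i] - max) * beta - log(sum_k exp((x[k] - max) * beta))
    std::vector<float> logSoftmaxWithBeta(const std::vector<float>& x, float beta) {
        const float maxValue = *std::max_element(x.begin(), x.end());
        float sum = 0.0f;
        for (float v : x) sum += std::exp((v - maxValue) * beta);
        const float logSum = std::log(sum);
        std::vector<float> out(x.size());
        for (size_t i = 0; i < x.size(); ++i) out[i] = (x[i] - maxValue) * beta - logSum;
        return out;
    }
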
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/
log_softmax.mod.py:19 def test(input0, output0, input_data, beta, axis, output_data): argument
20 model = Model().Operation("LOG_SOFTMAX", input0, beta, axis).To(output0)
31 beta=1.0,
44 beta=1.0,
57 beta=1.0,
68 beta=10.0,
/packages/apps/Launcher3/src/com/android/launcher3/anim/
SpringAnimationBuilder.java:58 private double beta; field in SpringAnimationBuilder
142 beta = 2 * mDampingRatio * naturalFreq; in computeParams()
145 b = beta * a / (2 * gamma) + mVelocity / gamma; in computeParams()
147 va = a * beta / 2 - b * gamma; in computeParams()
148 vb = a * gamma + beta * b / 2; in computeParams()
218 return Math.pow(Math.E, - beta * t / 2); in exponentialComponent()
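
In SpringAnimationBuilder, beta is the damping term of an under-damped spring, 2 * dampingRatio * naturalFreq, and the amplitude decays as exp(-beta * t / 2). A standalone C++ sketch of those two relations; the formula for gamma (the damped angular frequency) is an assumption here, since it is not shown in the excerpt:

    #include <cmath>

    struct SpringParams {
        double beta;   // damping term: 2 * zeta * omega_n
        double gamma;  // damped angular frequency (under-damped case, assumed)
    };

    SpringParams computeSpringParams(double dampingRatio, double naturalFreq) {
        return {2.0 * dampingRatio * naturalFreq,
                naturalFreq * std::sqrt(1.0 - dampingRatio * dampingRatio)};
    }

    // Decay envelope of the oscillation at time t, matching exponentialComponent() above.
    double exponentialComponent(const SpringParams& p, double t) {
        return std::exp(-p.beta * t / 2.0);
    }
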
/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
softmax_quant8_signed.mod.py:19 beta = Float32Scalar("beta", 0.00001) # close to 0 variable
23 model = model.Operation("SOFTMAX", i1, beta).To(output)
38 beta = Float32Scalar("beta", 1.) variable
42 model = model.Operation("SOFTMAX", i1, beta).To(output)
/packages/modules/NeuralNetworks/runtime/test/specs/V1_0/
softmax_quant8_1.mod.py:5 beta = Float32Scalar("beta", 0.00001) # close to 0 variable
9 model = model.Operation("SOFTMAX", i1, beta).To(output)
softmax_quant8_2.mod.py:5 beta = Float32Scalar("beta", 1.) variable
9 model = model.Operation("SOFTMAX", i1, beta).To(output)
softmax_float_2.mod.py:5 beta = Float32Scalar("beta", 1.) variable
9 model = model.Operation("SOFTMAX", i1, beta).To(output)
softmax_float_1.mod.py:5 beta = Float32Scalar("beta", 0.000001) variable
9 model = model.Operation("SOFTMAX", i1, beta).To(output)
local_response_norm_float_1.mod.py:6 beta = Float32Scalar("beta", .5) variable
9 model = model.Operation("LOCAL_RESPONSE_NORMALIZATION", i1, radius, bias, alpha, beta).To(output)
local_response_norm_float_4.mod.py:6 beta = Float32Scalar("beta", .5) variable
9 model = model.Operation("LOCAL_RESPONSE_NORMALIZATION", i1, radius, bias, alpha, beta).To(output)
/packages/modules/NeuralNetworks/runtime/test/specs/V1_1/
softmax_float_1_relaxed.mod.py:21 beta = Float32Scalar("beta", 0.000001) variable
25 model = model.Operation("SOFTMAX", i1, beta).To(output)
softmax_float_2_relaxed.mod.py:21 beta = Float32Scalar("beta", 1.) variable
25 model = model.Operation("SOFTMAX", i1, beta).To(output)
local_response_norm_float_1_relaxed.mod.py:22 beta = Float32Scalar("beta", .5) variable
25 model = model.Operation("LOCAL_RESPONSE_NORMALIZATION", i1, radius, bias, alpha, beta).To(output)
local_response_norm_float_4_relaxed.mod.py:22 beta = Float32Scalar("beta", .5) variable
25 model = model.Operation("LOCAL_RESPONSE_NORMALIZATION", i1, radius, bias, alpha, beta).To(output)
/packages/apps/Gallery2/jni/filters/
edge.c:30 float const beta = p; in JNIFUNCF() local
96 float ret = 1.0f - exp (- alpha * pow(mag, beta)); in JNIFUNCF()
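
In edge.c, beta is the exponent applied to the gradient magnitude before it is compressed into [0, 1): a larger beta makes the edge response ramp up more sharply. The mapping from line 96, as a standalone sketch:

    #include <cmath>

    // Edge strength in [0, 1): 1 - exp(-alpha * mag^beta)
    float edgeResponse(float mag, float alpha, float beta) {
        return 1.0f - std::exp(-alpha * std::pow(mag, beta));
    }
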
/packages/apps/Test/connectivity/sl4n/rapidjson/
CHANGELOG.md:46 ## 1.0-beta - 2015-04-8
79 [1.0.0]: https://github.com/miloyip/rapidjson/compare/v1.0-beta...v1.0.0
/packages/apps/LegacyCamera/jni/feature_mos/src/mosaic/
Delaunay.cpp:148 EdgePointer alpha, beta, temp; in splice() local
150 beta = (EdgePointer) rot(onext(b)); in splice()
152 onext(alpha) = onext(beta); in splice()
153 onext(beta) = temp; in splice()
/packages/screensavers/PhotoTable/src/com/android/dreams/phototable/
PhotoTable.java:366 final double beta = Math.toRadians(Math.min(angle, 180f) / 2f); in moveFocus() local
367 final double[] left = { Math.sin(alpha - beta), in moveFocus()
368 Math.cos(alpha - beta) }; in moveFocus()
369 final double[] right = { Math.sin(alpha + beta), in moveFocus()
370 Math.cos(alpha + beta) }; in moveFocus()
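
Here beta is half of a view angle in radians, and the sin/cos pairs build unit vectors rotated by -beta and +beta around the heading alpha. A minimal sketch (hypothetical helper, angles in radians):

    #include <cmath>
    #include <utility>

    // Unit direction vectors at headings (alpha - beta) and (alpha + beta),
    // i.e. the left and right edges of a cone of half-angle beta around alpha.
    std::pair<std::pair<double, double>, std::pair<double, double>> coneEdges(double alpha,
                                                                              double beta) {
        return {{std::sin(alpha - beta), std::cos(alpha - beta)},
                {std::sin(alpha + beta), std::cos(alpha + beta)}};
    }
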
/packages/modules/NeuralNetworks/common/include/
Operations.h:84 float bias, float alpha, float beta, int32_t axis,
87 float bias, float alpha, float beta, int32_t axis, float* outputData,
/packages/modules/NeuralNetworks/tools/api/
README.md:160 %{test alpha beta}
166 second is beta, first is alpha
172 error, but `%{test alpha beta gamma}` would not.
types.spec:1362 * output = input / pow((bias + alpha * sqr_sum), beta)
1401 * * 4: A scalar, specifying the exponent, beta.
1403 * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the beta
1406 * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the beta
2233 * exp((input[batch, i] - max(input[batch, :])) * beta) /
2234 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
2264 * beta. If input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32},
2270 * beta. If input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or
4464 * sqrt(var[b, c] + epsilon) + beta
4490 * * 2: A scalar, specifying beta, the offset applied to the normalized
[all …]
/packages/modules/NeuralNetworks/runtime/test/
TestValidateOperations.cpp:1684 ANeuralNetworksOperandType beta = {.type = (inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32) in logSoftmaxOpTest() local
1703 OperationTestBase test(ANEURALNETWORKS_LOG_SOFTMAX, {input, beta, axis}, {output}); in logSoftmaxOpTest()
1797 ANeuralNetworksOperandType beta = getOpType(ANEURALNETWORKS_FLOAT32); in softmaxOpTest() local
1799 beta = getOpType(ANEURALNETWORKS_FLOAT16); in softmaxOpTest()
1802 OperationTestBase softmaxTest(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output}, in softmaxOpTest()
1807 OperationTestBase softmaxAxisTest(ANEURALNETWORKS_SOFTMAX, {input, beta, axis}, {output}, in softmaxOpTest()
3297 ANeuralNetworksOperandType beta = floatScalar; in instanceNormalizationOpTest() local
3303 {input, gamma, beta, epsilon, isNCHW}, {output}); in instanceNormalizationOpTest()
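
The softmax and log-softmax validation tests above check that the beta scalar's type tracks the input tensor type: FLOAT16 when the input is TENSOR_FLOAT16, FLOAT32 otherwise. A sketch of that rule using the NNAPI C types (hypothetical helper, not code from the test file):

    #include <android/NeuralNetworks.h>

    // Beta operand for SOFTMAX / LOG_SOFTMAX: a scalar whose type follows the input tensor.
    ANeuralNetworksOperandType makeBetaOperand(int32_t inputOperandCode) {
        ANeuralNetworksOperandType beta = {};
        beta.type = (inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16)
                            ? ANEURALNETWORKS_FLOAT16
                            : ANEURALNETWORKS_FLOAT32;
        beta.dimensionCount = 0;    // scalar operand
        beta.dimensions = nullptr;
        return beta;
    }
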