
Searched full:tanh (Results 1 – 25 of 1388) sorted by relevance


/external/arm-optimized-routines/math/test/testcases/directed/
tanh.tst
1 ; tanh.tst
6 func=tanh op1=7ff80000.00000001 result=7ff80000.00000001 errno=0
7 func=tanh op1=fff80000.00000001 result=7ff80000.00000001 errno=0
8 func=tanh op1=7ff00000.00000001 result=7ff80000.00000001 errno=0 status=i
9 func=tanh op1=fff00000.00000001 result=7ff80000.00000001 errno=0 status=i
10 func=tanh op1=7ff00000.00000000 result=3ff00000.00000000 errno=0
11 func=tanh op1=fff00000.00000000 result=bff00000.00000000 errno=0
12 func=tanh op1=00000000.00000000 result=00000000.00000000 errno=0
13 func=tanh op1=80000000.00000000 result=80000000.00000000 errno=0
17 func=tanh op1=00000000.00000001 result=00000000.00000001 errno=0 maybestatus=ux
[all …]
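The hex operands above are raw IEEE-754 double encodings: NaNs propagate, tanh(±inf) saturates to ±1, signed zero is preserved, and the smallest subnormal passes through unchanged (with a possible underflow/inexact flag, the "maybestatus=ux" above). A quick sketch of that contract using only the standard library (mine, not part of the test harness):

```python
import math

assert math.tanh(math.inf) == 1.0                    # 7ff00000.00000000 -> 1.0
assert math.tanh(-math.inf) == -1.0                  # fff00000.00000000 -> -1.0
assert math.copysign(1.0, math.tanh(-0.0)) == -1.0   # signed zero preserved
assert math.isnan(math.tanh(math.nan))               # NaN propagates
assert math.tanh(5e-324) == 5e-324                   # tanh(x) == x for tiny x
```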
/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/
tanh.c
42 "failed to create TanH operator with %zu channels: number of channels must be non-zero", in pytorch_qnnp_create_tanh_nc_q8()
49 "failed to create TanH operator with %.7g input scale: scale must be finite and positive", in pytorch_qnnp_create_tanh_nc_q8()
56 "failed to create TanH operator with %.7g output scale: scale must be finite and positive", in pytorch_qnnp_create_tanh_nc_q8()
63 "failed to create TanH operator with [%" PRIu8 ", %" PRIu8 in pytorch_qnnp_create_tanh_nc_q8()
74 … "failed to create TanH operator with %.7g output scale: only output scale of 2/256 is supported", in pytorch_qnnp_create_tanh_nc_q8()
81 "failed to create TanH operator with %" PRIu8 in pytorch_qnnp_create_tanh_nc_q8()
100 "failed to allocate 256 bytes for TanH lookup table"); in pytorch_qnnp_create_tanh_nc_q8()
110 /* Scale tanh(x) by 1 / output scale = 128.0 in pytorch_qnnp_create_tanh_nc_q8()
137 pytorch_qnnp_operator_t tanh, in pytorch_qnnp_setup_tanh_nc_q8() argument
150 tanh->batch_size = 0; in pytorch_qnnp_setup_tanh_nc_q8()
[all …]
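The messages above describe a uint8 tanh that runs off a 256-byte lookup table with a fixed output scale of 2/256, i.e. tanh(x) is scaled by 1/output_scale = 128 before requantization. A minimal sketch of how such a table could be built; the function name and the output zero point of 128 are assumptions for illustration, not qnnpack's exact code:

```python
import math

def build_tanh_q8_table(input_scale, input_zero_point):
    """Illustrative 256-entry uint8 tanh lookup table in the spirit of
    pytorch_qnnp_create_tanh_nc_q8. Output scale is fixed at 2/256, so
    tanh's [-1, 1] range spans the full uint8 range."""
    table = []
    for q in range(256):
        x = input_scale * (q - input_zero_point)   # dequantize input code
        y = math.tanh(x)                           # in [-1, 1]
        q_out = round(y * 128.0) + 128             # scale by 1/(2/256) = 128
        table.append(min(255, max(0, q_out)))      # clamp to uint8
    return table
```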
/external/libopus/dnn/torch/rdovae/
export_rdovae_weights.py
190 ('core_encoder.module.dense_1' , 'enc_dense1', 'TANH', False,),
192 ('core_encoder.module.state_dense_1' , 'gdense1' , 'TANH', True,),
193 ('core_encoder.module.state_dense_2' , 'gdense2' , 'TANH', True)
202 ('core_encoder.module.gru1' , 'enc_gru1', 'TANH', True),
203 ('core_encoder.module.gru2' , 'enc_gru2', 'TANH', True),
204 ('core_encoder.module.gru3' , 'enc_gru3', 'TANH', True),
205 ('core_encoder.module.gru4' , 'enc_gru4', 'TANH', True),
206 ('core_encoder.module.gru5' , 'enc_gru5', 'TANH', True),
214 ('core_encoder.module.conv1.conv' , 'enc_conv1', 'TANH', True),
215 ('core_encoder.module.conv2.conv' , 'enc_conv2', 'TANH', True),
[all …]
/external/arm-optimized-routines/math/aarch64/advsimd/
tanh.c
2 * Double-precision vector tanh(x) function.
27 return v_call_f64 (tanh, x, vdivq_f64 (q, qp2), special); in special_case()
30 /* Vector approximation for double-precision tanh(x), using a simplified
34 float64x2_t VPCS_ATTR V_NAME_D1 (tanh) (float64x2_t x) in V_NAME_D1() argument
53 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in V_NAME_D1()
62 TEST_SIG (V, D, 1, tanh, -10.0, 10.0)
63 TEST_ULP (V_NAME_D1 (tanh), 2.21)
64 TEST_DISABLE_FENV_IF_NOT (V_NAME_D1 (tanh), WANT_SIMD_EXCEPT)
65 TEST_SYM_INTERVAL (V_NAME_D1 (tanh), 0, 0x1p-27, 5000)
66 TEST_SYM_INTERVAL (V_NAME_D1 (tanh), 0x1p-27, 0x1.241bf835f9d5fp+4, 50000)
[all …]
tanhf.c
2 * Single-precision vector tanh(x) function.
34 /* Approximation for single-precision vector tanh(x), using a simplified
38 float32x4_t VPCS_ATTR NOINLINE V_NAME_F1 (tanh) (float32x4_t x) in V_NAME_F1() argument
63 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in V_NAME_F1()
74 HALF_WIDTH_ALIAS_F1 (tanh)
76 TEST_SIG (V, F, 1, tanh, -10.0, 10.0)
77 TEST_ULP (V_NAME_F1 (tanh), 2.09)
78 TEST_DISABLE_FENV_IF_NOT (V_NAME_F1 (tanh), WANT_SIMD_EXCEPT)
79 TEST_SYM_INTERVAL (V_NAME_F1 (tanh), 0, 0x1p-23, 1000)
80 TEST_SYM_INTERVAL (V_NAME_F1 (tanh), 0x1p-23, 0x1.205966p+3, 100000)
[all …]
/external/skia/resources/sksl/intrinsics/
Tanh.sksl
6 return (tanh(inputVal.x) == expected.x &&
7 tanh(inputVal.xy) == expected.xy &&
8 tanh(inputVal.xyz) == expected.xyz &&
9 tanh(inputVal.xyzw) == expected.xyzw &&
10 tanh(constVal.x) == expected.x &&
11 tanh(constVal.xy) == expected.xy &&
12 tanh(constVal.xyz) == expected.xyz &&
13 tanh(constVal.xyzw) == expected.xyzw) ? colorGreen : colorRed;
/external/arm-optimized-routines/math/aarch64/sve/
tanhf.c
2 * Single-precision SVE tanh(x) function.
37 /* Approximation for single-precision SVE tanh(x), using a simplified
41 svfloat32_t SV_NAME_F1 (tanh) (svfloat32_t x, const svbool_t pg) in SV_NAME_F1() argument
52 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in SV_NAME_F1()
62 TEST_SIG (SV, F, 1, tanh, -10.0, 10.0)
63 TEST_ULP (SV_NAME_F1 (tanh), 2.07)
64 TEST_DISABLE_FENV (SV_NAME_F1 (tanh))
65 TEST_SYM_INTERVAL (SV_NAME_F1 (tanh), 0, 0x1p-23, 1000)
66 TEST_SYM_INTERVAL (SV_NAME_F1 (tanh), 0x1p-23, BoringBound, 100000)
67 TEST_SYM_INTERVAL (SV_NAME_F1 (tanh), BoringBound, inf, 100)
tanh.c
2 * Double-precision SVE tanh(x) function.
40 the scalar variant of tanh. */ in expm1_inline()
65 return sv_call_f64 (tanh, x, y, special); in special_case()
68 /* SVE approximation for double-precision tanh(x), using a simplified
72 svfloat64_t SV_NAME_D1 (tanh) (svfloat64_t x, svbool_t pg) in SV_NAME_D1() argument
83 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in SV_NAME_D1()
92 TEST_SIG (SV, D, 1, tanh, -10.0, 10.0)
93 TEST_ULP (SV_NAME_D1 (tanh), 2.27)
94 TEST_DISABLE_FENV (SV_NAME_D1 (tanh))
95 TEST_SYM_INTERVAL (SV_NAME_D1 (tanh), 0, 0x1p-27, 5000)
[all …]
/external/armnn/docs/
05_03_delegate.dox
44 - AVERAGE_POOL_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
46 - AVERAGE_POOL_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, SIGN_BIT, TANH, …
54 - CONCATENATION, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
56 - CONV_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
58 - CONV_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
62 - DEPTHWISE_CONV_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
82 - FULLY_CONNECTED, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
120 - MAX_POOL_2D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, TANH, NONE
122 - MAX_POOL_3D, Supported Fused Activation: RELU, RELU6, RELU_N1_TO_1, SIGMOID, SIGN_BIT, TANH, NONE
192 - TANH
05_01_parsers.dox
79 - Tanh
80 …- See the ONNX [Tanh documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Tan…
124 - AVERAGE_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
126 - CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE
127 - CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
128 - CONV_3D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
130 - DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
138 - FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE
152 - MAX_POOL_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
187 - TANH
/external/arm-optimized-routines/math/aarch64/experimental/
tanh_3u.c
2 * Double-precision tanh(x) function.
50 /* Approximation for double-precision tanh(x), using a simplified version of
52 tanh(-0x1.c4a4ca0f9f3b7p-3) got -0x1.bd6a21a163627p-3
55 tanh (double x) in tanh() function
71 /* tanh(x) = (e^2x - 1) / (e^2x + 1). */ in tanh()
76 TEST_SIG (S, D, 1, tanh, -10.0, 10.0)
77 TEST_ULP (tanh, 2.27)
78 TEST_SYM_INTERVAL (tanh, 0, TinyBound, 1000)
79 TEST_SYM_INTERVAL (tanh, TinyBound, BoringBound, 100000)
80 TEST_SYM_INTERVAL (tanh, BoringBound, inf, 1000)
/external/pytorch/benchmarks/fastrnns/
cells.py
19 cellgate = cellgate.tanh()
23 hy = outgate * cy.tanh()
43 cellgate = torch.tanh(cellgate)
47 hy = outgate * torch.tanh(cy)
67 cellgate = torch.tanh(cellgate)
71 hy = outgate * torch.tanh(cy)
90 cellgate = torch.tanh(cellgate)
94 hy = outgate * torch.tanh(cy)
109 cellgate = torch.tanh(cellgate)
113 hy = outgate * torch.tanh(cy)
[all …]
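Each of these cells uses tanh twice: once to squash the candidate cell values into [-1, 1] and once to bound the hidden output. A condensed sketch of the gate math in the style of cells.py (the signature and weight layout are assumed for illustration):

```python
import torch

def lstm_cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):
    # Sigmoid gates control what flows; tanh shapes the values themselves.
    gates = x @ w_ih.t() + hx @ w_hh.t() + b_ih + b_hh
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    ingate = torch.sigmoid(ingate)
    forgetgate = torch.sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)           # candidate values in [-1, 1]
    outgate = torch.sigmoid(outgate)
    cy = forgetgate * cx + ingate * cellgate
    hy = outgate * torch.tanh(cy)             # second tanh bounds the output
    return hy, cy
```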
/external/executorch/backends/arm/test/ops/
test_tanh.py
32 class Tanh(torch.nn.Module): class in TestTanh
35 self.tanh = torch.nn.Tanh()
38 return self.tanh(x)
50 .check(["torch.ops.aten.tanh.default"])
69 .check(["torch.ops.aten.tanh.default"])
93 .check_count({"torch.ops.aten.tanh.default": 1})
122 self._test_tanh_tosa_MI_pipeline(self.Tanh(), (test_data,))
126 self._test_tanh_tosa_BI_pipeline(self.Tanh(), (test_data,))
130 self._test_tanh_tosa_u55_BI_pipeline(self.Tanh(), (test_data,))
134 self._test_tanh_tosa_u85_BI_pipeline(self.Tanh(), (test_data,))
/external/tensorflow/tensorflow/core/kernels/
cwise_op_tanh.cc
21 REGISTER3(UnaryOp, CPU, "Tanh", functor::tanh, float, Eigen::half, double);
22 REGISTER3(UnaryOp, CPU, "Tanh", functor::tanh, bfloat16, complex64, complex128)
26 REGISTER3(UnaryOp, GPU, "Tanh", functor::tanh, float, Eigen::half, double);
28 REGISTER(UnaryOp, GPU, "Tanh", functor::tanh, bfloat16)
/external/tensorflow/tensorflow/compiler/jit/
introduce_floating_point_jitter_pass_test.cc
46 Output tanh_a = ops::Tanh(root.WithOpName("tanh_a"), sigmoid_a); in TEST()
47 Output tanh_b = ops::Tanh(root.WithOpName("tanh_b"), sigmoid_b); in TEST()
62 auto m_tanh_a = NodeWith(Op("Tanh"), Inputs(Out(m_sigmoid_a_with_jitter))); in TEST()
67 auto m_tanh_b = NodeWith(Op("Tanh"), Inputs(Out(m_sigmoid_b_with_jitter))); in TEST()
125 Output tanh = ops::Tanh(root.WithOpName("tanh"), sigmoid); in TEST() local
139 auto m_tanh = NodeWith(Op("Tanh"), Inputs(Out(m_sigmoid_with_jitter))); in TEST()
141 Node* tanh_transformed = testing::FindNodeByName(graph.get(), "tanh"); in TEST()
155 Output tanh_s = ops::Tanh(root.WithOpName("tanh_s"), svd.s); in TEST()
156 Output tanh_u = ops::Tanh(root.WithOpName("tanh_u"), svd.u); in TEST()
157 Output tanh_v = ops::Tanh(root.WithOpName("tanh_v"), svd.v); in TEST()
[all …]
/external/executorch/backends/arm/operators/
op_tanh.py
31 target = "aten.tanh.default"
65 tosa_graph.addOperator(TosaOp.Op().TANH, [inputs[0].name], [output.name])
70 Returns a table mapping 256 entries to tanh([qmin,qmax])
74 def tanh(x): function
75 # Convert quantized input to floating point tanh input space.
77 # Compute tanh.
81 # Convert tanh output back to quantized space.
85 tanh(x)
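This generalizes the fixed-scale qnnpack table above: dequantize each of the 256 input codes, apply tanh, and requantize with the output parameters. A hedged sketch with illustrative names, not op_tanh.py's exact code:

```python
import math

def tanh_table(in_scale, in_zp, out_scale, out_zp, qmin=0, qmax=255):
    # Dequantize -> tanh -> requantize, clamped to the quantized range.
    return [
        min(qmax, max(qmin,
            round(math.tanh(in_scale * (q - in_zp)) / out_scale) + out_zp))
        for q in range(qmin, qmax + 1)
    ]
```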
/external/pytorch/test/cpp/tensorexpr/
test_graph_opt.cpp
85 %6 : Float(60, strides=[1], device=cpu) = aten::tanh(%5) in TEST_F()
93 // The `aten::log` and `aten::tanh` ops must be moved to the inputs of in TEST_F()
99 ->check("aten::tanh") in TEST_F()
100 ->check("aten::tanh") in TEST_F()
101 ->check("aten::tanh") in TEST_F()
104 ->check_not("aten::tanh") in TEST_F()
110 auto ref = at::tanh(at::log(at::cat({x, y, z}, 0))); in TEST_F()
132 %5 : Float(60, strides=[1], device=cpu) = aten::tanh(%cat) in TEST_F()
141 // The `aten::tanh` op must be moved to the inputs of `aten::cat`. in TEST_F()
145 .check("aten::tanh") in TEST_F()
[all …]
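The rewrite being checked is valid because tanh (like log) is elementwise, so it commutes with concatenation. A quick sketch of the invariant the FileCheck assertions rely on:

```python
import torch

x, y, z = torch.randn(20), torch.randn(20), torch.randn(20)
# tanh(log(cat(...))) == cat(tanh(log(...)) per input): moving the
# elementwise ops to the inputs of aten::cat changes nothing numerically.
fused = torch.tanh(torch.log(torch.cat([x, y, z], 0)))
moved = torch.cat([torch.tanh(torch.log(x)),
                   torch.tanh(torch.log(y)),
                   torch.tanh(torch.log(z))], 0)
assert torch.allclose(fused, moved, equal_nan=True)  # log(neg) yields NaN
```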
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
tanh.h
29 inline void Tanh(const RuntimeShape& input_shape, const float* input_data, in Tanh() function
35 float result = std::tanh(val); in Tanh()
42 inline void Tanh(const TanhParams&, const RuntimeShape& input_shape, in Tanh() function
46 Tanh(input_shape, input_data, output_shape, output_data); in Tanh()
49 inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape, in Tanh() function
61 // This is the return type of math functions such as tanh, logistic, in Tanh()
70 F0 output = gemmlowp::tanh(input); in Tanh()
77 F0 output = gemmlowp::tanh(input); in Tanh()
83 inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape, in Tanh() function
109 const FixedPoint0 output_val_f0 = gemmlowp::tanh(input_val_f4); in Tanh()
/external/tensorflow/tensorflow/compiler/jit/tests/
opens2s_gnmt_mixed_precision.golden_summary
209 Tanh 17
254 Tanh 2
295 Tanh 2
327 Tanh 2
341 Tanh 2
377 Tanh 2
391 Tanh 2
405 Tanh 2
419 Tanh 2
/external/pytorch/test/cpp/jit/
test_subgraph_utils.cpp
61 %q2 : Tensor = aten::tanh(%q1) in TEST()
62 %q3 : Tensor = aten::tanh(%q2) in TEST()
63 %q4 : Tensor = aten::tanh(%q3) in TEST()
77 if (next->kind() == aten::tanh) { in TEST()
103 ->check_count("aten::tanh", 3) in TEST()
126 %x : Tensor = aten::tanh(%a) in TEST()
130 %q2 : Tensor = aten::tanh(%q1) in TEST()
131 %q3 : Tensor = aten::tanh(%q2) in TEST()
132 %q4 : Tensor = aten::tanh(%q3) in TEST()
133 %q5 : Tensor = aten::tanh(%q4) in TEST()
/external/tensorflow/tensorflow/compiler/xla/tools/
hlo_extractor_test.cc
72 tanh = f32[4]{0} tanh(f32[4]{0} param.0) in TEST_F()
73 negate = f32[4]{0} negate(f32[4]{0} tanh) in TEST_F()
86 op::Exp(op::Negate(op::Tanh(op::Parameter(0))))); in TEST_F()
106 op::Add(op::Negate(op::Tanh(op::Parameter(0))), in TEST_F()
107 op::Exp(op::Negate(op::Tanh(op::Parameter(0)))))); in TEST_F()
117 tanh = f32[4]{0} tanh(p) in TEST_F()
119 ROOT add = f32[4]{0} add(tanh, c) in TEST_F()
136 op::Add(op::Tanh(op::Parameter(0)), op::Constant())); in TEST_F()
/external/tensorflow/tensorflow/compiler/xla/tests/
exhaustive_unary_test_complex.cc
41 // TODO(b/138126045): Current libc++ implementation of the complex tanh in SetParamsForTanh()
44 // TODO(b/138750327): Current libc++ implementation of the complex tanh in SetParamsForTanh()
121 // The current libc++ implementation of the complex tanh function provides
122 less accurate results when the denominator of a complex tanh is small, due
124 // we cast it to and from a complex128 when computing tanh.
125 UNARY_TEST_COMPLEX_64(Tanh, {
128 // This implementation of Tanh becomes less accurate when the denominator in __anonfde485890602()
137 Tanh,
139 return static_cast<complex64>(std::tanh(static_cast<complex128>(x))); in __anonfde485890702()
192 // Similar to the Tanh bug.
[all …]
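The quoted workaround computes complex64 tanh at double precision and rounds once at the end. A sketch of the same idea with NumPy, mirroring the static_cast pair quoted above:

```python
import numpy as np

def tanh_c64_via_c128(x):
    # Widen to complex128, evaluate tanh there (where the small-denominator
    # cancellation is far less damaging), then round once back to complex64.
    return np.complex64(np.tanh(np.complex128(x)))
```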
/external/webrtc/modules/audio_processing/ns/
speech_probability_estimator.cc
48 // Width for pause region: lower range, so increase width in tanh map. in Update()
51 // Average LRT feature: use larger width in tanh map for pause regions. in Update()
56 0.5f * (tanh(width_prior * (model.lrt - prior_model.lrt)) + 1.f); in Update()
58 // Spectral flatness feature: use larger width in tanh map for pause regions. in Update()
65 0.5f * (tanh(1.f * width_prior * in Update()
69 // For template spectrum-difference : use larger width in tanh map for pause in Update()
77 0.5f * (tanh(width_prior * (model.spectral_diff - in Update()
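Each feature is converted to a speech probability through the same affine tanh squash: 0.5 * (tanh(width * (feature - prior)) + 1) maps the real line onto (0, 1), with the width controlling how sharp the transition is. A one-function sketch (name illustrative):

```python
import math

def speech_probability(feature, prior, width_prior):
    # tanh maps to (-1, 1); scaling by 0.5 and shifting by 0.5 gives (0, 1).
    # Larger width_prior makes the pause/speech transition steeper.
    return 0.5 * (math.tanh(width_prior * (feature - prior)) + 1.0)
```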
/external/tensorflow/tensorflow/core/api_def/python_api/
api_def_Tanh.pbtxt
2 graph_op_name: "Tanh"
4 name: "math.tanh"
7 name: "nn.tanh"
10 name: "tanh"
/external/pytorch/benchmarks/static_runtime/
test_cpu_fusion.cc
15 return (a + b).relu().tanh() in TEST()
33 auto expect = at::tanh(at::relu(input1 + input2)); in TEST()
42 auto expect = at::tanh(at::relu(new_input1 + new_input2)); in TEST()
50 return (a + b).relu().tanh() in TEST()
71 auto expect = at::tanh(at::relu(input1 + input2)); in TEST()
80 auto expect = at::tanh(at::relu(input1 + input2)); in TEST()
89 return (a + b).relu().tanh() in TEST()
124 auto expect = at::tanh(at::relu(a + b)); in TEST()
