
Searched refs:quant (Results 1 – 25 of 141) sorted by relevance


/external/tensorflow/tensorflow/compiler/mlir/lite/tests/
quantize.mlir
7 …l.quantize"(%0) {qtype = tensor<!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>} : (tensor<2x2x…
8 …%2 = "tfl.dequantize"(%1) : (tensor<!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>) -> tensor<…
11 // CHECK: %[[cst:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<!quant.uniform<u8:f32, 7.84313725490…
19 …tize"(%0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>} : (tensor<2x2xf3…
20 …%2 = "tfl.dequantize"(%1) : (tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>) -> ten…
23 // CHECK: %[[cst:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372…
31 …tize"(%0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>} : (tensor<2x2xf3…
32 …%2 = "tfl.dequantize"(%1) : (tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>) -> ten…
35 // CHECK: %[[cst:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372…
41 func @DequantizeAndQuantize() -> tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>> {
[all …]
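
(Context: the !quant.uniform<u8:f32, scale:zero_point> types in these TFLite tests describe an affine mapping between stored u8 values and expressed f32 values. Below is a minimal C sketch of that mapping, for orientation only; the helpers quantize_u8/dequantize_u8 are illustrative names and do not appear in the files above.)

    #include <math.h>
    #include <stdint.h>

    /* Affine (uniform) quantization as encoded by !quant.uniform<u8:f32, scale:zero_point>:
     *   q  = clamp(round(x / scale) + zero_point, 0, 255)
     *   x' = scale * (q - zero_point)
     */
    static uint8_t quantize_u8(float x, float scale, int zero_point) {
      long q = lrintf(x / scale) + zero_point;
      if (q < 0) q = 0;
      if (q > 255) q = 255;
      return (uint8_t)q;
    }

    static float dequantize_u8(uint8_t q, float scale, int zero_point) {
      return scale * (float)((int)q - zero_point);
    }

    /* With scale = 7.8431372549019615e-4 and zero_point = 128 (the parameters in
     * quantize.mlir above), the u8 range [0, 255] covers roughly [-0.100, 0.0996] in f32. */
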
prepare-quantize.mlir
15 func @DequantizeAndQuantize() -> tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>> {
16 …qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>, value = dense<-1> : tensor<…
17 …%0 = "tfl.dequantize"(%cst) : (tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>) -> t…
18 …tize"(%0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>} : (tensor<2x2xf3…
19 return %1 : tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>
29 %0 = "quant.stats"(%arg0) {
32 %1 = "quant.stats"(%0) {
42 // CHECK: %[[q1:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<8x4x3x!quant.uniform<u8:f32, 0.0078431…
44 // CHECK: %[[q2:.*]] = "tfl.quantize"(%[[dq1]]) {qtype = tensor<8x4x3x!quant.uniform<u8:f32:2, {0.0…
50 func @QuantizeConv2DPerChannel(%arg0: tensor<1x224x224x3x!quant.uniform<u8:f32, 1.5>>,
[all …]
post-quantize.mlir
5 … = "tfl.quantize"(%arg0) {qtype = tensor<4x!quant.uniform<u8:f32, 1.0>>} : (tensor<4xf32>) -> tens…
6 …%1:4 = "tfl.split"(%arg1, %0) {num_splits = 4 : i32} : (tensor<i32>, tensor<4x!quant.uniform<u8:f3…
7 …-> (tensor<2x!quant.uniform<u8:f32, 1.0>>, tensor<2x!quant.uniform<u8:f32, 1.0>>,tensor<2x!quant.u…
8 %2 = "tfl.dequantize"(%1#0) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
9 %3 = "tfl.dequantize"(%1#1) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
12 %4 = "tfl.dequantize"(%1#2) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
13 …%5 = "tfl.quantize"(%4) {qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>} : (tensor<2xf32>) -> (tens…
14 %6 = tfl.add %5, %5 {fused_activation_function = "NONE"} : tensor<2x!quant.uniform<u8:f32, 1.0>>
24 …type = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x224x224x3xf32>) -…
25 …r<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<3…
[all …]
default_quant_params.mlir
1 // RUN: tf-opt %s --tfl-default-quant --tfl-quantize | FileCheck %s
8 // CHECK: %[[q0:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<2x1x!quant.uniform<u8:f32, 0.007843137…
9 // CHECK: %[[q1:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 0.007843137…
11 …dd"(%[[q1]], %[[q0]]) {fused_activation_function = "NONE"} : (tensor<2x2x!quant.uniform<u8:f32, 0.…
12 // CHECK: %[[dq:.*]] = "tfl.dequantize"(%[[add]]) : (tensor<2x2x!quant.uniform<u8:f32, 0.0078431372…
18 …l.quantize"(%arg0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.0:128>>}: (tensor<2x2xf32>) -> ten…
19 %1 = "tfl.dequantize"(%0) : (tensor<2x2x!quant.uniform<u8:f32, 1.0:128>>) -> tensor<2x2xf32>
23 // CHECK: %[[q0:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<2x1x!quant.uniform<u8:f32, 0.007843137…
24 // CHECK: %[[q1:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e+0…
25 …dd"(%[[q1]], %[[q0]]) {fused_activation_function = "NONE"} : (tensor<2x2x!quant.uniform<u8:f32, 1.…
[all …]
load-quantization-recipe.mlir
20 // CHECK-SAME: -> tensor<*x!quant.any<i16:f32>>
22 // CHECK-SAME: -> tensor<*x!quant.any<i16:f32>>
24 // CHECK-SAME: -> tensor<*x!quant.any<i16:f32>>
26 // CHECK-SAME: -> tensor<*x!quant.any<i16:f32>>
28 // CHECK-SAME: -> tensor<*x!quant.any<i16:f32>>
30 // CHECK-SAME: tensor<*x!quant.any<i16:f32>>
32 // CHECK-SAME: -> tensor<*x!quant.any<i16:f32>>
34 // CHECK-SAME: -> tensor<*x!quant.any<i16:f32>>
38 // CHECK-SAME: tensor<*x!quant.any<i16:f32>>
40 // CHECK-SAME: -> tensor<*x!quant.any<i16:f32>>
[all …]
prepare-quantize-signed.mlir
6 …l.quantize"(%arg0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.0:128>>} : (tensor<2x2xf32>) -> te…
7 %2 = "tfl.dequantize"(%1) : (tensor<2x2x!quant.uniform<u8:f32, 1.0:128>>) -> tensor<2x2xf32>
10 // CHECK-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<2x2x!quant.uniform<i8:f32, 1.00000…
17 …ntize"(%arg0) {qtype = tensor<2x2x!quant.uniform<u8:f32:1, {1.0:128, 1.0}>>} : (tensor<2x2xf32>) -…
18 …%2 = "tfl.dequantize"(%1) : (tensor<2x2x!quant.uniform<u8:f32:1, {1.0:128, 1.0}>>) -> tensor<2x2xf…
21 // CHECK-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<2x2x!quant.uniform<i8:f32:1, {1.00…
28 …antize"(%arg0) {qtype = tensor<2x2x!quant.uniform<u8<1:255>:f32, 1.0:255>>} : (tensor<2x2xf32>) ->…
29 …%2 = "tfl.dequantize"(%1) : (tensor<2x2x!quant.uniform<u8<1:255>:f32, 1.0:255>>) -> tensor<2x2xf32>
32 // CHECK-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<2x2x!quant.uniform<i8<-127:127>:f3…
39 %0 = "quant.stats"(%arg0) {
[all …]
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/
quantization.mlir
4 …type = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x224x224x3xf32>) -…
6 …<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678{{[0-9]*}}:151>>, value = dense<-76> : tensor<3…
7 …) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092{{[0-9]*}}E-4>>, value = dense<0> : tensor<3…
8 // CHECK: %{{.*}} = "tfl.dequantize"(%{{.*}}) : (tensor<1x1001x!quant.uniform<u8:f32, 3.906250e-0…
11 …type = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x224x224x3xf32>) -…
12 …r<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<3…
13 …() {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, value = dense<0> : tensor<3…
14 …quant.uniform<u8:f32, 7.812500e-03:128>>, tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.02182667…
15 …%3, %cst) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>, tensor<2xi32>) -> …
16 …a = 1.000000e+00 : f32} : (tensor<1x1001x!quant.uniform<u8:f32, 0.023528476789885875>>) -> tensor<…
[all …]
constants.mlir
71 func @qi32_per_axis() -> tensor<3x3x!quant.uniform<i32:f32:1, {1.0, 0.5:1, 0.25:1}>> {
73 …sor<3x3x!quant.uniform<i32:f32:1, {1.000000e+00,5.000000e-01:1,2.500000e-01:1}>>, value = dense<1>…
74 … qtype = tensor<3x3x!quant.uniform<i32:f32:1, {1.0, 0.5:1, 0.25:1}>>, value = dense<1> : tensor<3x…
75 return %0 : tensor<3x3x!quant.uniform<i32:f32:1, {1.0, 0.5:1, 0.25:1}>>
78 func @qi32_per_axis_zero() -> tensor<3x3x!quant.uniform<i32:f32:0, {1.0, 0.5:1, 0.25:1}>> {
80 …sor<3x3x!quant.uniform<i32:f32:0, {1.000000e+00,5.000000e-01:1,2.500000e-01:1}>>, value = dense<1>…
81 … qtype = tensor<3x3x!quant.uniform<i32:f32:0, {1.0, 0.5:1, 0.25:1}>>, value = dense<1> : tensor<3x…
82 return %0 : tensor<3x3x!quant.uniform<i32:f32:0, {1.0, 0.5:1, 0.25:1}>>
85 func @qu8() -> tensor<3x!quant.uniform<u8<1:255>:f32, 1.0>> {
87 …CHECK: {qtype = tensor<3x!quant.uniform<u8<1:255>:f32, 1.000000e+00>>, value = dense<1> : tensor<3…
[all …]
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/
quantization_utils.cc
35 namespace quant { namespace
46 quant::ExpressedToQuantizedConverter::forInputType(input_type); in GetQuantizedType()
48 quant::QuantizedType quantizedEleType; in GetQuantizedType()
50 quantizedEleType = quant::fakeQuantAttrsToType( in GetQuantizedType()
60 quantizedEleType = quant::fakeQuantAttrsToType( in GetQuantizedType()
72 auto ele_type = quant::QuantizedType::getQuantizedElementType(input); in RescaleQuantizedType()
74 if (auto qtype = ele_type.dyn_cast<quant::UniformQuantizedPerAxisType>()) { in RescaleQuantizedType()
86 auto new_ele_type = quant::UniformQuantizedPerAxisType::get( in RescaleQuantizedType()
91 quant::QuantizedType::castToExpressedType(input))) { in RescaleQuantizedType()
149 static quant::UniformQuantizedPerAxisType ResetAxisAndBroadcast( in ResetAxisAndBroadcast()
[all …]
/external/tensorflow/tensorflow/compiler/mlir/lite/transforms/
default_quant_params.cc
69 quant::QuantParams quant_params);
75 quant::QuantParams GetDefaultQuantParams(Builder builder);
79 quant::QuantParams GetQuantParamsForBias(Operation *op, int bias,
81 quant::AccumulatorScaleFunc func);
85 quant::QuantParams default_quant_params_;
108 op->hasTrait<OpTrait::quant::NoQuantizableResult>() || in runOnFunction()
109 llvm::isa<quant::QuantizeCastOp>(op) || in runOnFunction()
110 llvm::isa<quant::DequantizeCastOp>(op)) in runOnFunction()
123 quant::QuantParams default_params = GetDefaultQuantParams(builder); in runOnFunction()
134 quant::QuantParams bias_params = GetQuantParamsForBias( in runOnFunction()
[all …]
/external/libvpx/libvpx/vpx_dsp/x86/
quantize_avx.c
34 __m128i zbin, round, quant, dequant, shift; in vpx_quantize_b_avx() local
47 load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant, in vpx_quantize_b_avx()
73 quant = _mm_unpackhi_epi64(quant, quant); in vpx_quantize_b_avx()
77 calculate_qcoeff(&qcoeff0, round, quant, shift); in vpx_quantize_b_avx()
79 quant = _mm_unpackhi_epi64(quant, quant); in vpx_quantize_b_avx()
81 calculate_qcoeff(&qcoeff1, round, quant, shift); in vpx_quantize_b_avx()
124 calculate_qcoeff(&qcoeff0, round, quant, shift); in vpx_quantize_b_avx()
125 calculate_qcoeff(&qcoeff1, round, quant, shift); in vpx_quantize_b_avx()
160 __m128i zbin, round, quant, dequant, shift; in vpx_quantize_b_32x32_avx() local
186 quant = _mm_load_si128((const __m128i *)quant_ptr); in vpx_quantize_b_32x32_avx()
[all …]
quantize_ssse3.c
30 __m128i zbin, round, quant, dequant, shift; in vpx_quantize_b_ssse3() local
40 load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant, in vpx_quantize_b_ssse3()
54 calculate_qcoeff(&qcoeff0, round, quant, shift); in vpx_quantize_b_ssse3()
56 quant = _mm_unpackhi_epi64(quant, quant); in vpx_quantize_b_ssse3()
58 calculate_qcoeff(&qcoeff1, round, quant, shift); in vpx_quantize_b_ssse3()
88 calculate_qcoeff(&qcoeff0, round, quant, shift); in vpx_quantize_b_ssse3()
89 calculate_qcoeff(&qcoeff1, round, quant, shift); in vpx_quantize_b_ssse3()
125 __m128i zbin, round, quant, dequant, shift; in vpx_quantize_b_32x32_ssse3() local
151 quant = _mm_load_si128((const __m128i *)quant_ptr); in vpx_quantize_b_32x32_ssse3()
184 quant = _mm_unpackhi_epi64(quant, quant); in vpx_quantize_b_32x32_ssse3()
[all …]
quantize_sse2.c
30 __m128i zbin, round, quant, dequant, shift; in vpx_quantize_b_sse2() local
41 load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant, in vpx_quantize_b_sse2()
58 calculate_qcoeff(&qcoeff0, round, quant, shift); in vpx_quantize_b_sse2()
61 quant = _mm_unpackhi_epi64(quant, quant); in vpx_quantize_b_sse2()
64 calculate_qcoeff(&qcoeff1, round, quant, shift); in vpx_quantize_b_sse2()
96 calculate_qcoeff(&qcoeff0, round, quant, shift); in vpx_quantize_b_sse2()
97 calculate_qcoeff(&qcoeff1, round, quant, shift); in vpx_quantize_b_sse2()
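
(For orientation: the SSE2/SSSE3/AVX kernels in this directory vectorize one scalar block quantizer. The sketch below is a simplified approximation assuming the zbin/round/quant/shift parameter layout visible in the snippets above; it omits int16 clamping and eob tracking, is not the exact libvpx reference implementation, and quantize_b_scalar is an illustrative name.)

    #include <stdint.h>
    #include <stdlib.h>

    /* Simplified scalar equivalent of the SIMD block quantizers above.
     * Index 0 uses the DC parameters, all other indices use the AC parameters. */
    static void quantize_b_scalar(const int16_t *coeff, int n,
                                  const int16_t zbin[2], const int16_t round[2],
                                  const int16_t quant[2], const int16_t shift[2],
                                  const int16_t dequant[2],
                                  int16_t *qcoeff, int16_t *dqcoeff) {
      for (int i = 0; i < n; ++i) {
        const int ac = (i != 0);
        const int c = coeff[i];
        const int abs_c = abs(c);
        qcoeff[i] = 0;
        dqcoeff[i] = 0;
        if (abs_c >= zbin[ac]) {            /* skip coefficients inside the zero bin */
          const int rounded = abs_c + round[ac];
          /* two-stage fixed-point multiply, mirroring calculate_qcoeff() */
          const int tmp = (((int32_t)rounded * quant[ac]) >> 16) + rounded;
          const int q = ((int32_t)tmp * shift[ac]) >> 16;
          qcoeff[i] = (int16_t)((c < 0) ? -q : q);
          dqcoeff[i] = (int16_t)(qcoeff[i] * dequant[ac]);
        }
      }
    }
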
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/
tf_to_quant.mlir
1 // RUN: tf-opt -tf-to-quant %s | FileCheck %s
11 // CHECK: %[[q:.*]] = "quant.qcast"(%[[fq]]) : (tensor<8x3xf32>) -> tensor<8x3x!quant.uniform<i8:f…
12 // CHECK: %[[dq:.*]] = "quant.dcast"(%[[q]])
25 // CHECK: %1 = "quant.qcast"(%0) : (tensor<8xf32>) -> tensor<8x!quant.uniform<i8:f32, 1.000000e+00…
26 // CHECK: %2 = "quant.dcast"(%1)
31 func @fakeQuantForActivationNoDuplication(tensor<8xf32>) -> (tensor<8x!quant.uniform<i8:f32, 1.0000…
36 %1 = "quant.qcast"(%0) : (tensor<8xf32>) -> tensor<8x!quant.uniform<i8:f32, 1.000000e+00:-128>>
37 return %1 : tensor<8x!quant.uniform<i8:f32, 1.000000e+00:-128>>
40 // CHECK: %1 = "quant.qcast"(%0) : (tensor<8xf32>) -> tensor<8x!quant.uniform<i8:f32, 1.000000e+00…
55 // CHECK: %[[QUANTIZE:.*]] = "quant.qcast"(%[[CONSTANT]]) : (tensor<8xf32>) -> tensor<8x!quant.unif…
[all …]
/external/libaom/libaom/aom_dsp/x86/
highbd_adaptive_quantize_sse2.c
47 const __m128i *quant, in highbd_calculate_qcoeff() argument
52 highbd_mul_shift_sse2(&qcoeff, quant, &tmp, 16); in highbd_calculate_qcoeff()
108 __m128i zbin, round, quant, dequant, shift; in aom_highbd_quantize_b_adaptive_sse2() local
133 quant = _mm_load_si128((const __m128i *)quant_ptr); in aom_highbd_quantize_b_adaptive_sse2()
139 __m128i quant_sign = _mm_srai_epi16(quant, 15); in aom_highbd_quantize_b_adaptive_sse2()
145 quant = _mm_unpacklo_epi16(quant, quant_sign); in aom_highbd_quantize_b_adaptive_sse2()
176 quant = _mm_unpackhi_epi64(quant, quant); in aom_highbd_quantize_b_adaptive_sse2()
180 highbd_calculate_qcoeff(&qcoeff0, &round, &quant, &shift, &log_scale); in aom_highbd_quantize_b_adaptive_sse2()
183 quant = _mm_unpackhi_epi64(quant, quant); in aom_highbd_quantize_b_adaptive_sse2()
185 highbd_calculate_qcoeff(&qcoeff1, &round, &quant, &shift, &log_scale); in aom_highbd_quantize_b_adaptive_sse2()
[all …]
adaptive_quantize_sse2.c
31 __m128i zbin, round, quant, dequant, shift; in aom_quantize_b_adaptive_sse2() local
54 load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant, in aom_quantize_b_adaptive_sse2()
87 quant = _mm_unpackhi_epi64(quant, quant); in aom_quantize_b_adaptive_sse2()
91 calculate_qcoeff(&qcoeff0, round, quant, shift); in aom_quantize_b_adaptive_sse2()
94 quant = _mm_unpackhi_epi64(quant, quant); in aom_quantize_b_adaptive_sse2()
97 calculate_qcoeff(&qcoeff1, round, quant, shift); in aom_quantize_b_adaptive_sse2()
149 calculate_qcoeff(&qcoeff0, round, quant, shift); in aom_quantize_b_adaptive_sse2()
150 calculate_qcoeff(&qcoeff1, round, quant, shift); in aom_quantize_b_adaptive_sse2()
234 __m128i zbin, round, quant, dequant, shift; in aom_quantize_b_32x32_adaptive_sse2() local
261 quant = _mm_load_si128((const __m128i *)quant_ptr); in aom_quantize_b_32x32_adaptive_sse2()
[all …]
highbd_adaptive_quantize_avx2.c
23 __m256i *round, const int16_t *quant_ptr, __m256i *quant, in highbd_load_b_values_avx2() argument
29 *quant = _mm256_cvtepi16_epi32(_mm_load_si128((const __m128i *)quant_ptr)); in highbd_load_b_values_avx2()
77 const __m256i *quant, in highbd_calculate_qcoeff_avx2() argument
82 highbd_mul_shift_avx2(&qcoeff, quant, &tmp, 16); in highbd_calculate_qcoeff_avx2()
118 __m256i zbin, round, quant, dequant, shift; in aom_highbd_quantize_b_adaptive_avx2() local
141 &quant, dequant_ptr, &dequant, quant_shift_ptr, in aom_highbd_quantize_b_adaptive_avx2()
163 quant = _mm256_unpackhi_epi64(quant, quant); in aom_highbd_quantize_b_adaptive_avx2()
167 highbd_calculate_qcoeff_avx2(&qcoeff0, &round, &quant, &shift, &log_scale); in aom_highbd_quantize_b_adaptive_avx2()
169 quant = _mm256_unpackhi_epi64(quant, quant); in aom_highbd_quantize_b_adaptive_avx2()
171 highbd_calculate_qcoeff_avx2(&qcoeff1, &round, &quant, &shift, &log_scale); in aom_highbd_quantize_b_adaptive_avx2()
[all …]
quantize_ssse3.c
23 const __m128i quant, in calculate_qcoeff_64x64() argument
27 tmp = _mm_mulhi_epi16(qcoeff, quant); in calculate_qcoeff_64x64()
75 __m128i zbin, round, quant, dequant, shift; in aom_quantize_b_64x64_ssse3() local
86 quant = _mm_load_si128((const __m128i *)quant_ptr); in aom_quantize_b_64x64_ssse3()
117 quant = _mm_unpackhi_epi64(quant, quant); in aom_quantize_b_64x64_ssse3()
121 calculate_qcoeff_64x64(&qcoeff0, round, quant, &shift); in aom_quantize_b_64x64_ssse3()
123 quant = _mm_unpackhi_epi64(quant, quant); in aom_quantize_b_64x64_ssse3()
125 calculate_qcoeff_64x64(&qcoeff1, round, quant, &shift); in aom_quantize_b_64x64_ssse3()
169 calculate_qcoeff_64x64(&qcoeff0, round, quant, &shift); in aom_quantize_b_64x64_ssse3()
170 calculate_qcoeff_64x64(&qcoeff1, round, quant, &shift); in aom_quantize_b_64x64_ssse3()
adaptive_quantize_avx2.c
20 const int16_t *quant_ptr, __m256i *quant, in load_b_values_avx2() argument
30 *quant = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)quant_ptr)); in load_b_values_avx2()
31 *quant = _mm256_permute4x64_epi64(*quant, 0x54); in load_b_values_avx2()
74 const __m256i *quant, in calculate_qcoeff_avx2() argument
78 tmp = _mm256_mulhi_epi16(qcoeff, *quant); in calculate_qcoeff_avx2()
108 __m256i zbin, round, quant, dequant, shift; in aom_quantize_b_adaptive_avx2() local
129 load_b_values_avx2(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant, in aom_quantize_b_adaptive_avx2()
147 quant = _mm256_unpackhi_epi64(quant, quant); in aom_quantize_b_adaptive_avx2()
151 calculate_qcoeff_avx2(&qcoeff, &round, &quant, &shift); in aom_quantize_b_adaptive_avx2()
153 quant = _mm256_unpackhi_epi64(quant, quant); in aom_quantize_b_adaptive_avx2()
[all …]
quantize_sse2.c
31 __m128i zbin, round, quant, dequant, shift; in aom_quantize_b_sse2() local
40 load_b_values(zbin_ptr, &zbin, round_ptr, &round, quant_ptr, &quant, in aom_quantize_b_sse2()
57 calculate_qcoeff(&qcoeff0, round, quant, shift); in aom_quantize_b_sse2()
60 quant = _mm_unpackhi_epi64(quant, quant); in aom_quantize_b_sse2()
63 calculate_qcoeff(&qcoeff1, round, quant, shift); in aom_quantize_b_sse2()
99 calculate_qcoeff(&qcoeff0, round, quant, shift); in aom_quantize_b_sse2()
100 calculate_qcoeff(&qcoeff1, round, quant, shift); in aom_quantize_b_sse2()
/external/libvpx/libvpx/vp9/encoder/
vp9_quantize.c
180 x->skip_block, p->zbin, p->round, p->quant, in vp9_regular_quantize_b_4x4()
187 p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, in vp9_regular_quantize_b_4x4()
191 static void invert_quant(int16_t *quant, int16_t *shift, int d) { in invert_quant() argument
197 *quant = (int16_t)(m - (1 << 16)); in invert_quant()
202 const int quant = vp9_dc_quant(q, 0, bit_depth); in get_qzbin_factor() local
205 case VPX_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80); in get_qzbin_factor()
206 case VPX_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80); in get_qzbin_factor()
209 return q == 0 ? 64 : (quant < 2368 ? 84 : 80); in get_qzbin_factor()
213 return q == 0 ? 64 : (quant < 148 ? 84 : 80); in get_qzbin_factor()
220 int i, q, quant; in vp9_init_quantizer() local
[all …]
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/xla/tests/
weight-only.mlir
1 // RUN: tf-opt -xla-hlo-propagate-quant %s | FileCheck %s
6 // CHECK-NEXT: %[[q:.*]] = "quant.qcast"(%[[w]]) : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<i…
7 // CHECK-NEXT: %[[dq:.*]] = "quant.dcast"(%[[q]]) : (tensor<2x2x!quant.uniform<i8:f32, 0.0078431372…
18 // CHECK-NEXT: %[[q:.*]] = "quant.qcast"(%[[b]]) : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f3…
19 // CHECK-NEXT: %[[dq:.*]] = "quant.dcast"(%[[q]]) : (tensor<2x!quant.uniform<i8:f32, 0.003921568627…
/external/libvpx/libvpx/vpx_dsp/ppc/
quantize_vsx.c
40 int16x8_t round, int16x8_t quant, in quantize_coeff()
43 int16x8_t qcoeff = vec_mulhi(rounded, quant); in quantize_coeff()
52 int16x8_t round, int16x8_t quant, in quantize_coeff_32()
56 int16x8_t qcoeff = vec_mulhi(rounded, quant); in quantize_coeff_32()
110 int16x8_t quant = vec_vsx_ld(0, quant_ptr); in vpx_quantize_b_vsx() local
129 quantize_coeff(coeff0, coeff0_abs, round, quant, quant_shift, zero_mask0); in vpx_quantize_b_vsx()
132 quant = vec_splat(quant, 1); in vpx_quantize_b_vsx()
135 quantize_coeff(coeff1, coeff1_abs, round, quant, quant_shift, zero_mask1); in vpx_quantize_b_vsx()
164 qcoeff0 = quantize_coeff(coeff0, coeff0_abs, round, quant, quant_shift, in vpx_quantize_b_vsx()
166 qcoeff1 = quantize_coeff(coeff1, coeff1_abs, round, quant, quant_shift, in vpx_quantize_b_vsx()
[all …]
/external/icu/icu4c/source/i18n/
regexcst.txt
68 quoted n expr-quant doLiteralChar
69 rule_char n expr-quant doLiteralChar
72 '.' n expr-quant doDotAny
73 '^' n expr-quant doCaret
74 '$' n expr-quant doDollar
84 # expr-quant We've just finished scanning a term, now look for the optional
87 expr-quant:
88 '*' n quant-star
89 '+' n quant-plus
90 '?' n quant-opt
[all …]
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/
mul_v2.mlir
3 func @main(tensor<3x!quant.uniform<i8:f32, 0.1>>) -> tensor<3x!quant.uniform<i8:f32, 0.1>> {
4 ^bb0(%arg0: tensor<3x!quant.uniform<i8:f32, 0.1>>):
64 …udo_qconst"() { qtype = tensor<3x!quant.uniform<i8:f32, 0.1>>, value = dense<2> : tensor<3xi8>} : …
65 …_function = "NONE"} : (tensor<3x!quant.uniform<i8:f32, 0.1>>, tensor<3x!quant.uniform<i8:f32, 0.1>…
66 return %1 : tensor<3x!quant.uniform<i8:f32, 0.1>>
