/packages/modules/NeuralNetworks/runtime/test/generated/spec_V1_2/

quantize.example.cpp
    lines 7, 54, 135, 182, 263, 310, 391, 438, 519, 566, [all …]:
        namespace generated_tests::quantize {   [namespace]

/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/

while_sum_of_powers_quant8.mod.py
     35: def quantize(data, scale, offset):   [function]
     88: zero = Parameter("zero", DataType10, quantize([0, 0], 1.0, 128))
    104: sum_init = Parameter("sum_init", DataType10, quantize([1, 1], 1.0, 128))
    109: x: quantize(x_data, 0.5, 128),
    111: sum: quantize(sum_data, 1.0, 128),

while_sum_of_powers_quant8_signed.mod.py
     35: def quantize(data, scale, offset):   [function]
     88: zero = Parameter("zero", DataType10, quantize([0, 0], 1.0, 12))
    104: sum_init = Parameter("sum_init", DataType10, quantize([1, 1], 1.0, 12))
    109: x: quantize(x_data, 0.5, 12),
    111: sum: quantize(sum_data, 1.0, 12),
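
The body of the quantize helper is not included in the match snippets; a minimal sketch of what such a spec-file helper typically computes (affine quantization, q = round(x / scale) + zeroPoint, clamped to the storage range) is shown below. The clamp bounds and rounding mode are assumptions; the real spec files may differ.

    def quantize(data, scale, offset, qmin=-128, qmax=127):
        """Affine-quantize a list of floats: q = round(x / scale) + offset,
        clamped to [qmin, qmax]. Use 0..255 for TENSOR_QUANT8_ASYMM and
        -128..127 for TENSOR_QUANT8_ASYMM_SIGNED."""
        return [min(qmax, max(qmin, int(round(x / scale)) + offset)) for x in data]

    # With scale 1.0 and zeroPoint 128, the quant8 "zero" parameter above becomes:
    # quantize([0, 0], 1.0, 128, qmin=0, qmax=255) -> [128, 128]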

tanh_quant8_signed.mod.py
     24: def quantize(x):   [function]
     32: output_values = [quantize(math.tanh(dequantize(x))) for x in input_values]
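
The tanh spec files build their expected outputs by dequantizing each quantized input, applying math.tanh in float, and requantizing the result. A sketch with assumed scale/zeroPoint values (the real files define their own; for a TENSOR_QUANT8_ASYMM_SIGNED output the NNAPI TANH spec uses scale 1/128 and zeroPoint 0):

    import math

    # Assumed quantization parameters, for illustration only.
    IN_SCALE, IN_ZERO = 1.0 / 128, 0
    OUT_SCALE, OUT_ZERO = 1.0 / 128, 0

    def dequantize(q):
        return (q - IN_ZERO) * IN_SCALE

    def quantize(x):
        return max(-128, min(127, int(round(x / OUT_SCALE)) + OUT_ZERO))

    input_values = list(range(-128, 128))
    output_values = [quantize(math.tanh(dequantize(q))) for q in input_values]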

sub_quant8_signed.mod.py
     23: def quantize(x, scale, offset):   [function]
     32: return quantize(a_dequantized - b_dequantized, output_scale, output_offset)
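
The SUB references are produced the same way: dequantize each operand with its own scale and offset, subtract in float, then requantize with the output's scale and offset, exactly as in the return statement matched above. A self-contained sketch (the surrounding plumbing and the example numbers are illustrative, not taken from the spec files):

    def dequantize(q, scale, offset):
        return (q - offset) * scale

    def quantize(x, scale, offset):
        # Clamp to the signed 8-bit storage range.
        return max(-128, min(127, int(round(x / scale)) + offset))

    def sub_reference(a, b, a_params, b_params, out_params):
        """a, b are quantized values; *_params are (scale, offset) pairs."""
        a_dequantized = dequantize(a, *a_params)
        b_dequantized = dequantize(b, *b_params)
        output_scale, output_offset = out_params
        return quantize(a_dequantized - b_dequantized, output_scale, output_offset)

    # Inputs with different scales, as exercised by sub_quantized_different_scales.mod.py:
    print(sub_reference(10, 3, (0.25, 0), (0.5, -1), (0.125, 0)))  # (2.5 - 2.0) / 0.125 = 4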

/packages/modules/NeuralNetworks/common/operations/

Quantize.cpp
     29: namespace quantize {   [namespace]
    127: NN_REGISTER_OPERATION(QUANTIZE, "QUANTIZE", quantize::validate, quantize::prepare,
    128:                       quantize::execute, .allowZeroSizedInput = true);
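
The registration above wires the QUANTIZE operation's validate/prepare/execute hooks into the operation resolver; the kernel body itself is not in the snippet. Semantically, QUANTIZE converts a float tensor into a quantized tensor using the output tensor's scale and zeroPoint. A reference sketch in Python (not the actual C++ kernel):

    def quantize_op(values, out_scale, out_zero_point, qmin=0, qmax=255):
        """Reference semantics of QUANTIZE for a TENSOR_QUANT8_ASYMM output;
        use qmin=-128, qmax=127 for TENSOR_QUANT8_ASYMM_SIGNED."""
        return [int(min(qmax, max(qmin, round(v / out_scale) + out_zero_point)))
                for v in values]

    print(quantize_op([-1.0, 0.0, 0.5, 10.0], out_scale=0.5, out_zero_point=127))
    # -> [125, 127, 128, 147]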

/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/

sub_quantized_different_scales.mod.py
     22: def quantize(x, scale, offset):   [function]
     31: return quantize(a_dequantized - b_dequantized, output_scale, output_offset)

tanh_v1_2.mod.py
     38: def quantize(x):   [function]
     46: output_values = [quantize(math.tanh(dequantize(x))) for x in input_values]

/packages/modules/NeuralNetworks/common/

OperationsUtils.cpp
     53: auto quantize = [scale, zero_point](float f) {   [in CalculateActivationRangeImpl(), local]
     58: *act_min = std::max(qmin, quantize(0.0));   [in CalculateActivationRangeImpl()]
     61: *act_min = std::max(qmin, quantize(0.0));   [in CalculateActivationRangeImpl()]
     62: *act_max = std::min(qmax, quantize(6.0));   [in CalculateActivationRangeImpl()]
     64: *act_min = std::max(qmin, quantize(-1.0));   [in CalculateActivationRangeImpl()]
     65: *act_max = std::min(qmax, quantize(1.0));   [in CalculateActivationRangeImpl()]
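
These matches show how CalculateActivationRangeImpl turns a fused activation (RELU, RELU6, RELU1) into clamp bounds in the quantized domain: the float breakpoints 0, 6 and ±1 are quantized with the output's scale and zero point, then intersected with the storage range [qmin, qmax]. A Python sketch of that logic (the real function is C++; the NONE branch here simply returns the full storage range):

    def calculate_activation_range(activation, scale, zero_point, qmin=0, qmax=255):
        """Map a fused activation to (act_min, act_max) in the quantized domain.
        activation is one of 'NONE', 'RELU', 'RELU6', 'RELU1'."""
        quantize = lambda f: int(round(f / scale)) + zero_point
        if activation == 'RELU':
            return max(qmin, quantize(0.0)), qmax
        if activation == 'RELU6':
            return max(qmin, quantize(0.0)), min(qmax, quantize(6.0))
        if activation == 'RELU1':
            return max(qmin, quantize(-1.0)), min(qmax, quantize(1.0))
        return qmin, qmax  # NONE: full storage range

    # RELU6 on a uint8 tensor with scale 0.05, zeroPoint 10 clamps to (10, 130).
    print(calculate_activation_range('RELU6', 0.05, 10))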

/packages/inputmethods/LatinIME/dictionaries/

en_US_wordlist.combined.gz

en_wordlist.combined.gz
      1: dictionary=main:en,locale=en,description=English,date=1414726273, ...