/external/libtextclassifier/ |
quantization_test.cc |
    40   std::vector<float> scales{{0.1, 9.0, -7.0}};   in TEST() local
    51   DequantizeAdd(scales.data(), embeddings.data(), bytes_per_embedding,   in TEST()
    69   DequantizeAdd(scales.data(), embeddings.data(), bytes_per_embedding,   in TEST()
    92   std::vector<float> scales(num_buckets);   in TEST() local
    94   std::fill(scales.begin(), scales.end(), 1);   in TEST()
    98   DequantizeAdd(scales.data(), embeddings.data(), bytes_per_embedding,   in TEST()
   115   std::vector<float> scales(num_buckets, 1.0);   in TEST() local
   119   DequantizeAdd(scales.data(), embeddings.data(), bytes_per_embedding,   in TEST()
   135   std::vector<float> scales(num_buckets, 1.0);   in TEST() local
   136   scales[1] = 9.0;   in TEST()
   [all …]
|
quantization.cc |
    28   void DequantizeAdd8bit(const float* scales, const uint8* embeddings,   in DequantizeAdd8bit() argument
    32   const float multiplier = scales[bucket_id];   in DequantizeAdd8bit()
    40   void DequantizeAddNBit(const float* scales, const uint8* embeddings,   in DequantizeAddNBit() argument
    45   const float multiplier = scales[bucket_id];   in DequantizeAddNBit()
    73   bool DequantizeAdd(const float* scales, const uint8* embeddings,   in DequantizeAdd() argument
    78   DequantizeAdd8bit(scales, embeddings, bytes_per_embedding,   in DequantizeAdd()
    81   DequantizeAddNBit(scales, embeddings, bytes_per_embedding,   in DequantizeAdd()
|
model-executor.cc |
    71   const TfLiteTensor* scales = interpreter->tensor(1);   in Instance() local
    72   if (scales->dims->size != 2 || scales->dims->data[0] != num_buckets ||   in Instance()
    73   scales->dims->data[1] != 1) {   in Instance()
    85   embedding_size, scales, embeddings, std::move(interpreter)));   in Instance()
    91   const TfLiteTensor* scales, const TfLiteTensor* embeddings,   in TFLiteEmbeddingExecutor() argument
    98   scales_(scales),   in TFLiteEmbeddingExecutor()
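
The Instance() matches above validate that the model's scales tensor (tensor index 1) has shape [num_buckets, 1] before it is handed to the executor. A minimal sketch of that shape check, assuming the standard TfLiteTensor layout (dims->size / dims->data); the include path and helper name are illustrative, not from the indexed sources:

#include "tensorflow/lite/c/common.h"  // include path varies across TF Lite versions

// Returns true when a per-bucket scales tensor has shape [num_buckets, 1],
// mirroring the dims check visible in Instance() above.
bool ScalesTensorShapeOk(const TfLiteTensor* scales, int num_buckets) {
  return scales != nullptr &&
         scales->dims->size == 2 &&
         scales->dims->data[0] == num_buckets &&
         scales->dims->data[1] == 1;
}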
|
quantization.h |
    32   bool DequantizeAdd(const float* scales, const uint8* embeddings,
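
Taken together, the libtextclassifier matches above outline the dequantization path: DequantizeAdd dispatches to an 8-bit or N-bit variant, and each variant multiplies a bucket's quantized embedding by its per-bucket scale while accumulating into a float buffer. A standalone sketch of that per-bucket dequantize-and-accumulate idea (an illustration only; the full DequantizeAdd signature is truncated in the index, and the 128 centering is an assumption):

#include <cstdint>

// Accumulate dequantized 8-bit embeddings into `dest`, scaling each bucket's
// bytes by its entry in `scales`, in the spirit of DequantizeAdd8bit above.
void DequantizeAdd8bitSketch(const float* scales, const uint8_t* embeddings,
                             int bytes_per_embedding, int num_buckets,
                             float* dest) {
  for (int bucket = 0; bucket < num_buckets; ++bucket) {
    const float multiplier = scales[bucket];
    const uint8_t* row = embeddings + bucket * bytes_per_embedding;
    for (int i = 0; i < bytes_per_embedding; ++i) {
      dest[i] += multiplier * (static_cast<int>(row[i]) - 128);  // assumed centering
    }
  }
}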
|
/external/dng_sdk/source/ |
dng_misc_opcodes.cpp |
  1277   uint32 scales = SafeUint32DivideUp (fAreaSpec.Area ().H (),   in dng_opcode_ScalePerRow() local
  1280   if (scales != stream.Get_uint32 ())   in dng_opcode_ScalePerRow()
  1285   if (dataSize != dng_area_spec::kDataSize + 4 + scales * 4)   in dng_opcode_ScalePerRow()
  1290   fTable.Reset (host.Allocate (SafeUint32Mult (scales,   in dng_opcode_ScalePerRow()
  1295   for (uint32 j = 0; j < scales; j++)   in dng_opcode_ScalePerRow()
  1305   printf ("Count: %u\n", (unsigned) scales);   in dng_opcode_ScalePerRow()
  1307   for (uint32 k = 0; k < scales && k < gDumpLineLimit; k++)   in dng_opcode_ScalePerRow()
  1312   if (scales > gDumpLineLimit)   in dng_opcode_ScalePerRow()
  1314   printf (" ... %u scales skipped\n", (unsigned) (scales - gDumpLineLimit));   in dng_opcode_ScalePerRow()
  1328   uint32 scales = SafeUint32DivideUp (fAreaSpec.Area ().H (),   in PutData() local
  [all …]
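
The two SafeUint32DivideUp matches compute how many per-row scale entries the ScalePerRow opcode carries: the area height divided by the row pitch, rounded up, which is then checked against the count stored in the stream. A minimal sketch of that rounding-up division (names are illustrative, not the DNG SDK's; the SDK's helper additionally guards against overflow):

#include <cstdint>

// Ceiling division: number of per-row scale entries needed to cover `height`
// rows when one entry applies to `rowPitch` rows. Assumes rowPitch > 0.
uint32_t ScaleCountSketch(uint32_t height, uint32_t rowPitch) {
  return (height + rowPitch - 1) / rowPitch;
}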
|
/external/skia/tests/ |
MatrixTest.cpp |
   163   SkScalar scales[2];   in test_matrix_min_max_scale() local
   170   success = identity.getMinMaxScales(scales);   in test_matrix_min_max_scale()
   171   REPORTER_ASSERT(reporter, success && SK_Scalar1 == scales[0] && SK_Scalar1 == scales[1]);   in test_matrix_min_max_scale()
   177   success = scale.getMinMaxScales(scales);   in test_matrix_min_max_scale()
   178   … REPORTER_ASSERT(reporter, success && SK_Scalar1 * 2 == scales[0] && SK_Scalar1 * 4 == scales[1]);   in test_matrix_min_max_scale()
   185   success = rot90Scale.getMinMaxScales(scales);   in test_matrix_min_max_scale()
   186   … REPORTER_ASSERT(reporter, success && SK_Scalar1 / 4 == scales[0] && SK_Scalar1 / 2 == scales[1]);   in test_matrix_min_max_scale()
   192   success = rotate.getMinMaxScales(scales);   in test_matrix_min_max_scale()
   194   REPORTER_ASSERT(reporter, SkScalarNearlyEqual(SK_Scalar1, scales[0], SK_ScalarNearlyZero));   in test_matrix_min_max_scale()
   195   REPORTER_ASSERT(reporter, SkScalarNearlyEqual(SK_Scalar1, scales[1], SK_ScalarNearlyZero));   in test_matrix_min_max_scale()
   [all …]
|
/external/skqp/tests/ |
MatrixTest.cpp |
   163   SkScalar scales[2];   in test_matrix_min_max_scale() local
   170   success = identity.getMinMaxScales(scales);   in test_matrix_min_max_scale()
   171   REPORTER_ASSERT(reporter, success && SK_Scalar1 == scales[0] && SK_Scalar1 == scales[1]);   in test_matrix_min_max_scale()
   177   success = scale.getMinMaxScales(scales);   in test_matrix_min_max_scale()
   178   … REPORTER_ASSERT(reporter, success && SK_Scalar1 * 2 == scales[0] && SK_Scalar1 * 4 == scales[1]);   in test_matrix_min_max_scale()
   185   success = rot90Scale.getMinMaxScales(scales);   in test_matrix_min_max_scale()
   186   … REPORTER_ASSERT(reporter, success && SK_Scalar1 / 4 == scales[0] && SK_Scalar1 / 2 == scales[1]);   in test_matrix_min_max_scale()
   192   success = rotate.getMinMaxScales(scales);   in test_matrix_min_max_scale()
   194   REPORTER_ASSERT(reporter, SkScalarNearlyEqual(SK_Scalar1, scales[0], SK_ScalarNearlyZero));   in test_matrix_min_max_scale()
   195   REPORTER_ASSERT(reporter, SkScalarNearlyEqual(SK_Scalar1, scales[1], SK_ScalarNearlyZero));   in test_matrix_min_max_scale()
   [all …]
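
Both copies of the test exercise SkMatrix::getMinMaxScales(), which reports the smallest and largest factor by which a matrix stretches unit vectors and returns false for matrices it cannot decompose. A minimal usage sketch mirroring the (2, 4) scale case asserted above (include path varies by Skia checkout):

#include "SkMatrix.h"  // or "include/core/SkMatrix.h" in newer trees

// For a pure (2, 4) scale the minimum scale factor is 2 and the maximum is 4.
bool MinMaxScaleSketch() {
    SkMatrix m;
    m.setScale(2.0f, 4.0f);
    SkScalar scales[2];
    if (!m.getMinMaxScales(scales)) {
        return false;  // degenerate matrix: no meaningful scale factors
    }
    return scales[0] == 2 && scales[1] == 4;  // [min, max], as the test asserts
}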
|
/external/skqp/gm/ |
imagefiltersscaled.cpp |
   118   SkVector scales[] = {   in onDraw() local
   131   for (size_t j = 0; j < SK_ARRAY_COUNT(scales); ++j) {   in onDraw()
   139   canvas->scale(scales[j].fX, scales[j].fY);   in onDraw()
   149   canvas->translate(r.width() * scales[j].fX + margin, 0);   in onDraw()
   152   canvas->translate(0, r.height() * scales[j].fY + margin);   in onDraw()
|
blurrect.cpp |
   116   SkScalar scales[] = { SK_Scalar1, 0.6f };   in onDraw() local
   118   for (size_t s = 0; s < SK_ARRAY_COUNT(scales); ++s) {   in onDraw()
   133   canvas->scale(scales[s], scales[s]);   in onDraw()
   143   canvas->translate(0, SK_ARRAY_COUNT(procs) * r.height() * 4/3 * scales[s]);   in onDraw()
   146   canvas->translate(4 * r.width() * 4/3 * scales[s], 0);   in onDraw()
|
dftext.cpp |
    39   SkScalar scales[] = { 2.0f*5.0f, 5.0f, 2.0f, 1.0f };   in onDraw() local
    75   canvas->scale(scales[i], scales[i]);   in onDraw()
    78   y += paint.getFontMetrics(nullptr)*scales[i];   in onDraw()
   104   SkScalar scaleFactor = SkScalarInvert(scales[arraySize - i - 1]);   in onDraw()
|
reveal.cpp |
    54   SkScalar scales[2];   in asDevSpaceRRect() local
    55   if (!ctm.getMinMaxScales(scales)) {   in asDevSpaceRRect()
    59   SkASSERT(SkScalarNearlyEqual(scales[0], scales[1]));   in asDevSpaceRRect()
    64   SkScalar scaledRad = scales[0] * fRRect.getSimpleRadii().fX;   in asDevSpaceRRect()
|
convex_all_line_paths.cpp |
   283   const float scales[] = { 1.0f, 0.75f, 0.5f, 0.25f, 0.1f, 0.01f, 0.001f };   in drawPath() local
   291   for (size_t i = 0; i < SK_ARRAY_COUNT(scales); ++i) {   in drawPath()
   301   canvas->scale(scales[i], scales[i]);   in drawPath()
|
/external/skia/gm/ |
imagefiltersscaled.cpp |
   118   SkVector scales[] = {   in onDraw() local
   131   for (size_t j = 0; j < SK_ARRAY_COUNT(scales); ++j) {   in onDraw()
   139   canvas->scale(scales[j].fX, scales[j].fY);   in onDraw()
   149   canvas->translate(r.width() * scales[j].fX + margin, 0);   in onDraw()
   152   canvas->translate(0, r.height() * scales[j].fY + margin);   in onDraw()
|
blurrect.cpp |
   116   SkScalar scales[] = { SK_Scalar1, 0.6f };   in onDraw() local
   118   for (size_t s = 0; s < SK_ARRAY_COUNT(scales); ++s) {   in onDraw()
   133   canvas->scale(scales[s], scales[s]);   in onDraw()
   143   canvas->translate(0, SK_ARRAY_COUNT(procs) * r.height() * 4/3 * scales[s]);   in onDraw()
   146   canvas->translate(4 * r.width() * 4/3 * scales[s], 0);   in onDraw()
|
dftext.cpp |
    39   SkScalar scales[] = { 2.0f*5.0f, 5.0f, 2.0f, 1.0f };   in onDraw() local
    75   canvas->scale(scales[i], scales[i]);   in onDraw()
    78   y += paint.getFontMetrics(nullptr)*scales[i];   in onDraw()
   104   SkScalar scaleFactor = SkScalarInvert(scales[arraySize - i - 1]);   in onDraw()
|
reveal.cpp |
    54   SkScalar scales[2];   in asDevSpaceRRect() local
    55   if (!ctm.getMinMaxScales(scales)) {   in asDevSpaceRRect()
    59   SkASSERT(SkScalarNearlyEqual(scales[0], scales[1]));   in asDevSpaceRRect()
    64   SkScalar scaledRad = scales[0] * SkRRectPriv::GetSimpleRadii(fRRect).fX;   in asDevSpaceRRect()
|
convex_all_line_paths.cpp |
   283   const float scales[] = { 1.0f, 0.75f, 0.5f, 0.25f, 0.1f, 0.01f, 0.001f };   in drawPath() local
   291   for (size_t i = 0; i < SK_ARRAY_COUNT(scales); ++i) {   in drawPath()
   301   canvas->scale(scales[i], scales[i]);   in drawPath()
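
The GM matches above (in both the skia and skqp copies) share one pattern: walk a table of scale factors and redraw the content under canvas->scale(), translating between cells. A minimal sketch of that loop; the path, paint, and cell step are placeholders, not values from the GMs:

#include "SkCanvas.h"
#include "SkPaint.h"
#include "SkPath.h"

// Draw `path` once per scale factor, isolating each draw with save()/restore()
// and stepping right after each cell.
void DrawAtScalesSketch(SkCanvas* canvas, const SkPath& path,
                        const SkPaint& paint) {
    const float scales[] = { 1.0f, 0.5f, 0.25f };  // illustrative values
    for (size_t i = 0; i < sizeof(scales) / sizeof(scales[0]); ++i) {
        canvas->save();
        canvas->scale(scales[i], scales[i]);
        canvas->drawPath(path, paint);
        canvas->restore();
        canvas->translate(120.0f, 0.0f);  // placeholder cell width
    }
}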
|
/external/tensorflow/tensorflow/examples/android/jni/object_tracking/ |
frame_pair.cc |
   152   Point2f* const scales) const {   in FillScales()
   177   scales[i].x = dist2_x / dist1_x;   in FillScales()
   178   scales[i].y = dist2_y / dist1_y;   in FillScales()
   182   scales[i].x = 1.0f;   in FillScales()
   183   scales[i].y = 1.0f;   in FillScales()
|
frame_pair.h |
    72   Point2f* const scales) const;
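
FillScales estimates, for each pair of tracked keypoints, how much the pair's separation grew or shrank between frames: the x and y scales are the ratios of the new distance to the old one, with 1.0 as the fallback when the old distance is degenerate. A standalone sketch of that per-pair ratio (the epsilon guard and all names are assumptions for illustration):

#include <cmath>

struct Point2f { float x; float y; };

// Per-pair scale estimate: ratio of the keypoints' separation in frame 2 to
// their separation in frame 1, per axis, defaulting to 1.0 when frame 1's
// separation is (near) zero.
Point2f ScaleFromPairSketch(const Point2f& a1, const Point2f& b1,   // frame 1
                            const Point2f& a2, const Point2f& b2) { // frame 2
  const float dist1_x = std::fabs(b1.x - a1.x);
  const float dist1_y = std::fabs(b1.y - a1.y);
  const float dist2_x = std::fabs(b2.x - a2.x);
  const float dist2_y = std::fabs(b2.y - a2.y);
  Point2f s;
  s.x = dist1_x > 1e-6f ? dist2_x / dist1_x : 1.0f;
  s.y = dist1_y > 1e-6f ? dist2_y / dist1_y : 1.0f;
  return s;
}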
|
/external/mesa3d/src/gallium/auxiliary/gallivm/ |
lp_bld_format_aos.c |
   229   LLVMValueRef scales[4];   in lp_build_unpack_arith_rgba_aos() local
   271   scales[i] = LLVMConstNull(LLVMFloatTypeInContext(gallivm->context));   in lp_build_unpack_arith_rgba_aos()
   286   scales[i] = lp_build_const_float(gallivm, 1.0 / mask);   in lp_build_unpack_arith_rgba_aos()
   290   scales[i] = lp_build_const_float(gallivm, 1.0);   in lp_build_unpack_arith_rgba_aos()
   322   scales[i] = lp_build_const_float(gallivm, 1.0 / mask);   in lp_build_unpack_arith_rgba_aos()
   347   scaled = LLVMBuildFMul(builder, casted, LLVMConstVector(scales, 4), "");   in lp_build_unpack_arith_rgba_aos()
   374   LLVMValueRef scales[4];   in lp_build_pack_rgba_aos() local
   407   scales[i] = LLVMGetUndef(LLVMFloatTypeInContext(gallivm->context));   in lp_build_pack_rgba_aos()
   418   scales[i] = lp_build_const_float(gallivm, mask);   in lp_build_pack_rgba_aos()
   422   scales[i] = lp_build_const_float(gallivm, 1.0);   in lp_build_pack_rgba_aos()
   [all …]
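
In the unpack path, gallivm fills scales[] with per-channel constants (1/mask for normalized channels, 1.0 otherwise) and multiplies the float-cast texel vector by LLVMConstVector(scales, 4); the pack path does the inverse with mask itself. A standalone sketch of that multiply using only the public LLVM-C API (lp_build_const_float and the gallivm context are Mesa internals, so LLVMConstReal stands in; the 255 masks are illustrative):

#include <llvm-c/Core.h>

// Multiply a <4 x float> value `casted` by per-channel normalization scales,
// e.g. 1/255 for 8-bit UNORM channels.
static LLVMValueRef ScaleRgbaSketch(LLVMContextRef ctx, LLVMBuilderRef builder,
                                    LLVMValueRef casted) {
  LLVMTypeRef f32 = LLVMFloatTypeInContext(ctx);
  LLVMValueRef scales[4];
  const unsigned masks[4] = { 255, 255, 255, 255 };  // illustrative channel masks
  for (unsigned i = 0; i < 4; ++i) {
    scales[i] = LLVMConstReal(f32, 1.0 / masks[i]);
  }
  return LLVMBuildFMul(builder, casted, LLVMConstVector(scales, 4), "scaled");
}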
|
/external/skqp/src/gpu/effects/ |
GrBicubicEffect.cpp |
   186   SkScalar scales[2];   in ShouldUseBicubic() local
   187   if (!matrix.getMinMaxScales(scales) || scales[0] < SK_Scalar1) {   in ShouldUseBicubic()
   194   if (scales[1] == SK_Scalar1) {   in ShouldUseBicubic()
|
/external/skia/src/gpu/effects/ |
GrBicubicEffect.cpp |
   186   SkScalar scales[2];   in ShouldUseBicubic() local
   187   if (!matrix.getMinMaxScales(scales) || scales[0] < SK_Scalar1) {   in ShouldUseBicubic()
   194   if (scales[1] == SK_Scalar1) {   in ShouldUseBicubic()
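
Both copies of ShouldUseBicubic gate bicubic filtering on the same min/max scales: if the matrix cannot be analysed or it downscales (min scale below 1), bicubic is skipped, and a max scale of exactly 1 means there is no magnification to improve. A hedged sketch of just those visible checks (the real function also selects a fallback filter mode, which the index truncates):

#include "SkMatrix.h"

// Sketch of the visible decision only: prefer cheaper filtering unless the
// matrix strictly magnifies.
bool ShouldUseBicubicSketch(const SkMatrix& matrix) {
    SkScalar scales[2];
    if (!matrix.getMinMaxScales(scales) || scales[0] < SK_Scalar1) {
        return false;  // downscaling (or unanalysable): mipmaps/bilerp instead
    }
    if (scales[1] == SK_Scalar1) {
        return false;  // no magnification anywhere: bilerp suffices
    }
    return true;       // assumption: strictly magnifying => bicubic pays off
}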
|
/external/python/cpython3/Lib/ |
timeit.py |
   341   scales = [(scale, unit) for unit, scale in units.items()]
   342   scales.sort(reverse=True)
   343   for scale, time_unit in scales:
|
/external/llvm/docs/ |
BlockFrequencyTerminology.rst |
   104   multiplying these masses and loop scales together. A given block's frequency
   106   containing loops' loop scales.
   125   * loop scales are ignored.
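
As a worked reading of lines 104-106 (with numbers invented purely for illustration): a block's frequency is its mass multiplied by the loop scale of every loop containing it, so a block of mass 0.25 inside an inner loop of scale 4 nested in an outer loop of scale 2 has

   frequency = mass × (product of containing loop scales) = 0.25 × 4 × 2 = 2.0

and when loop scales are ignored (line 125), only the mass, 0.25, remains.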
|
/external/gemmlowp/doc/ |
packing.md |
    49   "seeking" but only at larger scales, where the storage order is less \
    96   ### Random access to PackedSideBlock data at larger scales
    98   We still need some random access at larger scales (with high granularity), which
   107   scales i.e. between runs, we accept that the storage order is less optimal and
   134   is templatized in the KernelSideFormat. At larger scales, the packing is
|