/packages/modules/NeuralNetworks/common/operations/

D | Reduce.cpp
     52  inline bool compute(IOperationExecutionContext* context, T init, T func(T, T)) {  in compute() function
    171  return compute<_Float16>(context, 1, [](_Float16 a, _Float16 b) -> _Float16 {  in executeProd()
    177  return compute<float>(context, 1, [](float a, float b) -> float {  in executeProd()
    190  return compute<_Float16>(context, 0, [](_Float16 a, _Float16 b) { return a + b; });  in executeSum()
    192  return compute<float>(context, 0, [](float a, float b) { return a + b; });  in executeSum()
    201  return compute<_Float16>(context, kFloat16Lowest,  in executeMax()
    204  return compute<float>(context, std::numeric_limits<float>::lowest(),  in executeMax()
    207  return compute<uint8_t>(context, std::numeric_limits<uint8_t>::lowest(),  in executeMax()
    210  return compute<int8_t>(context, std::numeric_limits<int8_t>::lowest(),  in executeMax()
    220  return compute<_Float16>(context, kFloat16Max,  in executeMin()
    [all …]

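The Reduce.cpp matches show the pattern the NNAPI reduction ops share: one templated compute() that folds every element with a binary functor starting from an identity value, with executeProd/Sum/Max/Min only supplying the functor and the identity per element type. Below is a minimal standalone sketch of that fold, assuming a flat tensor; the real helper also reduces along selected axes, and the names here (reduceAll) are illustrative, not the NNAPI symbols.

    #include <limits>
    #include <vector>

    // Minimal sketch: fold every element of a flat tensor with a binary op,
    // starting from an identity value (the real helper also reduces over axes).
    template <typename T>
    T reduceAll(const std::vector<T>& data, T init, T func(T, T)) {
        T acc = init;
        for (const T& v : data) acc = func(acc, v);
        return acc;
    }

    int main() {
        std::vector<float> t = {1.0f, 2.0f, 3.0f};
        float prod = reduceAll<float>(t, 1.0f, [](float a, float b) { return a * b; });
        float sum = reduceAll<float>(t, 0.0f, [](float a, float b) { return a + b; });
        float max = reduceAll<float>(t, std::numeric_limits<float>::lowest(),
                                     [](float a, float b) { return a > b ? a : b; });
        return (prod == 6.0f && sum == 6.0f && max == 3.0f) ? 0 : 1;
    }
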
D | Dequantize.cpp
     36  bool compute(const InputType* inputData, const Shape& inputShape, OutputType* outputData) {  in compute() function
    125  return compute(inputBuffer, inputShape,  in execute()
    128  return compute(inputBuffer, inputShape, context->getOutputBuffer<float>(kOutputTensor));  in execute()
    133  return compute(inputBuffer, inputShape,  in execute()
    136  return compute(inputBuffer, inputShape, context->getOutputBuffer<float>(kOutputTensor));  in execute()
    141  return compute(inputBuffer, inputShape,  in execute()
    144  return compute(inputBuffer, inputShape, context->getOutputBuffer<float>(kOutputTensor));  in execute()

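Dequantize.cpp's templated compute() converts quantized integer tensors to floating point using the standard affine formula value = scale * (q - zeroPoint). A hedged sketch of that conversion follows; the signature is simplified (the real operation takes a Shape and also handles per-channel quantization), and the function name is an assumption.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch of affine dequantization: out[i] = scale * (in[i] - zeroPoint).
    template <typename InputType, typename OutputType>
    void dequantize(const std::vector<InputType>& input, float scale, int32_t zeroPoint,
                    std::vector<OutputType>& output) {
        output.resize(input.size());
        for (std::size_t i = 0; i < input.size(); ++i) {
            output[i] = static_cast<OutputType>(scale * (static_cast<int32_t>(input[i]) - zeroPoint));
        }
    }

    int main() {
        std::vector<uint8_t> q = {128, 129, 127};
        std::vector<float> f;
        dequantize(q, /*scale=*/0.5f, /*zeroPoint=*/128, f);  // -> {0.0, 0.5, -0.5}
        return 0;
    }
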
D | Elementwise.cpp
     38  inline bool compute(IntermediateType func(IntermediateType), const T* input, const Shape& shape,  in compute() function
     50  return compute<float, _Float16>(func, context->getInputBuffer<_Float16>(kInputTensor),  in execute()
     54  return compute<float, float>(func, context->getInputBuffer<float>(kInputTensor),  in execute()
     67  return compute<float, _Float16>(std::abs,  in executeAbs()
     72  return compute<float, float>(std::abs, context->getInputBuffer<float>(kInputTensor),  in executeAbs()
     76  return compute<int32_t, int32_t>(std::abs,  in executeAbs()

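Elementwise.cpp routes unary math functions through a compute() template parameterized on both the stored tensor type and an intermediate computation type, so _Float16 tensors are evaluated in float. A minimal sketch of that idea, with assumed names and a raw count instead of a Shape:

    #include <cmath>
    #include <cstddef>

    // Sketch: apply a unary function elementwise, computing in IntermediateType
    // (e.g. float) even when the stored type T is narrower (e.g. a half-float type).
    template <typename IntermediateType, typename T>
    void elementwise(IntermediateType func(IntermediateType), const T* input, std::size_t count,
                     T* output) {
        for (std::size_t i = 0; i < count; ++i) {
            output[i] = static_cast<T>(func(static_cast<IntermediateType>(input[i])));
        }
    }

    int main() {
        const float in[3] = {1.0f, 4.0f, 9.0f};
        float out[3];
        elementwise<float, float>([](float x) { return std::sqrt(x); }, in, 3, out);  // -> {1, 2, 3}
        return 0;
    }
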
D | Comparisons.cpp
     40  bool compute(const std::function<bool(ComparisonType, ComparisonType)>& func, const DataType* aData,  in compute() function
     72  return compute<DataType, ComparisonType>(  in executeLessTyped()
     81  return compute<DataType, ComparisonType>(  in executeLessEqualTyped()
     90  return compute<DataType, ComparisonType>(  in executeEqualTyped()
     99  return compute<DataType, ComparisonType>(  in executeNotEqualTyped()
    108  return compute<DataType, ComparisonType>(  in executeGreaterEqualTyped()
    117  return compute<DataType, ComparisonType>(  in executeGreaterTyped()

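Comparisons.cpp implements all six comparison ops with a single compute() that takes a std::function predicate and writes bool8 outputs; each executeXxxTyped wrapper just supplies the predicate. A simplified sketch of that dispatch, without NNAPI's broadcasting and with assumed names:

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    using bool8 = uint8_t;  // TENSOR_BOOL8 stores one byte per element

    // Sketch: elementwise comparison of two same-shaped tensors (no broadcasting).
    template <typename DataType, typename ComparisonType>
    void compare(const std::function<bool(ComparisonType, ComparisonType)>& func,
                 const std::vector<DataType>& a, const std::vector<DataType>& b,
                 std::vector<bool8>& out) {
        out.resize(a.size());
        for (std::size_t i = 0; i < a.size(); ++i) {
            out[i] = func(static_cast<ComparisonType>(a[i]), static_cast<ComparisonType>(b[i]));
        }
    }

    int main() {
        std::vector<float> a = {1.0f, 2.0f}, b = {2.0f, 2.0f};
        std::vector<bool8> lessEqual;
        compare<float, float>([](float x, float y) { return x <= y; }, a, b, lessEqual);  // -> {1, 1}
        return 0;
    }
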
D | Neg.cpp
     40  inline bool compute(const T* input, const Shape& shape, T* output) {  in compute() function
     72  return compute(context->getInputBuffer<_Float16>(kInputTensor),  in execute()
     76  return compute(context->getInputBuffer<float>(kInputTensor),  in execute()
     80  return compute(context->getInputBuffer<int32_t>(kInputTensor),  in execute()

D | LogicalAndOr.cpp
     39  bool compute(const std::function<bool(bool, bool)>& func, const bool8* aData, const Shape& aShape,  in compute() function
     83  return compute(  in executeAnd()
     91  return compute(  in executeOr()

D | LogSoftmax.cpp
     42  inline bool compute(const T* input, const Shape& shape, T beta, uint32_t axis, T* output) {  in compute() function
    102  return compute(context->getInputBuffer<_Float16>(kInputTensor),  in execute()
    107  return compute(context->getInputBuffer<float>(kInputTensor),  in execute()

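LogSoftmax.cpp's compute() evaluates log softmax with a beta scale along a chosen axis: out_i = beta*x_i - log(sum_j exp(beta*x_j)), usually with the maximum subtracted first for numerical stability. The sketch below covers a single 1-D slice (the real helper loops over the outer and inner dimensions around the axis); the function name is an assumption.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Sketch: numerically stable log softmax over one 1-D slice.
    // out[i] = beta*(x[i] - maxVal) - log(sum_j exp(beta*(x[j] - maxVal)))
    std::vector<float> logSoftmax(const std::vector<float>& x, float beta) {
        const float maxVal = *std::max_element(x.begin(), x.end());
        float sum = 0.0f;
        for (float v : x) sum += std::exp(beta * (v - maxVal));
        const float logSum = std::log(sum);
        std::vector<float> out(x.size());
        for (std::size_t i = 0; i < x.size(); ++i) out[i] = beta * (x[i] - maxVal) - logSum;
        return out;
    }

    int main() {
        auto out = logSoftmax({1.0f, 2.0f, 3.0f}, /*beta=*/1.0f);  // exp(out) sums to ~1
        return out.size() == 3 ? 0 : 1;
    }
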
D | LogicalNot.cpp
     34  bool compute(const bool8* input, const Shape& shape, bool8* output) {  in compute() function
     63  return compute(context->getInputBuffer<bool8>(kInputTensor),  in execute()

D | Select.cpp
     38  bool compute(const bool8* conditionData, const Shape& conditionShape, const T* aData,  in compute() function
     59  return compute<T>(  in executeTyped()

D | StridedSlice.cpp
     53  bool compute(const T* inputData, const Shape& inputShape, const int32_t* beginData,  in compute() function
     91  return compute<T>(  in executeTyped()

/packages/apps/Camera2/src/com/android/camera/one/v2/sharedimagereader/ringbuffer/

D | AvailableTicketCounter.java
     62  private int compute() {  in compute() method in AvailableTicketCounter
     75  value = compute();  in get()
     86  int value = compute();  in freeze()
    102  int newValue = compute();  in unfreeze()

/packages/modules/NeuralNetworks/runtime/test/

D | TestExecution.cpp
    739  void computeHelper(bool reusable, const std::function<void()>& compute) {  in computeHelper() argument
    742  compute();  in computeHelper()
    746  compute();  in computeHelper()
    765  const auto compute = [this, &execution] {  in TestWait() local
    786  computeHelper(reusable, compute);  in TestWait()
    793  const auto compute = [this, &execution] {  in TestWait() local
    795  std::thread run([this, &execution] { EXPECT_EQ(execution.compute(), kExpectResult); });  in TestWait()
    813  computeHelper(reusable, compute);  in TestWait()
    825  const auto compute = [this, &execution] {  in TestWait() local
    828  EXPECT_EQ(execution.compute(WrapperExecution::ComputeMode::BURST), kExpectResult);  in TestWait()
    [all …]

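TestExecution.cpp wraps the body of each wait test in a computeHelper() that invokes the supplied compute lambda once, and a second time when the execution is marked reusable, so reusable executions are also exercised on the repeat path. A hedged sketch of that structure (the exact assertions in the real test differ):

    #include <functional>

    // Sketch: run the test's compute step once, and once more when the
    // execution under test is reusable, mirroring the repeat-execution path.
    void computeHelper(bool reusable, const std::function<void()>& compute) {
        compute();
        if (reusable) {
            compute();  // a reusable execution must also succeed on a second run
        }
    }

    int main() {
        int runs = 0;
        computeHelper(/*reusable=*/true, [&runs] { ++runs; });
        return runs == 2 ? 0 : 1;
    }
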
D | TestTrivialModel.cpp
    125  ASSERT_EQ(execution.compute(), Result::NO_ERROR);  in TEST_F()
    186  ASSERT_EQ(execution.compute(), Result::NO_ERROR);  in testAddTwoWithHardwareBufferInput()
    214  ASSERT_EQ(execution2.compute(), Result::NO_ERROR);  in TEST_F()
    225  ASSERT_EQ(execution3.compute(), Result::NO_ERROR);  in TEST_F()
    297  ASSERT_EQ(execution.compute(), Result::NO_ERROR);  in TEST_F()
    326  ASSERT_EQ(execution.compute(), Result::NO_ERROR);  in TEST_F()

D | TestMemory.cpp
     87  ASSERT_EQ(execution2.compute(), WrapperResult::NO_ERROR);  in TEST_F()
    150  ASSERT_EQ(execution2.compute(), WrapperResult::NO_ERROR);  in TEST_F()

D | TestFailingDriver.cpp
    169  ASSERT_EQ(execution.compute(), Result::NO_ERROR);  in TEST_F()
    207  ASSERT_EQ(execution.compute(), Result::NO_ERROR);  in TEST_F()

D | TestMemoryInternal.cpp
    167  ASSERT_EQ(execution2.compute(), WrapperResult::NO_ERROR);  in TEST_F()
    235  WrapperResult r = execution.compute();  in TEST_F()

D | TestUnknownDimensions.cpp
    301  ASSERT_EQ(execution.compute(computeMode), Result::NO_ERROR);  in TestOne()
    305  ASSERT_NE(execution.compute(), Result::NO_ERROR);  in TestOne()

/packages/modules/NeuralNetworks/runtime/

D | ExecutionBuilder.h
     92  return compute(synchronizationCallback);  in computeAsynchronously()
     94  int computeSynchronously() { return compute(nullptr); }  in computeSynchronously()
     95  int burstCompute(BurstBuilder* burst) { return compute(nullptr, burst); }  in burstCompute()
    148  int compute(std::shared_ptr<ExecutionCallback>* synchronizationCallback,
    381  std::tuple<int, std::vector<OutputShape>, Timing> compute(

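ExecutionBuilder.h shows the three public entry points, computeAsynchronously(), computeSynchronously() and burstCompute(), all funneling into one private compute(callback, burst) overload that distinguishes the modes by which arguments are null. A small sketch of that funneling pattern; the stand-in types and class name below are illustrative, not the runtime's real declarations.

    #include <iostream>
    #include <memory>

    // Stand-in types for the sketch only.
    struct ExecutionCallback {};
    struct BurstBuilder {};

    class ExecutionBuilderSketch {
       public:
        // All public entry points delegate to the single compute() below.
        int computeAsynchronously(std::shared_ptr<ExecutionCallback>* cb) { return compute(cb); }
        int computeSynchronously() { return compute(nullptr); }
        int burstCompute(BurstBuilder* burst) { return compute(nullptr, burst); }

       private:
        int compute(std::shared_ptr<ExecutionCallback>* synchronizationCallback,
                    BurstBuilder* burst = nullptr) {
            if (burst != nullptr) {
                std::cout << "burst path\n";
            } else if (synchronizationCallback != nullptr) {
                std::cout << "asynchronous path\n";
            } else {
                std::cout << "synchronous path\n";
            }
            return 0;  // success code, standing in for ANEURALNETWORKS_NO_ERROR
        }
    };

    int main() {
        ExecutionBuilderSketch b;
        return b.computeSynchronously();
    }
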
D | Manager.cpp
    204  std::tuple<int, std::vector<OutputShape>, Timing> compute(
    668  std::tuple<int, std::vector<OutputShape>, Timing> DriverExecution::compute(  in compute() function in android::nn::DriverExecution
    705  auto result = execution->compute(deadline);  in compute()
    766  auto result = kExecution->compute(deadline);  in computeFenced()
    951  std::tuple<int, std::vector<OutputShape>, Timing> compute(
   1168  std::tuple<int, std::vector<OutputShape>, Timing> CpuExecution::compute(  in compute() function in android::nn::CpuExecution
   1215  const auto [result, outputShapes, timing] = compute(nullptr, closestDeadline);  in computeFenced()

D | ExecutionBuilder.cpp
    653  auto [n, outputShapes, timing] = mExecutor->compute(deadline, burstController);  in computeInternal()
    718  auto [stepN, stepOutputShapes, _] = executor->compute(deadline, burstController);  in computeInternal()
   1010  int ExecutionBuilder::compute(std::shared_ptr<ExecutionCallback>* synchronizationCallback,  in compute() function in android::nn::ExecutionBuilder
   1460  std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::compute(  in compute() function in android::nn::StepExecutor
   1475  std::tie(n, outputShapes, timing) = execution->compute(burstController, deadline);  in compute()

/packages/modules/NeuralNetworks/runtime/test/fibonacci_extension/

D | FibonacciDriver.cpp
     94  bool compute(int32_t n, ScaleT outputScale, ZeroPointT outputZeroPoint, OutputT* output) {  in compute() function
    125  return compute(n, /*scale=*/1.0, /*zeroPoint=*/0, output);  in execute()
    131  return compute(n, outputQuant->scale, outputQuant->zeroPoint, output);  in execute()

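FibonacciDriver.cpp's compute(n, outputScale, outputZeroPoint, output) appears to fill the output with the first n Fibonacci numbers, applying the output operand's scale and zero point when the output tensor is quantized (scale 1, zero point 0 for the float path). A hedged sketch of that idea; the quantization formula q = value/scale + zeroPoint is the usual affine scheme, and the exact rounding and starting values in the real driver may differ.

    #include <cstdint>
    #include <vector>

    // Sketch: write the first n Fibonacci numbers, quantized as
    // q[i] = fib(i)/scale + zeroPoint (scale=1, zeroPoint=0 gives the raw values).
    template <typename OutputT>
    bool fibonacci(int32_t n, float scale, int32_t zeroPoint, std::vector<OutputT>& output) {
        if (n < 1) return false;
        output.resize(n);
        float a = 0.0f, b = 1.0f;  // fib(0), fib(1); the written sequence starts at 1
        for (int32_t i = 0; i < n; ++i) {
            output[i] = static_cast<OutputT>(b / scale + zeroPoint);
            const float next = a + b;
            a = b;
            b = next;
        }
        return true;
    }

    int main() {
        std::vector<float> out;
        fibonacci<float>(5, /*scale=*/1.0f, /*zeroPoint=*/0, out);  // -> {1, 1, 2, 3, 5}
        return out.size() == 5 ? 0 : 1;
    }
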
/packages/apps/Gallery2/src/com/android/gallery3d/data/

D | TimeClustering.java
    145  compute(items.get(i));  in run()
    148  compute(null);  in run()
    192  private void compute(SmallItem currentItem) {  in compute() method in TimeClustering

/packages/modules/NeuralNetworks/common/include/nnapi/

D | IExecution.h
     97  virtual ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> compute(

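IExecution.h declares the canonical interface: compute() returns an ExecutionResult wrapping the pair of output shapes and timing, and the Manager.cpp DriverExecution/CpuExecution implementations and StepExecutor::compute in ExecutionBuilder.cpp surface the equivalent std::tuple<int, std::vector<OutputShape>, Timing>. A stripped-down sketch of a polymorphic compute interface of that shape; the types and names here are simplified stand-ins, not the real nnapi headers (which also take a deadline and other arguments).

    #include <cstdint>
    #include <tuple>
    #include <vector>

    // Simplified stand-ins for the sketch only.
    struct OutputShape {
        std::vector<uint32_t> dimensions;
        bool isSufficient;
    };
    struct Timing {
        uint64_t timeOnDeviceNanos;
        uint64_t timeInDriverNanos;
    };

    // Sketch of an execution interface whose compute() reports an error code,
    // the actual output shapes, and how long the work took.
    class IExecutionSketch {
       public:
        virtual ~IExecutionSketch() = default;
        virtual std::tuple<int, std::vector<OutputShape>, Timing> compute() const = 0;
    };

    class CpuExecutionSketch : public IExecutionSketch {
       public:
        std::tuple<int, std::vector<OutputShape>, Timing> compute() const override {
            // A real implementation would run the model here and report
            // success, the resulting output shapes, and the measured timing.
            OutputShape shape{{1, 2, 2, 1}, /*isSufficient=*/true};
            Timing timing{0, 0};
            return std::make_tuple(0, std::vector<OutputShape>{shape}, timing);
        }
    };

    int main() {
        CpuExecutionSketch execution;
        auto [status, shapes, timing] = execution.compute();
        return status;
    }
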
/packages/modules/NeuralNetworks/common/include/

D | DefaultExecution.h
     43  ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> compute(  in compute() function

/packages/apps/Gallery2/src/com/android/gallery3d/filtershow/pipeline/

D | UpdatePreviewTask.java
     61  mPreviewPipeline.compute(buffer, renderingPreset, 0);  in doInBackground()