/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GeneratedTestUtils.h"

#include <android-base/logging.h>
#include <android/hardware_buffer.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"

namespace android::nn::generated_tests {
using namespace test_wrapper;
using namespace test_helper;

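// Converts a TestOperand to the equivalent wrapper OperandType, attaching symmetric
// per-channel quantization parameters when the operand requires them.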
static OperandType getOperandType(const TestOperand& op, bool testDynamicOutputShape) {
    auto dims = op.dimensions;
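    // A dimension of 0 means "unspecified", so zeroing out the dimensions of subgraph
    // outputs forces the runtime to compute the actual output shape at execution time.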
    if (testDynamicOutputShape && op.lifetime == TestOperandLifeTime::SUBGRAPH_OUTPUT) {
        dims.assign(dims.size(), 0);
    }
    if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        return OperandType(
                static_cast<Type>(op.type), dims,
                SymmPerChannelQuantParams(op.channelQuant.scales, op.channelQuant.channelDim));
    } else {
        return OperandType(static_cast<Type>(op.type), dims, op.scale, op.zeroPoint);
    }
}

// A Memory object that owns an AHardwareBuffer.
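// The buffer is locked for CPU access for the entire lifetime of the object, so constant
// operand data can be copied in through getPointer().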
class MemoryAHWB : public Memory {
   public:
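    // Allocates a BLOB-format AHardwareBuffer of |size| bytes with CPU read/write usage and
    // immediately locks it for CPU access. Allocation or lock failures are reported through
    // gtest expectations.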
    static std::unique_ptr<MemoryAHWB> create(uint32_t size) {
        const uint64_t usage =
                AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
        AHardwareBuffer_Desc desc = {
                .width = size,
                .height = 1,
                .layers = 1,
                .format = AHARDWAREBUFFER_FORMAT_BLOB,
                .usage = usage,
        };
        AHardwareBuffer* ahwb = nullptr;
        EXPECT_EQ(AHardwareBuffer_allocate(&desc, &ahwb), 0);
        EXPECT_NE(ahwb, nullptr);

        void* buffer = nullptr;
        EXPECT_EQ(AHardwareBuffer_lock(ahwb, usage, -1, nullptr, &buffer), 0);
        EXPECT_NE(buffer, nullptr);

        return std::unique_ptr<MemoryAHWB>(new MemoryAHWB(ahwb, buffer));
    }

    ~MemoryAHWB() override {
        EXPECT_EQ(AHardwareBuffer_unlock(mAhwb, nullptr), 0);
        AHardwareBuffer_release(mAhwb);
    }

    void* getPointer() const { return mBuffer; }

   private:
    MemoryAHWB(AHardwareBuffer* ahwb, void* buffer) : Memory(ahwb), mAhwb(ahwb), mBuffer(buffer) {}

    AHardwareBuffer* mAhwb;
    void* mBuffer;
};

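// Sums the aligned sizes of all CONSTANT_REFERENCE operands in the main subgraph and in any
// referenced subgraphs, then allocates a single shared memory region large enough to hold
// them all. Returns nullptr if the model has no constant reference operands.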
static std::unique_ptr<MemoryAHWB> createConstantReferenceMemory(const TestModel& testModel) {
    uint32_t size = 0;

    auto processSubgraph = [&size](const TestSubgraph& subgraph) {
        for (const TestOperand& operand : subgraph.operands) {
            if (operand.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
                size += operand.data.alignedSize();
            }
        }
    };

    processSubgraph(testModel.main);
    for (const TestSubgraph& subgraph : testModel.referenced) {
        processSubgraph(subgraph);
    }
    return size == 0 ? nullptr : MemoryAHWB::create(size);
}

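// Builds |model| from |subgraph|. Constant reference data is copied into |memory| at
// |*memoryOffset|, which is advanced by each operand's aligned size. Models for referenced
// subgraphs are built on demand into the |refModels| array.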
static void createModelFromSubgraph(const TestSubgraph& subgraph, bool testDynamicOutputShape,
                                    const std::vector<TestSubgraph>& refSubgraphs,
                                    const std::unique_ptr<MemoryAHWB>& memory,
                                    uint32_t* memoryOffset, Model* model, Model* refModels) {
    // Operands.
    for (const auto& operand : subgraph.operands) {
        auto type = getOperandType(operand, testDynamicOutputShape);
        auto index = model->addOperand(&type);

        switch (operand.lifetime) {
            case TestOperandLifeTime::CONSTANT_COPY: {
                model->setOperandValue(index, operand.data.get<void>(), operand.data.size());
            } break;
            case TestOperandLifeTime::CONSTANT_REFERENCE: {
                const uint32_t length = operand.data.size();
                std::memcpy(static_cast<uint8_t*>(memory->getPointer()) + *memoryOffset,
                            operand.data.get<void>(), length);
                model->setOperandValueFromMemory(index, memory.get(), *memoryOffset, length);
                *memoryOffset += operand.data.alignedSize();
            } break;
            case TestOperandLifeTime::NO_VALUE: {
                model->setOperandValue(index, nullptr, 0);
            } break;
            case TestOperandLifeTime::SUBGRAPH: {
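                // The operand's value is the index of the referenced subgraph. Build that
                // subgraph's model the first time it is referenced, then attach it to the
                // current operand.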
                uint32_t refIndex = *operand.data.get<uint32_t>();
                CHECK_LT(refIndex, refSubgraphs.size());
                const TestSubgraph& refSubgraph = refSubgraphs[refIndex];
                Model* refModel = &refModels[refIndex];
                if (!refModel->isFinished()) {
                    createModelFromSubgraph(refSubgraph, testDynamicOutputShape, refSubgraphs,
                                            memory, memoryOffset, refModel, refModels);
                    ASSERT_EQ(refModel->finish(), Result::NO_ERROR);
                    ASSERT_TRUE(refModel->isValid());
                }
                model->setOperandValueFromModel(index, refModel);
            } break;
            case TestOperandLifeTime::SUBGRAPH_INPUT:
            case TestOperandLifeTime::SUBGRAPH_OUTPUT:
            case TestOperandLifeTime::TEMPORARY_VARIABLE: {
                // Nothing to do here.
            } break;
        }
    }

    // Operations.
    for (const auto& operation : subgraph.operations) {
        model->addOperation(static_cast<int>(operation.type), operation.inputs, operation.outputs);
    }

    // Inputs and outputs.
    model->identifyInputsAndOutputs(subgraph.inputIndexes, subgraph.outputIndexes);
}

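// Builds |model| from |testModel|: allocates shared memory for constant reference data,
// constructs models for all referenced subgraphs, and applies the relaxed-computation
// setting. Note that |model| itself is not finished here; that is left to the caller.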
void createModel(const TestModel& testModel, bool testDynamicOutputShape, GeneratedModel* model) {
    ASSERT_NE(nullptr, model);

    std::unique_ptr<MemoryAHWB> memory = createConstantReferenceMemory(testModel);
    uint32_t memoryOffset = 0;
    std::vector<Model> refModels(testModel.referenced.size());
    createModelFromSubgraph(testModel.main, testDynamicOutputShape, testModel.referenced, memory,
                            &memoryOffset, model, refModels.data());
    model->setRefModels(std::move(refModels));
    model->setConstantReferenceMemory(std::move(memory));

    // Relaxed computation.
    model->relaxComputationFloat32toFloat16(testModel.isRelaxed);

    if (!testModel.expectFailure) {
        ASSERT_TRUE(model->isValid());
    }
}

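// Binds the test model's input buffers and newly allocated output buffers to |execution|.
// The output buffers are returned through |outputs| so the caller can check the results.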
void createRequest(const TestModel& testModel, Execution* execution,
                   std::vector<TestBuffer>* outputs) {
    ASSERT_NE(nullptr, execution);
    ASSERT_NE(nullptr, outputs);

    // Model inputs.
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
        ASSERT_EQ(Result::NO_ERROR,
                  execution->setInput(i, operand.data.get<void>(), operand.data.size()));
    }

    // Model outputs.
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];

        // In the case of a zero-sized output, we should provide at least a one-byte buffer.
        // This is because zero-sized tensors are only supported internally to the runtime, or
        // reported in output shapes. It is illegal for the client to pre-specify a zero-sized
        // tensor as a model output. Otherwise, we would have two semantic conflicts:
        // - "Zero dimension" conflicts with "unspecified dimension".
        // - "Omitted operand buffer" conflicts with "zero-sized operand buffer".
        const size_t bufferSize = std::max<size_t>(operand.data.size(), 1);

        outputs->emplace_back(bufferSize);
        ASSERT_EQ(Result::NO_ERROR,
                  execution->setOutput(i, outputs->back().getMutable<void>(), bufferSize));
    }
}

}  // namespace android::nn::generated_tests