// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>   // For std::generate, std::shuffle.
#include <array>       // For std::array.
#include <cstddef>     // For size_t.
#include <functional>  // For std::multiplies.
#include <memory>      // For std::unique_ptr.
#include <random>      // For std::random_device, std::mt19937, std::uniform_real_distribution.
#include <vector>      // For std::vector.

#include <xnnpack.h>
#include <xnnpack/node-type.h>
#include <xnnpack/operator.h>
#include <xnnpack/subgraph.h>

#include "subgraph-unary-tester.h"
#include <gtest/gtest.h>
#include <fp16.h>
using StaticConstantPadTestInt8 = UnaryTest<int8_t>;
using StaticConstantPadTestUint8 = UnaryTest<uint8_t>;
using StaticConstantPadTestF32 = UnaryTest<float>;

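// The define tests build a one-node subgraph and verify that
// xnn_define_static_constant_pad records the paddings, padding value,
// compute type, and tensor IDs on the node exactly as specified.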
TEST_F(StaticConstantPadTestInt8, define)
{
  const int32_t zero_point = i8dist(rng);
  const float scale = scale_dist(rng);
  std::array<size_t, XNN_MAX_TENSOR_DIMS> pre_paddings;
  std::array<size_t, XNN_MAX_TENSOR_DIMS> post_paddings;
  std::fill(pre_paddings.begin(), pre_paddings.begin() + dims.size(), dim_dist(rng));
  std::fill(post_paddings.begin(), post_paddings.begin() + dims.size(), dim_dist(rng));
  float padding_value = f32dist(rng);
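  // The node stores the padding value in the quantized domain, so quantize
  // the float value with the same scale and zero point as the tensors.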
  uint32_t quantized_padding_value = xnn_qs8_quantize(padding_value, scale, zero_point);

  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph));
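  // Own the subgraph via unique_ptr so it is destroyed even when an ASSERT
  // returns early from the test body.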
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph);

  input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_quantized_tensor_value(
                          subgraph, xnn_datatype_qint8, zero_point, scale, dims.size(), dims.data(), nullptr, 0,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_quantized_tensor_value(
                          subgraph, xnn_datatype_qint8, zero_point, scale, dims.size(), dims.data(), nullptr, 1,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
    xnn_status_success,
    xnn_define_static_constant_pad(
      subgraph, pre_paddings.data(), post_paddings.data(), padding_value, input_id, output_id, /*flags=*/0));

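  // Inspect the subgraph's node list directly to confirm what was recorded.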
  ASSERT_EQ(subgraph->num_nodes, 1);
  const struct xnn_node* node = &subgraph->nodes[0];
  ASSERT_EQ(node->type, xnn_node_type_static_constant_pad);
  ASSERT_EQ(node->compute_type, xnn_compute_type_qs8);
  for (size_t i = 0; i < dims.size(); i++) {
    ASSERT_EQ(node->params.static_pad.pre_paddings[i], pre_paddings[i]);
    ASSERT_EQ(node->params.static_pad.post_paddings[i], post_paddings[i]);
  }
  ASSERT_EQ(node->params.static_pad.padding_value, quantized_padding_value);
  ASSERT_EQ(node->num_inputs, 1);
  ASSERT_EQ(node->inputs[0], input_id);
  ASSERT_EQ(node->num_outputs, 1);
  ASSERT_EQ(node->outputs[0], output_id);
  ASSERT_EQ(node->flags, 0);
}

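// The QU8 variant is identical except for the datatype and the unsigned
// quantization helper.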
TEST_F(StaticConstantPadTestUint8, define)
{
  const int32_t zero_point = u8dist(rng);
  const float scale = scale_dist(rng);
  std::array<size_t, XNN_MAX_TENSOR_DIMS> pre_paddings;
  std::array<size_t, XNN_MAX_TENSOR_DIMS> post_paddings;
  std::fill(pre_paddings.begin(), pre_paddings.begin() + dims.size(), dim_dist(rng));
  std::fill(post_paddings.begin(), post_paddings.begin() + dims.size(), dim_dist(rng));
  float padding_value = f32dist(rng);
  uint32_t quantized_padding_value = xnn_qu8_quantize(padding_value, scale, zero_point);

  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph);

  input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_quantized_tensor_value(
                          subgraph, xnn_datatype_quint8, zero_point, scale, dims.size(), dims.data(), nullptr, 0,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_quantized_tensor_value(
                          subgraph, xnn_datatype_quint8, zero_point, scale, dims.size(), dims.data(), nullptr, 1,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
    xnn_status_success,
    xnn_define_static_constant_pad(
      subgraph, pre_paddings.data(), post_paddings.data(), padding_value, input_id, output_id, /*flags=*/0));

  ASSERT_EQ(subgraph->num_nodes, 1);
  const struct xnn_node* node = &subgraph->nodes[0];
  ASSERT_EQ(node->type, xnn_node_type_static_constant_pad);
  ASSERT_EQ(node->compute_type, xnn_compute_type_qu8);
  for (size_t i = 0; i < dims.size(); i++) {
    ASSERT_EQ(node->params.static_pad.pre_paddings[i], pre_paddings[i]);
    ASSERT_EQ(node->params.static_pad.post_paddings[i], post_paddings[i]);
  }
  ASSERT_EQ(node->params.static_pad.padding_value, quantized_padding_value);
  ASSERT_EQ(node->num_inputs, 1);
  ASSERT_EQ(node->inputs[0], input_id);
  ASSERT_EQ(node->num_outputs, 1);
  ASSERT_EQ(node->outputs[0], output_id);
  ASSERT_EQ(node->flags, 0);
}

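// For F32 the node stores the padding value's raw IEEE-754 bit pattern,
// so the expectation is computed with float_as_uint32.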
TEST_F(StaticConstantPadTestF32, define)
{
  std::array<size_t, XNN_MAX_TENSOR_DIMS> pre_paddings;
  std::array<size_t, XNN_MAX_TENSOR_DIMS> post_paddings;
  std::fill(pre_paddings.begin(), pre_paddings.begin() + dims.size(), dim_dist(rng));
  std::fill(post_paddings.begin(), post_paddings.begin() + dims.size(), dim_dist(rng));
  float padding_value = f32dist(rng);
  uint32_t padding_value_as_bits = float_as_uint32(padding_value);

  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph);

  input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_tensor_value(
                          subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, 0,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_tensor_value(
                          subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, 1,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
    xnn_status_success,
    xnn_define_static_constant_pad(
      subgraph, pre_paddings.data(), post_paddings.data(), padding_value, input_id, output_id, /*flags=*/0));

  ASSERT_EQ(subgraph->num_nodes, 1);
  const struct xnn_node* node = &subgraph->nodes[0];
  ASSERT_EQ(node->type, xnn_node_type_static_constant_pad);
  ASSERT_EQ(node->compute_type, xnn_compute_type_fp32);
  for (size_t i = 0; i < dims.size(); i++) {
    ASSERT_EQ(node->params.static_pad.pre_paddings[i], pre_paddings[i]);
    ASSERT_EQ(node->params.static_pad.post_paddings[i], post_paddings[i]);
  }
  ASSERT_EQ(node->params.static_pad.padding_value, padding_value_as_bits);
  ASSERT_EQ(node->num_inputs, 1);
  ASSERT_EQ(node->inputs[0], input_id);
  ASSERT_EQ(node->num_outputs, 1);
  ASSERT_EQ(node->outputs[0], output_id);
  ASSERT_EQ(node->flags, 0);
}

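// The matches_operator_api tests run the same pad twice, once through the
// operator API and once through a subgraph runtime, and require the two
// outputs to match exactly.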
TEST_F(StaticConstantPadTestInt8, matches_operator_api)
{
  const int32_t zero_point = i8dist(rng);
  const float scale = scale_dist(rng);
  std::array<size_t, XNN_MAX_TENSOR_DIMS> pre_paddings;
  std::array<size_t, XNN_MAX_TENSOR_DIMS> post_paddings;
  std::fill(pre_paddings.begin(), pre_paddings.begin() + dims.size(), dim_dist(rng));
  std::fill(post_paddings.begin(), post_paddings.begin() + dims.size(), dim_dist(rng));
  float padding_value = f32dist(rng);
  uint32_t quantized_padding_value = xnn_qs8_quantize(padding_value, scale, zero_point);
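  // Each output dimension grows by its pre- and post-padding amounts.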
  std::vector<size_t> output_dims = dims;
  for (size_t i = 0; i < dims.size(); i++) {
    output_dims[i] = pre_paddings[i] + output_dims[i] + post_paddings[i];
  }
  // Size the output buffers and fill them with a canary value.
  operator_output = std::vector<int8_t>(NumElements(output_dims));
  subgraph_output = std::vector<int8_t>(operator_output.size());
  std::fill(operator_output.begin(), operator_output.end(), INT8_C(0xA5));
  std::fill(subgraph_output.begin(), subgraph_output.end(), INT8_C(0xA5));

  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  // Call operator API.
  xnn_operator_t op = nullptr;
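  // Constant pad only moves bytes, so the same x8 operator serves both the
  // signed and unsigned 8-bit paths; only the quantized padding byte differs.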
  const xnn_status status = xnn_create_constant_pad_nd_x8(&quantized_padding_value, /*flags=*/0, &op);
  if (status == xnn_status_unsupported_hardware) {
    GTEST_SKIP();
  }
  ASSERT_EQ(xnn_status_success, status);
  ASSERT_NE(nullptr, op);
  std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator);
  ASSERT_EQ(
    xnn_status_success, xnn_setup_constant_pad_nd_x8(
                          op, dims.size(), dims.data(), pre_paddings.data(), post_paddings.data(), input.data(),
                          operator_output.data(), /*threadpool=*/nullptr));
  ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr));

  // Call subgraph API.
  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph);

  input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_quantized_tensor_value(
                          subgraph, xnn_datatype_qint8, zero_point, scale, dims.size(), dims.data(), nullptr, 0,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success,
    xnn_define_quantized_tensor_value(
      subgraph, xnn_datatype_qint8, zero_point, scale, output_dims.size(), output_dims.data(), nullptr, 1,
      /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
    xnn_status_success,
    xnn_define_static_constant_pad(
      subgraph, pre_paddings.data(), post_paddings.data(), padding_value, input_id, output_id, /*flags=*/0));

  xnn_runtime_t runtime = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime));
  ASSERT_NE(nullptr, runtime);
  std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime);
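  // Bind the external value IDs to caller-owned buffers before invoking.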
  std::array<xnn_external_value, 2> external = {
    xnn_external_value{input_id, input.data()}, xnn_external_value{output_id, subgraph_output.data()}};
  ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data()));
  ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime));

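  // Both paths padded identical inputs, so the outputs must be bit-identical.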
  ASSERT_EQ(subgraph_output, operator_output);
}

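// Same comparison for QU8; only the datatype and the unsigned quantization
// of the padding byte differ.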
TEST_F(StaticConstantPadTestUint8, matches_operator_api)
{
  const int32_t zero_point = u8dist(rng);
  const float scale = scale_dist(rng);
  std::array<size_t, XNN_MAX_TENSOR_DIMS> pre_paddings;
  std::array<size_t, XNN_MAX_TENSOR_DIMS> post_paddings;
  std::fill(pre_paddings.begin(), pre_paddings.begin() + dims.size(), dim_dist(rng));
  std::fill(post_paddings.begin(), post_paddings.begin() + dims.size(), dim_dist(rng));
  float padding_value = f32dist(rng);
  uint32_t quantized_padding_value = xnn_qu8_quantize(padding_value, scale, zero_point);
  std::vector<size_t> output_dims = dims;
  for (size_t i = 0; i < dims.size(); i++) {
    output_dims[i] = pre_paddings[i] + output_dims[i] + post_paddings[i];
  }
  // Size the output buffers and fill them with a canary value.
  operator_output = std::vector<uint8_t>(NumElements(output_dims));
  subgraph_output = std::vector<uint8_t>(operator_output.size());
  std::fill(operator_output.begin(), operator_output.end(), UINT8_C(0xA5));
  std::fill(subgraph_output.begin(), subgraph_output.end(), UINT8_C(0xA5));

  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  // Call operator API.
  xnn_operator_t op = nullptr;
  const xnn_status status = xnn_create_constant_pad_nd_x8(&quantized_padding_value, /*flags=*/0, &op);
  if (status == xnn_status_unsupported_hardware) {
    GTEST_SKIP();
  }
  ASSERT_EQ(xnn_status_success, status);
  ASSERT_NE(nullptr, op);
  std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator);
  ASSERT_EQ(
    xnn_status_success, xnn_setup_constant_pad_nd_x8(
                          op, dims.size(), dims.data(), pre_paddings.data(), post_paddings.data(), input.data(),
                          operator_output.data(), /*threadpool=*/nullptr));
  ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr));

  // Call subgraph API.
  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph);

  input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_quantized_tensor_value(
                          subgraph, xnn_datatype_quint8, zero_point, scale, dims.size(), dims.data(), nullptr, 0,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success,
    xnn_define_quantized_tensor_value(
      subgraph, xnn_datatype_quint8, zero_point, scale, output_dims.size(), output_dims.data(), nullptr, 1,
      /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
    xnn_status_success,
    xnn_define_static_constant_pad(
      subgraph, pre_paddings.data(), post_paddings.data(), padding_value, input_id, output_id, /*flags=*/0));

  xnn_runtime_t runtime = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime));
  ASSERT_NE(nullptr, runtime);
  std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime);
  std::array<xnn_external_value, 2> external = {
    xnn_external_value{input_id, input.data()}, xnn_external_value{output_id, subgraph_output.data()}};
  ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data()));
  ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime));

  ASSERT_EQ(subgraph_output, operator_output);
}

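// The F32 variant uses the x32 pad operator and passes the padding value's
// bit pattern, since the operator copies 32-bit words without interpreting
// them as floats.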
TEST_F(StaticConstantPadTestF32, matches_operator_api)
{
  std::array<size_t, XNN_MAX_TENSOR_DIMS> pre_paddings;
  std::array<size_t, XNN_MAX_TENSOR_DIMS> post_paddings;
  std::fill(pre_paddings.begin(), pre_paddings.begin() + dims.size(), dim_dist(rng));
  std::fill(post_paddings.begin(), post_paddings.begin() + dims.size(), dim_dist(rng));
  float padding_value = f32dist(rng);
  uint32_t padding_value_as_u32 = float_as_uint32(padding_value);
  std::vector<size_t> output_dims = dims;
  for (size_t i = 0; i < dims.size(); i++) {
    output_dims[i] = pre_paddings[i] + output_dims[i] + post_paddings[i];
  }
  // Size the output buffers and fill them with a canary value.
  operator_output = std::vector<float>(NumElements(output_dims));
  subgraph_output = std::vector<float>(operator_output.size());
  std::fill(operator_output.begin(), operator_output.end(), UINT32_C(0xDEADBEEF));
  std::fill(subgraph_output.begin(), subgraph_output.end(), UINT32_C(0xDEADBEEF));

  ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

  // Call operator API.
  xnn_operator_t op = nullptr;
  const xnn_status status = xnn_create_constant_pad_nd_x32(&padding_value_as_u32, /*flags=*/0, &op);
  if (status == xnn_status_unsupported_hardware) {
    GTEST_SKIP();
  }
  ASSERT_EQ(xnn_status_success, status);
  ASSERT_NE(nullptr, op);
  std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_op(op, xnn_delete_operator);
  ASSERT_EQ(
    xnn_status_success, xnn_setup_constant_pad_nd_x32(
                          op, dims.size(), dims.data(), pre_paddings.data(), post_paddings.data(), input.data(),
                          operator_output.data(), /*threadpool=*/nullptr));
  ASSERT_EQ(xnn_status_success, xnn_run_operator(op, /*threadpool=*/nullptr));

  // Call subgraph API.
  xnn_subgraph_t subgraph = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph));
  std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> auto_subgraph(subgraph, xnn_delete_subgraph);

  input_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success, xnn_define_tensor_value(
                          subgraph, xnn_datatype_fp32, dims.size(), dims.data(), nullptr, 0,
                          /*flags=*/XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id));
  ASSERT_NE(input_id, XNN_INVALID_NODE_ID);

  output_id = XNN_INVALID_NODE_ID;
  ASSERT_EQ(
    xnn_status_success,
    xnn_define_tensor_value(
      subgraph, xnn_datatype_fp32, output_dims.size(), output_dims.data(), nullptr, 1,
      /*flags=*/XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id));
  ASSERT_NE(output_id, XNN_INVALID_NODE_ID);

  ASSERT_EQ(
    xnn_status_success,
    xnn_define_static_constant_pad(
      subgraph, pre_paddings.data(), post_paddings.data(), padding_value, input_id, output_id, /*flags=*/0));

  xnn_runtime_t runtime = nullptr;
  ASSERT_EQ(xnn_status_success, xnn_create_runtime_v3(subgraph, nullptr, nullptr, /*flags=*/0, &runtime));
  ASSERT_NE(nullptr, runtime);
  std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> auto_runtime(runtime, xnn_delete_runtime);
  std::array<xnn_external_value, 2> external = {
    xnn_external_value{input_id, input.data()}, xnn_external_value{output_id, subgraph_output.data()}};
  ASSERT_EQ(xnn_status_success, xnn_setup_runtime(runtime, external.size(), external.data()));
  ASSERT_EQ(xnn_status_success, xnn_invoke_runtime(runtime));

  ASSERT_EQ(subgraph_output, operator_output);
}