/external/XNNPACK/eval/

f32-f16-cvt.cc:
  31: std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);  in TEST() local
  36: xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());  in TEST()
  39: ASSERT_EQ(reference_output, outputs[i])  in TEST()
  42: << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];  in TEST()
  49: std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);  in TEST() local
  54: xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());  in TEST()
  57: ASSERT_EQ(reference_output, outputs[i])  in TEST()
  60: << ", optimized = 0x" << std::hex << std::setw(4) << std::setfill('0') << outputs[i];  in TEST()
  67: std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> outputs(kBlockSize);  in TEST() local
  72: xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());  in TEST()
  [all …]

f32-exp.cc:
  34: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  36: … xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  38: ASSERT_EQ(reference_output, outputs[0])  in TEST()
  41: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  48: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  50: … xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  52: ASSERT_EQ(reference_output, outputs[0])  in TEST()
  55: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  62: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  67: … xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  [all …]

f32-expminus.cc:
  34: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  36: …xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.dat…  in TEST()
  38: ASSERT_EQ(reference_output, outputs[0])  in TEST()
  41: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  48: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  50: …xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.dat…  in TEST()
  52: ASSERT_EQ(reference_output, outputs[0])  in TEST()
  55: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  62: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  67: …math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  [all …]

f32-expm1minus.cc:
  34: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  36: …xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data…  in TEST()
  38: ASSERT_EQ(reference_output, outputs[0])  in TEST()
  41: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  48: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  53: …xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data…  in TEST()
  56: ASSERT_EQ(reference_output, outputs[i])  in TEST()
  59: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);  in TEST()
  68: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  73: …xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data…  in TEST()
  [all …]

f16-f32-cvt.cc:
  31: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  36: xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  39: ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))  in TEST()
  42: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);  in TEST()
  49: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  54: xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  57: ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))  in TEST()
  60: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);  in TEST()
  67: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  69: xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  [all …]

f32-roundu.cc:
  30: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  32: xnn_math_f32_roundu__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  34: ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))  in TEST()
  37: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  42: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  44: xnn_math_f32_roundu__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  46: ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))  in TEST()
  49: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  54: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  59: xnn_math_f32_roundu__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  [all …]

f32-roundd.cc:
  30: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  32: xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  34: ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))  in TEST()
  37: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  42: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  44: xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  46: ASSERT_EQ(reference_output, float_as_uint32(outputs[0]))  in TEST()
  49: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[0]);  in TEST()
  54: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  59: xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  [all …]

f32-roundz.cc:
  30: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  35: xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  38: ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))  in TEST()
  41: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);  in TEST()
  48: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  53: xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  56: ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))  in TEST()
  59: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);  in TEST()
  66: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  71: xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  [all …]

f32-roundne.cc:
  30: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  35: xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  38: ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))  in TEST()
  41: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);  in TEST()
  48: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  53: xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  56: ASSERT_EQ(reference_output, float_as_uint32(outputs[i]))  in TEST()
  59: …", optimized = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(outputs[i]);  in TEST()
  66: std::vector<float, AlignedAllocator<float, 64>> outputs(kBlockSize);  in TEST() local
  71: xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());  in TEST()
  [all …]

u64-sqrt.cc:
  49: std::vector<uint64_t, AlignedAllocator<uint64_t, 64>> outputs(kBlockSize);  in TEST() local
  54: …qrt__scalar_cvtu32_sqrt_cvtsatu32f64(kBlockSize * sizeof(uint64_t), inputs.data(), outputs.data());  in TEST()
  57: const uint64_t output = outputs[i];  in TEST()
  65: std::vector<uint64_t, AlignedAllocator<uint64_t, 64>> outputs(kBlockSize);  in TEST() local
  70: …qrt__scalar_cvtu32_sqrt_cvtsatu32f64(kBlockSize * sizeof(uint64_t), inputs.data(), outputs.data());  in TEST()
  73: const uint64_t output = outputs[i];  in TEST()
  81: std::vector<uint64_t, AlignedAllocator<uint64_t, 64>> outputs(kBlockSize);  in TEST() local
  86: …qrt__scalar_cvtu32_sqrt_cvtsatu32f64(kBlockSize * sizeof(uint64_t), inputs.data(), outputs.data());  in TEST()
  89: const uint64_t output = outputs[i];  in TEST()
  97: std::vector<uint64_t, AlignedAllocator<uint64_t, 64>> outputs(kBlockSize);  in TEST() local
  [all …]

u32-sqrt.cc:
  25: std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> outputs(kBlockSize);  in TEST() local
  30: … xnn_math_u32_sqrt__scalar_bitmanip(kBlockSize * sizeof(uint32_t), inputs.data(), outputs.data());  in TEST()
  33: const uint32_t output = outputs[i];  in TEST()
  51: std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> outputs(kBlockSize);
  56: … xnn_math_u32_sqrt__scalar_bitmanip(kBlockSize * sizeof(uint32_t), inputs.data(), outputs.data());
  59: const uint32_t output = outputs[i];
  69: std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> outputs(kBlockSize);  in TEST() local
  74: …xnn_math_u32_sqrt__scalar_clz_binsearch(kBlockSize * sizeof(uint32_t), inputs.data(), outputs.data…  in TEST()
  77: const uint32_t output = outputs[i];  in TEST()
  95: std::vector<uint32_t, AlignedAllocator<uint32_t, 64>> outputs(kBlockSize);
  [all …]

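Every entry in this eval/ block follows the same harness: fill a 64-byte-aligned block of kBlockSize inputs, call the kernel with the block's byte size, and compare each output bit-for-bit against a scalar reference (the float_as_uint32 hex dumps in the failure messages exist for exactly that). Below is a self-contained sketch of the pattern, using a scalar round-to-nearest-even routine (the addsub magic-number trick named by the f32-roundne files) as a stand-in kernel; all names here are hypothetical, and a plain std::vector stands in for the aligned allocator.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Stand-in kernel: round-to-nearest-even via the addsub trick. Adding and
// subtracting 2^23 forces the FPU to round the fraction away; floats with
// magnitude >= 2^23 are already integral. `n` is a byte count, as in XNNPACK.
void math_f32_roundne_scalar_addsub(size_t n, const float* input, float* output) {
  const float magic = 0x1.0p+23f;  // 2^23
  for (size_t i = 0; i < n / sizeof(float); i++) {
    const float x = input[i];
    const float ax = std::fabs(x);
    const float rounded = (ax >= magic) ? ax : (ax + magic) - magic;
    output[i] = std::copysign(rounded, x);
  }
}

int main() {
  constexpr size_t kBlockSize = 1024;  // elements per kernel call, as in the tests
  std::vector<float> inputs(kBlockSize), outputs(kBlockSize);
  // Sweep finite values; the real tests sweep whole exponent ranges per block.
  for (size_t i = 0; i < kBlockSize; i++) {
    inputs[i] = -8.0f + 0.015625f * static_cast<float>(i);
  }
  math_f32_roundne_scalar_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());
  for (size_t i = 0; i < kBlockSize; i++) {
    const float reference_output = std::nearbyintf(inputs[i]);  // nearest-even by default
    // Compare bit patterns, mirroring the tests' float_as_uint32() comparison.
    uint32_t ref_bits, out_bits;
    std::memcpy(&ref_bits, &reference_output, sizeof(ref_bits));
    std::memcpy(&out_bits, &outputs[i], sizeof(out_bits));
    if (ref_bits != out_bits) {
      std::printf("mismatch at %zu: input = %a, reference = 0x%08x, optimized = 0x%08x\n",
                  i, inputs[i], ref_bits, out_bits);
      return 1;
    }
  }
  return 0;
}
```

Passing the size in bytes rather than elements is what lets one harness drive kernels with different output widths (uint16_t for f32→f16, float for the rounding kernels) through the same signature.
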
/external/llvm/lib/Target/Hexagon/

HexagonBitTracker.cpp:
  126: CellMapType &Outputs) const {  in evaluate()
  142: return evaluateLoad(MI, Inputs, Outputs);  in evaluate()
  159: if (evaluateFormalCopy(MI, Inputs, Outputs))  in evaluate()
  188: auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)  in evaluate()
  190: putCell(Reg[0], Val, Outputs);  in evaluate()
  253: return rr0(eIMM(im(1), W0), Outputs);  in evaluate()
  255: return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);  in evaluate()
  257: return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);  in evaluate()
  265: return rr0(RC, Outputs);  in evaluate()
  273: return rr0(rc(1), Outputs);  in evaluate()
  [all …]

/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/

HexagonBitTracker.cpp:
  189: CellMapType &Outputs) const {  in evaluate()
  215: return evaluateLoad(MI, Inputs, Outputs);  in evaluate()
  234: if (evaluateFormalCopy(MI, Inputs, Outputs))  in evaluate()
  260: auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)  in evaluate()
  262: putCell(Reg[0], Val, Outputs);  in evaluate()
  325: return rr0(eIMM(im(1), W0), Outputs);  in evaluate()
  327: return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);  in evaluate()
  329: return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);  in evaluate()
  337: return rr0(RC, Outputs);  in evaluate()
  345: return rr0(rc(1), Outputs);  in evaluate()
  [all …]

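Both copies of HexagonBitTracker.cpp (the in-tree LLVM and the SwiftShader vendored LLVM 10) show the same device inside evaluate(): a local lambda rr0 stores a computed RegisterCell into the Outputs cell map under the instruction's first defined register, so each opcode case collapses to a single `return rr0(..., Outputs)`. Here is a reduced sketch of that shape with stand-in types; nothing below is LLVM's actual code, only the pattern.

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Reduced stand-ins for the BitTracker types used in evaluate().
enum class BitValue : uint8_t { Zero, One, Unknown };

struct RegisterCell {
  explicit RegisterCell(uint16_t Width) : Bits(Width, BitValue::Unknown) {}
  RegisterCell& fill(uint16_t B, uint16_t E, BitValue V) {
    for (uint16_t i = B; i < E; i++) Bits[i] = V;
    return *this;
  }
  std::vector<BitValue> Bits;
};

using RegisterRef = unsigned;
using CellMapType = std::map<RegisterRef, RegisterCell>;

// Equivalent of putCell(): record the known bits of register R.
void putCell(RegisterRef R, const RegisterCell& Val, CellMapType& Outputs) {
  Outputs.insert_or_assign(R, Val);
}

bool evaluate(unsigned Opcode, const std::vector<RegisterRef>& Reg,
              CellMapType& Outputs) {
  const uint16_t W0 = 32;  // width of the first defined register
  // Same shape as the rr0 lambda: store Val as the cell of Reg[0].
  auto rr0 = [&Reg](const RegisterCell& Val, CellMapType& Outs) {
    putCell(Reg[0], Val, Outs);
    return true;
  };
  switch (Opcode) {
    case 0:  // "all zeros" pseudo-op
      return rr0(RegisterCell(W0).fill(0, W0, BitValue::Zero), Outputs);
    case 1:  // "all ones" pseudo-op
      return rr0(RegisterCell(W0).fill(0, W0, BitValue::One), Outputs);
    default:
      return false;  // unmodeled opcode: leave Outputs untouched
  }
}

int main() {
  CellMapType Outputs;
  evaluate(/*Opcode=*/0, /*Reg=*/{7}, Outputs);
  std::cout << "cells written: " << Outputs.size() << "\n";  // prints 1
}
```
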
/external/tensorflow/tensorflow/python/kernel_tests/strings_ops/

unicode_transcode_op_test.py:
  35: outputs = string_ops.unicode_transcode(
  42: values = self.evaluate(outputs)
  45: outputs = string_ops.unicode_transcode(
  52: values = self.evaluate(outputs)
  55: outputs = string_ops.unicode_transcode(
  62: values = self.evaluate(outputs)
  70: outputs = string_ops.unicode_transcode(
  77: values = self.evaluate(outputs)
  83: outputs = string_ops.unicode_transcode(
  90: values = self.evaluate(outputs)
  [all …]

/external/tensorflow/tensorflow/core/tfrt/saved_model/tests/

saved_model_test.cc:
  75: std::vector<tensorflow::Tensor> outputs;  in TEST_P() local
  76: TF_ASSERT_OK(saved_model->Run(run_options, "toy", inputs, &outputs));  in TEST_P()
  77: ASSERT_EQ(outputs.size(), 1);  in TEST_P()
  79: EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),  in TEST_P()
  121: std::vector<tensorflow::Tensor> outputs;  in TEST() local
  122: TF_ASSERT_OK(saved_model->Run(run_options, "toy", inputs, &outputs));  in TEST()
  123: ASSERT_EQ(outputs.size(), 1);  in TEST()
  125: EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),  in TEST()
  156: std::vector<tensorflow::Tensor> outputs;  in TEST() local
  158: test.GetSavedModel()->Run({}, "serving_default", inputs, &outputs));  in TEST()
  [all …]

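The pattern in saved_model_test.cc: an empty std::vector<tensorflow::Tensor> is passed by pointer, Run() fills it with one tensor per fetch of the named signature, and the test asserts on size and contents. A sketch of that call shape follows, assuming a tensorflow::tfrt_stub::SavedModel handle loaded elsewhere; the signature name "toy" comes from the excerpt, while the RunOptions spelling, header path, and helper name are assumptions.

```cpp
#include <vector>

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/saved_model/saved_model.h"  // assumed header location

// Runs the "toy" signature and returns its single output tensor.
// `saved_model` is assumed to be loaded already (loading omitted).
tensorflow::Tensor RunToy(tensorflow::tfrt_stub::SavedModel& saved_model,
                          const std::vector<tensorflow::Tensor>& inputs) {
  tensorflow::tfrt_stub::SavedModel::RunOptions run_options;  // type name assumed
  std::vector<tensorflow::Tensor> outputs;  // filled by Run(), one per fetch
  TF_CHECK_OK(saved_model.Run(run_options, "toy", inputs, &outputs));
  CHECK_EQ(outputs.size(), 1);  // the toy model has a single fetch
  return outputs[0];
}
```
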
/external/icu/icu4c/source/layoutex/

layoutex.vcxproj:
  156: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..\..\include\layout\%(Filename…
  159: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..\..\include\layout\%(Filename)%…
  162: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..\..\include\layout\%(Filena…
  165: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..\..\include\layout\%(Filename…
  170: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..\..\include\layout\%(Filename…
  173: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..\..\include\layout\%(Filename)%…
  176: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..\..\include\layout\%(Filena…
  179: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..\..\include\layout\%(Filename…
  184: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..\..\include\layout\%(Filename…
  187: …<Outputs Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..\..\include\layout\%(Filename)%…
  [all …]

/external/tensorflow/tensorflow/python/compiler/xla/

xla.py:
  97: 3) Operation-only outputs: a NoOp would be returned which
  243: # ignore ops which don't have outputs. TODO(phawkins): fix that.
  247: array_ops.identity(x.outputs[0]).op
  249: if x.outputs
  256: # Mark op's outputs as seen by this context and any outer contexts.
  257: output_names = [x.name for x in op.outputs]
  322: value output 3) Operation-only outputs
  324: ValueError: If any element in computation outputs is neither an operations
  326: ValueError: If computation outputs is non-flat and contains any Operations.
  364: outputs = computation(*computation_inputs)
  [all …]

/external/tensorflow/tensorflow/lite/nnapi/sl/public/

NeuralNetworksSupportLibraryImpl.h:
  355: * Gets the type of tensors used for outputs.
  631: * Behavior, arguments, and outputs match NNAPI Runtime function
  640: * Behavior, arguments, and outputs match NNAPI Runtime function
  649: * outputs match NNAPI Runtime function
  659: * Behavior, arguments, and outputs match NNAPI Runtime function
  668: * Behavior, arguments, and outputs match NNAPI Runtime function
  678: * arguments, and outputs match NNAPI Runtime function
  689: * arguments, and outputs match NNAPI Runtime function
  700: * arguments, and outputs match NNAPI Runtime function
  711: * arguments, and outputs match NNAPI Runtime function
  [all …]

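NeuralNetworksSupportLibraryImpl.h repeats one doc-comment formula ("Behavior, arguments, and outputs match NNAPI Runtime function …") because the file defines a struct of function pointers that mirrors the runtime API entry point for entry point, letting a driver ship the whole API surface as one versioned table. A generic sketch of that packaging pattern follows; every name below is invented for illustration and none are the real NNAPI symbols.

```cpp
#include <cstdint>

extern "C" {

// Hypothetical miniature of the support-library pattern: a versioned struct
// of C function pointers whose behavior, arguments, and outputs each match
// a runtime entry point of the same name.
struct MiniApiImpl {
  int64_t implVersion;  // lets a loader reject table layouts it doesn't know
  int (*modelCreate)(void** model);
  int (*modelAddOperand)(void* model, int32_t operand_type);
  int (*modelFinish)(void* model);
  int (*executionCompute)(void* execution);
};

// The single exported symbol a driver would provide.
const struct MiniApiImpl* MiniApiImpl_getImplementation(void);

}  // extern "C"

// Stub definition so the sketch links on its own; a real driver would fill
// every slot with its implementation.
static const MiniApiImpl kImpl = {/*implVersion=*/1, nullptr, nullptr,
                                  nullptr, nullptr};
const MiniApiImpl* MiniApiImpl_getImplementation(void) { return &kImpl; }
```

Versioning the struct up front is the design point: callers load the table through one symbol and can feature-check by version instead of probing individual symbols.
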
/external/tensorflow/tensorflow/cc/client/

client_session_test.cc:
  81: std::vector<Tensor> outputs;  in TEST() local
  83: TF_EXPECT_OK(session.Run({c}, &outputs));  in TEST()
  84: test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({1, 1}, {1, 2}));  in TEST()
  93: std::vector<Tensor> outputs;  in TEST() local
  95: TF_EXPECT_OK(session.Run({{a, 1}, {b, 41}}, {c}, &outputs));  in TEST()
  96: test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({42}, {}));  in TEST()
  104: std::vector<Tensor> outputs;  in TEST() local
  106: TF_EXPECT_OK(session.Run({{a, {1, 1}}}, {c}, &outputs));  in TEST()
  107: test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({3, 3}, {2}));  in TEST()
  110: outputs.clear();  in TEST()
  [all …]

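The client_session_test.cc excerpt shows both Run overloads: fetch-only `Run({c}, &outputs)` and a feed map `Run({{a, 1}, {b, 41}}, {c}, &outputs)`, with outputs filled positionally to match the fetch list. A minimal standalone program in the same style, sketched from the calls visible above (the a/b/c graph mirrors the test; error handling is reduced to TF_CHECK_OK):

```cpp
#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor.h"

int main() {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  // Two placeholders and their sum, mirroring the a/b/c graph in the test.
  auto a = tensorflow::ops::Placeholder(root, tensorflow::DT_INT32);
  auto b = tensorflow::ops::Placeholder(root, tensorflow::DT_INT32);
  auto c = tensorflow::ops::Add(root, a, b);

  tensorflow::ClientSession session(root);
  std::vector<tensorflow::Tensor> outputs;  // one entry per fetched Output
  // Feed scalars 1 and 41, fetch c; outputs[0] then holds the scalar 42.
  TF_CHECK_OK(session.Run({{a, 1}, {b, 41}}, {c}, &outputs));
  return 0;
}
```
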
/external/tensorflow/tensorflow/core/common_runtime/

direct_session_test.cc:
  144: std::vector<Tensor> outputs;  in TEST_F() local
  145: Status s = session->Run(inputs, output_names, target_nodes, &outputs);  in TEST_F()
  148: ASSERT_EQ(1, outputs.size());  in TEST_F()
  151: auto mat = outputs[0].matrix<float>();  in TEST_F()
  152: ASSERT_TRUE(outputs[0].IsInitialized());  in TEST_F()
  170: std::vector<Tensor> outputs;  in TEST_F() local
  171: TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));  in TEST_F()
  173: ASSERT_EQ(1, outputs.size());  in TEST_F()
  176: auto mat = outputs[0].matrix<float>();  in TEST_F()
  177: ASSERT_TRUE(outputs[0].IsInitialized());  in TEST_F()
  [all …]

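direct_session_test.cc drives the session through the string-keyed Session::Run interface: feeds as (tensor name, Tensor) pairs, fetch names and target node names as string lists, and outputs filled one per fetch name. A sketch of that call shape; graph construction is omitted and the node names "x" and "y" are hypothetical.

```cpp
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/public/session.h"

// Feeds node "x", fetches "y:0", and returns the fetched tensor. The session
// is assumed to already hold a graph containing nodes named "x" and "y".
tensorflow::Tensor RunOnce(tensorflow::Session* session,
                           const tensorflow::Tensor& x_value) {
  std::vector<std::pair<std::string, tensorflow::Tensor>> inputs = {
      {"x", x_value}};
  std::vector<std::string> output_names = {"y:0"};
  std::vector<std::string> target_nodes = {};  // nodes to run without fetching
  std::vector<tensorflow::Tensor> outputs;     // one entry per output name
  TF_CHECK_OK(session->Run(inputs, output_names, target_nodes, &outputs));
  return outputs[0];
}
```

The RunCallable lines at 171 show the precompiled variant of the same idea: the feed/fetch signature is baked into `handle` once, and outputs is still the positional result vector.
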
/external/tensorflow/tensorflow/python/saved_model/

signature_def_utils_test.py:
  44: def _make_signature(inputs, outputs, name=None):  argument
  51: for output_name, tensor in outputs.items()
  71: outputs = {}
  72: outputs["foo-output"] = y_tensor_info
  75: inputs, outputs, "foo-method-name")
  86: # Check outputs in signature def.
  87: self.assertEqual(1, len(signature_def.outputs))
  88: y_tensor_info_actual = signature_def.outputs["foo-output"]
  114: # Check outputs in signature def.
  115: self.assertEqual(1, len(signature_def.outputs))
  [all …]

/external/tensorflow/tensorflow/python/ops/

cond_v2.py:
  111: if_op = op.outputs[0].op
  150: # Make outputs match by adding none optionals.
  154: true_graph.outputs.extend(extra_true_outputs)
  155: false_graph.outputs.extend(extra_false_outputs)
  180: outputs = _build_cond(
  190: return [None] + outputs
  219: A list of Tensors which are the outputs of the If op. Does not include added
  220: intermediate outputs.
  232: # Add all intermediate tensors as function outputs so they're available for
  233: # the gradient computation. Since the outputs of the two functions must
  [all …]

while_v2.py:
  200: outputs = body(
  202: if not nest.is_nested(outputs):
  203: outputs = [outputs]
  207: nest.assert_same_structure(outputs, orig_loop_vars, check_types=False,
  212: vars1 = variable_utils.convert_variables_to_tensors(outputs)
  216: outputs = _tensor_array_to_flow(outputs)
  219: # is_constant=True for inputs that are directly passed to outputs.
  220: return [loop_counter + 1, maximum_iterations_arg] + list(outputs)
  242: # is_constant=True for inputs that are directly passed to outputs.
  243: body_graph.outputs.extend(body_graph.internal_captures)
  [all …]

/external/tensorflow/tensorflow/lite/toco/graph_transformations/

propagate_array_data_types.cc:
  29: for (const auto& output : op->outputs) {  in SetDataTypeForAllOutputs()
  52: for (const auto& output : op->outputs) {  in Run()
  58: // These operators unconditionally produce float outputs  in Run()
  71: // These operators unconditionally produce bool outputs  in Run()
  75: // These operators only produce int32 outputs.  in Run()
  110: CHECK_EQ(op->outputs.size(), 1);  in Run()
  112: model->GetArray(op->outputs[0]).data_type = cast_op->dst_data_type;  in Run()
  117: CHECK_EQ(op->outputs.size(), 1);  in Run()
  119: model->GetArray(op->outputs[0]).data_type = argmax_op->output_data_type;  in Run()
  124: CHECK_EQ(op->outputs.size(), 1);  in Run()
  [all …]

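The propagate_array_data_types.cc excerpt shows the transformation's two shapes: broadcast one fixed type to every output of an op (SetDataTypeForAllOutputs, used for the "unconditionally produce float/bool/int32" cases), or copy an op-carried attribute type to a single output behind a `CHECK_EQ(op->outputs.size(), 1)` guard (the cast and argmax cases). A reduced sketch of both with stand-in model types; these are hypothetical simplifications, not toco's actual classes.

```cpp
#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

// Reduced stand-ins for toco's model types.
enum class ArrayDataType { kNone, kFloat, kBool, kInt32 };

struct Array { ArrayDataType data_type = ArrayDataType::kNone; };

struct Operator {
  std::vector<std::string> inputs;
  std::vector<std::string> outputs;  // names of the arrays this op writes
};

struct Model {
  Array& GetArray(const std::string& name) { return arrays[name]; }
  std::unordered_map<std::string, Array> arrays;
};

// Case 1: ops whose outputs all share one known type.
void SetDataTypeForAllOutputs(Model* model, Operator* op,
                              ArrayDataType data_type) {
  for (const auto& output : op->outputs) {
    model->GetArray(output).data_type = data_type;
  }
}

// Case 2: ops that carry the output type as an attribute (e.g. a cast's
// destination type); the single-output invariant is asserted, as the
// CHECK_EQ does in the original.
void PropagateAttributeType(Model* model, Operator* op,
                            ArrayDataType dst_data_type) {
  assert(op->outputs.size() == 1);
  model->GetArray(op->outputs[0]).data_type = dst_data_type;
}
```
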
/external/tensorflow/tensorflow/lite/java/src/test/java/org/tensorflow/lite/

NativeInterpreterWrapperTest.java:
  140: Map<Integer, Object> outputs = new HashMap<>();  in testRunWithFloat() local
  141: outputs.put(0, parsedOutputs);  in testRunWithFloat()
  142: wrapper.run(inputs, outputs);  in testRunWithFloat()
  159: Map<Integer, Object> outputs = new HashMap<>();  in testRunWithBufferOutput() local
  160: outputs.put(0, parsedOutput);  in testRunWithBufferOutput()
  161: wrapper.run(inputs, outputs);  in testRunWithBufferOutput()
  179: Map<Integer, Object> outputs = new HashMap<>();  in testRunWithInputsOfSameDims() local
  180: outputs.put(0, parsedOutputs);  in testRunWithInputsOfSameDims()
  181: wrapper.run(inputs, outputs);  in testRunWithInputsOfSameDims()
  186: outputs.put(0, parsedOutputs);  in testRunWithInputsOfSameDims()
  [all …]

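The Java tests feed NativeInterpreterWrapper with inputs and outputs maps keyed by tensor index, and the wrapper copies results into the caller-provided output buffers. For comparison, the same index-keyed run pattern through TFLite's public C++ API, as a sketch (the model path and tensor layout are hypothetical):

```cpp
#include <cstdio>
#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

int main() {
  // Load a flatbuffer model (path hypothetical).
  auto model = tflite::FlatBufferModel::BuildFromFile("add.tflite");
  if (model == nullptr) return 1;

  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk)
    return 1;
  if (interpreter->AllocateTensors() != kTfLiteOk) return 1;

  // Index-based access, like the Java wrapper's inputs/outputs maps.
  float* input = interpreter->typed_input_tensor<float>(0);
  input[0] = 1.0f;  // assumes a single-element float input tensor
  if (interpreter->Invoke() != kTfLiteOk) return 1;
  const float* output = interpreter->typed_output_tensor<float>(0);
  std::printf("output[0] = %f\n", output[0]);
  return 0;
}
```
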