/external/tensorflow/tensorflow/python/distribute/v1/ |
D | all_reduce.py |
    254  def build_ring_all_reduce(input_tensors, num_workers, num_subchunks,  (argument)
    277  if len(input_tensors) < 2:
    279  input_tensors, shape = _flatten_tensors(input_tensors)
    280  devices = [t.device for t in input_tensors]
    284  input_tensors, devices,
    297  def _build_ring_gather(input_tensors, devices, num_subchunks,  (argument)
    317  num_devices = len(input_tensors)
    321  return input_tensors
    322  shape = input_tensors[0].shape
    332  splits, split_pad_len = _padded_split(input_tensors[d], num_chunks)
    [all …]
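The hits above come from TensorFlow's graph-building ring all-reduce helper: the input tensors are flattened, padded and split into per-device sub-chunks, then gathered and scattered around a ring of devices. As a point of reference only, here is a minimal NumPy sketch of the same two-phase ring pattern (reduce-scatter followed by all-gather); it illustrates the algorithm the module wires into the graph, not the module's own API, and all names in it are invented for the example.

```python
import numpy as np

def ring_all_reduce(chunked_inputs):
    """Toy ring all-reduce over pre-chunked per-device arrays.

    chunked_inputs: list of length n (one entry per "device"), each a list
    of n equally sized np.ndarray chunks. Returns the fully reduced chunks,
    replicated on every device.
    """
    n = len(chunked_inputs)
    # Phase 1: reduce-scatter. After n-1 steps, device d holds the full sum
    # of chunk (d + 1) % n.
    for step in range(n - 1):
        for d in range(n):
            src = (d - step) % n   # chunk index device d forwards this step
            dst = (d + 1) % n      # ring neighbour that accumulates it
            chunked_inputs[dst][src] = chunked_inputs[dst][src] + chunked_inputs[d][src]
    # Phase 2: all-gather. Each fully reduced chunk travels the ring until
    # every device holds every reduced chunk.
    for step in range(n - 1):
        for d in range(n):
            src = (d + 1 - step) % n
            dst = (d + 1) % n
            chunked_inputs[dst][src] = chunked_inputs[d][src]
    return chunked_inputs

# Example: 4 "devices", each contributing 4 chunks of 2 elements with value d.
devices = [[np.full(2, float(d)) for _ in range(4)] for d in range(4)]
reduced = ring_all_reduce(devices)
assert all(np.allclose(chunk, 0.0 + 1.0 + 2.0 + 3.0)
           for dev in reduced for chunk in dev)
```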
|
D | all_reduce_test.py |
    93   input_tensors = []
    100  input_tensors.append(array_ops.identity(t8))
    101  return input_tensors, device_names
    106  input_tensors, device_names = self._buildInput(1, 1)
    108  output_tensors = ar._build_ring_gather(input_tensors, device_names, 1,
    111  self.assertEqual(output_tensors, input_tensors)
    113  input_tensors, device_names = self._buildInput(1, 4)
    116  input_tensors, device_names, 2, pred_by_c_d, rank_by_c_d, math_ops.add)
    119  self.assertEqual(len(output_tensors), len(input_tensors))
    120  num_chunks = 2 * len(input_tensors)
    [all …]
|
/external/tensorflow/tensorflow/core/ops/ |
D | array_ops_test.cc |
    310  op.input_tensors.resize(2);  (in TEST())
    316  op.input_tensors[0] = &in_t;  (in TEST())
    342  op.input_tensors.resize(3);  (in TEST())
    343  op.input_tensors[2] = &axis_dim_t;  (in TEST())
    450  op.input_tensors.resize(2);  (in TEST())
    471  op.input_tensors[1] = &paddings_t;  (in TEST())
    481  op.input_tensors.resize(3);  (in TEST())
    502  op.input_tensors[1] = &paddings_t;  (in TEST())
    511  op.input_tensors.resize(2);  (in TEST())
    535  op.input_tensors[1] = &paddings_t;  (in TEST())
    [all …]
|
D | image_ops_test.cc |
    35   op.input_tensors.resize(2);  (in TEST())
    49   op.input_tensors[1] = &size_tensor;  (in TEST())
    212  op.input_tensors.resize(2);  (in TEST())
    234  op.input_tensors[1] = &size_tensor;  (in TEST())
    246  op.input_tensors.resize(4);  (in TEST())
    262  op.input_tensors[3] = &size_tensor;  (in TEST())
    277  op.input_tensors.resize(2);  (in TEST())
    288  op.input_tensors[1] = &size_tensor;  (in TEST())
    294  op.input_tensors.resize(4);  (in TEST())
    304  op.input_tensors[3] = &image_size;  (in TEST())
    [all …]
|
D | random_ops_test.cc |
    26  op.input_tensors.resize(2);  (in TEST())
    34  op.input_tensors[1] = &num_samples;  (in TEST())
    42  op.input_tensors.resize(2);  (in TEST())
    49  op.input_tensors[0] = &shape;  (in TEST())
    57  op.input_tensors.resize(2);  (in TEST())
    64  op.input_tensors[0] = &shape;  (in TEST())
|
D | math_ops_test.cc |
    294  op.input_tensors.resize(3);  (in TEST())
    306  op.input_tensors[0] = &start_t;  (in TEST())
    309  op.input_tensors[1] = &limit_t;  (in TEST())
    313  op.input_tensors[2] = &delta_t;  (in TEST())
    337  op.input_tensors.resize(3);  (in TEST())
    347  op.input_tensors[2] = &num_t;  (in TEST())
    357  op.input_tensors.resize(3);  (in TEST())
    368  op.input_tensors[2] = &num_segments_t;  (in TEST())
    379  op.input_tensors.resize(3);  (in TEST())
    392  op.input_tensors.resize(4);  (in TEST())
    [all …]
|
D | data_flow_ops_test.cc |
    148  op.input_tensors.push_back(nullptr);  (in TEST())
    149  op.input_tensors.push_back(&tensor_5);  (in TEST())
    152  op.input_tensors[0] = &tensor_2;  (in TEST())
    153  op.input_tensors[1] = nullptr;  (in TEST())
    157  op.input_tensors[1] = &tensor_5;  (in TEST())
    242  op.input_tensors.push_back(nullptr);  (in TEST())
    243  op.input_tensors.push_back(&n_tensor);  (in TEST())
    274  op.input_tensors.push_back(nullptr);  (in TEST())
    275  op.input_tensors.push_back(&n_tensor);  (in TEST())
|
D | spectral_ops_test.cc |
    74   op.input_tensors.resize(2);  (in TEST())
    76   op.input_tensors[1] = &fft_length;  (in TEST())
    134  op.input_tensors.resize(2);  (in TEST())
    136  op.input_tensors[1] = &fft_length;  (in TEST())
    194  op.input_tensors.resize(2);  (in TEST())
    196  op.input_tensors[1] = &fft_length;  (in TEST())
|
/external/tensorflow/tensorflow/python/keras/ |
D | models.py |
    110  tensor in tensor_map for tensor in nest.flatten(node.input_tensors)):
    129  def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer):  (argument)
    172  if input_tensors is not None:
    174  input_tensors = nest.flatten(input_tensors)
    175  for i, input_tensor in enumerate(input_tensors):
    195  input_tensors, output_tensors, created_layers = (
    199  model = Model(input_tensors, output_tensors, name=model.name)
    279  def _clone_sequential_model(model, input_tensors=None, layer_fn=_clone_layer):  (argument)
    321  if isinstance(layer, InputLayer) and input_tensors is not None:
    332  if input_tensors is None:
    [all …]
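These private helpers back the public tf.keras.models.clone_model API, whose input_tensors argument grafts the cloned architecture onto existing tensors instead of fresh Input placeholders. A minimal sketch of that public entry point (the toy model and tensor names are invented for the example):

```python
import tensorflow as tf

# A small functional model to clone.
inputs = tf.keras.Input(shape=(4,), name="features")
outputs = tf.keras.layers.Dense(2, activation="softmax")(inputs)
model = tf.keras.Model(inputs, outputs)

# Clone the architecture, but rebuild it on top of a new input tensor.
# Weights are re-initialized by clone_model; copy them if they should match.
new_input = tf.keras.Input(shape=(4,), name="new_features")
clone = tf.keras.models.clone_model(model, input_tensors=new_input)
clone.set_weights(model.get_weights())
```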
|
/external/tensorflow/tensorflow/lite/ |
D | graph_info_test.cc |
    117  EXPECT_EQ(generated_subgraphs[subgraph_index].input_tensors,  (in CheckPartitionSubgraphs())
    118  expected_subgraphs[subgraph_index].input_tensors);  (in CheckPartitionSubgraphs())
    162  expected_subgraph.input_tensors = {0};  (in TEST())
    180  expected_subgraph.input_tensors = {0};  (in TEST())
    200  expected_subgraph.input_tensors = {};  (in TEST())
    220  expected_subgraph.input_tensors = {0};  (in TEST())
    243  expected_subgraph0.input_tensors = {0};  (in TEST())
    248  expected_subgraph1.input_tensors = {1};  (in TEST())
    270  expected_subgraph0.input_tensors = {0};  (in TEST())
    275  expected_subgraph1.input_tensors = {1};  (in TEST())
    [all …]
|
/external/armnn/python/pyarmnn/test/ |
D | test_runtime.py |
    36   input_tensors = [const_tensor_pair]
    49   yield preferred_backends, network, runtime, input_tensors, output_tensors
    75   input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])
    85   yield runtime, net_id, input_tensors, output_tensors
    178  input_tensors = random_runtime[3]
    185  runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
    192  input_tensors = []
    200  runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
    214  input_tensors = mock_model_runtime[2]
    220  runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
    [all …]
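These tests exercise the PyArmNN workload path visible in the hits: input tensors are built with ann.make_input_tensors from binding info plus raw data, then handed to IRuntime.EnqueueWorkload together with pre-made output tensors. A condensed sketch of that flow, assuming a network has already been parsed, optimized, and loaded; parser, graph_id, runtime, net_id, the tensor names "input"/"output", and the input shape are placeholders from a hypothetical setup:

```python
import numpy as np
import pyarmnn as ann

# Assumed to exist already:
#   parser   - e.g. the ann.ITfLiteParser() that produced the network
#   graph_id - subgraph index used when querying binding info (often 0)
#   runtime  - the ann.IRuntime the optimized network was loaded into
#   net_id   - the network id returned by runtime.LoadNetwork(...)
input_binding_info = parser.GetNetworkInputBindingInfo(graph_id, "input")
output_binding_info = parser.GetNetworkOutputBindingInfo(graph_id, "output")

# Pack raw data into ArmNN input tensors and allocate matching outputs.
# Shape and dtype must match the network's input tensor.
input_data = np.random.rand(1, 224, 224, 3).astype(np.float32)
input_tensors = ann.make_input_tensors([input_binding_info], [input_data])
output_tensors = ann.make_output_tensors([output_binding_info])

# Run inference and pull the results back out as ndarrays.
runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
results = ann.workload_tensors_to_ndarray(output_tensors)
```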
|
D | test_tensor_conversion.py |
    49  input_tensors = ann.make_input_tensors(input_tensor_info, input_data)
    50  assert len(input_tensors) == 1
    52  for tensor, tensor_info in zip(input_tensors, input_tensor_info):
    90  input_tensors = ann.make_input_tensors(input_tensor_info, input_data)
    91  assert len(input_tensors) == 1
    93  for tensor, tensor_info in zip(input_tensors, input_tensor_info):
|
/external/tensorflow/tensorflow/lite/experimental/mlir/testing/ |
D | mlir_convert.py |
    30   def mlir_convert(options, graph_def, input_tensors, output_tensors, **kwargs):  (argument)
    49   input_arrays = [x[0] for x in input_tensors]
    50   input_shapes = zip_test_utils.get_input_shapes_map(input_tensors)
    76   def representative_dataset(input_tensors):  (argument)
    78   for _, shape, _ in input_tensors:
    88   yield representative_dataset(input_tensors)
    126  input_tensors,  (argument)
    156  for input_tensor in input_tensors:
    161  input_types = ",".join([x[2] for x in input_tensors])
    176  ",".join([x[0] for x in input_tensors]),
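In this test harness each entry of input_tensors is a (name, shape, dtype) tuple, and representative_dataset yields random data of matching shape so the converter can calibrate quantization. The same pattern applies to the public TFLite converter; a sketch under the assumption of a SavedModel at ./saved_model with a single float input of shape (1, 224, 224, 3):

```python
import numpy as np
import tensorflow as tf

# (name, shape, dtype) triples, mirroring how the test harness describes inputs.
input_tensors = [("input", (1, 224, 224, 3), np.float32)]

def representative_dataset():
    # Yield a handful of calibration samples, one list entry per model input.
    for _ in range(10):
        yield [np.random.uniform(low=0, high=1, size=shape).astype(dtype)
               for _, shape, dtype in input_tensors]

converter = tf.lite.TFLiteConverter.from_saved_model("./saved_model")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
tflite_model = converter.convert()
```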
|
/external/tensorflow/tensorflow/lite/python/ |
D | lite.py |
    808  input_tensors = [
    816  return graph_def, input_tensors, output_tensors
    819  def _validate_inputs(self, graph_def, input_tensors):  (argument)
    839  for tensor in input_tensors:
    863  def _optimize_tf_model(self, graph_def, input_tensors, output_tensors,  (argument)
    883  input_tensors,
    889  def convert(self, graph_def, input_tensors, output_tensors):  (argument)
    907  self._validate_inputs(graph_def, input_tensors)
    923  input_tensors=input_tensors,
    979  graph_def, input_tensors, output_tensors = self._load_saved_model(
    [all …]
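These internal converter methods receive the session graph plus explicit input and output tensors; the corresponding public TF1 entry point is tf.compat.v1.lite.TFLiteConverter.from_session. A minimal sketch (the toy graph is invented for the example):

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# A toy TF1 graph: y = x @ W with a single placeholder input.
x = tf.placeholder(tf.float32, shape=[1, 4], name="x")
w = tf.Variable(tf.ones([4, 2]), name="w")
y = tf.identity(tf.matmul(x, w), name="y")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # The converter is handed the live session plus explicit input/output tensors.
    converter = tf.lite.TFLiteConverter.from_session(sess, [x], [y])
    tflite_model = converter.convert()
```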
|
/external/tensorflow/tensorflow/compiler/mlir/tfrt/benchmarks/ |
D | benchmark_mlir_function.cc |
    37   llvm::SmallVector<Tensor> input_tensors;  (in GetInputTensors(), local)
    42   input_tensors.emplace_back(spec.dtype, shape);  (in GetInputTensors())
    47   input_tensors.back().flat<float>().setRandom();  (in GetInputTensors())
    54   return input_tensors;  (in GetInputTensors())
    77   llvm::SmallVector<Tensor> input_tensors = GetInputTensors(input_specs);  (in RunMlirBenchmark(), local)
    81   for (const Tensor& tensor : input_tensors)  (in RunMlirBenchmark())
    133  llvm::SmallVector<Tensor> input_tensors = GetInputTensors(input_specs);  (in RunEigenBenchmark(), local)
    137  compute(input_tensors, device);  (in RunEigenBenchmark())
|
/external/tensorflow/tensorflow/lite/testing/ |
D | toco_convert.py |
    76   def toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs):  (argument)
    101  input_arrays = [x[0] for x in input_tensors]
    102  data_types = [zip_test_utils.TF_TYPE_INFO[x[2]][1] for x in input_tensors]
    111  input_shapes = zip_test_utils.get_input_shapes_map(input_tensors)
    122  def representative_dataset(input_tensors):  (argument)
    124  for _, shape, _ in input_tensors:
    134  yield representative_dataset(input_tensors)
    170  shapes=[x[1] for x in input_tensors],
|
/external/tensorflow/tensorflow/python/eager/ |
D | tape.py |
    181  def record_operation(op_type, output_tensors, input_tensors, backward_function,  (argument)
    185  input_tensors, backward_function,
    189  def record_operation_backprop_only(op_type, output_tensors, input_tensors,  (argument)
    193  input_tensors,
    197  def record_operation_forwardprop_only(op_type, output_tensors, input_tensors,  (argument)
    215  op_type, output_tensors, input_tensors, backward_function,
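record_operation is the low-level hook that registers an op's inputs, outputs, and backward function on any active gradient tapes. Application code normally reaches it indirectly through tf.custom_gradient, which performs that recording on the caller's behalf; a sketch of that public route (the function and its gradient are invented for the example):

```python
import tensorflow as tf

@tf.custom_gradient
def clipped_square(x):
    # Forward result plus a backward function; when called under a GradientTape,
    # TensorFlow records the op with these inputs/outputs and this grad function.
    y = tf.square(x)

    def grad(upstream):
        # Clip the gradient of x**2 (which is 2*x) to [-1, 1].
        return upstream * tf.clip_by_value(2.0 * x, -1.0, 1.0)

    return y, grad

x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = clipped_square(x)
print(tape.gradient(y, x))  # 1.0, since 2*x = 6 is clipped to 1
```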
|
/external/tensorflow/tensorflow/lite/testing/op_tests/ |
D | add_n.py |
    59  input_tensors = []
    61  input_tensors.append(
    66  out = tf.add_n(input_tensors)
    67  return input_tensors, [out]
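This op test builds a list of inputs and reduces them with tf.add_n, which sums a list of same-shaped tensors element-wise. A minimal standalone sketch of the same call:

```python
import tensorflow as tf

# tf.add_n sums a list of tensors that all share the same shape and dtype.
input_tensors = [tf.constant([1.0, 2.0]),
                 tf.constant([10.0, 20.0]),
                 tf.constant([100.0, 200.0])]
out = tf.add_n(input_tensors)  # -> [111.0, 222.0]
```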
|
D | identity.py |
    43  input_tensors = []
    46  input_tensors = [
    59  inputs_doubled = [input_tensor * 2.0 for input_tensor in input_tensors]
    66  return input_tensors, identity_outputs
|
D | batch_to_space_nd.py |
    81   input_tensors = [input_tensor]
    90   input_tensors.append(block_shape)
    99   input_tensors.append(crops)
    102  return input_tensors, [out]
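Here the op test feeds three inputs (the data tensor, block_shape, and crops) to the batch-to-space op. A minimal sketch of the underlying public op, using the standard documentation example of rearranging a batch of four 1x1 images into one 2x2 image:

```python
import tensorflow as tf

# Four 1x1x1 "images" in the batch dimension...
input_tensor = tf.constant([[[[1]]], [[[2]]], [[[3]]], [[[4]]]], dtype=tf.float32)
block_shape = [2, 2]
crops = [[0, 0], [0, 0]]

# ...are rearranged into a single 2x2x1 image: batch 4 -> 1, spatial 1x1 -> 2x2.
out = tf.batch_to_space(input_tensor, block_shape, crops)
print(out.shape)  # (1, 2, 2, 1)
```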
|
/external/tensorflow/tensorflow/python/keras/engine/ |
D | node_test.py |
    46   self.assertListEqual(node.input_tensors, [a])
    75   self.assertIs(dense._inbound_nodes[0].input_tensors, a)
    76   self.assertIs(dense._inbound_nodes[1].input_tensors, b)
    102  self.assertLen(merge_layer._inbound_nodes[0].input_tensors, 2)
    103  self.assertEqual(merge_layer._inbound_nodes[0].input_tensors, [a_2, b_2])
    150  self.assertLen(merge_layer._inbound_nodes[0].input_tensors, 2)
    151  self.assertEqual(merge_layer._inbound_nodes[0].input_tensors, [a, b])
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu_feed.py |
    377  def set_configuration_from_input_tensors(self, input_tensors):  (argument)
    391  if len(input_tensors) != self.number_of_tuple_elements:
    393  % (str(input_tensors), self.number_of_tuple_elements))
    394  self.set_tuple_shapes([t.shape for t in input_tensors])
    395  self.set_tuple_types([t.dtype for t in input_tensors])
    397  def set_configuration_from_sharded_input_tensors(self, input_tensors):  (argument)
    421  number_of_shards = len(input_tensors)
    423  for t in input_tensors:
    428  str(input_tensors), self.number_of_tuple_elements))
    431  sharded_shapes = [[t[i].shape for t in input_tensors]
    [all …]
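As the hits show, set_configuration_from_input_tensors copies the shapes and dtypes of a list of example tensors onto the infeed queue, and the list length must match number_of_tuple_elements. A hedged sketch against the non-public tensorflow.python.tpu.tpu_feed module; the import path and constructor arguments reflect my reading of this file and may differ across TF versions:

```python
import tensorflow as tf
from tensorflow.python.tpu import tpu_feed  # non-public API; subject to change

# Example tensors whose shapes/dtypes define the infeed tuple layout.
features = tf.zeros([128, 224, 224, 3], tf.float32)
labels = tf.zeros([128], tf.int32)

# The queue expects exactly as many tensors as number_of_tuple_elements.
queue = tpu_feed.InfeedQueue(number_of_tuple_elements=2)
queue.set_configuration_from_input_tensors([features, labels])
# The queue now carries tuple shapes [(128, 224, 224, 3), (128,)] and
# tuple types [tf.float32, tf.int32], taken straight from the tensors above.
```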
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | map_defun_op_test.cc |
    44   std::vector<Tensor> input_tensors = arguments_;  (in GetInputTensors(), local)
    45   input_tensors.insert(input_tensors.end(), captured_inputs_.begin(),  (in GetInputTensors())
    47   return input_tensors;  (in GetInputTensors())
    244  auto input_tensors = test_case.map_defun_op_params.GetInputTensors();  (in TEST_P(), local)
    246  for (auto& input : input_tensors) {  (in TEST_P())
    273  auto input_tensors = test_case.map_defun_op_params.GetInputTensors();  (in TEST_F(), local)
    275  for (auto& input : input_tensors) {  (in TEST_F())
|
/external/tflite-support/tensorflow_lite_support/cc/task/vision/core/ |
D | base_vision_task_api.h |
    114  absl::Status Preprocess(const std::vector<TfLiteTensor*>& input_tensors,  (in Preprocess(), argument)
    131  if (input_tensors.size() != 1) {  (in Preprocess())
    179  if (input_tensors[0]->bytes != input_data_byte_size) {  (in Preprocess())
    187  input_data, input_data_byte_size / sizeof(uint8), input_tensors[0]);  (in Preprocess())
    190  if (input_tensors[0]->bytes / sizeof(float) !=  (in Preprocess())
    200  input_tensors[0]);  (in Preprocess())
|
/external/armnn/python/pyarmnn/examples/common/ |
D | network_executor.py |
    66   def execute_network(input_tensors: list, output_tensors: list, runtime, net_id: int) -> List[np.nda…
    79   runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
    98   def run(self, input_tensors: list) -> List[np.ndarray]:
    108  return execute_network(input_tensors, self.output_tensors, self.runtime, self.network_id)
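This example helper wraps the same EnqueueWorkload call seen in the tests above and returns the results as NumPy arrays. A hedged sketch of what such a wrapper can look like, assuming PyArmNN's workload_tensors_to_ndarray helper for the conversion; the body below is illustrative, not the file's actual implementation:

```python
from typing import List

import numpy as np
import pyarmnn as ann

def execute_network(input_tensors: list, output_tensors: list,
                    runtime, net_id: int) -> List[np.ndarray]:
    # Run the loaded network, then unpack each output workload tensor
    # into a plain ndarray for the caller.
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
    return ann.workload_tensors_to_ndarray(output_tensors)
```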
|