/external/federated-compute/fcp/aggregation/core/ |
D | input_tensor_list_test.cc |
     70  InputTensorList tensor_list = CreateInlined();  in TEST_F() local
     71  EXPECT_THAT(tensor_list.size(), Eq(3));  in TEST_F()
     75  InputTensorList tensor_list = CreateInlined();  in TEST_F() local
     76  auto iter = tensor_list.begin();  in TEST_F()
     83  EXPECT_THAT(iter, Eq(tensor_list.end()));  in TEST_F()
     88  InputTensorList tensor_list(std::move(moved_tensor_list));  in TEST_F() local
     89  auto iter = tensor_list.begin();  in TEST_F()
     96  EXPECT_THAT(iter, Eq(tensor_list.end()));  in TEST_F()
    103  InputTensorList tensor_list = CreateAllocated();  in TEST_F() local
    104  tensor_list = std::move(moved_tensor_list);  in TEST_F()
    [all …]
|
/external/pytorch/test/cpp/api/ |
D | ivalue.cpp |
     30  c10::List<torch::Tensor> tensor_list(tensor_vector);  in TEST() local
     31  torch::IValue tensor_list_ivalue(tensor_list);  in TEST()
     36  ASSERT_TRUE(ivalue_compare(tensor_list[0].get(), tensor_list[3].get()));  in TEST()
     37  ASSERT_FALSE(ivalue_compare(tensor_list[0].get(), tensor_list[1].get()));  in TEST()
     38  ASSERT_FALSE(ivalue_compare(tensor_list[0].get(), tensor_list[2].get()));  in TEST()
     39  ASSERT_FALSE(ivalue_compare(tensor_list[1].get(), tensor_list[4].get()));  in TEST()
     40  ASSERT_TRUE(tensor_list[0].get().isAliasOf(tensor_list[2].get()));  in TEST()
     54  tensor_list[0].get().toTensor()));  in TEST()
     56  tensor_list[1].get().toTensor()));  in TEST()
     58  tensor_list[2].get().toTensor()));  in TEST()
    [all …]
|
/external/tensorflow/tensorflow/compiler/mlir/tfr/tests/ |
D | ops.mlir |
     17  func.func private @tensor_list_type_noconstraint() -> !tfr.tensor_list
     22  func.func private @tensor_list_type_array_like() -> !tfr.tensor_list<[N, T]>
     27  func.func private @tensor_list_type_tuple_like() -> !tfr.tensor_list<input_T>
     42  func.func @call_op(%arg0: !tfr.tensor<T>, %arg1: !tfr.tensor_list<TL>, %arg2: i32) -> !tfr.tensor<K…
     43  …%0 = tfr.call @Foo(%arg0, %arg1, %arg2) : (!tfr.tensor<T>, !tfr.tensor_list<TL>, i32) -> !tfr.tens…
    212  func.func @get_element(%arg0: !tfr.tensor_list<T>) -> !tfr.tensor {
    214  %0 = tfr.get_element %arg0[%cst] : (!tfr.tensor_list<T>, index) -> !tfr.tensor
    221  func.func @build_list(%arg0: !tfr.tensor<A>, %arg1: !tfr.tensor<B>) -> !tfr.tensor_list {
    222  %0 = "tfr.build_list"(%arg0, %arg1) : (!tfr.tensor<A>, !tfr.tensor<B>) -> !tfr.tensor_list
    223  func.return %0 : !tfr.tensor_list
    [all …]
|
/external/tensorflow/tensorflow/python/training/ |
D | input.py |
    323  def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,  argument
    358  with ops.name_scope(name, "input_producer", tensor_list):
    359  tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
    360  if not tensor_list:
    363  range_size = array_ops.shape(tensor_list[0])[0]
    370  output = [array_ops.gather(t, index) for t in tensor_list]
    378  return [tensor for tensor_list in tensor_list_list for tensor in tensor_list]
    461  def _as_original_type(original_tensors, tensor_list):  argument
    466  tensor_list = [tensor_list]
    467  return {k: tensor_list[i]
    [all …]
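The `slice_input_producer` hits above come from the queue-runner based TF1 input pipeline. A minimal usage sketch, assuming the deprecated `tf.compat.v1.train.slice_input_producer` API with graph execution; the tensor values and the seed are illustrative:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Two parallel "columns"; the producer slices both along axis 0 in lockstep.
images = tf.constant([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])
labels = tf.constant([0, 1, 2])

# Returns one tensor per input; each fetch yields a single (image, label) slice.
image, label = tf.train.slice_input_producer(
    [images, labels], num_epochs=1, shuffle=True, seed=42)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # num_epochs is tracked in a local variable
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            print(sess.run([image, label]))
    except tf.errors.OutOfRangeError:
        pass
    finally:
        coord.request_stop()
        coord.join(threads)
```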
|
/external/executorch/runtime/core/exec_aten/util/ |
D | tensor_util_aten.cpp |
     81  const exec_aten::ArrayRef<exec_aten::Tensor> tensor_list) {  in tensors_have_same_dim_order() argument
     82  if (tensor_list.size() < 2) {  in tensors_have_same_dim_order()
     90  get_dim_order(tensor_list[0], first_dim_order, tensor_list[0].dim()) ==  in tensors_have_same_dim_order()
     95  is_contiguous_dim_order(first_dim_order, tensor_list[0].dim());  in tensors_have_same_dim_order()
     97  is_channels_last_dim_order(first_dim_order, tensor_list[0].dim());  in tensors_have_same_dim_order()
     99  for (size_t i = 1; i < tensor_list.size(); ++i) {  in tensors_have_same_dim_order()
    101  get_dim_order(tensor_list[i], other_dim_order, tensor_list[i].dim()) ==  in tensors_have_same_dim_order()
    107  is_contiguous_dim_order(other_dim_order, tensor_list[i].dim());  in tensors_have_same_dim_order()
    109  is_channels_last_dim_order(other_dim_order, tensor_list[i].dim());  in tensors_have_same_dim_order()
    115  tensor_list.size());  in tensors_have_same_dim_order()
|
D | tensor_util_portable.cpp |
    111  const exec_aten::ArrayRef<exec_aten::Tensor> tensor_list) {  in tensors_have_same_dim_order() argument
    112  if (tensor_list.size() < 2) {  in tensors_have_same_dim_order()
    117  for (size_t i = 0; i < tensor_list.size(); ++i) {  in tensors_have_same_dim_order()
    120  tensor_list[i].dim_order().data(),  in tensors_have_same_dim_order()
    121  tensor_list[i].dim_order().size());  in tensors_have_same_dim_order()
    124  tensor_list[i].dim_order().data(),  in tensors_have_same_dim_order()
    125  tensor_list[i].dim_order().size());  in tensors_have_same_dim_order()
    131  tensor_list.size());  in tensors_have_same_dim_order()
|
/external/tensorflow/tensorflow/lite/testing/op_tests/ |
D | tensor_list_dynamic_shape.py |
     39  tensor_list = list_ops.tensor_list_reserve(
     44  init_state = (0, tensor_list)
     47  def loop_body(i, tensor_list):  argument
     51  new_list = list_ops.tensor_list_set_item(tensor_list, i, new_item)
     54  _, tensor_list = tf.while_loop(condition, loop_body, init_state)
     56  tensor_list,
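This op test builds a TensorList inside a `tf.while_loop`. A minimal sketch of the same pattern, assuming the internal `tensorflow.python.ops.list_ops` module used by the test above; the element count and shapes are illustrative:

```python
import tensorflow as tf
from tensorflow.python.ops import list_ops  # internal module used by the op tests above

NUM_ELEMENTS = 5

# Reserve a TensorList of 5 unknown-shaped elements, then fill it in a while loop.
tensor_list = list_ops.tensor_list_reserve(
    element_shape=None, num_elements=NUM_ELEMENTS, element_dtype=tf.float32)
init_state = (tf.constant(0), tensor_list)

def condition(i, _):
  return i < NUM_ELEMENTS

def loop_body(i, tensor_list):
  new_item = tf.fill([2], tf.cast(i, tf.float32))
  new_list = list_ops.tensor_list_set_item(tensor_list, i, new_item)
  return i + 1, new_list

_, tensor_list = tf.while_loop(condition, loop_body, init_state)

# Stack back into a dense [5, 2] tensor.
result = list_ops.tensor_list_stack(tensor_list, element_dtype=tf.float32)
```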
|
D | tensor_list_resize.py |
     42  tensor_list = list_ops.tensor_list_from_tensor(data,
     44  tensor_list = list_ops.tensor_list_resize(tensor_list,
     47  tensor_list, element_dtype=parameters["element_dtype"])
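A small sketch of the from-tensor / resize / stack sequence exercised above, again assuming the internal `list_ops` module; the input data and sizes are illustrative:

```python
import tensorflow as tf
from tensorflow.python.ops import list_ops  # internal module, as in the op test above

data = tf.reshape(tf.range(12, dtype=tf.float32), [4, 3])

# Turn a dense tensor into a TensorList of its rows, then shrink it to 2 elements.
tensor_list = list_ops.tensor_list_from_tensor(data, element_shape=[3])
tensor_list = list_ops.tensor_list_resize(tensor_list, 2)

# Stacking yields the first two rows as a [2, 3] tensor.
result = list_ops.tensor_list_stack(tensor_list, element_dtype=tf.float32)
```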
|
D | tensor_list_set_item.py |
     43  tensor_list = list_ops.tensor_list_from_tensor(data,
     45  tensor_list = list_ops.tensor_list_set_item(tensor_list,
     48  tensor_list,
|
D | dynamic_update_slice.py |
     61  tensor_list = list_ops.tensor_list_from_tensor(data,
     63  tensor_list = list_ops.tensor_list_set_item(tensor_list,
     66  tensor_list,
|
/external/tensorflow/tensorflow/lite/experimental/mlir/testing/op_tests/ |
D | tensor_list_dynamic_shape.py |
     43  tensor_list = list_ops.tensor_list_reserve(
     48  init_state = (0, tensor_list)
     51  def loop_body(i, tensor_list):  argument
     55  new_list = list_ops.tensor_list_set_item(tensor_list, i, new_item)
     58  _, tensor_list = tf.while_loop(condition, loop_body, init_state)
     60  tensor_list,
|
D | tensor_list_resize.py |
     45  tensor_list = list_ops.tensor_list_from_tensor(data,
     47  tensor_list = list_ops.tensor_list_resize(tensor_list,
     50  tensor_list, element_dtype=parameters["element_dtype"])
|
D | tensor_list_set_item.py |
     47  tensor_list = list_ops.tensor_list_from_tensor(data,
     49  tensor_list = list_ops.tensor_list_set_item(tensor_list,
     52  tensor_list,
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | portable_tensor.h |
     37  const TfLiteIntArray& tensor_list) {  in VectorOfTensors() argument
     38  int num_tensors = tensor_list.size;  in VectorOfTensors()
     45  TfLiteTensor* t = &context.tensors[tensor_list.data[i]];  in VectorOfTensors()
     81  const TfLiteIntArray& tensor_list)  in VectorOfQuantizedTensors() argument
     82  : VectorOfTensors<uint8_t>(context, tensor_list) {  in VectorOfQuantizedTensors()
     83  for (int i = 0; i < tensor_list.size; ++i) {  in VectorOfQuantizedTensors()
     84  TfLiteTensor* t = &context.tensors[tensor_list.data[i]];  in VectorOfQuantizedTensors()
|
/external/pytorch/torch/distributed/algorithms/_quantization/ |
D | quantization.py |
     44  def _quantize_tensor_list(tensor_list, qtype):  argument
     45  if not isinstance(tensor_list, list) or not all(
     46  isinstance(p, torch.Tensor) for p in tensor_list
     51  quantized_tensor_list = [_quantize_tensor(t, qtype) for t in tensor_list]
     80  def _dequantize_tensor_list(tensor_list, qtype, quant_loss=None):  argument
     81  if not isinstance(tensor_list, list) or not all(
     82  isinstance(p, torch.Tensor) for p in tensor_list
     87  dequantized_tensor_list = [_dequantize_tensor(t, qtype) for t in tensor_list]
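The hits show `_quantize_tensor_list` validating a list of tensors and then mapping a per-tensor helper over it. A standalone sketch of the same validate-then-map pattern, using the public `torch.quantize_per_tensor` API rather than the module's private `_quantize_tensor`; the scale and zero point are illustrative:

```python
import torch

def quantize_tensor_list(tensor_list, scale=0.1, zero_point=0):
    # Mirror the validation in _quantize_tensor_list: require a list of tensors.
    if not isinstance(tensor_list, list) or not all(
        isinstance(t, torch.Tensor) for t in tensor_list
    ):
        raise RuntimeError("quantize_tensor_list expects a list of torch.Tensor")
    # Map a per-tensor quantization over the list.
    return [
        torch.quantize_per_tensor(t, scale, zero_point, torch.quint8)
        for t in tensor_list
    ]

def dequantize_tensor_list(quantized_list):
    # Undo the quantization element-wise, preserving list order.
    return [t.dequantize() for t in quantized_list]

tensors = [torch.rand(4), torch.rand(2, 3)]
restored = dequantize_tensor_list(quantize_tensor_list(tensors))
```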
|
/external/tensorflow/tensorflow/core/kernels/ |
D | list_kernels.h |
     62  const TensorList& tensor_list, int index,
    174  const TensorList* tensor_list = nullptr;  in Compute() local
    175  OP_REQUIRES_OK(c, GetInputList(c, 0, &tensor_list));  in Compute()
    177  c, element_dtype_ == tensor_list->element_dtype,  in Compute()
    180  " but list elements ", DataTypeString(tensor_list->element_dtype)));  in Compute()
    182  OP_REQUIRES(c, tensor_list->tensors().size() == num_elements_,  in Compute()
    186  tensor_list->tensors().size(), " elements."));  in Compute()
    189  OP_REQUIRES_OK(c, GetElementShapeFromInput(c, *tensor_list, 1,  in Compute()
    194  !tensor_list->tensors().empty(),  in Compute()
    201  if (!tensor_list->element_shape.IsFullyDefined()) {  in Compute()
    [all …]
|
/external/pytorch/test/cpp/jit/ |
D | test_utils.h |
     52  using tensor_list = std::vector<at::Tensor>;  variable
     60  void assertAllClose(const tensor_list& a, const tensor_list& b);
     66  std::pair<tensor_list, tensor_list> runGradient(
     68  tensor_list& tensors_in,
     69  tensor_list& tensor_grads_in);
|
/external/tensorflow/tensorflow/python/autograph/lang/ |
D | special_functions_test.py |
     39  l = special_functions.tensor_list([],
     46  l = special_functions.tensor_list((),
     54  l = special_functions.tensor_list(
     62  special_functions.tensor_list(np.array([1, 2, 3]))
     67  special_functions.tensor_list([])
     72  l = special_functions.tensor_list(elements)
     80  l = special_functions.tensor_list(elements, use_tensor_array=True)
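These tests exercise AutoGraph's `tensor_list` helper. A minimal sketch, assuming the internal `tensorflow.python.autograph.lang.special_functions` module used by the test above; the element values are illustrative:

```python
import tensorflow as tf
from tensorflow.python.autograph.lang import special_functions

elements = [tf.constant(1), tf.constant(2), tf.constant(3)]

# Backed by a TensorList by default; element dtype and shape are inferred
# from the provided elements (an empty list would need them passed explicitly).
l = special_functions.tensor_list(elements)

# Backed by a TensorArray instead when requested.
ta = special_functions.tensor_list(elements, use_tensor_array=True)
```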
|
/external/tensorflow/tensorflow/python/data/experimental/ops/ |
D | compression_ops.py |
     31  tensor_list = structure.to_tensor_list(element_spec, element)
     32  return ged_ops.compress_element(tensor_list)
     49  tensor_list = ged_ops.uncompress_element(
     51  return structure.from_tensor_list(output_spec, tensor_list)
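Here an element is flattened to its component tensor list before compression and rebuilt afterwards. A sketch of just the flatten/rebuild half, assuming the internal `tensorflow.python.data.util.structure` helpers referenced above; the compress/uncompress ops themselves are left out, and the example element is illustrative:

```python
import tensorflow as tf
from tensorflow.python.data.util import structure  # internal helpers used above

element = (tf.constant([1, 2, 3]),
           tf.sparse.SparseTensor(indices=[[0]], values=[7], dense_shape=[4]))

# Derive the element spec, flatten the element to a flat list of component tensors,
# then rebuild the original (nested, composite) structure from that list.
spec = structure.type_spec_from_value(element)
tensor_list = structure.to_tensor_list(spec, element)
rebuilt = structure.from_tensor_list(spec, tensor_list)
```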
|
/external/pytorch/test/distributed/_tensor/ |
D | test_utils.py |
    292  tensor_list, _ = shard_placement._split_tensor(x, self.world_size)
    293  shard_x = tensor_list[self.rank]
    308  tensor_list, _ = shard_placement._split_tensor(x, self.world_size)
    309  shard_x = tensor_list[self.rank]
    341  tensor_list, _ = shard_placement_dim0._split_tensor(x, mesh_dim0_size)
    343  shard_x = tensor_list[mesh_dim0_local_rank]
    348  tensor_list, _ = shard_placement_dim1._split_tensor(shard_x, mesh_dim1_size)
    350  shard_x = tensor_list[mesh_dim1_local_rank]
    380  tensor_list, _ = shard_placement_dim0._split_tensor(x, mesh_dim0_size)
    381  shard_x = tensor_list[mesh_dim0_local_rank]
    [all …]
|
/external/pytorch/torch/distributed/tensor/ |
D | placement_types.py |
     91  tensor_list = list(torch.chunk(tensor, num_chunks, dim=self.dim))
     92  num_empty_tensors = num_chunks - len(tensor_list)
     98  tensor_list = [t.contiguous() for t in tensor_list]
    100  fill_empty_tensor_to_shards(tensor_list, self.dim, num_empty_tensors),
    109  tensor_list[idx].size(self.dim) if idx < len(tensor_list) else 0
    116  tensor_list = fill_empty_tensor_to_shards(
    117  tensor_list, self.dim, num_empty_tensors
    120  for shard, pad_size in zip(tensor_list, pad_sizes):
    468  tensor_list = [
    480  tensor_list = [t.contiguous() for t in tensor_list]
    [all …]
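The `Shard` placement above splits a tensor with `torch.chunk`, makes the shards contiguous, adds empty shards when `chunk` returns too few, and pads short shards to a common size. A self-contained sketch of that chunk-and-pad idea using only public torch ops; the helper name below is illustrative, not DTensor's internals:

```python
import torch
import torch.nn.functional as F

def split_with_padding(tensor, num_chunks, dim=0):
    """Split `tensor` into exactly `num_chunks` equally sized shards along `dim`."""
    chunks = [c.contiguous() for c in torch.chunk(tensor, num_chunks, dim=dim)]
    full_size = chunks[0].size(dim)
    # Add empty shards if chunk() produced fewer than num_chunks pieces.
    while len(chunks) < num_chunks:
        shape = list(tensor.shape)
        shape[dim] = 0
        chunks.append(tensor.new_empty(shape))
    # Right-pad each shard along `dim` up to the size of the first (largest) chunk.
    padded = []
    for c in chunks:
        pad_amount = full_size - c.size(dim)
        if pad_amount > 0:
            # F.pad takes (left, right) pairs starting from the last dimension.
            pad = [0, 0] * (tensor.dim() - 1 - dim) + [0, pad_amount]
            c = F.pad(c, pad)
        padded.append(c)
    return padded

shards = split_with_padding(torch.arange(10).reshape(10, 1), num_chunks=4)
```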
|
/external/tensorflow/tensorflow/python/framework/ |
D | type_spec.py |
    392  def _from_tensor_list(self, tensor_list: List["ops.Tensor"]) -> Any:
    406  self.__check_tensor_list(tensor_list)
    407  return self._from_compatible_tensor_list(tensor_list)
    410  tensor_list: List["ops.Tensor"]) -> Any:
    423  self._component_specs, tensor_list, expand_composites=True))
    513  def __check_tensor_list(self, tensor_list):  argument
    516  specs = [type_spec_from_value(t) for t in tensor_list]
    806  tensor_list = nest.flatten(component_batched_tensor_lists)
    807  if any(t.shape.ndims == 0 for t in tensor_list):
    811  return tensor_list
    [all …]
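`TypeSpec._from_tensor_list` rebuilds a value from its flat list of component tensors. The public counterpart of this flatten/rebuild step is `tf.nest` with `expand_composites=True`; a minimal sketch, with an illustrative structure containing a SparseTensor:

```python
import tensorflow as tf

value = {"ids": tf.constant([1, 2, 3]),
         "sparse": tf.sparse.SparseTensor(indices=[[0, 0]], values=[7], dense_shape=[2, 2])}

# Flatten the structure down to plain component tensors (the SparseTensor contributes
# its indices/values/dense_shape), then pack the flat list back into the original form.
tensor_list = tf.nest.flatten(value, expand_composites=True)
rebuilt = tf.nest.pack_sequence_as(value, tensor_list, expand_composites=True)

# tf.type_spec_from_value yields the spec that _from_tensor_list validates against.
spec = tf.type_spec_from_value(value)
```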
|
D | python_api_parameter_converter.cc |
    266  PyObject* tensor_list = params[index];  in ConvertInputsWithTypeAttr() local
    267  DCHECK(PyList_CheckExact(tensor_list));  in ConvertInputsWithTypeAttr()
    268  Py_ssize_t num_tensors = PyList_GET_SIZE(tensor_list);  in ConvertInputsWithTypeAttr()
    269  PyObject** tensors = PyList_ITEMS(tensor_list);  in ConvertInputsWithTypeAttr()
    288  PyObject* tensor_list = params[index];  in ConvertInputsWithTypeAttr() local
    289  DCHECK(PyList_CheckExact(tensor_list));  in ConvertInputsWithTypeAttr()
    290  Py_ssize_t num_tensors = PyList_GET_SIZE(tensor_list);  in ConvertInputsWithTypeAttr()
    291  PyObject** items = PyList_ITEMS(tensor_list);  in ConvertInputsWithTypeAttr()
    362  PyObject* tensor_list = params[index];  in ConvertInputsWithTypeListAttr() local
    363  DCHECK(PyList_CheckExact(tensor_list));  in ConvertInputsWithTypeListAttr()
    [all …]
|
D | sparse_tensor.py |
    338  def _from_components(self, tensor_list):  argument
    339  if (all(isinstance(t, np.ndarray) for t in tensor_list) and
    341  return SparseTensorValue(*tensor_list)
    343  return SparseTensor(*tensor_list)
    372  def _from_compatible_tensor_list(self, tensor_list):  argument
    373  tensor_list = gen_sparse_ops.deserialize_sparse(tensor_list[0], self._dtype)
    374  indices, values, dense_shape = tensor_list
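`_from_components` reassembles a SparseTensor from its three component tensors. A minimal sketch using the public constructor; the indices, values, and shape are illustrative:

```python
import tensorflow as tf

# The three component tensors that SparseTensorSpec._from_components receives.
indices = tf.constant([[0, 1], [2, 3]], dtype=tf.int64)
values = tf.constant([10.0, 20.0])
dense_shape = tf.constant([3, 4], dtype=tf.int64)

tensor_list = [indices, values, dense_shape]

# Equivalent of `SparseTensor(*tensor_list)` in the hit above.
sp = tf.sparse.SparseTensor(*tensor_list)
dense = tf.sparse.to_dense(sp)  # [3, 4] dense tensor with two non-zero entries
```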
|
/external/executorch/runtime/executor/ |
D | tensor_parser_exec_aten.cpp |
     77  auto* tensor_list =  in parseTensorList() local
     80  if (tensor_list == nullptr) {  in parseTensorList()
     95  new (&tensor_list[output_idx]) exec_aten::Tensor(  in parseTensorList()
    102  evalp_list, tensor_list, tensor_indices->size());  in parseTensorList()
|