Searched refs: input_tensor (Results 1 – 25 of 493), sorted by relevance

/external/tflite-support/tensorflow_lite_support/custom_ops/python/
sentencepiece_tokenizer.py
52 input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(inputs)
53 if input_tensor.shape.ndims is None:
55 if ragged_tensor.is_ragged(input_tensor):
57 input_tensor = input_tensor.with_row_splits_dtype(tf.int32)
59 tokens = self.tokenize(input_tensor.flat_values)
60 return input_tensor.with_flat_values(tokens)
62 if input_tensor.shape.ndims > 1:
66 input_tensor, row_splits_dtype=tf.int32))
67 elif input_tensor.shape.ndims == 0:
68 tokens = self.tokenize(tf.stack([input_tensor]))
[all …]
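The snippet above dispatches on the input's structure before tokenizing. A minimal sketch of that dispatch pattern with the public TF API, assuming a hypothetical `tokenize_flat` callable that only accepts a rank-1 string tensor:

```python
import tensorflow as tf

def tokenize_any_rank(tokenize_flat, inputs):
    """Apply a rank-1 tokenizer to scalar, dense, or ragged string input."""
    if isinstance(inputs, tf.RaggedTensor):
        # Tokenize the flat values, then restore the original row partitions.
        return inputs.with_flat_values(tokenize_flat(inputs.flat_values))
    x = tf.convert_to_tensor(inputs)
    if x.shape.ndims == 0:
        return tokenize_flat(tf.stack([x]))[0]
    if x.shape.ndims > 1:
        ragged = tf.RaggedTensor.from_tensor(x)
        return ragged.with_flat_values(tokenize_flat(ragged.flat_values))
    return tokenize_flat(x)
```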
/external/tflite-support/tensorflow_lite_support/custom_ops/kernel/
ngrams_test.py
75 input_tensor = tf.RaggedTensor.from_nested_row_splits(
78 input_tensor, width, reduction_type=tf_text.Reduction.STRING_JOIN)
92 def __call__(self, input_tensor): argument
94 input_tensor, width, reduction_type=tf_text.Reduction.STRING_JOIN)
112 input_tensor = tf.ragged.constant(test_case).to_tensor()
114 input_tensor, 2, reduction_type=tf_text.Reduction.STRING_JOIN)
116 rank = input_tensor.shape.rank
120 interpreter.resize_tensor_input(0, input_tensor.shape)
123 input_tensor.numpy())
132 input_tensor = tf.ragged.constant(test_case).to_tensor()
[all …]
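The test above resizes the TFLite input tensor to each test case's shape before invoking. A minimal sketch of the same flow through the Python interpreter API, assuming a converted model at a placeholder path `ngrams.tflite` and TFLite's object-dtype byte-string convention for string tensors:

```python
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="ngrams.tflite")
input_index = interpreter.get_input_details()[0]["index"]

# Rank-2 string batch; resize to its shape, then allocate and invoke.
input_tensor = np.array([[b"a", b"b", b"c"]], dtype=object)
interpreter.resize_tensor_input(input_index, input_tensor.shape)
interpreter.allocate_tensors()
interpreter.set_tensor(input_index, input_tensor)
interpreter.invoke()

output_index = interpreter.get_output_details()[0]["index"]
print(interpreter.get_tensor(output_index))
```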
/external/tensorflow/tensorflow/python/keras/engine/
input_layer.py
102 input_tensor=None, argument
148 if input_tensor is None:
151 dtype = backend.dtype(input_tensor)
152 elif input_tensor is not None and input_tensor.dtype != dtype:
154 (input_tensor.dtype, dtype))
172 ('input_tensor', input_tensor),
181 input_tensor = keras_tensor.keras_tensor_from_type_spec(type_spec)
182 if isinstance(input_tensor, keras_tensor.SparseKerasTensor):
184 if isinstance(input_tensor, keras_tensor.RaggedKerasTensor):
188 self._batch_input_shape = tuple(input_tensor.shape.as_list())
[all …]
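The input_layer.py excerpt is the internal half of `tf.keras.Input`/`InputLayer`: when no `input_tensor` is passed, a placeholder KerasTensor is created and its dtype defaults to `backend.floatx()`; a conflicting explicit `dtype` raises an error. A minimal public-API sketch, assuming TF 2.x Keras:

```python
import tensorflow as tf

# The symbolic tensor returned here is the `input_tensor` that InputLayer
# records; its shape and dtype become the layer's _batch_input_shape and dtype.
x = tf.keras.Input(shape=(28, 28), batch_size=32, dtype="float32")
print(x.shape, x.dtype)          # (32, 28, 28) float32
model = tf.keras.Model(inputs=x, outputs=tf.keras.layers.Flatten()(x))
```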
/external/tensorflow/tensorflow/python/ops/signal/
fft_ops.py
33 def _infer_fft_length_for_rfft(input_tensor, fft_rank): argument
36 fft_shape = input_tensor.get_shape()[-fft_rank:]
40 return _array_ops.shape(input_tensor)[-fft_rank:]
46 def _infer_fft_length_for_irfft(input_tensor, fft_rank): argument
49 fft_shape = input_tensor.get_shape()[-fft_rank:]
53 fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
64 def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False): argument
69 if (input_tensor.shape.ndims is not None and
70 any(dim.value == 0 for dim in input_tensor.shape.dims)):
71 return input_tensor
[all …]
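The helpers above infer `fft_length` or pad the input for the real FFTs. A short sketch of the behaviour they implement, via the public `tf.signal` ops:

```python
import tensorflow as tf

x = tf.random.normal([4, 100])
spectrum = tf.signal.rfft(x, fft_length=[128])   # innermost axis zero-padded 100 -> 128
print(spectrum.shape)                            # (4, 65) == fft_length // 2 + 1
roundtrip = tf.signal.irfft(spectrum, fft_length=[100])
print(roundtrip.shape)                           # (4, 100)
```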
/external/tensorflow/tensorflow/lite/c/
c_test.c
102 TfLiteTensor* input_tensor = in TestInferenceUsingSignature() local
104 ASSERT_NE(input_tensor, NULL); in TestInferenceUsingSignature()
105 ASSERT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32); in TestInferenceUsingSignature()
106 ASSERT_EQ(TfLiteTensorNumDims(input_tensor), 1); in TestInferenceUsingSignature()
107 ASSERT_EQ(TfLiteTensorDim(input_tensor, 0), 2); in TestInferenceUsingSignature()
108 ASSERT_EQ(TfLiteTensorByteSize(input_tensor), sizeof(float) * 2); in TestInferenceUsingSignature()
109 ASSERT_NE(TfLiteTensorData(input_tensor), NULL); in TestInferenceUsingSignature()
112 TfLiteTensorQuantizationParams(input_tensor); in TestInferenceUsingSignature()
117 ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input, 2 * sizeof(float)), in TestInferenceUsingSignature()
177 TfLiteTensor* input_tensor = in TestRepeatResizeInputTensor() local
[all …]
c_api_test.cc
63 TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0); in TEST() local
64 ASSERT_NE(input_tensor, nullptr); in TEST()
65 EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32); in TEST()
66 EXPECT_EQ(TfLiteTensorNumDims(input_tensor), 1); in TEST()
67 EXPECT_EQ(TfLiteTensorDim(input_tensor, 0), 2); in TEST()
68 EXPECT_EQ(TfLiteTensorByteSize(input_tensor), sizeof(float) * 2); in TEST()
69 EXPECT_NE(TfLiteTensorData(input_tensor), nullptr); in TEST()
70 EXPECT_STREQ(TfLiteTensorName(input_tensor), "input"); in TEST()
73 TfLiteTensorQuantizationParams(input_tensor); in TEST()
78 ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(), in TEST()
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/strings_ops/
regex_full_match_op_test.py
36 input_tensor = constant_op.constant(values, dtypes.string)
37 matched = op(input_tensor, "a.*a").eval()
44 input_tensor = constant_op.constant(values, dtypes.string)
45 matched = op(input_tensor, "a.*a").eval()
52 input_tensor = constant_op.constant(values, dtypes.string)
53 matched = op(input_tensor, "").eval()
60 input_tensor = constant_op.constant(values, dtypes.string)
62 matched = op(input_tensor, invalid_pattern)
72 input_tensor = constant_op.constant("foo", dtypes.string)
74 op = string_ops.regex_full_match(input_tensor, pattern)
[all …]
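A minimal sketch of the op these tests exercise (element-wise full-string regex matching), using the public alias `tf.strings.regex_full_match`:

```python
import tensorflow as tf

values = tf.constant(["abba", "abc", "b"])
print(tf.strings.regex_full_match(values, "a.*a"))  # [ True False False]
# An empty pattern full-matches only empty strings; an invalid pattern
# raises InvalidArgumentError when the op runs.
```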
/external/ComputeLibrary/tests/validation/reference/
Unstack.cpp
65 SimpleTensor<T> get_slice(const SimpleTensor<T> &input_tensor, size_t axis, size_t slice) in get_slice() argument
67 TensorShape out_shape = input_tensor.shape(); in get_slice()
70 const size_t unpacked_num_dimensions(input_tensor.shape().num_dimensions()); in get_slice()
72 SimpleTensor<T> output{ out_shape, input_tensor.data_type() }; in get_slice()
79 … *reinterpret_cast<T *>(output(id)) = *reinterpret_cast<const T *>(input_tensor(input_coords)); in get_slice()
87 std::vector<SimpleTensor<T>> unstack(const SimpleTensor<T> &input_tensor, std::vector<SimpleTensor<… in unstack() argument
90 …const unsigned int axis_u = wrap_around(axis, static_cast<int>(input_tensor.shape().num_dimensions… in unstack()
91 ARM_COMPUTE_ERROR_ON(axis_u >= input_tensor.shape().num_dimensions()); in unstack()
95 const SimpleTensor<T> kth_slice = get_slice(input_tensor, axis_u, k); in unstack()
101 template std::vector<SimpleTensor<float>> unstack(const SimpleTensor<float> &input_tensor, std::vec…
[all …]
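The reference `unstack` above mirrors `tf.unstack`: the input tensor is sliced along `axis` (negative values wrap around) into tensors of one lower rank. For example:

```python
import tensorflow as tf

x = tf.reshape(tf.range(24), [2, 3, 4])
slices = tf.unstack(x, axis=1)        # 3 tensors, each of shape (2, 4)
print(len(slices), slices[0].shape)
```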
/external/tensorflow/tensorflow/python/ops/
image_grad_test_base.py
48 input_tensor = constant_op.constant(x, shape=in_shape)
49 resize_out = image_ops.resize_nearest_neighbor(input_tensor,
67 input_tensor = constant_op.constant(x, shape=in_shape)
70 resize_nn, [input_tensor], delta=1 / 8))
84 input_tensor = constant_op.constant(x, shape=in_shape)
87 resize_nn, [input_tensor], delta=1 / 8))
103 input_tensor = constant_op.constant(x, shape=in_shape)
105 resize_nn, [input_tensor], delta=1 / 8)
108 input_tensor = constant_op.constant(x, shape=in_shape)
110 resize_nn, [input_tensor], delta=1 / 8)
[all …]
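The test above numerically checks gradients of nearest-neighbor resize. A small sketch of the same op through the public API (`method="nearest"` corresponds to `resize_nearest_neighbor`), assuming TF 2.x eager execution:

```python
import tensorflow as tf

x = tf.random.normal([1, 4, 6, 3])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.image.resize(x, size=[8, 12], method="nearest")
grad = tape.gradient(tf.reduce_sum(y), x)
print(grad.shape)   # (1, 4, 6, 3); each input pixel accumulates its output copies
```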
/external/tensorflow/tensorflow/core/kernels/
quantized_instance_norm_test.cc
125 Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32}); in TestBasic() local
126 auto input = input_tensor.flat<quint8>(); in TestBasic()
130 Expect(input_tensor, 0.0f, 1.0f, false, 0.0f, 0.0f); in TestBasic()
134 Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32}); in TestZeroInput() local
135 auto input = input_tensor.flat<quint8>(); in TestZeroInput()
140 Expect(input_tensor, 2.0f, 3.0f, false, 0.0f, 0.0f); in TestZeroInput()
144 Tensor input_tensor(DT_QUINT8, {1, 1, 2, 16}); in TestMaxInput() local
145 auto input = input_tensor.flat<quint8>(); in TestMaxInput()
149 Expect(input_tensor, 0.0f, in TestMaxInput()
155 Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32}); in TestOutputRangeGiven() local
[all …]
broadcast_to_op.h
48 const Tensor &input_tensor, const BCast &bcast) const { in ReshapeAndBCast()
51 input_tensor.template shaped<T, NDIMS>(bcast.x_reshape()), in ReshapeAndBCast()
60 const Tensor &input_tensor, const TensorShape &input_shape, in operator()
65 ReshapeAndBCast<1>(device, output_tensor, input_tensor, bcast); in operator()
68 ReshapeAndBCast<2>(device, output_tensor, input_tensor, bcast); in operator()
71 ReshapeAndBCast<3>(device, output_tensor, input_tensor, bcast); in operator()
74 ReshapeAndBCast<4>(device, output_tensor, input_tensor, bcast); in operator()
77 ReshapeAndBCast<5>(device, output_tensor, input_tensor, bcast); in operator()
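The functor above backs `tf.broadcast_to`: the input is reshaped per the BCast helper and tiled, with up to five collapsed dimensions handled by the switch. A one-line sketch of the public behaviour:

```python
import tensorflow as tf

x = tf.constant([[1], [2], [3]])       # shape (3, 1)
y = tf.broadcast_to(x, [2, 3, 4])      # new leading dim added, last dim tiled 1 -> 4
print(y.shape)                         # (2, 3, 4)
```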
regex_full_match_op.cc
36 const Tensor* input_tensor; in Compute() local
37 OP_REQUIRES_OK(ctx, ctx->input("input", &input_tensor)); in Compute()
38 const auto& input_flat = input_tensor->flat<tstring>(); in Compute()
52 OP_REQUIRES_OK(ctx, ctx->allocate_output("output", input_tensor->shape(), in Compute()
100 const Tensor* input_tensor; in Compute() local
101 OP_REQUIRES_OK(ctx, ctx->input("input", &input_tensor)); in Compute()
102 const auto& input_flat = input_tensor->flat<tstring>(); in Compute()
105 OP_REQUIRES_OK(ctx, ctx->allocate_output("output", input_tensor->shape(), in Compute()
as_string_op.cc
145 const Tensor* input_tensor; in Compute() local
146 OP_REQUIRES_OK(context, context->input("input", &input_tensor)); in Compute()
147 const DataType& dtype = input_tensor->dtype(); in Compute()
151 context->allocate_output("output", input_tensor->shape(), in Compute()
157 const auto& input_flat = input_tensor->flat<T>(); \ in Compute()
175 const auto& input_flat = input_tensor->flat<bool>(); in Compute()
181 const auto& input_flat = input_tensor->flat<Variant>(); in Compute()
187 const auto& input_flat = input_tensor->flat<Eigen::half>(); in Compute()
194 const auto& input_flat = input_tensor->flat<bfloat16>(); in Compute()
201 const auto& input_flat = input_tensor->flat<complex64>(); in Compute()
[all …]
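The kernel above dispatches on the input tensor's dtype and formats each element as a string. Its public counterpart is `tf.strings.as_string`:

```python
import tensorflow as tf

print(tf.strings.as_string(tf.constant([3.14159, 2.71828]), precision=2))
# [b'3.14' b'2.72']
print(tf.strings.as_string(tf.constant([True, False])))
# [b'true' b'false']
```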
/external/tensorflow/tensorflow/core/tpu/kernels/
infeed_ops.cc
76 const Tensor& input_tensor, in TransposeTensor() argument
96 input_tensor, input_tensor.dtype(), shape)); in TransposeTensor()
102 TF_RETURN_IF_ERROR(ctx->allocate_temp(input_tensor.dtype(), in TransposeTensor()
106 if (input_tensor.NumElements() > 0) { in TransposeTensor()
108 input_tensor, permutation, in TransposeTensor()
196 const Tensor& input_tensor, in AutoTransposeAndLinearize() argument
200 const Tensor* tensor = &input_tensor; in AutoTransposeAndLinearize()
207 TransposeTensor(ctx, input_tensor, shape)); in AutoTransposeAndLinearize()
251 const Tensor& input_tensor = ctx->input(0); in Compute() local
254 ctx, input_tensor.dtype() == dtype_, in Compute()
[all …]
/external/tensorflow/tensorflow/lite/tools/benchmark/
benchmark_test.cc
107 void CheckInputTensorValue(const TfLiteTensor* input_tensor, in CheckInputTensorValue() argument
109 ASSERT_THAT(input_tensor, testing::NotNull()); in CheckInputTensorValue()
111 input_tensor->data.raw, input_tensor->data.raw + input_tensor->bytes, in CheckInputTensorValue()
115 void CheckInputTensorValue(const TfLiteTensor* input_tensor, in CheckInputTensorValue() argument
118 StringRef tensor_value = GetString(input_tensor, tensor_dim_index); in CheckInputTensorValue()
138 : interpreter_->input_tensor(index); in GetInputTensor()
260 auto input_tensor = benchmark.GetInputTensor(0); in TEST() local
261 ASSERT_THAT(input_tensor, testing::NotNull()); in TEST()
263 input_tensor->data.raw, input_tensor->data.raw + input_tensor->bytes, in TEST()
337 auto input_tensor = benchmark.GetInputTensor(0); in TEST() local
[all …]
/external/tensorflow/tensorflow/core/ir/importexport/
convert_tensor.cc
57 static TensorProto ConvertToProto(const Tensor& input_tensor, in ConvertToProto() argument
67 input_tensor.AsProtoTensorContent(&tensor_proto); in ConvertToProto()
69 input_tensor.AsProtoField(&tensor_proto); in ConvertToProto()
79 tensorflow::StatusOr<ElementsAttr> ConvertFlatTensor(const Tensor& input_tensor, in ConvertFlatTensor() argument
81 auto arr = input_tensor.flat<T>(); in ConvertFlatTensor()
86 ElementsAttr ConvertBf16Tensor(const Tensor& input_tensor, in ConvertBf16Tensor() argument
88 auto buffer = llvm::makeArrayRef(static_cast<char*>(input_tensor.data()), in ConvertBf16Tensor()
89 input_tensor.TotalBytes()); in ConvertBf16Tensor()
100 const Tensor& input_tensor, ShapedType type) { in ConvertStringTensor() argument
102 auto arr = input_tensor.flat<tstring>(); in ConvertStringTensor()
[all …]
/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/stablehlo/tests/
verify_scatter.mlir
4 func.func @scatter(%input_tensor: tensor<200x100x300xf32>,
7 %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
26 func.func @scatter_with_unranked_inputs(%input_tensor: tensor<*xf32>,
29 %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
49 func.func @invalid_scatter(%input_tensor: tensor<200x100x300xf32>,
53 %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
73 func.func @invalid_scatter(%input_tensor: tensor<*xf32>,
77 %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
97 func.func @invalid_scatter(%input_tensor: tensor<200x100x300xf32>,
101 %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
[all …]
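For intuition about the scatter ops being verified above: the indices select slices of the input and the `updates` operand is written (or combined) into them. A simple special case via `tf.tensor_scatter_nd_update`, which does not cover the full stablehlo.scatter semantics:

```python
import tensorflow as tf

tensor = tf.zeros([200, 100, 300])
indices = tf.constant([[5], [10]])            # two rows along the first axis
updates = tf.ones([2, 100, 300])
result = tf.tensor_scatter_nd_update(tensor, indices, updates)
print(tf.reduce_sum(result).numpy())          # 60000.0 == 2 * 100 * 300
```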
/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tests/Dialect/mhlo/
verifier_scatter_op.mlir
4 func.func @scatter(%input_tensor: tensor<200x100x300xf32>,
7 %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
26 func.func @scatter_with_unranked_inputs(%input_tensor: tensor<*xf32>,
29 %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
49 func.func @invalid_scatter(%input_tensor: tensor<200x100x300xf32>,
53 %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
73 func.func @invalid_scatter(%input_tensor: tensor<*xf32>,
77 %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
97 func.func @invalid_scatter(%input_tensor: tensor<200x100x300xf32>,
101 %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
[all …]
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/utils/
convert_tensor.cc
60 static TensorProto ConvertToProto(const Tensor& input_tensor, in ConvertToProto() argument
70 input_tensor.AsProtoTensorContent(&tensor_proto); in ConvertToProto()
72 input_tensor.AsProtoField(&tensor_proto); in ConvertToProto()
82 StatusOr<ElementsAttr> ConvertFlatTensor(const Tensor& input_tensor, in ConvertFlatTensor() argument
84 auto arr = input_tensor.flat<T>(); in ConvertFlatTensor()
89 ElementsAttr ConvertBf16Tensor(const Tensor& input_tensor, in ConvertBf16Tensor() argument
91 auto buffer = llvm::makeArrayRef(static_cast<char*>(input_tensor.data()), in ConvertBf16Tensor()
92 input_tensor.TotalBytes()); in ConvertBf16Tensor()
102 StatusOr<ElementsAttr> ConvertStringTensor(const Tensor& input_tensor, in ConvertStringTensor() argument
105 auto arr = input_tensor.flat<tstring>(); in ConvertStringTensor()
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/
fractional_max_pool_op_test.py
80 def _GetExpectedFractionalMaxPoolResult(self, input_tensor, row_seq, col_seq, argument
98 input_shape = input_tensor.shape
101 output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
104 two_dim_slice = input_tensor[batch, :, :, channel]
111 def _ValidateFractionalMaxPoolResult(self, input_tensor, pooling_ratio, argument
129 input_tensor,
135 expected = self._GetExpectedFractionalMaxPoolResult(input_tensor, row_seq,
403 input_tensor = constant_op.constant(
408 output_tensor = nn_ops.max_pool(input_tensor, window_size,
413 input_tensor, output_tensor, output_backprop, window_size,
[all …]
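The reference implementation above replays the row/col pooling sequences the op emits. A short sketch of the public op, assuming TF 2.x:

```python
import tensorflow as tf

x = tf.random.normal([1, 9, 9, 3])
output, row_seq, col_seq = tf.nn.fractional_max_pool(
    x, pooling_ratio=[1.0, 1.5, 1.5, 1.0], seed=1)
print(output.shape)      # about 9 / 1.5 = 6 rows and cols
print(row_seq.numpy())   # pooling-region boundaries the reference checker consumes
```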
fractional_avg_pool_op_test.py
80 def _GetExpectedFractionalAvgPoolResult(self, input_tensor, row_seq, col_seq, argument
98 input_shape = input_tensor.shape
101 output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
104 two_dim_slice = input_tensor[batch, :, :, channel]
111 def _ValidateFractionalAvgPoolResult(self, input_tensor, pooling_ratio, argument
129 input_tensor,
135 expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq,
384 input_tensor = constant_op.constant(
390 output_tensor = nn_ops.avg_pool(input_tensor, window_size,
399 input_tensor.get_shape(), output_backprop, window_size,
[all …]
/external/tensorflow/tensorflow/core/data/
serialization_utils_test.cc
131 Tensor input_tensor(DT_FLOAT, {1}); in TEST() local
132 input_tensor.flat<float>()(0) = 2.0f; in TEST()
133 TF_ASSERT_OK(writer.WriteTensor(full_name("Tensor"), input_tensor)); in TEST()
143 EXPECT_EQ(input_tensor.NumElements(), val_tensor.NumElements()); in TEST()
144 EXPECT_EQ(input_tensor.flat<float>()(0), val_tensor.flat<float>()(0)); in TEST()
168 Tensor input_tensor(DT_FLOAT, {1}); in TEST() local
169 input_tensor.flat<float>()(0) = 2.0f; in TEST()
170 TF_ASSERT_OK(writer.WriteTensor("Iterator", "Tensor", input_tensor)); in TEST()
180 EXPECT_EQ(input_tensor.NumElements(), val_tensor.NumElements()); in TEST()
181 EXPECT_EQ(input_tensor.flat<float>()(0), val_tensor.flat<float>()(0)); in TEST()
[all …]
/external/ComputeLibrary/tests/validation/fixtures/
UnstackFixture.h
66 TensorType input_tensor = create_tensor<TensorType>(input_shape, data_type); in compute_target() local
78 unstack.configure(&input_tensor, output_ptrs, axis); in compute_target()
85 input_tensor.allocator()->allocate(); in compute_target()
86 ARM_COMPUTE_ASSERT(!input_tensor.info()->is_resizable()); in compute_target()
87 fill(AccessorType(input_tensor), 0); in compute_target()
99 SimpleTensor<T> input_tensor{ input_shape, data_type }; in compute_reference()
100 fill(input_tensor, 0); in compute_reference()
109 return reference::unstack<T>(input_tensor, output_tensors, axis); in compute_reference()
/external/tensorflow/tensorflow/lite/delegates/xnnpack/
xnnpack_delegate.cc
1767 TfLiteContext* context, const TfLiteTensor& input_tensor, in CheckTensorsDimensionMatch() argument
1770 if (SizeOfDimension(&input_tensor, dimension_index) != in CheckTensorsDimensionMatch()
1776 dimension_index, SizeOfDimension(&input_tensor, dimension_index), in CheckTensorsDimensionMatch()
1811 TfLiteContext* context, const TfLiteTensor& input_tensor, in CheckTensorsInputOutputScale() argument
1814 if (input_tensor.type != output_tensor.type) { in CheckTensorsInputOutputScale()
1819 if (input_tensor.type == kTfLiteInt8 || input_tensor.type == kTfLiteUInt8) { in CheckTensorsInputOutputScale()
1821 input_tensor.quantization.params) in CheckTensorsInputOutputScale()
2153 const TfLiteTensor& input_tensor = tensors[node->inputs->data[0]]; in VisitAbsNode() local
2155 logging_context, input_tensor, node->inputs->data[0], node_index)); in VisitAbsNode()
2157 logging_context, input_tensor, node->inputs->data[0], node_index)); in VisitAbsNode()
[all …]
/external/tensorflow/tensorflow/python/feature_column/
feature_column.py
2216 def expand_dims(input_tensor): argument
2218 if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
2220 input_tensor, [array_ops.shape(input_tensor)[0], 1])
2222 return array_ops.expand_dims(input_tensor, -1)
2258 def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None): argument
2275 input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
2276 input_tensor)
2277 if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
2278 return input_tensor
2279 with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
[all …]
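The `expand_dims` helper above gives both sparse and dense inputs a trailing dimension of 1. A minimal sketch with the public API:

```python
import tensorflow as tf

def expand_dims_any(input_tensor):
    if isinstance(input_tensor, tf.SparseTensor):
        # Reshape rather than expand_dims for sparse input, as in the helper above.
        return tf.sparse.reshape(input_tensor, [tf.shape(input_tensor)[0], 1])
    return tf.expand_dims(input_tensor, -1)

print(expand_dims_any(tf.constant([1, 2, 3])).shape)              # (3, 1)
sp = tf.sparse.SparseTensor(indices=[[0], [2]], values=[7, 9], dense_shape=[4])
print(expand_dims_any(sp).dense_shape.numpy())                    # [4 1]
```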
