Searched refs:num_splits (Results 1 – 25 of 47) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/
split_v_test.cc
36 int num_splits, int axis) { in SplitVOpModel() argument
44 for (int i = 0; i < num_splits; ++i) { in SplitVOpModel()
48 CreateSplitVOptions(builder_, num_splits).Union()); in SplitVOpModel()
86 int num_splits = size_splits_data.size(); in Check() local
88 {TensorType_INT32, size_splits_shape}, num_splits, in Check()
94 for (int i = 0; i < num_splits; ++i) { in Check()
100 {TensorType_INT32, size_splits_shape}, num_splits, in Check()
105 for (int i = 0; i < num_splits; ++i) { in Check()
split_test.cc
41 SplitOpModel(const TensorData& input, int num_splits, in SplitOpModel() argument
49 for (int i = 0; i < num_splits; ++i) { in SplitOpModel()
53 CreateSplitOptions(builder_, num_splits).Union()); in SplitOpModel()
80 void Check(TestType test_type, int axis, int num_splits, in Check() argument
89 << " and num_splits=" << num_splits; in Check()
93 SplitOpModel m({type, input_shape}, num_splits); in Check()
97 for (int i = 0; i < num_splits; ++i) { in Check()
104 SplitOpModel const_m({type, input_shape}, num_splits, axis); in Check()
107 for (int i = 0; i < num_splits; ++i) { in Check()
split.cc
53 const TfLiteTensor* input, int num_splits) { in ResizeOutputTensors() argument
63 TF_LITE_ENSURE_MSG(context, input_size % num_splits == 0, in ResizeOutputTensors()
65 const int slice_size = input_size / num_splits; in ResizeOutputTensors()
83 TF_LITE_ENSURE_EQ(context, NumOutputs(node), op_context.params->num_splits); in Prepare()
100 op_context.params->num_splits); in Prepare()
115 op_context.params->num_splits)); in Eval()
/external/tensorflow/tensorflow/core/ops/
ragged_conversion_ops.cc
160 int64 num_splits; in RaggedTensorToSparseShapeFn() local
161 TF_RETURN_IF_ERROR(c->GetAttr<int64>("RAGGED_RANK", &num_splits)); in RaggedTensorToSparseShapeFn()
163 if (num_splits < 1) { in RaggedTensorToSparseShapeFn()
166 ShapeHandle rt_dense_values = c->input(num_splits); in RaggedTensorToSparseShapeFn()
170 for (int64 i = 0; i < num_splits; ++i) { in RaggedTensorToSparseShapeFn()
177 ? c->MakeDim(c->Rank(rt_dense_values) + num_splits) in RaggedTensorToSparseShapeFn()
189 int64 num_splits; in RaggedTensorToVariantShapeFn() local
190 TF_RETURN_IF_ERROR(c->GetAttr<int64>("RAGGED_RANK", &num_splits)); in RaggedTensorToVariantShapeFn()
193 shape_inference::ShapeHandle rt_dense_values = c->input(num_splits); in RaggedTensorToVariantShapeFn()
195 for (int64 i = 0; i < num_splits; ++i) { in RaggedTensorToVariantShapeFn()
[all …]
ragged_array_ops.cc
122 int num_splits; in RaggedGatherShapeFn() local
126 TF_RETURN_IF_ERROR(c->GetAttr<int>("OUTPUT_RAGGED_RANK", &num_splits)); in RaggedGatherShapeFn()
131 c->WithRank(indices, num_splits - PARAMS_RAGGED_RANK + 1, &indices)); in RaggedGatherShapeFn()
145 for (int i = 0; i < num_splits; ++i) { in RaggedGatherShapeFn()
154 c->set_output(num_splits, values); in RaggedGatherShapeFn()
parsing_ops.cc
68 DimensionHandle num_splits; in AddRaggedOutputShapes() local
69 TF_RETURN_IF_ERROR(c->Add(num_examples, 1, &num_splits)); in AddRaggedOutputShapes()
76 c->set_output((*output_idx)++, c->Vector(num_splits)); in AddRaggedOutputShapes()
sparse_ops.cc
333 int num_splits = c->num_outputs() / 3; in __anon36a8fc880e02() local
335 for (int i = 0; i < num_splits; ++i) in __anon36a8fc880e02()
337 for (int i = 0; i < num_splits; ++i) in __anon36a8fc880e02()
339 for (int i = 0; i < num_splits; ++i) in __anon36a8fc880e02()
/external/tensorflow/tensorflow/lite/delegates/hexagon/builders/tests/
split_test.cc
24 int num_splits, int axis) { in SplitOpModel() argument
27 for (int i = 0; i < num_splits; ++i) { in SplitOpModel()
31 CreateSplitOptions(builder_, num_splits).Union()); in SplitOpModel()
56 int axis, int num_splits, std::initializer_list<int> input_shape, in CheckSplitBehavior() argument
63 << " and num_splits=" << num_splits; in CheckSplitBehavior()
70 {tensor_dtype, output_shape, kMin, kMax}, num_splits, in CheckSplitBehavior()
74 for (int i = 0; i < num_splits; ++i) { in CheckSplitBehavior()
/external/tensorflow/tensorflow/core/tpu/ops/
tpu_partitioned_output_op.cc
37 int num_splits; in __anon4952381a0102() local
38 TF_RETURN_IF_ERROR(c->GetAttr("num_splits", &num_splits)); in __anon4952381a0102()
47 c->Divide(c->Dim(input, partition_dim), num_splits, in __anon4952381a0102()
51 for (int i = num_splits - 1; i >= 0; --i) { in __anon4952381a0102()
/external/tensorflow/tensorflow/core/kernels/
ragged_tensor_to_variant_op.cc
42 int num_splits = ragged_rank - 1; in UnbatchRaggedZerothDim() local
45 ragged_component.mutable_nested_splits()->reserve(num_splits); in UnbatchRaggedZerothDim()
55 if (num_splits == 0) { in UnbatchRaggedZerothDim()
79 std::vector<int> index(num_splits, 1); in UnbatchRaggedZerothDim()
83 ragged_component_splits_vec.reserve(num_splits); in UnbatchRaggedZerothDim()
85 for (int j = 0; j < num_splits; j++) { in UnbatchRaggedZerothDim()
105 int last_split_size = ragged_component_splits_vec[num_splits - 1].size(); in UnbatchRaggedZerothDim()
107 ragged_component_splits_vec[num_splits - 1](last_split_size - 1); in UnbatchRaggedZerothDim()
ragged_tensor_to_sparse_kernel_test.cc
44 int64 num_splits = rt_nested_splits.size(); in BuildRaggedTensorToSparseGraph() local
46 .Input(FakeInput(num_splits)) // rt_nested_splits in BuildRaggedTensorToSparseGraph()
48 .Attr("RAGGED_RANK", num_splits) in BuildRaggedTensorToSparseGraph()
ragged_gather_op.cc
124 int num_splits = indices_in.dims() - 1 + params_nested_splits_in.size(); in MakeSplits() local
125 out_splits->assign(num_splits, {0}); in MakeSplits()
224 SPLITS_TYPE num_splits = out_splits[i].size(); in WriteSplits() local
226 splits_out.allocate(i, TensorShape({num_splits}), &splits)); in WriteSplits()
ragged_gather_op_test.cc
42 int64 num_splits = PARAMS_RAGGED_RANK + indices_shape.dims() - 1; in BuildRaggedGatherGraph() local
49 .Attr("OUTPUT_RAGGED_RANK", num_splits) in BuildRaggedGatherGraph()
ragged_tensor_to_variant_op_test.cc
47 int64 num_splits = ragged_splits.size(); in BuildEncodeRaggedTensorGraph() local
50 .Input(FakeInput(num_splits, splits_dtype)) // ragged_splits in BuildEncodeRaggedTensorGraph()
52 .Attr("RAGGED_RANK", num_splits) in BuildEncodeRaggedTensorGraph()
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/tests/
import_quant_stats.mlir
6 …%0:2 = "tfl.split"(%cst, %arg0) {num_splits = 2 : i32} : (tensor<i32>, tensor<4xf32>) -> (tensor<2…
16 …%0:2 = "tfl.split"(%cst, %arg0) {num_splits = 2 : i32} : (tensor<i32>, tensor<4xf32>) -> (tensor<2…
28 …%0:2 = "tfl.split"(%cst, %arg0) {num_splits = 2 : i32} : (tensor<i32>, tensor<4xf32>) -> (tensor<2…
39 …%0:2 = "tfl.split"(%cst, %arg0) {num_splits = 2 : i32, name = "op_regex"} : (tensor<i32>, tensor<4…
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
TPUPartitionedOutput.pbtxt
10 number_attr: "num_splits"
17 name: "num_splits"
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/utils/
xla_sharding_util.cc
159 const int num_splits = num_splits_and_index.value(); in HandleTileShardedInputs() local
161 if (num_splits == 1) continue; in HandleTileShardedInputs()
166 auto result = CreateSplitOp(num_splits, dimension_index, location, in HandleTileShardedInputs()
175 new_split_ops.reserve(split_ops_for_tiled_input.size() * num_splits); in HandleTileShardedInputs()
181 CreateSplitOp(num_splits, dimension_index, location, in HandleTileShardedInputs()
427 for (auto num_splits : llvm::reverse(sharding.tile_assignment_dimensions())) { in HandleTileShardedOutputs() local
428 if (num_splits == 1) { in HandleTileShardedOutputs()
434 new_outputs.reserve(num_splits); in HandleTileShardedOutputs()
436 i = i + num_splits) { in HandleTileShardedOutputs()
442 outputs_to_merge.begin() + i + num_splits}, in HandleTileShardedOutputs()
/external/tflite-support/tensorflow_lite_support/custom_ops/kernel/sentencepiece/
sentencepiece_tokenizer_op.cc
56 tensorflow::shape_inference::DimensionHandle num_splits; in __anon18bebb200102() local
57 TF_RETURN_IF_ERROR(c->Add(c->NumElements(c->input(1)), 1, &num_splits)); in __anon18bebb200102()
58 c->set_output(1, c->Vector(num_splits)); in __anon18bebb200102()
/external/tensorflow/tensorflow/python/keras/engine/
keras_tensor.py
471 num_splits = known_num_splits[axis-1]
472 if num_splits is not None:
473 num_splits = num_splits + 1
475 ragged_spec.row_splits_dtype, [num_splits])
/external/tensorflow/tensorflow/lite/c/
builtin_op_data.h
379 int num_splits; member
383 int num_splits; member
/external/tensorflow/tensorflow/core/grappler/
graph_view_test.cc
70 for (int num_splits : {1, 2}) { in TEST_F()
73 ops::SparseSplit b(s.WithOpName("b"), a, a, a, a, num_splits); in TEST_F()
90 for (int port_id = 0; port_id <= num_splits * 3; ++port_id) { in TEST_F()
92 if (port_id < num_splits * 3) { in TEST_F()
93 arg_id = port_id / num_splits; in TEST_F()
/external/tensorflow/tensorflow/compiler/mlir/lite/ir/
tfl_ops.cc
1855 Operation *op, int64_t num_splits, in VerifySplitOpOutputTypes() argument
1857 for (int64_t i = 0; i < num_splits; ++i) { in VerifySplitOpOutputTypes()
1869 int64_t num_splits = op.num_splits(); in Verify() local
1870 if (op.getNumResults() != num_splits) in Verify()
1893 if (dim_size % num_splits != 0) in Verify()
1898 input_type, split_dim, dim_size / num_splits); in Verify()
1900 op.getOperation(), num_splits, in Verify()
1905 int64_t num_splits = op.num_splits(); in Verify() local
1906 if (op.getNumResults() != num_splits) in Verify()
1934 if (size_splits_attr.getNumElements() != num_splits) { in Verify()
[all …]
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/
ops.mlir
1734 …%0 = "tfl.split"(%arg0, %arg1) {num_splits = 1 : i32} : (tensor<i32>, tensor<10x!quant.uniform<u8:…
1741 …%0 = "tfl.split_v"(%arg0, %arg1, %arg2) {num_splits = 1 : i32} : (tensor<10x!quant.uniform<u8:f32,…
1965 …// expected-error @+1 {{'tfl.split' op attribute 'num_splits' failed to satisfy constraint: 32-bit…
1966 "tfl.split"(%split_dim, %arg0) {num_splits = 0 : i32} : (tensor<i32>, tensor<16xf32>) -> ()
1974 // expected-error @+1 {{'tfl.split' op output count should match 'num_splits' attribute}}
1975 …%0, %1 = "tfl.split"(%split_dim, %arg0) {num_splits = 4 : i32} : (tensor<i32>, tensor<16xf32>) -> …
1984 …%0 = "tfl.split"(%split_dim, %arg0) {num_splits = 1 : i32} : (tensor<2x2xi32>, tensor<16x4x4xf32>)…
1992 …%0 = "tfl.split"(%split_dim, %arg0) {num_splits = 1 : i32} : (tensor<*xi32>, tensor<16x4x4xf32>) -…
2001 …%0, %1 = "tfl.split"(%split_dim, %arg0) {num_splits = 2 : i32} : (tensor<i32>, tensor<16xf32>) -> …
2010 …%0, %1 = "tfl.split"(%split_dim, %arg0) {num_splits = 2 : i32} : (tensor<i32>, tensor<16xf32>) -> …
[all …]
/external/tensorflow/tensorflow/python/distribute/
cross_device_ops.py
731 num_splits = self.num_packs
743 split_size = total_grad_size // num_splits
744 split_size_last = total_grad_size - split_size * (num_splits - 1)
745 split_sizes = [split_size] * (num_splits - 1) + [split_size_last]
752 device_grad_packs.append(zip(grad_packs, [None] * num_splits))
/external/tensorflow/tensorflow/compiler/xla/service/
space_to_batch_converter.cc
120 int64 num_splits, bool is_backprop = false, bool is_rhs = false);
127 int64 spatial_split_size, int64 num_splits);
1665 const int64 num_splits = new_batch_size / old_batch_size; in SelectValidPortion() local
1671 const int64 batch_index = (k / new_space_size) % num_splits; in SelectValidPortion()
1908 const int64 num_splits = kNumSplits; in PropagateOnConv() local
1913 CeilOfRatio(output_offsets, num_splits); in PropagateOnConv()
1922 while (spatial_split_size * num_splits + c.halo_size - c.spatial_size < 0) { in PropagateOnConv()
2040 int64 spatial_split_size, int64 num_splits) { in SplitSpaceHelper() argument
2082 reshape_dimensions[activations_batch_dim] = num_splits * old_batch_size; in SplitSpaceHelper()
2096 int64 num_splits, bool is_backprop, in SplitSpace() argument
[all …]
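
Taken together, these hits show the invariant that recurs across the split kernels and verifiers above: the size of the dimension being split must be evenly divisible by num_splits, and each output then holds size / num_splits elements (see ResizeOutputTensors in split.cc and the tfl.split verifier in tfl_ops.cc). The following standalone C++ sketch illustrates only that arithmetic; it is not TensorFlow Lite code, and EvenSplit is a hypothetical helper name.

  #include <cstdio>
  #include <vector>

  // Hypothetical helper (not part of TensorFlow Lite): split `input` into
  // num_splits equal slices, mirroring the divisibility check that
  // ResizeOutputTensors in split.cc enforces via TF_LITE_ENSURE_MSG.
  std::vector<std::vector<int>> EvenSplit(const std::vector<int>& input,
                                          int num_splits) {
    if (num_splits <= 0 ||
        input.size() % static_cast<size_t>(num_splits) != 0) {
      return {};  // The real kernel reports an error here instead.
    }
    const size_t slice_size = input.size() / num_splits;
    std::vector<std::vector<int>> outputs(num_splits);
    for (int i = 0; i < num_splits; ++i) {
      outputs[i].assign(input.begin() + i * slice_size,
                        input.begin() + (i + 1) * slice_size);
    }
    return outputs;
  }

  int main() {
    // 6 elements split 3 ways -> three slices of size 2.
    auto parts = EvenSplit({1, 2, 3, 4, 5, 6}, 3);
    for (const auto& p : parts) std::printf("slice of size %zu\n", p.size());
    return 0;
  }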
