• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #include <vector>
17 
18 #include "absl/strings/match.h"
19 #include "tensorflow/core/framework/fake_input.h"
20 #include "tensorflow/core/framework/node_def_builder.h"
21 #include "tensorflow/core/framework/shape_inference.h"
22 #include "tensorflow/core/framework/shape_inference_testutil.h"
23 #include "tensorflow/core/framework/tensor.h"
24 #include "tensorflow/core/framework/tensor_shape.h"
25 #include "tensorflow/core/framework/tensor_testutil.h"
26 #include "tensorflow/core/framework/variant.h"
27 #include "tensorflow/core/framework/variant_encode_decode.h"
28 #include "tensorflow/core/kernels/ops_testutil.h"
29 #include "tensorflow/core/kernels/ragged_tensor_variant.h"
30 #include "tensorflow/core/lib/core/status_test_util.h"
31 #include "tensorflow/core/platform/test.h"
32 
33 namespace tensorflow {
34 namespace {
35 
36 class RaggedTensorToVariantKernelTest : public ::tensorflow::OpsTestBase {
37  protected:
38   // Builds the tensorflow test graph for the RaggedTensorToVariant op, and
39   // populates the `splits` input with the given values.
40   template <typename VALUE_TYPE, typename SPLIT_TYPE>
BuildEncodeRaggedTensorGraph(const std::vector<std::vector<SPLIT_TYPE>> & ragged_splits,const TensorShape & ragged_values_shape,const std::vector<VALUE_TYPE> & ragged_values,const bool batched)41   void BuildEncodeRaggedTensorGraph(
42       const std::vector<std::vector<SPLIT_TYPE>>& ragged_splits,
43       const TensorShape& ragged_values_shape,
44       const std::vector<VALUE_TYPE>& ragged_values, const bool batched) {
45     const auto values_dtype = DataTypeToEnum<VALUE_TYPE>::v();
46     const auto splits_dtype = DataTypeToEnum<SPLIT_TYPE>::v();
47     int64_t num_splits = ragged_splits.size();
48     TF_ASSERT_OK(
49         NodeDefBuilder("tested_op", "RaggedTensorToVariant")
50             .Input(FakeInput(num_splits, splits_dtype))  // ragged_splits
51             .Input(FakeInput(values_dtype))              // ragged_values
52             .Attr("RAGGED_RANK", num_splits)
53             .Attr("Tvalues", values_dtype)
54             .Attr("Tsplits", splits_dtype)
55             .Attr("batched_input", batched)
56             .Finalize(node_def()));
57     TF_ASSERT_OK(InitOp());
58     for (const auto& splits : ragged_splits) {
59       int64_t splits_size = splits.size();
60       AddInputFromArray<SPLIT_TYPE>(TensorShape({splits_size}), splits);
61     }
62     AddInputFromArray<VALUE_TYPE>(ragged_values_shape, ragged_values);
63   }
64 
65   template <typename VALUE_TYPE, typename SPLIT_TYPE>
CreateVariantFromRagged(const std::vector<std::vector<SPLIT_TYPE>> & ragged_splits,const TensorShape & ragged_values_shape,const std::vector<VALUE_TYPE> & ragged_values)66   RaggedTensorVariant CreateVariantFromRagged(
67       const std::vector<std::vector<SPLIT_TYPE>>& ragged_splits,
68       const TensorShape& ragged_values_shape,
69       const std::vector<VALUE_TYPE>& ragged_values) {
70     RaggedTensorVariant encoded;
71     for (auto ragged_split : ragged_splits) {
72       int splits_size = ragged_split.size();
73       Tensor splits(DataTypeToEnum<SPLIT_TYPE>::v(),
74                     TensorShape({splits_size}));
75       test::FillValues<SPLIT_TYPE>(&splits, ragged_split);
76       encoded.append_splits(splits);
77     }
78     Tensor values(DataTypeToEnum<VALUE_TYPE>::v(), ragged_values_shape);
79     test::FillValues<VALUE_TYPE>(&values, ragged_values);
80     encoded.set_values(values);
81     return encoded;
82   }
83 
84   template <typename VALUE_TYPE, typename SPLIT_TYPE>
CreateVariantFromRagged(const std::vector<std::vector<SPLIT_TYPE>> & ragged_splits,const std::vector<VALUE_TYPE> & ragged_values)85   RaggedTensorVariant CreateVariantFromRagged(
86       const std::vector<std::vector<SPLIT_TYPE>>& ragged_splits,
87       const std::vector<VALUE_TYPE>& ragged_values) {
88     int num_values = ragged_values.size();
89     return CreateVariantFromRagged(ragged_splits, {num_values}, ragged_values);
90   }
91 
92   template <typename VALUE_TYPE, typename SPLIT_TYPE>
ExpectRaggedTensorVariantEqual(const RaggedTensorVariant & expected,const RaggedTensorVariant & actual)93   void ExpectRaggedTensorVariantEqual(const RaggedTensorVariant& expected,
94                                       const RaggedTensorVariant& actual) {
95     test::ExpectTensorEqual<VALUE_TYPE>(actual.values(), expected.values());
96     EXPECT_EQ(actual.ragged_rank(), expected.ragged_rank());
97     for (int i = 0; i < actual.ragged_rank(); ++i) {
98       test::ExpectTensorEqual<SPLIT_TYPE>(actual.splits(i), expected.splits(i));
99     }
100   }
101 };
102 
TEST_F(RaggedTensorToVariantKernelTest, NoValuesInput) {
  // ragged_tensor=[[[], []], [[]], []]
  const std::vector<int64> outer_splits = {0, 2, 3, 3};
  const std::vector<int64> inner_splits = {0, 0, 0, 0};

  BuildEncodeRaggedTensorGraph<int, int64>({outer_splits, inner_splits},
                                           TensorShape({0}), {}, true);
  TF_ASSERT_OK(RunOpKernel());

  const auto& encoded_list = GetOutput(0)->vec<Variant>();
  EXPECT_EQ(encoded_list.size(), 3);

  // Every batch element is an empty ragged tensor; only the number of inner
  // rows (encoded in its single splits vector) differs per element.
  const std::vector<std::vector<int64>> expected_splits = {
      {0, 0, 0}, {0, 0}, {0}};
  for (int i = 0; i < 3; ++i) {
    ExpectRaggedTensorVariantEqual<int, int64>(
        CreateVariantFromRagged<int, int64>({expected_splits[i]}, {}),
        *encoded_list(i).get<RaggedTensorVariant>());
  }
}
125 
126 TEST_F(RaggedTensorToVariantKernelTest, 1DValuesRaggedRankOneInput) {
127   // ragged_tensor=
128   // [ [1, 2, 3],
129   //   [       ],
130   //   [4, 5   ],
131   //   [6      ]]
132   const std::vector<int64> batched_splits = {0, 3, 3, 5, 6};
133   const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6};
134 
135   BuildEncodeRaggedTensorGraph<int, int64>({batched_splits}, TensorShape({6}),
136                                            batched_values, true);
137   TF_ASSERT_OK(RunOpKernel());
138 
139   const auto& encoded_list = GetOutput(0)->vec<Variant>();
140   EXPECT_EQ(encoded_list.size(), 4);
141 
142   ExpectRaggedTensorVariantEqual<int, int64>(
143       CreateVariantFromRagged<int, int64>({}, {1, 2, 3}),
144       *encoded_list(0).get<RaggedTensorVariant>());
145   ExpectRaggedTensorVariantEqual<int, int64>(
146       CreateVariantFromRagged<int, int64>({}, {}),
147       *encoded_list(1).get<RaggedTensorVariant>());
148   ExpectRaggedTensorVariantEqual<int, int64>(
149       CreateVariantFromRagged<int, int64>({}, {4, 5}),
150       *encoded_list(2).get<RaggedTensorVariant>());
151   ExpectRaggedTensorVariantEqual<int, int64>(
152       CreateVariantFromRagged<int, int64>({}, {6}),
153       *encoded_list(3).get<RaggedTensorVariant>());
154 }
155 
156 TEST_F(RaggedTensorToVariantKernelTest, 2DBatchedValuesRankOneInput) {
157   // ragged_tensor=
158   // [[1, 2],
159   //  [4, 5],
160   //  [6, 7]]
161   const std::vector<int64> batched_splits = {0, 1, 2, 3};
162   const std::vector<int> batched_values = {1, 2, 4, 5, 6, 7};
163 
164   BuildEncodeRaggedTensorGraph<int, int64>(
165       {batched_splits}, TensorShape({3, 2}), batched_values, true);
166   TF_ASSERT_OK(RunOpKernel());
167 
168   const auto& encoded_list = GetOutput(0)->vec<Variant>();
169   EXPECT_EQ(encoded_list.size(), 3);
170 
171   ExpectRaggedTensorVariantEqual<int, int64>(
172       CreateVariantFromRagged<int, int64>({}, {1, 2}, {1, 2}),
173       *encoded_list(0).get<RaggedTensorVariant>());
174   ExpectRaggedTensorVariantEqual<int, int64>(
175       CreateVariantFromRagged<int, int64>({}, {1, 2}, {4, 5}),
176       *encoded_list(1).get<RaggedTensorVariant>());
177   ExpectRaggedTensorVariantEqual<int, int64>(
178       CreateVariantFromRagged<int, int64>({}, {1, 2}, {6, 7}),
179       *encoded_list(2).get<RaggedTensorVariant>());
180 }
181 
182 TEST_F(RaggedTensorToVariantKernelTest, 2DBatchedValuesRankTwoInput) {
183   // ragged_tensor=
184   // [ [[[1, 2], [4, 5]]],
185   //   [[[6 7]]]          ]
186   const std::vector<int64> batched_splits_1 = {0, 1, 2};
187   const std::vector<int64> batched_splits_2 = {0, 2, 3};
188   const std::vector<int> batched_values = {1, 2, 4, 5, 6, 7};
189 
190   BuildEncodeRaggedTensorGraph<int, int64>({batched_splits_1, batched_splits_2},
191                                            TensorShape({3, 2}), batched_values,
192                                            true);
193   TF_ASSERT_OK(RunOpKernel());
194 
195   const auto& encoded_list = GetOutput(0)->vec<Variant>();
196   EXPECT_EQ(encoded_list.size(), 2);
197 
198   ExpectRaggedTensorVariantEqual<int, int64>(
199       CreateVariantFromRagged<int, int64>({{0, 2}}, {2, 2}, {1, 2, 4, 5}),
200       *encoded_list(0).get<RaggedTensorVariant>());
201   ExpectRaggedTensorVariantEqual<int, int64>(
202       CreateVariantFromRagged<int, int64>({{0, 1}}, {1, 2}, {6, 7}),
203       *encoded_list(1).get<RaggedTensorVariant>());
204 }
205 
TEST_F(RaggedTensorToVariantKernelTest, EmptyRowInBatchedInput) {
  // ragged_tensor =
  // [[ [x],         [x x],       [] ],
  //  [                              ],
  //  [ [x x x x x], [x x x]         ],
  //  [ [],          [x x x x]       ]]
  const std::vector<int64> splits_level_1 = {0, 3, 3, 5, 7};
  const std::vector<int64> splits_level_2 = {0, 1, 3, 3, 8, 11, 11, 15};
  const std::vector<int> flat_values = {1, 2,  3,  4,  5,  6,  7, 8,
                                        9, 10, 11, 12, 13, 14, 15};

  BuildEncodeRaggedTensorGraph<int, int64>({splits_level_1, splits_level_2},
                                           TensorShape({15}), flat_values,
                                           true);
  TF_ASSERT_OK(RunOpKernel());

  const auto& encoded_list = GetOutput(0)->vec<Variant>();
  EXPECT_EQ(encoded_list.size(), 4);

  // Row 0: three inner rows covering values 1..3.
  ExpectRaggedTensorVariantEqual<int, int64>(
      CreateVariantFromRagged<int, int64>({{0, 1, 3, 3}}, {1, 2, 3}),
      *encoded_list(0).get<RaggedTensorVariant>());
  // Row 1: entirely empty — no inner rows, no values.
  ExpectRaggedTensorVariantEqual<int, int64>(
      CreateVariantFromRagged<int, int64>({{0}}, {}),
      *encoded_list(1).get<RaggedTensorVariant>());
  // Row 2: two inner rows covering values 4..11.
  ExpectRaggedTensorVariantEqual<int, int64>(
      CreateVariantFromRagged<int, int64>({{0, 5, 8}},
                                          {4, 5, 6, 7, 8, 9, 10, 11}),
      *encoded_list(2).get<RaggedTensorVariant>());
  // Row 3: an empty inner row, then a row with values 12..15.
  ExpectRaggedTensorVariantEqual<int, int64>(
      CreateVariantFromRagged<int, int64>({{0, 0, 4}}, {12, 13, 14, 15}),
      *encoded_list(3).get<RaggedTensorVariant>());
}
239 
TEST_F(RaggedTensorToVariantKernelTest, NonEmptyBatchedInput) {
  // ragged_tensor =
  // [[     [ [x, x]        ],
  //        [ [x],      [x] ],
  //        [ [x]           ],
  //        [ [x]           ],
  //        [ [x]           ]],
  //  [     [ [x]           ],
  //        [ [x]           ],
  //        [ [x, x, x]     ],
  //        [ [x]           ],
  //        [ [x]           ] ]]
  const std::vector<int64> splits_level_1 = {0, 5, 10};
  const std::vector<int64> splits_level_2 = {0, 1, 3, 4,  5, 6,
                                             7, 8, 9, 10, 11};
  const std::vector<int64> splits_level_3 = {0, 2, 3, 4,  5,  6,
                                             7, 8, 9, 12, 13, 14};
  const std::vector<int> flat_values = {0, 1, 1, 2, 2, 3, 4,
                                        5, 6, 7, 8, 9, 8, 9};

  BuildEncodeRaggedTensorGraph<int, int64>(
      {splits_level_1, splits_level_2, splits_level_3}, TensorShape({14}),
      flat_values, true);
  TF_ASSERT_OK(RunOpKernel());

  const auto& encoded_list = GetOutput(0)->vec<Variant>();
  EXPECT_EQ(encoded_list.size(), 2);

  // Each batch element retains the two inner splits levels (ragged rank 2).
  ExpectRaggedTensorVariantEqual<int, int64>(
      CreateVariantFromRagged<int, int64>(
          {{0, 1, 3, 4, 5, 6}, {0, 2, 3, 4, 5, 6, 7}}, {0, 1, 1, 2, 2, 3, 4}),
      *encoded_list(0).get<RaggedTensorVariant>());
  ExpectRaggedTensorVariantEqual<int, int64>(
      CreateVariantFromRagged<int, int64>(
          {{0, 1, 2, 3, 4, 5}, {0, 1, 2, 5, 6, 7}}, {5, 6, 7, 8, 9, 8, 9}),
      *encoded_list(1).get<RaggedTensorVariant>());
}
277 
TEST_F(RaggedTensorToVariantKernelTest, NonEmptyBatchedInputInt32Splits) {
  // ragged_tensor =
  // [[     [ [x, x]        ],
  //        [ [x],      [x] ],
  //        [ [x]           ],
  //        [ [x]           ],
  //        [ [x]           ]],
  //  [     [ [x]           ],
  //        [ [x]           ],
  //        [ [x, x, x]     ],
  //        [ [x]           ],
  //        [ [x]           ] ]]
  const std::vector<int> splits_level_1 = {0, 5, 10};
  const std::vector<int> splits_level_2 = {0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  const std::vector<int> splits_level_3 = {0, 2, 3, 4,  5,  6,
                                           7, 8, 9, 12, 13, 14};
  const std::vector<int> flat_values = {0, 1, 1, 2, 2, 3, 4,
                                        5, 6, 7, 8, 9, 8, 9};

  // Same data as NonEmptyBatchedInput, but exercises int32 Tsplits.
  BuildEncodeRaggedTensorGraph<int, int32>(
      {splits_level_1, splits_level_2, splits_level_3}, TensorShape({14}),
      flat_values, true);
  TF_ASSERT_OK(RunOpKernel());

  const auto& encoded_list = GetOutput(0)->vec<Variant>();
  EXPECT_EQ(encoded_list.size(), 2);

  ExpectRaggedTensorVariantEqual<int, int32>(
      CreateVariantFromRagged<int, int32>(
          {{0, 1, 3, 4, 5, 6}, {0, 2, 3, 4, 5, 6, 7}}, {0, 1, 1, 2, 2, 3, 4}),
      *encoded_list(0).get<RaggedTensorVariant>());
  ExpectRaggedTensorVariantEqual<int, int32>(
      CreateVariantFromRagged<int, int32>(
          {{0, 1, 2, 3, 4, 5}, {0, 1, 2, 5, 6, 7}}, {5, 6, 7, 8, 9, 8, 9}),
      *encoded_list(1).get<RaggedTensorVariant>());
}
314 
TEST_F(RaggedTensorToVariantKernelTest, NonBatchInput) {
  // ragged_tensor =
  // [[ [x],         [x x],       [] ],
  //  [                              ],
  //  [ [x x x x x], [x x x]         ],
  //  [ [],          [x x x x]       ]]
  const std::vector<int64> splits_level_1 = {0, 3, 3, 5, 7};
  const std::vector<int64> splits_level_2 = {0, 1, 3, 3, 8, 11, 11, 15};
  const std::vector<int> flat_values = {1, 2,  3,  4,  5,  6,  7, 8,
                                        9, 10, 11, 12, 13, 14, 15};

  BuildEncodeRaggedTensorGraph<int, int64>({splits_level_1, splits_level_2},
                                           TensorShape({15}), flat_values,
                                           false);
  TF_ASSERT_OK(RunOpKernel());

  // With batched_input=false the op produces a single scalar variant that
  // encodes the entire ragged tensor unchanged.
  const auto& encoded_scalar = GetOutput(0)->scalar<Variant>()();
  ExpectRaggedTensorVariantEqual<int, int64>(
      CreateVariantFromRagged<int, int64>({splits_level_1, splits_level_2},
                                          flat_values),
      *encoded_scalar.get<RaggedTensorVariant>());
}
338 
TEST_F(RaggedTensorToVariantKernelTest, ShapeFnTestBatched) {
  ShapeInferenceTestOp op("RaggedTensorToVariant");
  (*op.node_def.mutable_attr())["batched_input"].set_b(true);
  // Helper to switch the RAGGED_RANK attr between cases.
  auto set_ragged_rank = [&op](int64_t rank) {
    (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(rank);
  };

  // Tests with len(ragged_splits)==0: rejected when batched.
  set_ragged_rank(0);
  INFER_ERROR(
      "ragged_rank=0 is not currently supported "
      "when batched_input=true.",
      op, "?");

  // Tests with len(ragged_splits)==1.
  set_ragged_rank(1);
  INFER_OK(op, "?;?", "[?]");
  INFER_OK(op, "?;[?]", "[?]");
  INFER_OK(op, "?;[?,?]", "[?]");
  INFER_OK(op, "[?];[5]", "[?]");
  INFER_OK(op, "[?];[5,2]", "[?]");
  INFER_OK(op, "[5];[5,2]", "[4]");
  INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[5,5];?");
  INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]");

  // Tests with len(ragged_splits)==2
  set_ragged_rank(2);
  INFER_OK(op, "?;?;?", "[?]");
  INFER_OK(op, "?;?;[?]", "[?]");
  INFER_OK(op, "?;?;[?,?]", "[?]");
  INFER_OK(op, "[?];[?];[5]", "[?]");
  INFER_OK(op, "[?];[?];[5,2]", "[?]");
  INFER_OK(op, "[6];[?];[5,2]", "[5]");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[5,5];?");

  // Tests with len(ragged_splits)==3
  set_ragged_rank(3);
  INFER_OK(op, "?;?;?;?", "[?]");
  INFER_OK(op, "?;?;?;[?]", "[?]");
  INFER_OK(op, "?;?;?;[5]", "[?]");
  INFER_OK(op, "[4];?;?;[5]", "[3]");
}
379 
TEST_F(RaggedTensorToVariantKernelTest, ShapeFnTestNotBatched) {
  ShapeInferenceTestOp op("RaggedTensorToVariant");
  (*op.node_def.mutable_attr())["batched_input"].set_b(false);
  // Helper to switch the RAGGED_RANK attr between cases.
  auto set_ragged_rank = [&op](int64_t rank) {
    (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(rank);
  };

  // Tests with len(ragged_splits)==0: allowed when not batched.
  set_ragged_rank(0);
  INFER_OK(op, "?", "[]");

  // Tests with len(ragged_splits)==1.
  set_ragged_rank(1);
  INFER_OK(op, "?;?", "[]");
  INFER_OK(op, "?;[?]", "[]");
  INFER_OK(op, "?;[?,?]", "[]");
  INFER_OK(op, "[?];[5]", "[]");
  INFER_OK(op, "[?];[5,2]", "[]");
  INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[5,5];?");
  INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]");

  // Tests with len(ragged_splits)==2
  set_ragged_rank(2);
  INFER_OK(op, "?;?;?", "[]");
  INFER_OK(op, "?;?;[?]", "[]");
  INFER_OK(op, "?;?;[?,?]", "[]");
  INFER_OK(op, "[?];[?];[5]", "[]");
  INFER_OK(op, "[?];[?];[5,2]", "[]");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[5,5];?");

  // Tests with len(ragged_splits)==3
  set_ragged_rank(3);
  INFER_OK(op, "?;?;?;?", "[]");
  INFER_OK(op, "?;?;?;[?]", "[]");
  INFER_OK(op, "?;?;?;[5]", "[]");
}
414 
TEST_F(RaggedTensorToVariantKernelTest, NonRaggedInput) {
  const std::vector<int> dense_values = {1, 2, 3, 4, 5, 6};

  // No splits at all: the input degenerates to a plain dense tensor.
  BuildEncodeRaggedTensorGraph<int, int64>({}, TensorShape({6}), dense_values,
                                           false);
  TF_ASSERT_OK(RunOpKernel());

  const auto& encoded_scalar = GetOutput(0)->scalar<Variant>()();
  ExpectRaggedTensorVariantEqual<int, int64>(
      CreateVariantFromRagged<int, int64>({}, dense_values),
      *encoded_scalar.get<RaggedTensorVariant>());
}
426 
427 }  // namespace
428 }  // namespace tensorflow
429