1 /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
16 #include "tensorflow/core/framework/tensor.h"
17
18 #include "tensorflow/core/framework/tensor.pb.h"
19 #include "tensorflow/core/framework/tensor_testutil.h"
20 #include "tensorflow/core/framework/types.h"
21 #include "tensorflow/core/framework/variant.h"
22 #include "tensorflow/core/framework/variant_encode_decode.h"
23 #include "tensorflow/core/framework/variant_tensor_data.h"
24 #include "tensorflow/core/lib/math/math_util.h"
25 #include "tensorflow/core/lib/strings/strcat.h"
26 #include "tensorflow/core/platform/logging.h"
27 #include "tensorflow/core/platform/test.h"
28 #include "tensorflow/core/platform/test_benchmark.h"
29
30 namespace tensorflow {
31
32 class TensorTestHelper {
33 public:
34 // This is an operation that can be done by VariableOp.
set_shape(Tensor * t,const TensorShape & s)35 static void set_shape(Tensor* t, const TensorShape& s) { t->set_shape(s); }
36 };
37
38 // To make TestCopies do the right thing.
operator ==(const ResourceHandle & a,const ResourceHandle & b)39 bool operator==(const ResourceHandle& a, const ResourceHandle& b) {
40 return a.device() == b.device() && a.container() == b.container() &&
41 a.name() == b.name() && a.hash_code() == b.hash_code() &&
42 a.maybe_type_name() == b.maybe_type_name();
43 }
44
operator ==(const Variant & a,const Variant & b)45 bool operator==(const Variant& a, const Variant& b) {
46 if (a.is_empty()) {
47 return b.is_empty();
48 }
49
50 if (a.TypeId() != b.TypeId()) return false;
51 if (a.TypeName() != b.TypeName()) return false;
52
53 VariantTensorData a_data, b_data;
54 a.Encode(&a_data);
55 b.Encode(&b_data);
56
57 string a_metadata;
58 string b_metadata;
59 a_data.get_metadata(&a_metadata);
60 b_data.get_metadata(&b_metadata);
61 if (a_metadata != b_metadata) return false;
62
63 if (a_data.tensors_size() != b_data.tensors_size()) return false;
64
65 for (int i = 0; i < a_data.tensors_size(); ++i) {
66 TensorProto a_proto, b_proto;
67 a_data.tensors(i).AsProtoTensorContent(&a_proto);
68 b_data.tensors(i).AsProtoTensorContent(&b_proto);
69 string a_str, b_str;
70 a_proto.SerializeToString(&a_str);
71 b_proto.SerializeToString(&b_str);
72 if (a_str != b_str) return false;
73 }
74
75 return true;
76 }
77
78 namespace {
79
// A default-constructed Tensor is an empty float vector (rank 1, 0 elems).
TEST(TensorTest, Default) {
  Tensor tensor;
  EXPECT_EQ(tensor.dtype(), DT_FLOAT);
  EXPECT_EQ(tensor.dims(), 1);
  EXPECT_EQ(tensor.NumElements(), 0);
}
86
// Verifies the type-trait assumptions the Tensor implementation relies on:
// the numeric element types must be trivial (so their buffers can be left
// uninitialized and copied as raw bytes), while string is not.
TEST(TensorTest, DataType_Traits) {
  EXPECT_TRUE(std::is_trivial<float>::value);
  EXPECT_TRUE(std::is_trivial<double>::value);
  EXPECT_TRUE(std::is_trivial<int32>::value);
  EXPECT_TRUE(std::is_trivial<uint8>::value);
  EXPECT_TRUE(std::is_trivial<uint16>::value);
  EXPECT_TRUE(std::is_trivial<int16>::value);
  EXPECT_TRUE(std::is_trivial<int8>::value);
  EXPECT_TRUE(std::is_trivial<int64>::value);
  EXPECT_TRUE(std::is_trivial<bool>::value);
  EXPECT_FALSE(std::is_trivial<string>::value);

  EXPECT_EQ(sizeof(bool), 1);

  // Unfortunately. std::complex::complex() initializes (0, 0).
  EXPECT_FALSE(std::is_trivial<complex64>::value);
  EXPECT_FALSE(std::is_trivial<complex128>::value);
  // ...but a plain pair of floats/doubles (or a two-element array) with the
  // same layout IS trivial, so complex buffers can still be handled as PODs.
  EXPECT_TRUE(std::is_trivial<float[2]>::value);
  EXPECT_TRUE(std::is_trivial<double[2]>::value);
  struct MyComplex64 {
    float re, im;
  };
  EXPECT_TRUE(std::is_trivial<MyComplex64>::value);
  struct MyComplex128 {
    double re, im;
  };
  EXPECT_TRUE(std::is_trivial<MyComplex128>::value);
}
115
116 template <typename T>
TestCopies(const Tensor & t)117 void TestCopies(const Tensor& t) {
118 {
119 LOG(INFO) << "CopyFrom()";
120 Tensor t2(t.dtype());
121 EXPECT_TRUE(t2.CopyFrom(t, t.shape()));
122 test::ExpectTensorEqual<T>(t, t2);
123 }
124 {
125 LOG(INFO) << "operator=()";
126 Tensor t2(t.dtype());
127 t2 = t;
128 test::ExpectTensorEqual<T>(t, t2);
129 }
130 {
131 LOG(INFO) << "deep copy";
132 Tensor t2(t.dtype(), t.shape());
133 t2.flat<T>() = t.flat<T>();
134 test::ExpectTensorEqual<T>(t, t2);
135 }
136 {
137 LOG(INFO) << "AsProtoField()";
138 TensorProto proto;
139 t.AsProtoField(&proto);
140 Tensor t2(t.dtype());
141 EXPECT_TRUE(t2.FromProto(proto));
142 test::ExpectTensorEqual<T>(t, t2);
143 }
144 {
145 LOG(INFO) << "AsProtoTensorContent()";
146 TensorProto proto;
147 t.AsProtoTensorContent(&proto);
148 Tensor t2(t.dtype());
149 EXPECT_TRUE(t2.FromProto(proto));
150 test::ExpectTensorEqual<T>(t, t2);
151 // Make another copy via tensor_content field.
152 *proto.mutable_tensor_content() = proto.tensor_content();
153 Tensor t3(t.dtype());
154 EXPECT_TRUE(t3.FromProto(proto));
155 test::ExpectTensorEqual<T>(t, t2);
156 }
157 {
158 LOG(INFO) << "AsTensor";
159 gtl::ArraySlice<T> values(t.flat<T>().data(), t.NumElements());
160 Tensor t2 = test::AsTensor(values, t.shape());
161 test::ExpectTensorEqual<T>(t, t2);
162 }
163 {
164 LOG(INFO) << "Move constructor";
165 Tensor t2 = t;
166 Tensor t3(std::move(t2));
167 test::ExpectTensorEqual<T>(t, t3);
168 EXPECT_TRUE(t3.IsInitialized());
169 EXPECT_FALSE(t2.IsInitialized());
170 }
171 {
172 LOG(INFO) << "Move assignment";
173 Tensor t2 = t;
174 Tensor t3 = std::move(t2);
175 Tensor* t4 = &t3;
176 *t4 = std::move(t3);
177 test::ExpectTensorEqual<T>(t, t3);
178 EXPECT_TRUE(t3.IsInitialized());
179 EXPECT_FALSE(t2.IsInitialized());
180 }
181 }
182
// Fills a 5x7 DT_HALF matrix with row*col products and runs all copy paths.
TEST(Tensor_Half, Simple) {
  Tensor t(DT_HALF, TensorShape({5, 7}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({5, 7})));
  auto m = t.matrix<Eigen::half>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      m(r, c) = static_cast<Eigen::half>(r * c);
    }
  }
  TestCopies<Eigen::half>(t);
}
193
// Fills a 5x7 DT_BFLOAT16 matrix with row*col products and runs all copy
// paths.
TEST(Tensor_Bfloat16, Simple) {
  Tensor t(DT_BFLOAT16, TensorShape({5, 7}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({5, 7})));
  auto m = t.matrix<bfloat16>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      m(r, c) = static_cast<bfloat16>(r * c);
    }
  }
  TestCopies<bfloat16>(t);
}
204
// Fills a 10x20 DT_FLOAT matrix with row*col products and runs all copy
// paths.
TEST(Tensor_Float, Simple) {
  Tensor t(DT_FLOAT, TensorShape({10, 20}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({10, 20})));
  auto m = t.matrix<float>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      m(r, c) = static_cast<float>(r * c);
    }
  }
  TestCopies<float>(t);
}
215
// Stores a named ResourceHandle in a DT_RESOURCE scalar and runs all copy
// paths (uses the operator== defined above for comparison).
TEST(Tensor_ResourceHandle, Simple) {
  Tensor t(DT_RESOURCE, TensorShape({}));
  ResourceHandle handle;
  handle.set_name("a");
  t.flat<ResourceHandle>()(0) = handle;
  TestCopies<ResourceHandle>(t);
}
223
// Exercises the copy and move paths for a DT_VARIANT scalar holding a float
// tensor. All the tests in TestCopies except the ones that serialize and
// deserialize the tensor: the consumer of a serialized Variant tensor must
// know the stored type, so the generic serialize/deserialize case is not
// tested here (see Tensor_Variant.Marshal instead).
TEST(Tensor_Variant, Simple) {
  Tensor t(DT_VARIANT, TensorShape({}));
  Tensor value(DT_FLOAT, TensorShape({}));
  value.flat<float>()(0) = 42.0f;
  t.flat<Variant>()(0) = value;
  {
    LOG(INFO) << "CopyFrom()";
    Tensor copied(t.dtype());
    EXPECT_TRUE(copied.CopyFrom(t, t.shape()));
    test::ExpectTensorEqual<Variant>(t, copied);
  }
  {
    LOG(INFO) << "operator=()";
    Tensor assigned(t.dtype());
    assigned = t;
    test::ExpectTensorEqual<Variant>(t, assigned);
  }
  {
    LOG(INFO) << "deep copy";
    Tensor deep(t.dtype(), t.shape());
    deep.flat<Variant>() = t.flat<Variant>();
    test::ExpectTensorEqual<Variant>(t, deep);
  }
  {
    LOG(INFO) << "AsTensor";
    gtl::ArraySlice<Variant> values(t.flat<Variant>().data(), t.NumElements());
    Tensor rebuilt = test::AsTensor(values, t.shape());
    test::ExpectTensorEqual<Variant>(t, rebuilt);
  }
  {
    LOG(INFO) << "Move constructor";
    Tensor source = t;
    Tensor target(std::move(source));
    test::ExpectTensorEqual<Variant>(t, target);
    EXPECT_TRUE(target.IsInitialized());
    EXPECT_FALSE(source.IsInitialized());
  }
  {
    LOG(INFO) << "Move assignment";
    Tensor source = t;
    Tensor target = std::move(source);
    Tensor* alias = &target;
    *alias = std::move(target);  // Self-move must be a no-op.
    test::ExpectTensorEqual<Variant>(t, target);
    EXPECT_TRUE(target.IsInitialized());
    EXPECT_FALSE(source.IsInitialized());
  }
}
276
// Round-trips a DT_VARIANT scalar holding a float tensor through
// AsProtoField()/FromProto() and checks the payload survives decoding.
TEST(Tensor_Variant, Marshal) {
  Tensor t(DT_VARIANT, TensorShape({}));

  Tensor payload(DT_FLOAT, TensorShape({}));
  payload.flat<float>()(0) = 42.0f;
  t.flat<Variant>()(0) = payload;

  LOG(INFO) << "AsProtoField()";
  TensorProto proto;
  t.AsProtoField(&proto);

  // This performs a decode operation.
  Tensor decoded(t.dtype());
  EXPECT_TRUE(decoded.FromProto(proto));

  Tensor* unpacked = decoded.flat<Variant>()(0).get<Tensor>();
  EXPECT_NE(unpacked, nullptr);
  EXPECT_FLOAT_EQ(unpacked->scalar<float>()(), 42.0f);
}
296
// Fills a 2x2 DT_UINT16 matrix with row*col products and runs all copy
// paths.
TEST(Tensor_UInt16, Simple) {
  Tensor t(DT_UINT16, TensorShape({2, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
  auto m = t.matrix<uint16>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      m(r, c) = uint16(r * c);
    }
  }
  TestCopies<uint16>(t);
}
307
// Fills a 2x2 DT_QINT8 matrix with row*col products and runs all copy
// paths.
TEST(Tensor_QInt8, Simple) {
  Tensor t(DT_QINT8, TensorShape({2, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
  auto m = t.matrix<qint8>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      m(r, c) = qint8(r * c);
    }
  }
  TestCopies<qint8>(t);
}
318
// Fills a 2x2 DT_QUINT8 matrix with row*col products and runs all copy
// paths. Uses the TensorFlow `quint8` alias (a typedef of Eigen::QUInt8)
// for consistency with the other quantized-type tests in this file
// (Tensor_QInt8, Tensor_QInt32), which use the tensorflow:: aliases.
TEST(Tensor_QUInt8, Simple) {
  Tensor t(DT_QUINT8, TensorShape({2, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
      t.matrix<quint8>()(a, b) = quint8(a * b);
    }
  }
  TestCopies<quint8>(t);
}
329
// Fills a 2x2 DT_QINT32 matrix with row*col products and runs all copy
// paths.
TEST(Tensor_QInt32, Simple) {
  Tensor t(DT_QINT32, TensorShape({2, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
  auto m = t.matrix<qint32>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      m(r, c) = qint32(static_cast<int32>(r * c));
    }
  }
  TestCopies<qint32>(t);
}
340
// Fixture for the reshape-family accessors (shaped, unaligned_shaped,
// bit_casted_shaped, flat_*). Provides:
//   t      - a 2x3x4x5 float tensor with sentinels written to the first
//            (0.01f) and last (0.02f) elements in SetUp(), and
//   zero_t - a tensor whose shape contains zero-sized dimensions.
class TensorReshapeTest : public ::testing::Test {
 protected:
  Tensor t;
  Tensor zero_t;

  TensorReshapeTest()
      : t(DT_FLOAT, TensorShape({2, 3, 4, 5})),
        zero_t(DT_FLOAT, TensorShape({3, 0, 2, 0, 5})) {}

  void SetUp() override {
    EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 3, 4, 5})));
    EXPECT_TRUE(zero_t.shape().IsSameSize(TensorShape({3, 0, 2, 0, 5})));

    auto tensor = t.tensor<float, 4>();
    EXPECT_EQ(2, tensor.dimension(0));
    EXPECT_EQ(3, tensor.dimension(1));
    EXPECT_EQ(4, tensor.dimension(2));
    EXPECT_EQ(5, tensor.dimension(3));

    // Set first and last elements.
    tensor(0, 0, 0, 0) = 0.01f;
    tensor(1, 2, 3, 4) = 0.02f;
  }

  // Pointer-to-member types for the non-const / const reshape accessors.
  template <typename T>
  using ReshapeFunc = T (Tensor::*)(gtl::ArraySlice<int64>);
  template <typename T>
  using ConstReshapeFunc = T (Tensor::*)(gtl::ArraySlice<int64>) const;

  // Invokes the given non-const reshape accessor on `t` with `sizes` and
  // checks the result.
  template <typename T, ReshapeFunc<T> Func>
  void TestReshape(std::initializer_list<int64> sizes) {
    T shaped = (t.*Func)(sizes);
    TestReshapeImpl(shaped, sizes);
  }

  // Same, for the const overloads of the reshape accessors.
  template <typename T, ConstReshapeFunc<T> Func>
  void TestReshape(std::initializer_list<int64> sizes) {
    T shaped = (static_cast<const Tensor&>(t).*Func)(sizes);
    TestReshapeImpl(shaped, sizes);
  }

  // Verifies that `shaped` has the requested dimensions and that its first
  // and last elements match the sentinels written in SetUp(), even when the
  // view was produced by a bitcast to a different scalar type (the sentinel
  // bytes are reinterpreted accordingly below).
  template <typename T>
  void TestReshapeImpl(T shaped, std::initializer_list<int64> sizes) {
    auto iter = sizes.begin();
    for (int i = 0; i < shaped.rank(); ++i, ++iter) {
      EXPECT_EQ(*iter, shaped.dimension(i));
    }

    using Index = typename T::Index;
    using Scalar = typename T::Scalar;
    constexpr int N = T::NumIndices;

    // To handle the cast when `shaped` is bit casted into a different type.
    const float expected_first = 0.01f;
    Eigen::DSizes<Index, N> coord;
    EXPECT_EQ(shaped(coord), *reinterpret_cast<const Scalar*>(&expected_first));

    // Move `coord` to the last element of the view.
    for (int i = 0; i < N; ++i) {
      coord[i] = shaped.dimension(i) - 1;
    }
    const float expected_last = 0.02f;
    constexpr int kNumScalarPerFloat =
        sizeof(float) / sizeof(Scalar);  // Assuming even divide.
    EXPECT_EQ(shaped(coord), reinterpret_cast<const Scalar*>(
                                 &expected_last)[kNumScalarPerFloat - 1]);
  }
};
408
// Runs shaped(), unaligned_shaped(), and bit_casted_shaped() (const and
// non-const; bitcasting both to float and to the same-width int32) for
// every rank from 1 to 4 that preserves the fixture's 120 elements.
TEST_F(TensorReshapeTest, Reshape) {
  LOG(INFO) << "shaped";

// Derives the rank N from the number of varargs, then exercises each
// reshape accessor with the given target shape.
#define TEST_RESHAPE(...)                                                   \
  {                                                                         \
    constexpr int N = (sizeof((int[]){__VA_ARGS__}) / sizeof(int));         \
    TestReshape<TTypes<float, N>::Tensor, &Tensor::shaped<float, N>>(       \
        {__VA_ARGS__});                                                     \
    TestReshape<TTypes<float, N>::ConstTensor, &Tensor::shaped<float, N>>(  \
        {__VA_ARGS__});                                                     \
    TestReshape<TTypes<float, N>::UnalignedTensor,                          \
                &Tensor::unaligned_shaped<float, N>>({__VA_ARGS__});        \
    TestReshape<TTypes<float, N>::UnalignedConstTensor,                     \
                &Tensor::unaligned_shaped<float, N>>({__VA_ARGS__});        \
    TestReshape<TTypes<float, N>::Tensor,                                   \
                &Tensor::bit_casted_shaped<float, N>>({__VA_ARGS__});       \
    TestReshape<TTypes<float, N>::ConstTensor,                              \
                &Tensor::bit_casted_shaped<float, N>>({__VA_ARGS__});       \
    TestReshape<TTypes<int32, N>::Tensor,                                   \
                &Tensor::bit_casted_shaped<int32, N>>({__VA_ARGS__});       \
    TestReshape<TTypes<int32, N>::ConstTensor,                              \
                &Tensor::bit_casted_shaped<int32, N>>({__VA_ARGS__});       \
  }

  TEST_RESHAPE(120);
  TEST_RESHAPE(6, 20);
  TEST_RESHAPE(6, 4, 5);
  TEST_RESHAPE(2, 3, 4, 5);
#undef TEST_RESHAPE
}
439
// bit_casted_shaped() to a NARROWER scalar type multiplies the element
// count: 120 floats are viewed as 480 uint8s (4x) or 240 int16s (2x).
TEST_F(TensorReshapeTest, BitcastReshapeDifferentSize) {
// Rank-N bitcast view to uint8 with the given shape (product must be 480).
#define TEST_BITCAST8_RESHAPE(...)                                    \
  {                                                                   \
    constexpr int N = (sizeof((int[]){__VA_ARGS__}) / sizeof(int));   \
    TestReshape<TTypes<uint8, N>::Tensor,                             \
                &Tensor::bit_casted_shaped<uint8, N>>({__VA_ARGS__}); \
  }

  TEST_BITCAST8_RESHAPE(480);
  TEST_BITCAST8_RESHAPE(24, 20);
  TEST_BITCAST8_RESHAPE(6, 16, 5);
  TEST_BITCAST8_RESHAPE(2, 3, 4, 20);
#undef TEST_BITCAST8_RESHAPE
// Rank-N bitcast view to int16 with the given shape (product must be 240).
#define TEST_BITCAST16_RESHAPE(...)                                   \
  {                                                                   \
    constexpr int N = (sizeof((int[]){__VA_ARGS__}) / sizeof(int));   \
    TestReshape<TTypes<int16, N>::Tensor,                             \
                &Tensor::bit_casted_shaped<int16, N>>({__VA_ARGS__}); \
  }

  TEST_BITCAST16_RESHAPE(240);
  TEST_BITCAST16_RESHAPE(6, 40);
  TEST_BITCAST16_RESHAPE(12, 4, 5);
  TEST_BITCAST16_RESHAPE(2, 3, 8, 5);
  TEST_BITCAST16_RESHAPE(2, 3, 4, 1, 10);
#undef TEST_BITCAST16_RESHAPE
}
467
// Reshapes whose element count doesn't match must CHECK-fail with an
// "requested vs. actual" element-count message. For bit_casted_shaped the
// counts are in units of the target scalar type (e.g. 4 uint8 per float).
TEST_F(TensorReshapeTest, ReshapeError) {
  EXPECT_DEATH((t.shaped<float, 0>({})), "1 vs. 120");
  EXPECT_DEATH((t.shaped<float, 1>({119})), "119 vs. 120");
  EXPECT_DEATH((t.shaped<float, 4>({2, 3, 4, 6})), "144 vs. 120");

  EXPECT_DEATH((t.unaligned_shaped<float, 0>({})), "1 vs. 120");
  EXPECT_DEATH((t.unaligned_shaped<float, 1>({119})), "119 vs. 120");
  EXPECT_DEATH((t.unaligned_shaped<float, 4>({2, 3, 4, 6})), "144 vs. 120");

  // Counts below are in bytes because the target scalar is 1 byte wide...
  EXPECT_DEATH((t.bit_casted_shaped<float, 0>({})), "4 vs. 480");
  EXPECT_DEATH((t.bit_casted_shaped<float, 1>({119})), "476 vs. 480");
  EXPECT_DEATH((t.bit_casted_shaped<float, 4>({2, 3, 4, 6})), "576 vs. 480");

  Tensor string_tensor{DT_STRING, {10}};
  // Note that the error message compare # of elements, not # of bytes.
  EXPECT_DEATH((string_tensor.bit_casted_shaped<string, 1>({9})), "9 vs. 10");
}
485
// flat() must expose the 2x3x4x5 fixture as a rank-1 view of 120 elements,
// with the SetUp() sentinels visible at both ends.
TEST_F(TensorReshapeTest, Flat) {
  LOG(INFO) << "flat";
  {
    auto flat = t.flat<float>();
    EXPECT_EQ(120, flat.dimension(0));
    // First and last sentinels written by SetUp(). (The original repeated
    // the flat(0) check twice; the duplicate is removed.)
    EXPECT_EQ(flat(0), 0.01f);
    EXPECT_EQ(flat(119), 0.02f);
  }
}
496
// flat_inner_dims<T, NDIMS>() keeps the last NDIMS-1 dimensions and
// collapses everything before them into dimension 0, padding with leading
// 1s when NDIMS exceeds the tensor's rank. Verified on the 2x3x4x5 fixture
// and on shapes containing zero-sized dimensions.
TEST_F(TensorReshapeTest, FlatInnerDims) {
  LOG(INFO) << "flat_inner_dims";
  {
    // Default NDIMS=2: {2,3,4} collapse into 24.
    auto flat_inner_dims = t.flat_inner_dims<float>();
    EXPECT_EQ(24, flat_inner_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_dims.dimension(1));
    EXPECT_EQ(flat_inner_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(23, 4), 0.02f);
  }
  {
    // NDIMS=3: {2,3} collapse into 6.
    auto flat_inner_dims = t.flat_inner_dims<float, 3>();
    EXPECT_EQ(6, flat_inner_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_dims.dimension(2));
    EXPECT_EQ(flat_inner_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(5, 3, 4), 0.02f);
  }
  {
    // NDIMS=5 > rank 4: a leading dimension of size 1 is prepended.
    auto flat_inner_dims = t.flat_inner_dims<float, 5>();
    EXPECT_EQ(1, flat_inner_dims.dimension(0));
    EXPECT_EQ(2, flat_inner_dims.dimension(1));
    EXPECT_EQ(3, flat_inner_dims.dimension(2));
    EXPECT_EQ(4, flat_inner_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_dims.dimension(4));
    EXPECT_EQ(flat_inner_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(0, 1, 2, 3, 4), 0.02f);
  }
  {
    // Zero-sized dims: {3,0,2,0} collapse into 0.
    auto flat_inner_dims = zero_t.flat_inner_dims<float>();
    EXPECT_EQ(0, flat_inner_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_dims.dimension(1));
  }
  {
    auto flat_inner_dims = zero_t.flat_inner_dims<float, 3>();
    EXPECT_EQ(0, flat_inner_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_dims.dimension(2));
  }
  {
    // NDIMS equal to rank: the shape comes back unchanged.
    auto flat_inner_dims = zero_t.flat_inner_dims<float, 5>();
    EXPECT_EQ(3, flat_inner_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_dims.dimension(2));
    EXPECT_EQ(0, flat_inner_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_dims.dimension(4));
  }
}
544
// flat_outer_dims<T, NDIMS>() keeps the first NDIMS-1 dimensions and
// collapses everything after them into the last dimension, padding with
// trailing 1s when NDIMS exceeds the tensor's rank.
TEST_F(TensorReshapeTest, FlatOuterDims) {
  LOG(INFO) << "flat_outer_dims";
  {
    // Default NDIMS=2: {3,4,5} collapse into 60.
    auto flat_outer_dims = t.flat_outer_dims<float>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(60, flat_outer_dims.dimension(1));
    EXPECT_EQ(flat_outer_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 59), 0.02f);
  }
  {
    // NDIMS=3: {4,5} collapse into 20.
    auto flat_outer_dims = t.flat_outer_dims<float, 3>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_outer_dims.dimension(1));
    EXPECT_EQ(20, flat_outer_dims.dimension(2));
    EXPECT_EQ(flat_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 2, 19), 0.02f);
  }
  {
    // NDIMS=5 > rank 4: a trailing dimension of size 1 is appended.
    auto flat_outer_dims = t.flat_outer_dims<float, 5>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_outer_dims.dimension(4));
    EXPECT_EQ(flat_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 2, 3, 4, 0), 0.02f);
  }
  {
    // Zero-sized dims: {0,2,0,5} collapse into 0.
    auto flat_outer_dims = zero_t.flat_outer_dims<float>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
  }
  {
    auto flat_outer_dims = zero_t.flat_outer_dims<float, 3>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_outer_dims.dimension(2));
  }
  {
    // NDIMS equal to rank: the shape comes back unchanged.
    auto flat_outer_dims = zero_t.flat_outer_dims<float, 5>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_outer_dims.dimension(2));
    EXPECT_EQ(0, flat_outer_dims.dimension(3));
    EXPECT_EQ(5, flat_outer_dims.dimension(4));
  }
}
592
// flat_inner_outer_dims<T, NDIMS>(begin) keeps NDIMS dimensions starting at
// index `begin` (negative begin pads with leading 1s; dimensions past the
// end pad with trailing 1s), collapsing any remaining outer/inner
// dimensions into the first/last kept dimension respectively.
TEST_F(TensorReshapeTest, FlatInnerOuterDims) {
  LOG(INFO) << "flat_inner_outer_dims";
  {
    // Identity: NDIMS == rank, begin == 0.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 4>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 3, 4), 0.02f);
  }
  {
    // begin=-2 pads two leading 1s.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 6>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 3, 4), 0.02f);
  }
  {
    // NDIMS beyond the end pads two trailing 1s.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 6>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 3, 4, 0, 0), 0.02f);
  }
  {
    // Padding on both sides at once.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 8>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(6));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(7));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 3, 4, 0, 0), 0.02f);
  }
  {
    // begin=1 collapses {2,3} into the first kept dimension: 6.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 3>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 3, 4), 0.02f);
  }
  {
    // Outer collapse plus trailing padding.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 5>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 3, 4, 0, 0), 0.02f);
  }
  {
    // Inner collapse: {4,5} into the last kept dimension: 20.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 3>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 19), 0.02f);
  }
  {
    // Leading padding plus inner collapse.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 5>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 19), 0.02f);
  }
  {
    // Collapse on both sides: {2,3}->6 and {4,5}->20.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 2>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(flat_inner_outer_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 19), 0.02f);
  }
  // The remaining cases cover shapes with zero-sized dimensions.
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 2>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(2));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 5>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(4));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 2>(3);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(1));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(2);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(1);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(2));
  }
}
719
// reinterpret_last_dimension<int32, 4>() views a qint8 NCHW_VECT_C tensor
// (last dimension = 4 packed int8s) as an int32 NCHW tensor. Each resulting
// int32 must contain exactly the 4 bytes written into the corresponding
// vect_c packet.
TEST(ReinterpretLastDimension, Reinterpret_NCHW_VECT_C_as_NCHW) {
  LOG(INFO) << "reinterpret_last_dimension";
  {
    Tensor t_nchw_vect_c(DT_QINT8, TensorShape({2, 3, 5, 7, 4}));
    auto nchw_vect_c = t_nchw_vect_c.tensor<qint8, 5>();
    Tensor t_expected_nchw(DT_INT32, TensorShape({2, 3, 5, 7}));
    auto expected_nchw = t_expected_nchw.tensor<int32, 4>();
    int8 val = 0;
    for (int n = 0; n < t_nchw_vect_c.shape().dim_size(0); ++n) {
      for (int c = 0; c < t_nchw_vect_c.shape().dim_size(1); ++c) {
        // NOTE(review): `val` advances once per h here in addition to once
        // per packet byte below; the exact sequence does not matter for the
        // test since the expected values are derived from the same writes.
        for (int h = 0; h < t_nchw_vect_c.shape().dim_size(2); ++h, ++val) {
          int8 packet[4];
          for (int w = 0; w < t_nchw_vect_c.shape().dim_size(3); ++w) {
            // Write four consecutive bytes into the vect_c slots and build
            // the expected int32 from the same bytes.
            packet[0] = nchw_vect_c(n, c, h, w, 0) = ++val;
            packet[1] = nchw_vect_c(n, c, h, w, 1) = ++val;
            packet[2] = nchw_vect_c(n, c, h, w, 2) = ++val;
            packet[3] = nchw_vect_c(n, c, h, w, 3) = ++val;
            expected_nchw(n, c, h, w) = *reinterpret_cast<int32*>(&packet[0]);
          }
        }
      }
    }
    // Both the non-const and the const overloads must agree.
    auto actual_nchw = t_nchw_vect_c.reinterpret_last_dimension<int32, 4>();
    const auto& const_t_nchw_vect_c = t_nchw_vect_c;
    auto const_actual_nchw =
        const_t_nchw_vect_c.reinterpret_last_dimension<int32, 4>();
    for (int n = 0; n < t_nchw_vect_c.shape().dim_size(0); ++n) {
      for (int c = 0; c < t_nchw_vect_c.shape().dim_size(1); ++c) {
        for (int h = 0; h < t_nchw_vect_c.shape().dim_size(2); ++h) {
          for (int w = 0; w < t_nchw_vect_c.shape().dim_size(3); ++w) {
            EXPECT_EQ(expected_nchw(n, c, h, w), actual_nchw(n, c, h, w));
            EXPECT_EQ(expected_nchw(n, c, h, w), const_actual_nchw(n, c, h, w));
          }
        }
      }
    }
  }
}
758
// Checks the scalar()/vec()/flat()/matrix() accessors on single-element and
// zero-element tensors: sizes, ranks, and write-through-one-view /
// read-through-another behavior.
TEST(Tensor_Scalar, Basics) {
  {
    // Rank-0 bool scalar.
    Tensor t(DT_BOOL, TensorShape({}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<bool>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.scalar<bool>()() = true;
    EXPECT_TRUE(Tt());
  }
  {
    // Rank-0 float scalar.
    Tensor t(DT_FLOAT, TensorShape({}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<float>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.scalar<float>()() = 123.45f;
    EXPECT_FLOAT_EQ(123.45f, Tt());
  }
  {
    // Rank-1, single-element float: vec() view.
    Tensor t(DT_FLOAT, TensorShape({1}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.vec<float>();
    EXPECT_EQ(1, Tt.size());
    t.vec<float>()(0) = 123.45f;
    EXPECT_FLOAT_EQ(123.45f, Tt(0));
  }
  {
    // scalar() works on any single-element shape; a write via flat() is
    // visible through the scalar view.
    Tensor t(DT_FLOAT, TensorShape({1, 1, 1}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<float>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.flat<float>()(0) = 123.45f;
    EXPECT_FLOAT_EQ(123.45f, Tt());
  }
  {
    // Same checks for the non-POD string element type.
    Tensor t(DT_STRING, TensorShape({}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<string>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.scalar<string>()() = "foo";
    EXPECT_EQ("foo", Tt());
  }
  {
    Tensor t(DT_STRING, TensorShape({1}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.vec<string>();
    EXPECT_EQ(1, Tt.size());
    t.flat<string>()(0) = "foo";
    EXPECT_EQ("foo", Tt(0));
  }
  {
    Tensor t(DT_STRING, TensorShape({1, 1, 1}));
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<string>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    t.flat<string>()(0) = "bar";
    EXPECT_EQ("bar", Tt());
  }
  {
    // Zero-element tensor: views are empty but keep the dimensions.
    Tensor t(DT_FLOAT, TensorShape({0, 1}));
    EXPECT_EQ(0, t.NumElements());
    auto Tt = t.flat<float>();
    EXPECT_EQ(0, Tt.size());
    auto Tm = t.matrix<float>();
    EXPECT_EQ(0, Tm.size());
    EXPECT_EQ(0, Tm.dimensions()[0]);
    EXPECT_EQ(1, Tm.dimensions()[1]);
  }
}
832
// Checks the host-scalar constructors Tensor(bool), Tensor(float), and
// Tensor(string): dtype deduction, single-element shape, and that the
// scalar view is writable.
TEST(Tensor_HostScalar, Basics) {
  {
    Tensor t(true);
    EXPECT_EQ(DT_BOOL, t.dtype());
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<bool>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    EXPECT_TRUE(Tt());
    Tt() = false;
    EXPECT_FALSE(Tt());
  }
  {
    Tensor t(123.45f);
    EXPECT_EQ(DT_FLOAT, t.dtype());
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<float>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    EXPECT_FLOAT_EQ(123.45f, Tt());
    Tt() = 42.0f;
    EXPECT_FLOAT_EQ(42.0f, Tt());
  }
  {
    // NOTE(mrry): Use long enough strings so that the contents are dynamically
    // allocated, and the absence of a call to the string destructor would
    // cause a memory leak.
    Tensor t("fooooooooooooooooooooooooooooooooooooo");
    EXPECT_EQ(DT_STRING, t.dtype());
    EXPECT_EQ(1, t.NumElements());
    auto Tt = t.scalar<string>();
    EXPECT_EQ(1, Tt.size());
    EXPECT_EQ(0, Tt.rank());
    EXPECT_EQ("fooooooooooooooooooooooooooooooooooooo", Tt());
    Tt() = "baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar";
    EXPECT_EQ("baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar", Tt());
  }
}
871
// A test to experiment with a way to assign to a subset of a tensor:
// writes row slices through a 2-D reshaped view and verifies the writes are
// visible through the original 4-D accessor (both alias the same buffer).
TEST(Tensor_Float, Reshape_And_Slice_Assignment) {
  Tensor t(DT_FLOAT, TensorShape({10, 4, 3, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({10, 4, 3, 2})));

  // Get the N dimensional tensor (N==4 here)
  auto e_t = t.tensor<float, 4>();
  // Reshape to view it as a two-dimensional tensor
  auto e_2d = t.shaped<float, 2>({10, 4 * 3 * 2});
  for (int i = 0; i < 10; i++) {
    // Assign a 1 x 4*3*2 matrix (really vector) to a slice of size
    // 1 x 4*3*2 in e_t.
    Eigen::Tensor<float, 2, Eigen::RowMajor> m(1, 4 * 3 * 2);
    m.setConstant(i * 2.0);

    Eigen::DSizes<Eigen::DenseIndex, 2> indices(i, 0);
    Eigen::DSizes<Eigen::DenseIndex, 2> sizes(1, 4 * 3 * 2);
    e_2d.slice(indices, sizes) = m;
  }
  // Every element of 4-D row i must now equal i*2.
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 4; j++) {
      for (int k = 0; k < 3; k++) {
        for (int l = 0; l < 2; l++) {
          EXPECT_EQ(e_t(i, j, k, l), i * 2.0f);
          LOG(INFO) << i << "," << j << "," << k << "," << l
                    << " &e_t(i, j, k, l): " << &e_t(i, j, k, l) << " = "
                    << e_t(i, j, k, l);
        }
      }
    }
  }
}
904
TEST(Tensor_String, Simple) {
  Tensor t = test::AsTensor<string>(
      {"hello", "world", "machine", "learning", "new", "york"},
      TensorShape({3, 2}));
  auto s = t.shape();
  ASSERT_EQ(s.dims(), 2);
  ASSERT_EQ(s.dim_size(0), 3);
  ASSERT_EQ(s.dim_size(1), 2);
  auto m = t.matrix<string>();
  // TotalBytes for DT_STRING counts the string objects themselves plus the
  // payload of each element: "hello"(5) + "world"(5) + "machine"(7) +
  // "learning"(8) + "new"(3) + "york"(4).
  EXPECT_EQ(t.TotalBytes(), 3 * 2 * sizeof(string) + 5 + 5 + 7 + 8 + 3 + 4);

  // Elements are laid out in row-major order.
  EXPECT_EQ(m(0, 0), "hello");
  EXPECT_EQ(m(0, 1), "world");
  EXPECT_EQ(m(1, 0), "machine");
  EXPECT_EQ(m(1, 1), "learning");
  EXPECT_EQ(m(2, 0), "new");
  EXPECT_EQ(m(2, 1), "york");

  TestCopies<string>(t);
}
925
// Verifies that an elementwise Eigen expression can be assigned through
// Tensor::flat(): doubling every value must produce the expected tensor.
TEST(Tensor_Float, SimpleWithHelper) {
  const Tensor src = test::AsTensor<float>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor doubled(src.dtype(), src.shape());
  doubled.flat<float>() = src.flat<float>() * 2.0f;
  const Tensor expected =
      test::AsTensor<float>({0, 2, 4, 6, 8, 10}, src.shape());
  test::ExpectTensorEqual<float>(doubled, expected);
}
933
// Same elementwise-doubling check as Tensor_Float.SimpleWithHelper, for int32.
TEST(Tensor_Int32, SimpleWithHelper) {
  const Tensor src = test::AsTensor<int32>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor doubled(src.dtype(), src.shape());
  doubled.flat<int32>() = src.flat<int32>() * 2;
  const Tensor expected =
      test::AsTensor<int32>({0, 2, 4, 6, 8, 10}, src.shape());
  test::ExpectTensorEqual<int32>(doubled, expected);
}
941
// Same elementwise-doubling check as above, for uint16. The scalar factor is
// written as uint16(2) so the Eigen expression stays in uint16 arithmetic.
TEST(Tensor_UInt16, SimpleWithHelper) {
  const Tensor src = test::AsTensor<uint16>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor doubled(src.dtype(), src.shape());
  doubled.flat<uint16>() = src.flat<uint16>() * uint16(2);
  const Tensor expected =
      test::AsTensor<uint16>({0, 2, 4, 6, 8, 10}, src.shape());
  test::ExpectTensorEqual<uint16>(doubled, expected);
}
949
// Elementwise addition of a negative quantized constant for qint8.
TEST(Tensor_QInt8, SimpleWithHelper) {
  const Tensor src = test::AsTensor<qint8>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor shifted(src.dtype(), src.shape());
  shifted.flat<qint8>() = src.flat<qint8>() + qint8(-2);
  const Tensor expected = test::AsTensor<qint8>({-2, -1, 0, 1, 2, 3}, {2, 3});
  test::ExpectTensorEqual<qint8>(shifted, expected);
}
957
// Elementwise addition of a positive quantized constant for quint8.
TEST(Tensor_QUInt8, SimpleWithHelper) {
  const Tensor src = test::AsTensor<quint8>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor shifted(src.dtype(), src.shape());
  shifted.flat<quint8>() = src.flat<quint8>() + quint8(2);
  const Tensor expected = test::AsTensor<quint8>({2, 3, 4, 5, 6, 7}, {2, 3});
  test::ExpectTensorEqual<quint8>(shifted, expected);
}
965
TEST(Tensor_Int64, SimpleWithHelper) {
  // Shift the test values into the top 16 bits so the arithmetic would
  // overflow (and the test would fail) if only 32-bit math were used.
  Tensor t1 = test::AsTensor<int64>(
      {0LL << 48, 1LL << 48, 2LL << 48, 3LL << 48, 4LL << 48, 5LL << 48},
      {2, 3});
  Tensor t2(t1.dtype(), t1.shape());
  t2.flat<int64>() = t1.flat<int64>() * static_cast<int64>(2);
  Tensor t3 = test::AsTensor<int64>(
      {0LL << 48, 2LL << 48, 4LL << 48, 6LL << 48, 8LL << 48, 10LL << 48},
      {2, 3});
  test::ExpectTensorEqual<int64>(t2, t3);
}
977
// Builds a string tensor cell-by-cell and checks it equals the tensor
// produced by test::AsTensor from the same values.
TEST(Tensor_String, SimpleWithHelper) {
  Tensor expected =
      test::AsTensor<string>({"0", "1", "2", "3", "4", "5"}, {2, 3});
  Tensor filled(DT_STRING, {2, 3});
  auto cells = filled.matrix<string>();
  for (int row = 0; row < 2; ++row) {
    for (int col = 0; col < 3; ++col) {
      // Cell (row, col) holds the decimal string of its row-major index.
      cells(row, col) = strings::StrCat(row * 3 + col);
    }
  }

  // Test with helper.
  test::ExpectTensorEqual<string>(expected, filled);
}
990
// Builds a bool tensor cell-by-cell (alternating false/true in row-major
// order) and checks it equals the tensor produced by test::AsTensor.
TEST(Tensor_Bool, SimpleWithHelper) {
  Tensor expected =
      test::AsTensor<bool>({false, true, false, true, false, true}, {2, 3});

  Tensor filled(DT_BOOL, {2, 3});
  auto cells = filled.matrix<bool>();
  for (int row = 0; row < 2; ++row) {
    for (int col = 0; col < 3; ++col) {
      cells(row, col) = ((row + col) % 2) != 0;
    }
  }

  // Test with helper.
  test::ExpectTensorEqual<bool>(expected, filled);
}
1005
// Round-trips a randomly filled 4-D complex64 tensor through the copy and
// proto-serialization helpers.
TEST(Tensor_Complex, Simple64) {
  Tensor t(DT_COMPLEX64, {4, 5, 3, 7});
  t.flat<complex64>().setRandom();
  TestCopies<complex64>(t);
}
1011
// Same as Simple64, but for double-precision complex (complex128).
TEST(Tensor_Complex, Simple128) {
  Tensor t(DT_COMPLEX128, {4, 5, 3, 7});
  t.flat<complex128>().setRandom();
  TestCopies<complex128>(t);
}
1017
TEST(Tensor_Complex, SimpleWithHelper64) {
  {
    // Mixes the various ways a complex64 literal can be spelled: implicit
    // conversion from int, brace-init {re, im}, and explicit constructors.
    Tensor t1 = test::AsTensor<complex64>({0,
                                           {1, 1},
                                           complex64(2),
                                           complex64(3, 3),
                                           complex64(0, 4),
                                           complex64(2, 5)},
                                          {2, 3});
    Tensor t2(t1.dtype(), t1.shape());
    // Multiplying by 2i maps (a + bi) to (-2b + 2ai).
    t2.flat<complex64>() = t1.flat<complex64>() * complex64(0, 2);
    Tensor t3 = test::AsTensor<complex64>(
        {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
        // shape
        {2, 3});
    test::ExpectTensorEqual<complex64>(t2, t3);
  }

  // Does some numeric operations for complex64 numbers.
  {
    const float PI = std::acos(-1);
    // Unit complex number at 45 degrees: multiplying by it rotates by PI/4.
    const complex64 rotate_45 = std::polar(1.0f, PI / 4);

    // x contains all the 8-th root of unity.
    Tensor x(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      x.vec<complex64>()(i) = MathUtil::IPow(rotate_45, i);
    }

    // Shift the roots by 45 degree.
    Tensor y(DT_COMPLEX64, TensorShape({8}));
    y.vec<complex64>() = x.vec<complex64>() * rotate_45;
    Tensor y_expected(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      y_expected.vec<complex64>()(i) = MathUtil::IPow(rotate_45, i + 1);
    }
    test::ExpectTensorNear<complex64>(y, y_expected, 1e-5);

    // Raise roots to the power of 8: every 8-th root of unity maps to 1.
    Tensor z(DT_COMPLEX64, TensorShape({8}));
    z.vec<complex64>() = x.vec<complex64>().pow(8);
    Tensor z_expected(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      z_expected.vec<complex64>()(i) = 1;
    }
    test::ExpectTensorNear<complex64>(z, z_expected, 1e-5);
  }
}
1066
// Double-precision twin of SimpleWithHelper64 — same algebra on complex128.
TEST(Tensor_Complex, SimpleWithHelper128) {
  {
    Tensor t1 = test::AsTensor<complex128>({0,
                                            {1, 1},
                                            complex128(2),
                                            complex128(3, 3),
                                            complex128(0, 4),
                                            complex128(2, 5)},
                                           {2, 3});
    Tensor t2(t1.dtype(), t1.shape());
    // Multiplying by 2i maps (a + bi) to (-2b + 2ai).
    t2.flat<complex128>() = t1.flat<complex128>() * complex128(0, 2);
    Tensor t3 = test::AsTensor<complex128>(
        {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
        // shape
        {2, 3});
    test::ExpectTensorEqual<complex128>(t2, t3);
  }

  // Does some numeric operations for complex128 numbers.
  {
    const double PI = std::acos(-1);
    // Unit complex number at 45 degrees: multiplying by it rotates by PI/4.
    const complex128 rotate_45 = std::polar(1.0, PI / 4);

    // x contains all the 8-th root of unity.
    Tensor x(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      x.vec<complex128>()(i) = MathUtil::IPow(rotate_45, i);
    }

    // Shift the roots by 45 degree.
    Tensor y(DT_COMPLEX128, TensorShape({8}));
    y.vec<complex128>() = x.vec<complex128>() * rotate_45;
    Tensor y_expected(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      y_expected.vec<complex128>()(i) = MathUtil::IPow(rotate_45, i + 1);
    }
    test::ExpectTensorNear<complex128>(y, y_expected, 1e-5);

    // Raise roots to the power of 8: every 8-th root of unity maps to 1.
    Tensor z(DT_COMPLEX128, TensorShape({8}));
    z.vec<complex128>() = x.vec<complex128>().pow(8);
    Tensor z_expected(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      z_expected.vec<complex128>()(i) = 1;
    }
    test::ExpectTensorNear<complex128>(z, z_expected, 1e-5);
  }
}
1115
// An allocator that always returns nullptr, for testing
// failures to allocate.
class DummyCPUAllocator : public Allocator {
 public:
  DummyCPUAllocator() = default;
  // Reports the same name as the regular CPU allocator.
  string Name() override { return "cpu"; }
  // Always fails, simulating an out-of-memory condition.
  void* AllocateRaw(size_t alignment, size_t num_bytes) override {
    return nullptr;
  }
  // No-op: AllocateRaw never hands out memory, so there is nothing to free.
  void DeallocateRaw(void* ptr) override {}
};
1127
TEST(Tensor, SharesBufferWith) {
  Tensor a_empty;
  Tensor b_empty;
  Tensor a(DT_FLOAT, TensorShape({1}));
  Tensor b(DT_FLOAT, TensorShape({1}));
  Tensor copy(a);  // Copy-construction shares the buffer; it does not clone.
  // Uninitialized tensors have no buffer, so they never "share" — not even
  // with themselves.
  EXPECT_FALSE(a_empty.SharesBufferWith(a_empty));
  EXPECT_FALSE(a_empty.SharesBufferWith(b_empty));
  EXPECT_FALSE(a_empty.SharesBufferWith(a));
  EXPECT_FALSE(a_empty.SharesBufferWith(copy));
  // An allocated tensor shares with itself and its copies, but not with an
  // independently allocated tensor of the same type/shape.
  EXPECT_TRUE(a.SharesBufferWith(a));
  EXPECT_FALSE(a.SharesBufferWith(b));
  EXPECT_TRUE(a.SharesBufferWith(copy));
}
1142
// Verifies that allocation failure (via DummyCPUAllocator) leaves tensors
// uninitialized and makes FromProto fail gracefully rather than crash.
TEST(Tensor, FailureToAllocate) {
  TensorShape shape({1});
  DummyCPUAllocator allocator;
  {
    Tensor a(&allocator, DT_FLOAT, shape);
    ASSERT_FALSE(a.IsInitialized());
  }

  // Float
  {
    Tensor t(DT_FLOAT, TensorShape({1}));
    t.vec<float>()(0) = 1.0;
    TensorProto proto;
    t.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor a(&allocator, DT_FLOAT, TensorShape({1}));
    ASSERT_FALSE(a.FromProto(&allocator, proto));
  }

  // String (element type with non-trivial construction/destruction).
  {
    Tensor t(DT_STRING, TensorShape({1}));
    t.vec<string>()(0) = "foo";
    TensorProto proto;
    t.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor a(&allocator, DT_STRING, TensorShape({1}));
    ASSERT_FALSE(a.FromProto(&allocator, proto));
  }

  // Half (element type narrower than float).
  {
    Tensor t(DT_HALF, TensorShape({1}));
    t.vec<Eigen::half>()(0) = Eigen::half(1.0);
    TensorProto proto;
    t.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor a(&allocator, DT_HALF, TensorShape({1}));
    ASSERT_FALSE(a.FromProto(&allocator, proto));
  }
}
1187
// On the alignment.
//
// As of 2018/5, tensorflow::Tensor allocates its buffer with 64-byte
// alignment. Tensor::tensor/flat/vec/matrix methods requires the
// buffer satisfies Eigen::Aligned (e.g., 16-bytes aligned usually,
// 32-bytes for AVX, and 64-bytes for AVX512). Tensor::Slice requires
// the caller to ensure its result is aligned if the caller intends
// to use those methods. In this test case, we simply make sure each
// slice is 64-byte aligned: sizeof(float) * 4 * 36 = 576. 576 % 64 = 0.
TEST(Tensor, Slice_Basic) {
  Tensor saved;
  {  // General
    Tensor x(DT_FLOAT, TensorShape({10, 4, 36}));
    // Fills in known values: every element of row i is i.
    for (int i = 0; i < 10; ++i) {
      x.Slice(i, i + 1).flat<float>().setConstant(i * 1.f);
    }
    // A simple slice along dim0.
    Tensor y = x.Slice(4, 8);
    EXPECT_TRUE(y.shape().IsSameSize(TensorShape({4, 4, 36})));
    auto tx = x.tensor<float, 3>();
    auto ty = y.tensor<float, 3>();
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 4; ++j) {
        for (int k = 0; k < 36; ++k) {
          EXPECT_EQ(ty(i, j, k), 4.0 + i);
          // The slice aliases the original buffer; it is not a copy.
          EXPECT_EQ(&tx(4 + i, j, k), &ty(i, j, k));
        }
      }
    }
    // A simple slice equivalent to identity.
    TestCopies<float>(y);
    y = x.Slice(0, 10);
    test::ExpectTensorEqual<float>(x, y);
    EXPECT_EQ(x.flat<float>().data(), y.flat<float>().data());

    // A slice of a slice.
    auto z = x.Slice(4, 8).Slice(2, 3);
    auto tz = z.tensor<float, 3>();
    EXPECT_EQ(1, z.dim_size(0));
    for (int j = 0; j < 4; ++j) {
      for (int k = 0; k < 36; ++k) {
        // Row 4+2 == 6 of the original tensor.
        EXPECT_EQ(tz(0, j, k), 6.0);
      }
    }

    // x and y will be out of scope. But 'saved' should be alive.
    saved = z;
  }
  {
    // The slice keeps the underlying buffer alive after the source tensors
    // have been destroyed.
    EXPECT_EQ(1, saved.dim_size(0));
    auto tsaved = saved.tensor<float, 3>();
    for (int j = 0; j < 4; ++j) {
      for (int k = 0; k < 36; ++k) {
        EXPECT_EQ(tsaved(0, j, k), 6.0);
      }
    }
  }
  {  // Empty
    Tensor x(DT_FLOAT, TensorShape({10, 0, 36}));
    x.flat<float>().setRandom();
    Tensor y = x.Slice(4, 8);
    EXPECT_TRUE(y.shape().IsSameSize(TensorShape({4, 0, 36})));
  }

  {
    // Test unaligned access via a Slice.
    Tensor x(DT_FLOAT, TensorShape({30}));
    x.flat<float>().setConstant(0.0);

    // Take an unaligned slice.
    Tensor y = x.Slice(1, 13);
#if EIGEN_MAX_ALIGN_BYTES > 0
    EXPECT_FALSE(y.IsAligned());
#endif
    // Unaligned tensors must be accessed via unaligned_flat(), not flat().
    y.unaligned_flat<float>().setConstant(1.0);
    for (int64 i = 0; i < y.NumElements(); ++i) {
      EXPECT_EQ(1.0, y.unaligned_flat<float>()(i));
    }
  }
}
1269
// SubSlice(i) drops the leading dimension (unlike Slice, which keeps it),
// returning a view one rank lower that aliases the original buffer.
TEST(Tensor, SubSlice_Basic) {
  {  // General
    Tensor x(DT_FLOAT, TensorShape({10, 4, 36}));
    // Fills in known values: every element of sub-slice i is i.
    for (int i = 0; i < 10; ++i) {
      x.SubSlice(i).flat<float>().setConstant(i * 1.f);
    }
    // A simple sub-slice along dim0.
    Tensor y = x.SubSlice(5);
    EXPECT_TRUE(y.shape().IsSameSize(TensorShape({4, 36})));
    auto tx = x.tensor<float, 3>();
    auto ty = y.tensor<float, 2>();
    for (int j = 0; j < 4; ++j) {
      for (int k = 0; k < 36; ++k) {
        EXPECT_EQ(ty(j, k), 5.0);
        // The sub-slice aliases the original buffer; it is not a copy.
        EXPECT_EQ(&tx(5, j, k), &ty(j, k));
      }
    }
    // Chained sub-slices reduce to a scalar view of element (5, 3, 31).
    Tensor z = y.SubSlice(3).SubSlice(31);
    auto tz = z.unaligned_flat<float>();
    EXPECT_EQ(*tz.data(), 5.0);
  }
  {
    // Test unaligned access via a SubSlice.
    Tensor x(DT_FLOAT, TensorShape({30, 5}));
    x.flat<float>().setConstant(0.0);

    // Take an unaligned subslice.
    Tensor y = x.SubSlice(1);
#if EIGEN_MAX_ALIGN_BYTES > 0
    EXPECT_FALSE(y.IsAligned());
#endif
    // Unaligned tensors must be accessed via unaligned_flat(), not flat().
    y.unaligned_flat<float>().setConstant(1.0);
    for (int64 i = 0; i < y.NumElements(); ++i) {
      EXPECT_EQ(1.0, y.unaligned_flat<float>()(i));
    }
  }
}
1308
1309 template <typename T>
MkTensor(DataType dt,const TensorShape & shape,std::vector<T> init_values)1310 Tensor MkTensor(DataType dt, const TensorShape& shape,
1311 std::vector<T> init_values) {
1312 Tensor x(dt, shape);
1313 const int limit = x.NumElements();
1314 int vi = 0;
1315 for (int i = 0; i < limit; ++i) {
1316 x.flat<T>()(i) = init_values[vi++];
1317 if (vi >= init_values.size()) vi = 0;
1318 }
1319 return x;
1320 }
1321
// SummarizeValue on a tensor with a shape but no allocated buffer must
// report the element count and dtype instead of dereferencing anything.
TEST(SummarizeValue, Uninitialized) {
  Tensor x(DT_INT32);
  TensorTestHelper::set_shape(&x, TensorShape({4, 4}));
  EXPECT_EQ(
      strings::StrCat("uninitialized Tensor of 16 elements of type ", DT_INT32),
      x.SummarizeValue(16));
}
1329
TEST(SummarizeValue, INT32) {
  Tensor x = MkTensor<int>(DT_INT32, TensorShape({5}), {1, 2, 3, 4, 0});
  EXPECT_EQ("1 2 3 4 0", x.SummarizeValue(16));
  // Higher ranks nest the values in brackets per dimension.
  x = MkTensor<int>(DT_INT32, TensorShape({2, 2}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[1 2][3 4]", x.SummarizeValue(16));
  x = MkTensor<int>(DT_INT32, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[[1]][[2]]][[[3]][[4]]]", x.SummarizeValue(16));
  // When max_entries is smaller than the element count, output is truncated
  // with "...".
  EXPECT_EQ("[[[1]][[2]]][[[3]]]...", x.SummarizeValue(3));
  // Empty tensors summarize to an empty string.
  x = MkTensor<int>(DT_INT32, TensorShape({0}), {});
  EXPECT_EQ("", x.SummarizeValue(16));
}
1341
// Truncation inside a multi-dimensional tensor: "..." appears both within a
// partially printed row and after the last printed row.
TEST(SummarizeValue, INT32Dims) {
  Tensor x = MkTensor<int>(DT_INT32, TensorShape({3, 4}),
                           {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  EXPECT_EQ("[1 2 3...]...", x.SummarizeValue(3));
  EXPECT_EQ("[1 2 3 4][5 6 7 8][9 10...]...", x.SummarizeValue(10));
}
1348
// Float tensors summarize identically to int tensors for whole values
// (no trailing ".0" is printed).
TEST(SummarizeValue, FLOAT) {
  Tensor x = MkTensor<float>(DT_FLOAT, TensorShape({5}), {1, 2, 3, 4, 0});
  EXPECT_EQ("1 2 3 4 0", x.SummarizeValue(16));
  x = MkTensor<float>(DT_FLOAT, TensorShape({2, 2}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[1 2][3 4]", x.SummarizeValue(16));
  x = MkTensor<float>(DT_FLOAT, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[[1]][[2]]][[[3]][[4]]]", x.SummarizeValue(16));
  EXPECT_EQ("[[[1]][[2]]][[[3]]]...", x.SummarizeValue(3));
  x = MkTensor<float>(DT_FLOAT, TensorShape({0}), {});
  EXPECT_EQ("", x.SummarizeValue(16));
}
1360
// Bools print as 0/1. MkTensor cycles {false, true, true} through the five
// elements, yielding the pattern 0 1 1 0 1.
TEST(SummarizeValue, BOOL) {
  Tensor x = MkTensor<bool>(DT_BOOL, TensorShape({5}), {false, true, true});
  EXPECT_EQ("0 1 1 0 1", x.SummarizeValue(16));
  EXPECT_EQ("0 1 1...", x.SummarizeValue(3));
}
1366
// Strings print unquoted in the legacy (non-print_v2) format; higher ranks
// nest in brackets and truncate with "...".
TEST(SummarizeValue, STRING) {
  Tensor x = MkTensor<string>(DT_STRING, TensorShape({5}),
                              {"one", "two", "three", "four", "five"});
  EXPECT_EQ("one two three four five", x.SummarizeValue(16));
  x = MkTensor<string>(DT_STRING, TensorShape({5, 1, 5}),
                       {"one", "two", "three", "four", "five"});
  EXPECT_EQ("[[one two three four five]][[one...]]...", x.SummarizeValue(6));
}
1375
// print_v2 format (second argument true): numpy-style output with outer
// brackets, "..." elision around the middle, and newlines between rows.
TEST(SummarizeValue, INT32_PRINT_V2) {
  Tensor x = MkTensor<int>(DT_INT32, TensorShape({5}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[1 2 3 4 0]", x.SummarizeValue(16, true));
  // -1 means "no limit": print everything.
  EXPECT_EQ("[1 2 3 4 0]", x.SummarizeValue(-1, true));
  // Elision keeps max_entries items from each end around "...".
  EXPECT_EQ("[1 2 ... 4 0]", x.SummarizeValue(2, true));
  EXPECT_EQ("[1 ... 0]", x.SummarizeValue(1, true));
  x = MkTensor<int>(DT_INT32, TensorShape({2, 2}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[1 2]\n [3 4]]", x.SummarizeValue(16, true));
  // Blank lines separate higher dimensions, one extra newline per level.
  x = MkTensor<int>(DT_INT32, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[[[1]]\n\n [[2]]]\n\n\n [[[3]]\n\n [[4]]]]",
            x.SummarizeValue(16, true));
  x = MkTensor<int>(DT_INT32, TensorShape({0}), {});
  EXPECT_EQ("[]", x.SummarizeValue(16, true));
}
1390
// print_v2 elision on a 3x4 matrix: both rows and columns are elided when
// max_entries is small; -1 prints everything.
TEST(SummarizeValue, INT32Dims_PRINT_V2) {
  Tensor x = MkTensor<int>(DT_INT32, TensorShape({3, 4}),
                           {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  EXPECT_EQ("[[1 ... 4]\n ...\n [9 ... 12]]", x.SummarizeValue(1, true));
  EXPECT_EQ("[[1 2 3 4]\n [5 6 7 8]\n [9 10 11 12]]",
            x.SummarizeValue(10, true));
  EXPECT_EQ("[[1 2 3 4]\n [5 6 7 8]\n [9 10 11 12]]",
            x.SummarizeValue(-1, true));
}
1400
// print_v2 output for floats mirrors the INT32_PRINT_V2 cases; whole-valued
// floats print without a decimal point.
TEST(SummarizeValue, FLOAT_PRINT_V2) {
  Tensor x = MkTensor<float>(DT_FLOAT, TensorShape({5}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[1 2 3 4 0]", x.SummarizeValue(16, true));
  // -1 means "no limit": print everything.
  EXPECT_EQ("[1 2 3 4 0]", x.SummarizeValue(-1, true));
  EXPECT_EQ("[1 2 ... 4 0]", x.SummarizeValue(2, true));
  EXPECT_EQ("[1 ... 0]", x.SummarizeValue(1, true));
  x = MkTensor<float>(DT_FLOAT, TensorShape({2, 2}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[1 2]\n [3 4]]", x.SummarizeValue(16, true));
  x = MkTensor<float>(DT_FLOAT, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[[[1]]\n\n [[2]]]\n\n\n [[[3]]\n\n [[4]]]]",
            x.SummarizeValue(16, true));
  x = MkTensor<float>(DT_FLOAT, TensorShape({0}), {});
  EXPECT_EQ("[]", x.SummarizeValue(16, true));
}
1415
// print_v2 output for bools: 0/1 values inside brackets, with "..." elision.
TEST(SummarizeValue, BOOL_PRINT_V2) {
  Tensor x = MkTensor<bool>(DT_BOOL, TensorShape({5}), {false, true, true});
  EXPECT_EQ("[0 1 1 0 1]", x.SummarizeValue(16, true));
  EXPECT_EQ("[0 1 1 0 1]", x.SummarizeValue(-1, true));
  EXPECT_EQ("[0 1 ... 0 1]", x.SummarizeValue(2, true));
}
1422
// print_v2 output for strings: unlike the legacy format, values are quoted.
TEST(SummarizeValue, STRING_PRINT_V2) {
  Tensor x = MkTensor<string>(DT_STRING, TensorShape({5}),
                              {"one", "two", "three", "four", "five"});
  EXPECT_EQ("[\"one\" \"two\" \"three\" \"four\" \"five\"]",
            x.SummarizeValue(16, true));
  EXPECT_EQ("[\"one\" \"two\" \"three\" \"four\" \"five\"]",
            x.SummarizeValue(-1, true));
  EXPECT_EQ("[\"one\" \"two\" ... \"four\" \"five\"]",
            x.SummarizeValue(2, true));
  x = MkTensor<string>(DT_STRING, TensorShape({2, 2}),
                       {"one", "two", "three", "four", "five"});
  EXPECT_EQ("[[\"one\" \"two\"]\n [\"three\" \"four\"]]",
            x.SummarizeValue(16, true));
}
1437
BM_CreateAndDestroy(int iters)1438 void BM_CreateAndDestroy(int iters) {
1439 TensorShape shape({10, 20});
1440 while (--iters) {
1441 Tensor t(DT_FLOAT, shape);
1442 }
1443 }
1444 BENCHMARK(BM_CreateAndDestroy);
1445
BM_Assign(int iters)1446 void BM_Assign(int iters) {
1447 Tensor a(DT_FLOAT, TensorShape({10, 20}));
1448 Tensor b(DT_FLOAT, TensorShape({10, 20}));
1449 bool a_to_b = true;
1450 while (--iters) {
1451 if (a_to_b) {
1452 b = a;
1453 } else {
1454 a = b;
1455 }
1456 a_to_b = !a_to_b;
1457 }
1458 }
1459 BENCHMARK(BM_Assign);
1460
// Ensure tensor_data() works on empty tensors: a default-constructed Tensor
// has no buffer, and the returned view must be empty rather than dangling.
TEST(Tensor, EmptyTensorData) {
  Tensor empty;
  EXPECT_EQ(empty.tensor_data().size(), 0);
}
1466
1467 // Benchmark create and destroy a tensor, with an allocated buffer.
BM_CreateAndDestroyWithBuf(int iters)1468 void BM_CreateAndDestroyWithBuf(int iters) {
1469 TensorShape shape({10, 20});
1470 Allocator* allocator = cpu_allocator();
1471 while (--iters) {
1472 Tensor a(allocator, DT_FLOAT, shape);
1473 }
1474 }
1475 BENCHMARK(BM_CreateAndDestroyWithBuf);
1476
1477 // Benchmark create+copy a tensor, with an allocated buffer.
BM_CreateAndCopyCtrWithBuf(int iters)1478 void BM_CreateAndCopyCtrWithBuf(int iters) {
1479 TensorShape shape({10, 20});
1480 Allocator* allocator = cpu_allocator();
1481 while (--iters) {
1482 Tensor a(allocator, DT_FLOAT, shape);
1483 Tensor b(a);
1484 }
1485 }
1486 BENCHMARK(BM_CreateAndCopyCtrWithBuf);
1487
1488 // Benchmark create+move a tensor, with an allocated buffer.
BM_CreateAndMoveCtrWithBuf(int iters)1489 void BM_CreateAndMoveCtrWithBuf(int iters) {
1490 TensorShape shape({10, 20});
1491 Allocator* allocator = cpu_allocator();
1492 while (--iters) {
1493 Tensor a(allocator, DT_FLOAT, shape);
1494 Tensor b(std::move(a));
1495 }
1496 }
1497 BENCHMARK(BM_CreateAndMoveCtrWithBuf);
1498
1499 // Benchmark creating and destroy a host-scalar tensor, using the allocator
1500 // interface.
BM_CreateAndDestroyHostScalarNonOptimized(int iters)1501 void BM_CreateAndDestroyHostScalarNonOptimized(int iters) {
1502 TensorShape shape({});
1503 Allocator* allocator = cpu_allocator();
1504 while (--iters) {
1505 Tensor a(allocator, DT_FLOAT, shape);
1506 a.scalar<float>()() = 37.0;
1507 }
1508 }
1509 BENCHMARK(BM_CreateAndDestroyHostScalarNonOptimized);
1510
1511 // Benchmark creating and destroy a host-scalar tensor, using the specialized
1512 // constructor.
BM_CreateAndDestroyHostScalarOptimized(int iters)1513 void BM_CreateAndDestroyHostScalarOptimized(int iters) {
1514 while (--iters) {
1515 Tensor a(37.0);
1516 }
1517 }
1518 BENCHMARK(BM_CreateAndDestroyHostScalarOptimized);
1519
1520 } // namespace
1521 } // namespace tensorflow
1522