1 /* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
16 #include <complex>
17 #include <functional>
18 #include <memory>
19 #include <string>
20 #include <vector>
21
22 #include "tensorflow/core/common_runtime/device.h"
23 #include "tensorflow/core/common_runtime/device_factory.h"
24 #include "tensorflow/core/framework/allocator.h"
25 #include "tensorflow/core/framework/fake_input.h"
26 #include "tensorflow/core/framework/node_def_builder.h"
27 #include "tensorflow/core/framework/op_kernel.h"
28 #include "tensorflow/core/framework/tensor.h"
29 #include "tensorflow/core/framework/types.h"
30 #include "tensorflow/core/framework/types.pb.h"
31 #include "tensorflow/core/kernels/ops_testutil.h"
32 #include "tensorflow/core/lib/core/status_test_util.h"
33 #include "tensorflow/core/lib/io/path.h"
34 #include "tensorflow/core/platform/test.h"
35 #include "tensorflow/core/public/session_options.h"
36 #include "tensorflow/core/public/version.h"
37
38 namespace tensorflow {
39 namespace {
40
41 // Make an input tensor with filled results.
42 template <typename T>
MakeInput(const TensorShape & shape,std::function<T (int)> input_mapping)43 Tensor MakeInput(const TensorShape& shape,
44 std::function<T(int)> input_mapping) {
45 Tensor input(DataTypeToEnum<T>::v(), shape);
46 test::FillFn(&input, input_mapping);
47 return input;
48 }
49
50 class RestoreV2OpTest : public OpsTestBase {
51 protected:
52 // Makes an operation to restore two tensors
MakeRestoreOp(DataType dt)53 void MakeRestoreOp(DataType dt) {
54 TF_ASSERT_OK(NodeDefBuilder("myop", "RestoreV2")
55 .Input(FakeInput()) // prefix
56 .Input(FakeInput()) // tensor_names
57 .Input(FakeInput()) // shape_and_slices
58 .Attr("dtypes", {dt}) // dtypes
59 .Finalize(node_def()));
60 TF_ASSERT_OK(InitOp());
61 }
62
RunTest(StringPiece save_op_to_use)63 void RunTest(StringPiece save_op_to_use) {
64 const string filename =
65 io::JoinPath(testing::TmpDir(), "tensor_simple-", save_op_to_use);
66 const std::vector<string> tensor_names = {
67 "tensor_bool", "tensor_int", "tensor_float", "tensor_double",
68 "tensor_qint8", "tensor_qint32", "tensor_uint8", "tensor_int8",
69 "tensor_int16", "tensor_int64", "tensor_complex64", "tensor_half"};
70
71 // We first need to write using the desired save op.
72 {
73 // Initialize an operation.
74 NodeDef save;
75 if (save_op_to_use != "Save") {
76 TF_ASSERT_OK(
77 NodeDefBuilder("myop", save_op_to_use)
78 .Input(FakeInput()) // prefix
79 .Input(FakeInput()) // tensor_names
80 .Input(FakeInput()) // shape_and_slices
81 .Input(FakeInput({DT_BOOL, DT_INT32, DT_FLOAT, DT_DOUBLE,
82 DT_QINT8, DT_QINT32, DT_UINT8, DT_INT8,
83 DT_INT16, DT_COMPLEX64, DT_HALF})) // tensors
84 .Finalize(&save));
85 } else {
86 TF_ASSERT_OK(
87 NodeDefBuilder("myop", save_op_to_use)
88 .Input(FakeInput()) // file
89 .Input(FakeInput()) // tensor_names
90 .Input(FakeInput({DT_BOOL, DT_INT32, DT_FLOAT, DT_DOUBLE,
91 DT_QINT8, DT_QINT32, DT_UINT8, DT_INT8,
92 DT_INT16, DT_COMPLEX64, DT_HALF})) // tensors
93 .Finalize(&save));
94 }
95
96 std::unique_ptr<Device> device(
97 DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
98
99 gtl::InlinedVector<TensorValue, 4> inputs;
100
101 Status status;
102 std::unique_ptr<OpKernel> op(
103 CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(), save,
104 TF_GRAPH_DEF_VERSION, &status));
105 TF_EXPECT_OK(status);
106
107 // Run it
108
109 // Input #0 is the file name
110 Tensor input_0(DT_STRING, TensorShape({}));
111 input_0.scalar<tstring>()() = filename;
112 inputs.push_back({nullptr, &input_0});
113
114 // Input #1 is the tensor names
115 Tensor input_1 = MakeInput<tstring>(
116 TensorShape({static_cast<int>(tensor_names.size())}),
117 [&tensor_names](int x) -> string { return tensor_names[x]; });
118 inputs.push_back({nullptr, &input_1});
119
120 Tensor shape_and_slices = MakeInput<tstring>(
121 TensorShape({static_cast<int>(tensor_names.size())}),
122 [](int x) -> string { return "" /* saves in full */; });
123 if (save_op_to_use != "Save") {
124 inputs.push_back({nullptr, &shape_and_slices});
125 }
126
127 // Input #2 is a 1-d bool tensor
128 Tensor input_2 = MakeInput<bool>(TensorShape({2}),
129 [](int x) -> bool { return x != 0; });
130 inputs.push_back({nullptr, &input_2});
131 // Input #3 is a 1-d integer tensor
132 Tensor input_3 = MakeInput<int32>(TensorShape({10}),
133 [](int x) -> int32 { return x + 1; });
134 inputs.push_back({nullptr, &input_3});
135 // Input #4 is a 2-d float tensor
136 Tensor input_4 = MakeInput<float>(
137 TensorShape({2, 4}),
138 [](int x) -> float { return static_cast<float>(x) / 10; });
139 inputs.push_back({nullptr, &input_4});
140 // Input #5 is a 2-d double tensor
141 Tensor input_5 = MakeInput<double>(
142 TensorShape({2, 4}),
143 [](int x) -> double { return static_cast<double>(x) / 20; });
144 inputs.push_back({nullptr, &input_5});
145 // Input #6 is a 2-d qint8 tensor
146 Tensor input_6 = MakeInput<qint8>(
147 TensorShape({3, 2}),
148 [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });
149 inputs.push_back({nullptr, &input_6});
150 // Input #7 is a 2-d qint32 tensor
151 Tensor input_7 =
152 MakeInput<qint32>(TensorShape({2, 3}), [](int x) -> qint32 {
153 return *reinterpret_cast<qint32*>(&x) * qint8(2);
154 });
155 inputs.push_back({nullptr, &input_7});
156 // Input #8 is a 1-d uint8 tensor
157 Tensor input_8 = MakeInput<uint8>(TensorShape({11}),
158 [](int x) -> uint8 { return x + 1; });
159 inputs.push_back({nullptr, &input_8});
160 // Input #9 is a 1-d int8 tensor
161 Tensor input_9 = MakeInput<int8>(TensorShape({7}),
162 [](int x) -> int8 { return x - 7; });
163 inputs.push_back({nullptr, &input_9});
164 // Input #10 is a 1-d int16 tensor
165 Tensor input_10 = MakeInput<int16>(TensorShape({7}),
166 [](int x) -> int16 { return x - 8; });
167 inputs.push_back({nullptr, &input_10});
168 // Input #11 is a 1-d int64 tensor
169 Tensor input_11 = MakeInput<int64>(TensorShape({9}),
170 [](int x) -> int64 { return x - 9; });
171 inputs.push_back({nullptr, &input_11});
172 // Input #12 is a 1-d complex64 tensor
173 Tensor input_13 = MakeInput<complex64>(
174 TensorShape({2, 3}),
175 [](int x) -> complex64 { return complex64(100 + x, 200 + x); });
176 inputs.push_back({nullptr, &input_13});
177 // Input #13 is a 2-d half tensor
178 Tensor input_14 =
179 MakeInput<Eigen::half>(TensorShape({2, 4}), [](int x) -> Eigen::half {
180 return static_cast<Eigen::half>(x) / Eigen::half(5);
181 });
182 inputs.push_back({nullptr, &input_14});
183 OpKernelContext::Params params;
184 params.device = device.get();
185 params.frame_iter = FrameAndIter(0, 0);
186 params.inputs = &inputs;
187 params.op_kernel = op.get();
188 std::vector<AllocatorAttributes> attrs;
189 test::SetOutputAttrs(¶ms, &attrs);
190
191 OpKernelContext ctx(¶ms);
192 op->Compute(&ctx);
193 TF_EXPECT_OK(ctx.status());
194 }
195
196 // Now we restore
197
198 // The 1-d bool tensor
199 {
200 MakeRestoreOp(DT_BOOL);
201 AddInput<tstring>(TensorShape({}),
202 [&filename](int x) -> tstring { return filename; });
203 AddInput<tstring>(TensorShape({1}),
204 [&](int x) -> tstring { return tensor_names[0]; });
205 AddInput<tstring>(TensorShape({1}), [&](int x) -> tstring {
206 return "";
207 }); // Restores in full.
208 TF_ASSERT_OK(RunOpKernel());
209 Tensor* output = GetOutput(0);
210 TensorShape expected({2});
211 EXPECT_TRUE(output->shape().IsSameSize(expected));
212 for (int i = 0; i < 2; ++i) {
213 EXPECT_EQ(i != 0, output->flat<bool>()(i));
214 }
215 }
216 // The 1-d integer tensor
217 {
218 MakeRestoreOp(DT_INT32);
219 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[1];
220 TF_ASSERT_OK(RunOpKernel());
221 Tensor* output = GetOutput(0);
222 TensorShape expected({10});
223 EXPECT_TRUE(output->shape().IsSameSize(expected));
224 for (int i = 0; i < 10; ++i) {
225 EXPECT_EQ(i + 1, output->flat<int32>()(i));
226 }
227 }
228 // The 2-d float tensor
229 {
230 MakeRestoreOp(DT_FLOAT);
231 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[2];
232 TF_ASSERT_OK(RunOpKernel());
233 Tensor* output = GetOutput(0);
234 TensorShape expected({2, 4});
235 EXPECT_TRUE(output->shape().IsSameSize(expected));
236 for (int i = 0; i < 8; ++i) {
237 EXPECT_EQ(static_cast<float>(i) / 10, output->flat<float>()(i));
238 }
239 }
240 // The 2-d double tensor
241 {
242 MakeRestoreOp(DT_DOUBLE);
243 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[3];
244 TF_ASSERT_OK(RunOpKernel());
245 Tensor* output = GetOutput(0);
246 TensorShape expected({2, 4});
247 EXPECT_TRUE(output->shape().IsSameSize(expected));
248 for (int i = 0; i < 8; ++i) {
249 EXPECT_EQ(static_cast<double>(i) / 20, output->flat<double>()(i));
250 }
251 }
252 // The 2-d qint8 tensor
253 {
254 MakeRestoreOp(DT_QINT8);
255 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[4];
256 TF_ASSERT_OK(RunOpKernel());
257 Tensor* output = GetOutput(0);
258 TensorShape expected({3, 2});
259 EXPECT_TRUE(output->shape().IsSameSize(expected));
260 for (int i = 0; i < 6; ++i) {
261 EXPECT_EQ(*reinterpret_cast<qint8*>(&i), output->flat<qint8>()(i));
262 }
263 }
264 // The 2-d qint32 tensor
265 {
266 MakeRestoreOp(DT_QINT32);
267 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[5];
268 TF_ASSERT_OK(RunOpKernel());
269 Tensor* output = GetOutput(0);
270 TensorShape expected({2, 3});
271 EXPECT_TRUE(output->shape().IsSameSize(expected));
272 for (int i = 0; i < 6; ++i) {
273 EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2),
274 output->flat<qint32>()(i));
275 }
276 }
277 // The 1-d uint8 tensor
278 {
279 MakeRestoreOp(DT_UINT8);
280 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[6];
281 TF_ASSERT_OK(RunOpKernel());
282 Tensor* output = GetOutput(0);
283 TensorShape expected({11});
284 EXPECT_TRUE(output->shape().IsSameSize(expected));
285 for (int i = 0; i < 11; ++i) {
286 EXPECT_EQ(i + 1, output->flat<uint8>()(i));
287 }
288 }
289 // The 1-d int8 tensor
290 {
291 MakeRestoreOp(DT_INT8);
292 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[7];
293 TF_ASSERT_OK(RunOpKernel());
294 Tensor* output = GetOutput(0);
295 TensorShape expected({7});
296 EXPECT_TRUE(output->shape().IsSameSize(expected));
297 for (int i = 0; i < 7; ++i) {
298 EXPECT_EQ(i - 7, output->flat<int8>()(i));
299 }
300 }
301 // The 1-d int16 tensor
302 {
303 MakeRestoreOp(DT_INT16);
304 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[8];
305 TF_ASSERT_OK(RunOpKernel());
306 Tensor* output = GetOutput(0);
307 TensorShape expected({7});
308 EXPECT_TRUE(output->shape().IsSameSize(expected));
309 for (int i = 0; i < 7; ++i) {
310 EXPECT_EQ(i - 8, output->flat<int16>()(i));
311 }
312 }
313 // The 1-d int64 tensor
314 {
315 MakeRestoreOp(DT_INT64);
316 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[9];
317 TF_ASSERT_OK(RunOpKernel());
318 Tensor* output = GetOutput(0);
319 TensorShape expected({9});
320 EXPECT_TRUE(output->shape().IsSameSize(expected));
321 for (int i = 0; i < 9; ++i) {
322 EXPECT_EQ(i - 9, output->flat<int64>()(i));
323 }
324 }
325 // The 2-d complex64 tensor
326 {
327 MakeRestoreOp(DT_COMPLEX64);
328 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[10];
329 TF_ASSERT_OK(RunOpKernel());
330 Tensor* output = GetOutput(0);
331 TensorShape expected({2, 3});
332 EXPECT_TRUE(output->shape().IsSameSize(expected));
333 for (int i = 0; i < 6; ++i) {
334 EXPECT_EQ(complex64(100 + i, 200 + i), output->flat<complex64>()(i));
335 }
336 }
337 // The 2-d half tensor
338 {
339 MakeRestoreOp(DT_HALF);
340 (*mutable_input(1).tensor).flat<tstring>()(0) = tensor_names[11];
341 TF_ASSERT_OK(RunOpKernel());
342 Tensor* output = GetOutput(0);
343 TensorShape expected({2, 4});
344 EXPECT_TRUE(output->shape().IsSameSize(expected));
345 for (int i = 0; i < 8; ++i) {
346 EXPECT_EQ(static_cast<Eigen::half>(i) / Eigen::half(5),
347 output->flat<Eigen::half>()(i));
348 }
349 }
350 }
351 };
352
// The intended use case (write in V2, read in V2).
TEST_F(RestoreV2OpTest, RestoreAfterSaveV2) { RunTest("SaveV2"); }
// For backward compatibility: RestoreV2 must also read checkpoints written
// by the V1 save ops.
TEST_F(RestoreV2OpTest, RestoreAfterSaveSlicesV1) { RunTest("SaveSlices"); }
TEST_F(RestoreV2OpTest, RestoreAfterSaveV1) { RunTest("Save"); }
358
359 } // namespace
360 } // namespace tensorflow
361