/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/tensor_array.h"

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/kernels/aggregate_ops_cpu.h"

namespace tensorflow {

typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;

namespace tensor_array {

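// Specializations of AddToTensor<Device, T> (declared in tensor_array.h).
// Each expansion computes sum = current + add element-wise for one
// device/type pair, using the same Add2Functor that backs the aggregate
// (AddN) ops.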
#define TENSOR_ARRAY_WRITE_OR_ADD(Device, T)                                \
  template <>                                                               \
  Status AddToTensor<Device, T>(OpKernelContext * ctx, Tensor * sum,        \
                                const Tensor* current, const Tensor* add) { \
    functor::Add2Functor<Device, T> add_functor;                            \
    add_functor(ctx->template eigen_device<Device>(), sum->flat<T>(),       \
                current->flat<T>(), add->flat<T>());                        \
    return Status::OK();                                                    \
  }
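
// As an illustration, TENSOR_ARRAY_WRITE_OR_ADD(CPUDevice, float) expands to
// the specialization
//
//   template <>
//   Status AddToTensor<CPUDevice, float>(OpKernelContext * ctx, Tensor * sum,
//                                        const Tensor* current,
//                                        const Tensor* add) {
//     functor::Add2Functor<CPUDevice, float> add_functor;
//     add_functor(ctx->template eigen_device<CPUDevice>(), sum->flat<float>(),
//                 current->flat<float>(), add->flat<float>());
//     return Status::OK();
//   }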

#define TENSOR_ARRAY_WRITE_OR_ADD_CPU(T) TENSOR_ARRAY_WRITE_OR_ADD(CPUDevice, T)
TF_CALL_NUMBER_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_CPU);
#undef TENSOR_ARRAY_WRITE_OR_ADD_CPU

#if GOOGLE_CUDA

#define TENSOR_ARRAY_WRITE_OR_ADD_GPU(T) TENSOR_ARRAY_WRITE_OR_ADD(GPUDevice, T)
TF_CALL_GPU_NUMBER_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
TF_CALL_complex64(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
TF_CALL_complex128(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
#undef TENSOR_ARRAY_WRITE_OR_ADD_GPU

#endif  // GOOGLE_CUDA

#undef TENSOR_ARRAY_WRITE_OR_ADD

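// Specializations of TensorSetZero<Device, T> (declared in tensor_array.h).
// Each expansion fills `value` with zeros via SetZeroFunctor; TensorArray
// reads rely on this to return zeros of the recorded shape for elements that
// were marked written but never actually filled (see CopyShapesFrom below).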
#define TENSOR_ARRAY_SET_ZERO(Device, T)                                      \
  template <>                                                                 \
  Status TensorSetZero<Device, T>(OpKernelContext * ctx, Tensor * value) {    \
    functor::SetZeroFunctor<Device, T> set_zero_functor;                      \
    set_zero_functor(ctx->template eigen_device<Device>(), value->flat<T>()); \
    return Status::OK();                                                      \
  }

#define TENSOR_ARRAY_SET_ZERO_CPU(T) TENSOR_ARRAY_SET_ZERO(CPUDevice, T)
TF_CALL_NUMBER_TYPES(TENSOR_ARRAY_SET_ZERO_CPU);
TF_CALL_bool(TENSOR_ARRAY_SET_ZERO_CPU);
#undef TENSOR_ARRAY_SET_ZERO_CPU

#if GOOGLE_CUDA

#define TENSOR_ARRAY_SET_ZERO_GPU(T) TENSOR_ARRAY_SET_ZERO(GPUDevice, T)
TF_CALL_GPU_NUMBER_TYPES(TENSOR_ARRAY_SET_ZERO_GPU);
TF_CALL_complex64(TENSOR_ARRAY_SET_ZERO_GPU);
TF_CALL_complex128(TENSOR_ARRAY_SET_ZERO_GPU);
#undef TENSOR_ARRAY_SET_ZERO_GPU

#endif  // GOOGLE_CUDA

#undef TENSOR_ARRAY_SET_ZERO

}  // namespace tensor_array

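// Process-wide counter; the TensorArray ops draw from it (via fetch_add) to
// give each newly created array a unique name.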
std::atomic<int64> TensorArray::tensor_array_counter{0};

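// Copies the per-element shapes from `rhs` into this TensorArray, optionally
// prepending `shape_to_prepend`, and marks the copied elements written
// without copying any data. This is how, e.g., a gradient TensorArray comes
// to mirror the element shapes of its forward-pass source.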
Status TensorArray::CopyShapesFrom(TensorArray* rhs,
                                   const TensorShape* shape_to_prepend) {
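  // Hold both arrays' locks for the duration of the copy so that neither
  // element list can change underneath us.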
  mutex_lock l(mu_);
  mutex_lock l_rhs(rhs->mu_);
  TF_RETURN_IF_ERROR(LockedReturnIfClosed());
  TF_RETURN_IF_ERROR(rhs->LockedReturnIfClosed());
  if (tensors_.size() != rhs->tensors_.size()) {
    return errors::InvalidArgument(
        "TensorArray sizes do not match during CopyShapesFrom: ",
        handle_.vec<string>()(1), " has size ", tensors_.size(), " but rhs ",
        rhs->handle_.vec<string>()(1), " has size ", rhs->tensors_.size());
  }
  for (std::size_t i = 0; i < tensors_.size(); ++i) {
    // Skip "soft copy" of indices which have not been written.
    if (!rhs->tensors_[i].written) continue;

    // Copy the shape over.
    if (shape_to_prepend) {
      tensors_[i].shape = *shape_to_prepend;
      tensors_[i].shape.AppendShape(rhs->tensors_[i].shape);
    } else {
      tensors_[i].shape = rhs->tensors_[i].shape;
    }
    // Mark the element as written.  A read that finds written == true while
    // read == false and cleared == false returns zeros of the recorded
    // shape.  Future aggregating writes will use the shape only for
    // validation.
    tensors_[i].written = true;
  }

  return Status::OK();
}

}  // namespace tensorflow