/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_CONCAT_LIB_H_
#define TENSORFLOW_CORE_KERNELS_CONCAT_LIB_H_

#include <vector>

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/register_types.h"

namespace tensorflow {

// Functors to concatenate tensors. These always take rank-2 tensors (i.e.
// matrices) and concatenate them along axis 1 ("putting them next to each
// other", as opposed to "putting them on top of one another").
//
// Any concatenation of n-dimensional tensors along any axis can be reduced to
// a concatenation of two-dimensional tensors along axis 1: first partition the
// axes of each original tensor into those before the concatenation axis and
// the rest, then reshape each tensor into a matrix by collapsing these two
// sets of axes, concatenate the resulting matrices along axis 1, and finally
// reshape the result to the proper output shape.
//
// So, for example, when stacking N tensors, reshape each input to have shape
// {1, NumElements} and reshape the output matrix to have shape
// {1, N * NumElements} before passing it to this functor.

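// As a concrete illustration (shapes chosen only for this example):
// concatenating tensors of shapes {2, 3, 4} and {2, 5, 4} along axis 1
// reduces to reshaping them to {2, 12} and {2, 20} (axis 0 stays; axes 1
// and 2 collapse into one), concatenating the matrices along axis 1 into
// {2, 32}, and reshaping the result back to {2, 8, 4}.
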
// Assumes all inputs are nonempty.
template <typename T>
void ConcatCPU(
    DeviceBase* d,
    const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&
        inputs,
    typename TTypes<T, 2>::Matrix* output);
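
//
// Illustrative ConcatCPU usage sketch, not a definitive recipe: `ctx`,
// `inputs`, `output`, `rows` (the product of the dimensions before the
// concatenation axis), and `output_cols` are placeholder names for values the
// caller is assumed to have on hand.
//
//   std::vector<std::unique_ptr<TTypes<float, 2>::ConstMatrix>> inputs_flat;
//   for (const Tensor& in : inputs) {
//     inputs_flat.emplace_back(new TTypes<float, 2>::ConstMatrix(
//         in.shaped<float, 2>({rows, in.NumElements() / rows})));
//   }
//   auto output_flat = output->shaped<float, 2>({rows, output_cols});
//   ConcatCPU<float>(ctx->device(), inputs_flat, &output_flat);
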
#if GOOGLE_CUDA
template <typename T>
void ConcatGPU(
    OpKernelContext* c,
    const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&
        inputs_flat,
    Tensor* output, typename TTypes<T, 2>::Tensor* output_flat);

// Explicit instantiations in concat_lib_gpu.cc.
#define REGISTER(T)                                                           \
  extern template void ConcatGPU<T>(                                          \
      OpKernelContext * c,                                                    \
      const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>& \
          inputs_flat,                                                        \
      Tensor* output, typename TTypes<T, 2>::Tensor* output_flat);

TF_CALL_GPU_NUMBER_TYPES(REGISTER);
TF_CALL_complex64(REGISTER);
TF_CALL_complex128(REGISTER);
TF_CALL_int32(REGISTER);  // Needed for TensorLists.
TF_CALL_int64(REGISTER);
TF_CALL_int16(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_bool(REGISTER);
TF_CALL_uint8(REGISTER);
#undef REGISTER
#endif  // GOOGLE_CUDA

#ifdef TENSORFLOW_USE_SYCL
template <typename T>
void ConcatSYCL(
    const Eigen::SyclDevice& d,
    const std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>&
        inputs,
    typename TTypes<T, 2>::Matrix* output);
#endif  // TENSORFLOW_USE_SYCL
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_KERNELS_CONCAT_LIB_H_