/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_FRAMEWORK_OPS_UTIL_H_
#define TENSORFLOW_CORE_FRAMEWORK_OPS_UTIL_H_

// This file contains utilities for various operations.

#include <array>

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/util/padding.h"

namespace tensorflow {

// Calculates broadcast starting index and size. For SAME padding, additional
// padding may be applied to the right, left, top and bottom. Depending on the
// current index, input size, kernel size, stride, and padding size, the
// starting index and size for broadcast in that dimension differ from the
// current index and kernel size.
// This is mainly used by gradient algorithms for pooling operations.
Status GetBroadcastSize(const int index, const int in_size, const int ksize,
                        const int stride, const int pad_size, int* bindex,
                        int* bsize);
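// Example (illustrative only, assuming one element of SAME padding on the
// left of the dimension): with in_size=5, ksize=3, stride=2 and pad_size=1,
// index 0 would be expected to map to bindex=0 and bsize=2, since the
// leftmost element of the pooling window falls in the padding and is clipped.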

// Converts Brain's Padding to Eigen's PaddingType.
Eigen::PaddingType BrainPadding2EigenPadding(Padding padding);

// Given a shape 's' of a tensor of type T, returns true iff the number of
// bytes occupied by each dim-0 slice (i.e., &tensor(i + 1, ...) -
// &tensor(i, ...)) is a multiple of EIGEN_MAX_ALIGN_BYTES.
template <typename T>
bool IsInnerDimsSizeAligned(const TensorShape& s) {
  if (s.dims() == 0) return false;
  const int64 dim0_size = s.dim_size(0);
  if (dim0_size == 0) return false;
#if EIGEN_MAX_ALIGN_BYTES == 0
  return true;
#else
  const int64 bytes_per_dim0 = (s.num_elements() / dim0_size) * sizeof(T);
  return bytes_per_dim0 % EIGEN_MAX_ALIGN_BYTES == 0;
#endif
}
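// Example (illustrative only): with T = float and EIGEN_MAX_ALIGN_BYTES of,
// say, 32, a shape of {n, 16} gives 16 * 4 = 64 bytes per dim-0 slice, which
// is aligned, while a shape of {n, 3} gives 12 bytes, which is not.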

// Given a shape 's' of a tensor of type T and the `start` and `end` index of
// a dim 0 slice, returns true iff the slice is aligned with respect to the
// original tensor. Here aligned means the address is a multiple of
// EIGEN_MAX_ALIGN_BYTES.
template <typename T>
bool IsDim0SliceAligned(const TensorShape& s, int64 start, int64 end_or_size) {
  if (s.dims() == 1) {
#if EIGEN_MAX_ALIGN_BYTES == 0
    return true;
#else
    bool start_aligned = (start * sizeof(T)) % EIGEN_MAX_ALIGN_BYTES == 0;
    // The end is aligned if either the explicit end index is passed and is a
    // multiple of EIGEN_MAX_ALIGN_BYTES, or the start index is aligned and
    // the size is aligned. So for convenience we can pass either start and
    // end, or start and size.
    bool end_aligned = (end_or_size * sizeof(T)) % EIGEN_MAX_ALIGN_BYTES == 0;
    return start_aligned && end_aligned;
#endif
  } else {
    return IsInnerDimsSizeAligned<T>(s);
  }
}
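// Example (illustrative only): with T = float, EIGEN_MAX_ALIGN_BYTES of, say,
// 32 and a 1-D tensor, a slice with start=8 and size=16 would be considered
// aligned (32 and 64 bytes respectively), whereas start=4 (16 bytes) would
// not.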

// Returns <suffix> sanitized to have only [a-zA-Z0-9-_].
string SanitizeThreadSuffix(string suffix);
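// For example, a suffix such as "pool:0" would come back with the ':'
// replaced or removed so that only [a-zA-Z0-9-_] characters remain.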

// Helper to compute 'strides' given a tensor 'shape'. I.e.,
// strides[i] = prod(shape.dim_size[(i+1):])
template <typename T>
gtl::InlinedVector<T, 8> ComputeStride(const TensorShape& shape) {
  const int ndims = shape.dims();
  gtl::InlinedVector<T, 8> strides(ndims);
  T stride = 1;
  for (int i = ndims - 1; i >= 0; --i) {
    strides[i] = stride;
    stride *= static_cast<T>(shape.dim_size(i));
  }
  return strides;
}
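// Example (illustrative only): for a TensorShape of {2, 3, 4},
// ComputeStride<int64>(shape) would return {12, 4, 1}, i.e. row-major strides
// measured in elements rather than bytes.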

// Helper to compute 'strides' given an Eigen TensorDimensions object.
template <typename T, typename EigenDimensions>
gtl::InlinedVector<T, 8> ComputeEigenStrides(const EigenDimensions& shape) {
  const int ndims = shape.rank();
  gtl::InlinedVector<T, 8> strides(ndims);
  T stride = 1;
  for (int i = ndims - 1; i >= 0; --i) {
    strides[i] = stride;
    stride *= static_cast<T>(shape[i]);
  }
  return strides;
}
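// Example (illustrative only): for Eigen::DSizes<Eigen::DenseIndex, 3>
// dims(2, 3, 4), ComputeEigenStrides<int64>(dims) would likewise return
// {12, 4, 1}.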

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_FRAMEWORK_OPS_UTIL_H_