1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15
16 #include "tensorflow/compiler/xla/service/cpu/runtime_matmul.h"
17
18 #define EIGEN_USE_THREADS
19
20 #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
21 #include "tensorflow/compiler/xla/executable_run_options.h"
22 #include "tensorflow/compiler/xla/service/cpu/runtime_lightweight_check.h"
23 #include "tensorflow/core/platform/dynamic_annotations.h"
24 #include "tensorflow/core/platform/types.h"
25
26 #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
27 #include "tensorflow/core/kernels/eigen_contraction_kernel.h"
28 #endif
29
30 namespace {
31
// Returns true iff `ptr` is aligned to a 16-byte boundary.  Used to decide
// whether Eigen may assume aligned loads/stores on the buffers.
bool Is16BytesAligned(void* ptr) {
  constexpr uintptr_t kAlignment = 16;
  return (reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)) == 0;
}
35
36 template <typename T, Eigen::AlignmentType Alignment>
MatMul(const void * run_options_ptr,T * out,T * lhs,T * rhs,tensorflow::int64 m,tensorflow::int64 n,tensorflow::int64 k,tensorflow::int32 transpose_lhs,tensorflow::int32 transpose_rhs)37 void MatMul(const void* run_options_ptr, T* out, T* lhs, T* rhs,
38 tensorflow::int64 m, tensorflow::int64 n, tensorflow::int64 k,
39 tensorflow::int32 transpose_lhs, tensorflow::int32 transpose_rhs) {
40 const xla::ExecutableRunOptions* run_options =
41 static_cast<const xla::ExecutableRunOptions*>(run_options_ptr);
42
43 tensorflow::int64 lhs_rows = m;
44 tensorflow::int64 lhs_cols = k;
45 if (transpose_lhs) {
46 std::swap(lhs_rows, lhs_cols);
47 }
48
49 tensorflow::int64 rhs_rows = k;
50 tensorflow::int64 rhs_cols = n;
51 if (transpose_rhs) {
52 std::swap(rhs_rows, rhs_cols);
53 }
54
55 const Eigen::TensorMap<Eigen::Tensor<const T, 2>, Alignment> A(lhs, lhs_rows,
56 lhs_cols);
57 const Eigen::TensorMap<Eigen::Tensor<const T, 2>, Alignment> B(rhs, rhs_rows,
58 rhs_cols);
59 Eigen::TensorMap<Eigen::Tensor<T, 2>, Alignment> C(out, m, n);
60
61 typedef typename Eigen::Tensor<T, 2>::DimensionPair DimPair;
62 int lhs_contract_dim = transpose_lhs ? 0 : 1;
63 int rhs_contract_dim = transpose_rhs ? 1 : 0;
64 const Eigen::array<DimPair, 1> dims(
65 {DimPair(lhs_contract_dim, rhs_contract_dim)});
66
67 // Matrix multiply is a special case of the "contract" operation where
68 // the contraction is performed along dimension 1 of the lhs and dimension
69 // 0 of the rhs.
70 XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
71 C.device(*run_options->intra_op_thread_pool()) = A.contract(B, dims);
72 }
73
74 template <typename T>
MatMulDispatch(const void * run_options_ptr,T * out,T * lhs,T * rhs,tensorflow::int64 m,tensorflow::int64 n,tensorflow::int64 k,tensorflow::int32 transpose_lhs,tensorflow::int32 transpose_rhs)75 void MatMulDispatch(const void* run_options_ptr, T* out, T* lhs, T* rhs,
76 tensorflow::int64 m, tensorflow::int64 n,
77 tensorflow::int64 k, tensorflow::int32 transpose_lhs,
78 tensorflow::int32 transpose_rhs) {
79 bool all_buffers_16b_aligned =
80 Is16BytesAligned(out) && Is16BytesAligned(lhs) && Is16BytesAligned(rhs);
81
82 if (!all_buffers_16b_aligned) {
83 MatMul<T, Eigen::Unaligned>(run_options_ptr, out, lhs, rhs, m, n, k,
84 transpose_lhs, transpose_rhs);
85 return;
86 }
87
88 MatMul<T, Eigen::Aligned16>(run_options_ptr, out, lhs, rhs, m, n, k,
89 transpose_lhs, transpose_rhs);
90 }
91
92 } // namespace
93
__xla_cpu_runtime_EigenMatMulF16(const void * run_options_ptr,Eigen::half * out,Eigen::half * lhs,Eigen::half * rhs,tensorflow::int64 m,tensorflow::int64 n,tensorflow::int64 k,tensorflow::int32 transpose_lhs,tensorflow::int32 transpose_rhs)94 TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF16(
95 const void* run_options_ptr, Eigen::half* out, Eigen::half* lhs,
96 Eigen::half* rhs, tensorflow::int64 m, tensorflow::int64 n,
97 tensorflow::int64 k, tensorflow::int32 transpose_lhs,
98 tensorflow::int32 transpose_rhs) {
99 MatMulDispatch<Eigen::half>(run_options_ptr, out, lhs, rhs, m, n, k,
100 transpose_lhs, transpose_rhs);
101 }
102
__xla_cpu_runtime_EigenMatMulF32(const void * run_options_ptr,float * out,float * lhs,float * rhs,tensorflow::int64 m,tensorflow::int64 n,tensorflow::int64 k,tensorflow::int32 transpose_lhs,tensorflow::int32 transpose_rhs)103 TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF32(
104 const void* run_options_ptr, float* out, float* lhs, float* rhs,
105 tensorflow::int64 m, tensorflow::int64 n, tensorflow::int64 k,
106 tensorflow::int32 transpose_lhs, tensorflow::int32 transpose_rhs) {
107 MatMulDispatch<float>(run_options_ptr, out, lhs, rhs, m, n, k, transpose_lhs,
108 transpose_rhs);
109 }
110
__xla_cpu_runtime_EigenMatMulF64(const void * run_options_ptr,double * out,double * lhs,double * rhs,tensorflow::int64 m,tensorflow::int64 n,tensorflow::int64 k,tensorflow::int32 transpose_lhs,tensorflow::int32 transpose_rhs)111 TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF64(
112 const void* run_options_ptr, double* out, double* lhs, double* rhs,
113 tensorflow::int64 m, tensorflow::int64 n, tensorflow::int64 k,
114 tensorflow::int32 transpose_lhs, tensorflow::int32 transpose_rhs) {
115 MatMulDispatch<double>(run_options_ptr, out, lhs, rhs, m, n, k, transpose_lhs,
116 transpose_rhs);
117 }
118
__xla_cpu_runtime_EigenMatMulC64(const void * run_options_ptr,std::complex<float> * out,std::complex<float> * lhs,std::complex<float> * rhs,tensorflow::int64 m,tensorflow::int64 n,tensorflow::int64 k,tensorflow::int32 transpose_lhs,tensorflow::int32 transpose_rhs)119 TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulC64(
120 const void* run_options_ptr, std::complex<float>* out,
121 std::complex<float>* lhs, std::complex<float>* rhs, tensorflow::int64 m,
122 tensorflow::int64 n, tensorflow::int64 k, tensorflow::int32 transpose_lhs,
123 tensorflow::int32 transpose_rhs) {
124 MatMulDispatch<std::complex<float>>(run_options_ptr, out, lhs, rhs, m, n, k,
125 transpose_lhs, transpose_rhs);
126 }
127
__xla_cpu_runtime_EigenMatMulC128(const void * run_options_ptr,std::complex<double> * out,std::complex<double> * lhs,std::complex<double> * rhs,tensorflow::int64 m,tensorflow::int64 n,tensorflow::int64 k,tensorflow::int32 transpose_lhs,tensorflow::int32 transpose_rhs)128 TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulC128(
129 const void* run_options_ptr, std::complex<double>* out,
130 std::complex<double>* lhs, std::complex<double>* rhs, tensorflow::int64 m,
131 tensorflow::int64 n, tensorflow::int64 k, tensorflow::int32 transpose_lhs,
132 tensorflow::int32 transpose_rhs) {
133 MatMulDispatch<std::complex<double>>(run_options_ptr, out, lhs, rhs, m, n, k,
134 transpose_lhs, transpose_rhs);
135 }
136
__xla_cpu_runtime_EigenMatMulS32(const void * run_options_ptr,tensorflow::int32 * out,tensorflow::int32 * lhs,tensorflow::int32 * rhs,tensorflow::int64 m,tensorflow::int64 n,tensorflow::int64 k,tensorflow::int32 transpose_lhs,tensorflow::int32 transpose_rhs)137 TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulS32(
138 const void* run_options_ptr, tensorflow::int32* out, tensorflow::int32* lhs,
139 tensorflow::int32* rhs, tensorflow::int64 m, tensorflow::int64 n,
140 tensorflow::int64 k, tensorflow::int32 transpose_lhs,
141 tensorflow::int32 transpose_rhs) {
142 MatMulDispatch<tensorflow::int32>(run_options_ptr, out, lhs, rhs, m, n, k,
143 transpose_lhs, transpose_rhs);
144 }
145