/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/kernels/eigen_contraction_kernel.h"
#include "tensorflow/core/platform/test.h"

namespace Eigen {
namespace internal {

namespace {
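// Generates NumDims random dimension sizes, each drawn uniformly from
// [min_dim, max_dim].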
template <typename Index, int NumDims>
Eigen::array<Index, NumDims> RandomDims(int min_dim = 1, int max_dim = 20) {
  Eigen::array<Index, NumDims> dims;
  for (int i = 0; i < NumDims; ++i) {
    dims[i] = internal::random<int>(min_dim, max_dim);
  }
  return dims;
}
}  // namespace

using Scalar = float;
using Index = Eigen::Index;

TEST(EigenMkldnnTest, GemmPackColMajor) {
  // Packing with gemm_pack_colmajor_block is the same as taking a slice of a
  // 2-dimensional Tensor.

  // Mkldnn pack and gemm are used only in Tensor contractions, and it's
  // guaranteed that Tensors will have ColMajor layout.
  static const int Options = ColMajor;

  using DataMapper = blas_data_mapper<Scalar, Index, ColMajor>;
  using GemmPackColMajor =
      gemm_pack_colmajor_block<Scalar, Index, DataMapper, ColMajor>;
  using Tensor2d = Tensor<Scalar, 2, Options, Index>;

  Eigen::array<Index, 2> dims = RandomDims<Index, 2>(1, 500);

  // Create a tensor initialized with random data.
  Tensor2d src(dims);
  src.setRandom();

  // Pick a random slice of the src tensor.
  Eigen::array<Index, 2> slice_start = RandomDims<Index, 2>(0, 250);
  Eigen::array<Index, 2> slice_size = RandomDims<Index, 2>(100, 500);

  // Make sure that slice start + size do not overflow tensor dims.
  for (int i = 0; i < 2; ++i) {
    slice_start[i] = numext::mini(dims[i] - 1, slice_start[i]);
    slice_size[i] = numext::mini(slice_size[i], dims[i] - slice_start[i]);
  }

  // Prepare tensors for packing and slicing results.
  Tensor2d pack_dst(slice_size[0], slice_size[1]);
  Tensor2d slice_dst(slice_size[0], slice_size[1]);

  // Pack memory using gemm_pack_colmajor_block.
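  // The data mapper wraps the column-major source buffer; its second
  // constructor argument is the leading dimension (number of rows) of src.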
  DataMapper data_mapper(src.data(), dims[0]);
  GemmPackColMajor gemm_pack;
  gemm_pack(pack_dst.data(),
            data_mapper.getSubMapper(slice_start[0], slice_start[1]),
            slice_size[0], slice_size[1]);

  // Slice the source tensor.
  slice_dst = src.slice(slice_start, slice_size);

  // Verify that dst tensors are equal.
  EXPECT_EQ(pack_dst.dimensions().TotalSize(),
            slice_dst.dimensions().TotalSize());
  for (size_t i = 0; i < pack_dst.dimensions().TotalSize(); ++i) {
    Scalar packed = pack_dst.coeff(i);
    Scalar sliced = slice_dst.coeff(i);
    EXPECT_EQ(packed, sliced);
  }
}

TEST(EigenMkldnnTest, MkldnnGemm) {
  // Mkldnn pack and gemm are used only in Tensor contractions, and it's
  // guaranteed that Tensors will have ColMajor layout.
  static const int Options = ColMajor;

  using Tensor2d = Tensor<Scalar, 2, Options, Index>;

  int m = internal::random<int>(1, 100);
  int n = internal::random<int>(1, 100);
  int k = internal::random<int>(1, 100);

  Tensor2d lhs(m, k);
  lhs.setRandom();

  Tensor2d rhs(k, n);
  rhs.setRandom();

  // Compute matmul with mkldnn gemm kernel.
  using OutputMapper = blas_data_mapper<Scalar, Index, ColMajor>;
  using MkldnnGemmKernel =
      mkldnn_gemm_kernel<Scalar, Index, OutputMapper, ColMajor>;

  Tensor2d mkldnn_result(m, n);
  mkldnn_result.setZero();
  OutputMapper output_mapper(mkldnn_result.data(), m);

  MkldnnGemmKernel gemm_kernel;
  gemm_kernel(output_mapper, lhs.data(), rhs.data(), m, k, n, /*alpha=*/1.0);

  // Compute matmul with Eigen::Matrix.
  using Matrix = Eigen::Matrix<Scalar, Dynamic, Dynamic, ColMajor>;
  using MatrixMap = Map<Eigen::Matrix<Scalar, Dynamic, Dynamic, ColMajor>>;

  MatrixMap lhs_mat(lhs.data(), m, k);
  MatrixMap rhs_mat(rhs.data(), k, n);

  Matrix matmul_result(m, n);
  matmul_result.setZero();
  matmul_result = lhs_mat * rhs_mat;

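  // mkldnn_result is stored column-major, so linear index i maps to entry
  // (i % m, i / m) of the reference matrix product.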
  // Verify that results are equal.
  for (Index i = 0; i < m * n; ++i) {
    Scalar gemm = mkldnn_result(i);
    Scalar matmul = matmul_result(i % m, i / m);

    Scalar delta = std::abs(gemm - matmul);

    // NOTE(rmlarsen): Compute proper forward error bound.
    // For now, bound the error by roughly k * epsilon times the sum of the
    // absolute values of the products accumulated into this output element.
    Scalar sum = Scalar(0.0);
    for (int k1 = 0; k1 < k; ++k1) {
      sum += std::abs(lhs_mat(i % m, k1) * rhs_mat(k1, i / m));
    }
    Scalar epsilon = std::numeric_limits<Scalar>::epsilon();
    Scalar upper_bound = Scalar(1.01) * epsilon * k * sum;

    EXPECT_LE(delta, upper_bound);
  }
}

}  // namespace internal
}  // namespace Eigen