/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS

#if GOOGLE_CUDA
#include "tensorflow/core/platform/stream_executor.h"
#endif  // GOOGLE_CUDA

#include "tensorflow/contrib/rnn/kernels/blas_gemm.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {

#if GOOGLE_CUDA
namespace {
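// Wraps a raw device pointer in a typed se::DeviceMemory handle so it can be
// passed to StreamExecutor BLAS routines; the wrapper does not take ownership.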
template <typename T>
se::DeviceMemory<T> AsDeviceMemory(const T* cuda_memory) {
  se::DeviceMemoryBase wrapped(const_cast<T*>(cuda_memory));
  se::DeviceMemory<T> typed(wrapped);
  return typed;
}
}  // namespace
#endif  // GOOGLE_CUDA

namespace functor {
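// Runs a single GEMM (c = alpha * op(a) * op(b) + beta * c) through cuBLAS on
// the StreamExecutor stream attached to this op's device context.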
template <typename T>
void TensorCuBlasGemm<T>::operator()(OpKernelContext* ctx, bool transa,
                                     bool transb, uint64 m, uint64 n, uint64 k,
                                     float alpha, const T* a, int lda,
                                     const T* b, int ldb, float beta, T* c,
                                     int ldc) {
#if GOOGLE_CUDA
  se::blas::Transpose trans[] = {se::blas::Transpose::kNoTranspose,
                                 se::blas::Transpose::kTranspose};

  auto a_ptr = AsDeviceMemory(a);
  auto b_ptr = AsDeviceMemory(b);
  auto c_ptr = AsDeviceMemory(c);

  bool blas_launch_status =
      ctx->op_device_context()
          ->stream()
          ->ThenBlasGemm(trans[transa], trans[transb], m, n, k, alpha, a_ptr,
                         lda, b_ptr, ldb, beta, &c_ptr, ldc)
          .ok();
  OP_REQUIRES(ctx, blas_launch_status, errors::Aborted("CuBlasGemm failed!"));
#else
  ctx->SetStatus(errors::InvalidArgument("CuBlasGemm needs CUDA."));
#endif
}

template struct TensorCuBlasGemm<Eigen::half>;
template struct TensorCuBlasGemm<float>;

}  // end namespace functor
}  // end namespace tensorflow