1 /**
2  * Copyright 2020 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_GPU_OPENCL_OPENCL_EXECUTOR_H_
18 #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_GPU_OPENCL_OPENCL_EXECUTOR_H_
19 
20 #include <vector>
21 #include "src/litert/kernel/gpu/opencl/opencl_runtime.h"
22 #include "src/litert/inner_allocator.h"
23 #include "src/litert/kernel/opencl/opencl_kernel.h"
24 #include "src/litert/executor.h"
25 
26 namespace mindspore::lite::opencl {
27 class OpenCLExecutor : public Executor {
28  public:
OpenCLExecutor()29   OpenCLExecutor() : Executor() { allocator_ = ocl_runtime_.GetInstance()->GetAllocator().get(); }
30 
31   ~OpenCLExecutor() override = default;
32 
Prepare(const std::vector<kernel::KernelExec * > & kernels,const std::vector<Tensor * > & inputs,const std::vector<Tensor * > & outputs,lite::InnerContext * ctx)33   int Prepare(const std::vector<kernel::KernelExec *> &kernels, const std::vector<Tensor *> &inputs,
34               const std::vector<Tensor *> &outputs, lite::InnerContext *ctx) override {
35     return RET_OK;
36   }
37 
38   int Run(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs,
39           const std::vector<kernel::KernelExec *> &kernels, const KernelCallBack &before = nullptr,
40           const KernelCallBack &after = nullptr) override;
41   int RunOrTune(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs,
42                 const std::vector<kernel::KernelExec *> &kernels, const KernelCallBack &before = nullptr,
43                 const KernelCallBack &after = nullptr, bool is_tune = false);
44 
45  private:
46   int Tune(kernel::OpenCLKernel *op_kernel);
47   OpenCLAllocator *allocator_ = nullptr;
48   OpenCLRuntimeInnerWrapper ocl_runtime_;
49 };
50 }  // namespace mindspore::lite::opencl
51 #endif
52