• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2020 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef MINDSPORE_LITE_SRC_RUNTIME_OPENCL_EXECUTOR_H_
18 #define MINDSPORE_LITE_SRC_RUNTIME_OPENCL_EXECUTOR_H_
19 
20 #include <vector>
21 #include "src/runtime/gpu/opencl/opencl_runtime.h"
22 #include "src/runtime/inner_allocator.h"
23 #include "src/runtime/kernel/opencl/opencl_kernel.h"
24 #include "src/executor.h"
25 #include "include/lite_session.h"
26 
namespace mindspore::lite::opencl {
/**
 * Executor specialization that runs LiteKernels through the OpenCL (GPU)
 * runtime. On construction it points the inherited allocator_ at the
 * allocator owned by the shared OpenCL runtime wrapper, so tensor memory
 * is obtained from the GPU runtime rather than the default CPU allocator.
 */
class OpenCLExecutor : public Executor {
 public:
  // Grab the GPU allocator from the singleton OpenCL runtime instance.
  OpenCLExecutor() : Executor() { allocator_ = ocl_runtime_.GetInstance()->GetAllocator().get(); }

  ~OpenCLExecutor() override = default;

  // No executor-level preparation is required for OpenCL kernels; this
  // override exists only to satisfy the Executor interface and always
  // succeeds. All parameters are intentionally unused.
  int Prepare(const std::vector<kernel::LiteKernel *> &kernels, const std::vector<Tensor *> &inputs,
              const std::vector<Tensor *> &outputs, const lite::InnerContext *ctx) override {
    return RET_OK;
  }

  // Executes the given kernels on the OpenCL device. `before`/`after` are
  // optional callbacks (presumably invoked around each kernel's execution —
  // the definition lives in the corresponding .cc file; confirm there).
  int Run(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs,
          const std::vector<kernel::LiteKernel *> &kernels, const KernelCallBack &before = nullptr,
          const KernelCallBack &after = nullptr) override;
  // Like Run, but when `is_tune` is true it additionally performs per-kernel
  // tuning (see Tune below). Defaults preserve plain-Run behavior; defined
  // in the corresponding .cc file.
  int RunOrTune(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs,
                const std::vector<kernel::LiteKernel *> &kernels, const KernelCallBack &before = nullptr,
                const KernelCallBack &after = nullptr, bool is_tune = false);

 private:
  // Tunes a single OpenCL kernel (e.g. its work-group configuration —
  // NOTE(review): inferred from the name; definition is in the .cc file).
  int Tune(kernel::OpenCLKernel *op_kernel);
  // Non-owning: the allocator is owned by the OpenCL runtime (see ctor).
  OpenCLAllocator *allocator_ = nullptr;
  // Ref-counted handle to the shared OpenCL runtime.
  OpenCLRuntimeInnerWrapper ocl_runtime_;
};
}  // namespace mindspore::lite::opencl
52 #endif
53