• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2021 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef MINDSPORE_CCSRC_FL_SERVER_KERNEL_OPTIMIZER_KERNEL_H_
18 #define MINDSPORE_CCSRC_FL_SERVER_KERNEL_OPTIMIZER_KERNEL_H_
19 
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "backend/kernel_compiler/common_utils.h"
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "fl/server/common.h"
#include "fl/server/kernel/params_info.h"
#include "fl/server/memory_register.h"
29 
30 namespace mindspore {
31 namespace fl {
32 namespace server {
33 namespace kernel {
34 using mindspore::kernel::IsSameShape;
35 using mindspore::kernel::USE_NESTEROV;
36 
37 // OptimizerKernel is the kernel in server for weights' optimizing.
38 // Normally server's optimizer kernels should be inherited from CPU's optimizer kernels to reuse the implementation.
39 class OptimizerKernel : public CPUKernel {
40  public:
41   OptimizerKernel() = default;
42   virtual ~OptimizerKernel() = default;
43 
44   // InitKernel and Launch methods are inherited from pure virtual function of CPUKernel so it must have implementation.
InitKernel(const CNodePtr & kernel_node)45   virtual void InitKernel(const CNodePtr &kernel_node) {}
Launch(const std::vector<AddressPtr> & inputs,const std::vector<AddressPtr> & workspace,const std::vector<AddressPtr> & outputs)46   virtual bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
47                       const std::vector<AddressPtr> &outputs) {
48     return true;
49   }
50 
51   // Server kernel's memory allocation method, which is different from the workflow in
52   // Session(GPUSession/CPUSession/AscendSession).
53   // virtual void AssignMemory(const CNodePtr &kernel_node, std::shared_ptr<MemoryRegister> memory_register) = 0;
54 
55   // Setter and getter of kernels parameters information.
set_params_info(const ParamsInfo & params_info)56   void set_params_info(const ParamsInfo &params_info) { params_info_ = params_info; }
input_names()57   const std::vector<std::string> &input_names() { return params_info_.inputs_names(); }
workspace_names()58   const std::vector<std::string> &workspace_names() { return params_info_.workspace_names(); }
output_names()59   const std::vector<std::string> &output_names() { return params_info_.outputs_names(); }
60 
61   // Returns information about whether some inputs should reuse kernel node inputs memory.
reuse_kernel_node_inputs_info()62   const ReuseKernelNodeInfo &reuse_kernel_node_inputs_info() { return reuse_kernel_node_inputs_info_; }
63 
64  protected:
65   virtual void GenerateReuseKernelNodeInfo() = 0;
66 
InitServerKernelInputOutputSize(const CNodePtr & kernel_node)67   void InitServerKernelInputOutputSize(const CNodePtr &kernel_node) {
68     MS_EXCEPTION_IF_NULL(kernel_node);
69     size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
70     size_t type_size = sizeof(float);
71     for (size_t input_index = 0; input_index < input_num; ++input_index) {
72       std::vector<size_t> shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, input_index);
73       size_t tensor_size =
74         shape.empty() ? type_size : std::accumulate(shape.begin(), shape.end(), type_size, std::multiplies<size_t>());
75       input_size_list_.emplace_back(tensor_size);
76     }
77     size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
78     for (size_t output_index = 0; output_index < output_num; ++output_index) {
79       std::vector<size_t> shape = AnfAlgo::GetOutputInferShape(kernel_node, output_index);
80       size_t tensor_size =
81         shape.empty() ? type_size : std::accumulate(shape.begin(), shape.end(), type_size, std::multiplies<size_t>());
82       output_size_list_.emplace_back(tensor_size);
83     }
84   }
85 
86   // Parameters information used for kernel register, memory assignment, etc.
87   ParamsInfo params_info_;
88 
89   // Information about server kernel reusing kernel node inputs memory from the front end.
90   // Key refers to the server kernel's input index. Value refers to the kernel node's input index.
91   ReuseKernelNodeInfo reuse_kernel_node_inputs_info_;
92 };
93 }  // namespace kernel
94 }  // namespace server
95 }  // namespace fl
96 }  // namespace mindspore
97 #endif  // MINDSPORE_CCSRC_FL_SERVER_KERNEL_OPTIMIZER_KERNEL_H_
98