/**
 * Copyright 2024 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_GE_GE_CONVERT_CONST_INPUT_TO_TENSOR_INPUT_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_GE_GE_CONVERT_CONST_INPUT_TO_TENSOR_INPUT_H_
#include <string>

#include "ir/anf.h"
#include "include/common/utils/convert_utils.h"
#include "include/backend/optimizer/optimizer.h"

namespace mindspore {
namespace opt {
// Pattern pass that converts constant value inputs of nodes into tensor inputs for the GE backend.
class BACKEND_EXPORT GEConvertConstInputToTensorInput : public PatternProcessPass {
 public:
  explicit GEConvertConstInputToTensorInput(bool multigraph = true)
      : PatternProcessPass("convert_const_input_to_tensor_input", multigraph) {}
  ~GEConvertConstInputToTensorInput() override = default;
  const AnfNodePtr Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const EquivPtr &) const override;
};

}  // namespace opt
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_GE_GE_CONVERT_CONST_INPUT_TO_TENSOR_INPUT_H_