1 /**
2 * Copyright 2022-2023 Huawei Technologies Co., Ltd
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "plugin/device/cpu/kernel/shape_calc_kernel.h"
#include <functional>
#include <numeric>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "ops/shape_calc.h"
21
22 namespace mindspore {
23 namespace kernel {
Init(const std::vector<KernelTensor * > & inputs,const std::vector<KernelTensor * > & outputs)24 bool ShapeCalcCpuKernelMod::Init(const std::vector<KernelTensor *> &inputs,
25 const std::vector<KernelTensor *> &outputs) {
26 if (primitive_->HasAttr(kOutputRealTuple)) {
27 is_dynamic_len_out_ = true;
28 }
29 inputs_size_ = inputs.size();
30 return true;
31 }
32
Resize(const std::vector<KernelTensor * > & inputs,const std::vector<KernelTensor * > & outputs)33 int ShapeCalcCpuKernelMod::Resize(const std::vector<KernelTensor *> &inputs,
34 const std::vector<KernelTensor *> &outputs) {
35 auto ret = KernelMod::Resize(inputs, outputs);
36 if (ret != KRET_OK) {
37 return ret;
38 }
39
40 if (!primitive_->HasAttr(ops::kAttrCalcResult)) {
41 MS_LOG(ERROR) << "For ShapeCalc, the calc result should be get here.";
42 return KRET_RESIZE_FAILED;
43 }
44 outs_shape_ = GetValue<ShapeArray>(primitive_->GetAttr(ops::kAttrCalcResult));
45 return ret;
46 }
47
Launch(const std::vector<kernel::KernelTensor * > & inputs,const std::vector<kernel::KernelTensor * > &,const std::vector<kernel::KernelTensor * > & outputs)48 bool ShapeCalcCpuKernelMod::Launch(const std::vector<kernel::KernelTensor *> &inputs,
49 const std::vector<kernel::KernelTensor *> &,
50 const std::vector<kernel::KernelTensor *> &outputs) {
51 if (!is_dynamic_len_out_) {
52 if (outputs.size() != outs_shape_.size()) {
53 MS_LOG(ERROR) << "For '" << kernel_name_
54 << "', outputs address list size must be equal to the number of outputs of shape func, but got "
55 << outputs.size() << " vs " << outs_shape_.size();
56 return false;
57 }
58
59 for (size_t i = 0; i < outputs.size(); ++i) {
60 auto output_addr = reinterpret_cast<int64_t *>(outputs[i]->device_ptr());
61 for (size_t j = 0; j < outs_shape_[i].size(); ++j) {
62 output_addr[j] = outs_shape_[i][j];
63 }
64 }
65 } else {
66 // Dynamic length, each out should have same shape for dynamic-length-out solution in runtime.
67 if (outputs.size() != 1) {
68 MS_LOG(ERROR) << "For '" << kernel_name_
69 << "', dynamic length outputs address list size must be equal to 1, but got " << outputs.size();
70 return false;
71 }
72
73 auto output_addr = reinterpret_cast<int64_t *>(outputs[0]->device_ptr());
74 size_t offset_inner = outs_shape_[0].size();
75 for (size_t i = 0; i < outs_shape_.size(); ++i) {
76 for (size_t j = 0; j < outs_shape_[i].size(); ++j) {
77 size_t cur_offset = i * offset_inner + j;
78 *(output_addr + cur_offset) = outs_shape_[i][j];
79 }
80 }
81 }
82
83 return true;
84 }
85
GetOpSupport()86 std::vector<KernelAttr> ShapeCalcCpuKernelMod::GetOpSupport() {
87 static std::vector<KernelAttr> support_list = {KernelAttr().AddSkipCheckAttr(true)};
88 return support_list;
89 }
90
GetLaunchIgnoredInputAddressIdx() const91 std::vector<size_t> ShapeCalcCpuKernelMod::GetLaunchIgnoredInputAddressIdx() const {
92 std::vector<size_t> ignored_idx(inputs_size_);
93 std::iota(ignored_idx.begin(), ignored_idx.end(), kIndex0);
94 return ignored_idx;
95 }
96
97 MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, ShapeCalc, ShapeCalcCpuKernelMod);
98 } // namespace kernel
99 } // namespace mindspore
100