/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/conv2d.h"
#include <string>
#include <algorithm>
#include <cmath>
#include <map>
#include <memory>
#include <set>
#include <vector>
#include "ir/dtype/tensor_type.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"

using mindspore::abstract::Shape;
namespace mindspore {
namespace ops {
namespace {
// check functions
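// Verify that every dimension of `shape` is either a positive value or the dynamic-dimension
// placeholder Shape::SHP_ANY.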
void CheckShapeAnyAndPositive(const std::string &op, const ShapeVector &shape) {
  for (size_t i = 0; i < shape.size(); ++i) {
    if ((shape[i] < 0) && (shape[i] != Shape::SHP_ANY)) {
      MS_EXCEPTION(ValueError) << op << " shape element [" << i << "] must be a positive integer or SHP_ANY, but got "
                               << shape[i];
    }
  }
}

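// Verify that every dimension of `shape` is a concrete positive value; used for the min/max
// shapes of dynamic inputs, where SHP_ANY is not allowed.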
void CheckShapeAllPositive(const std::string &op, const ShapeVector &shape) {
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] < 0) {
      MS_LOG(EXCEPTION) << op << " shape element [" << i << "] must be a positive integer, but got " << shape[i];
    }
  }
}

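// Read an int64 attribute and check that it is strictly positive.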
int64_t CheckAttrPositiveInt64(const std::string &op, const ValuePtr &attr, const std::string &attr_name) {
  int64_t attr_val = attr->cast<Int64ImmPtr>()->value();
  if (attr_val <= 0) {
    MS_LOG(EXCEPTION) << op << " invalid " << attr_name << " value: " << attr_val << ", should be greater than 0";
  }
  return attr_val;
}

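// Normalize an attribute that may be either a single int64 or a tuple of int64 into a vector of
// `num_element` values. For tuple attributes, `start_idx` skips leading entries (e.g. the N and C
// positions of a 4-element stride/dilation tuple).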
std::vector<int64_t> CheckAttrIntOrTuple(const ValuePtr &attr, const size_t start_idx, const size_t num_element) {
  std::vector<int64_t> result;
  MS_EXCEPTION_IF_NULL(attr);
  if (attr->isa<ValueTuple>()) {
    std::vector<ValuePtr> attr_vec = attr->cast<ValueTuplePtr>()->value();
    auto it_start = attr_vec.begin() + SizeToLong(start_idx);
    (void)std::transform(it_start, it_start + SizeToLong(num_element), std::back_inserter(result),
                         [](const ValuePtr &e) -> int64_t { return GetValue<int64_t>(e); });
  } else {
    int64_t attr_val = attr->cast<Int64ImmPtr>()->value();
    (void)result.insert(result.begin(), num_element, attr_val);
  }
  return result;
}

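// Compute the output H/W and the pad list (top, bottom, left, right) for the given pad mode:
//   VALID: out = ceil((x - dilation * (kernel - 1)) / stride), all pads are 0.
//   SAME:  out = ceil(x / stride); the needed padding
//          max(0, (out - 1) * stride + dilation * (kernel - 1) + 1 - x) is split across both sides,
//          with the extra pixel (if any) going to the bottom/right.
//          e.g. x_h = 224, kernel = 3, stride = 2, dilation = 1 gives out_h = 112 and pad = (0, 1).
//   PAD:   the explicit `padding` attribute is used and
//          out = floor(1 + (x + pad_before + pad_after - kernel - (kernel - 1) * (dilation - 1)) / stride).
// A dimension equal to Shape::SHP_ANY stays dynamic in the output (and yields SHP_ANY pads in SAME
// mode). When `is_min_shape` is set, computed sizes are clamped to at least 1.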
void Conv2DPadFunction(std::vector<int64_t> *output_hw, std::vector<int64_t> *pad_list, const int64_t x_h,
                       const int64_t x_w, const std::vector<int64_t> &kernel, const std::vector<int64_t> &stride,
                       const std::vector<int64_t> &dilation, const int64_t &pad_mode,
                       const std::vector<int64_t> &padding, const bool is_min_shape = false) {
  if (pad_mode == PadMode::VALID) {
    int64_t out_h = -1;
    int64_t out_w = -1;
    if (x_h != Shape::SHP_ANY) {
      out_h =
        static_cast<int64_t>(std::ceil(((x_h * 1.0) - static_cast<double>(dilation[0] * (kernel[0] - 1))) / stride[0]));
      if (is_min_shape && out_h < 1) {
        out_h = 1L;
      }
    }
    if (x_w != Shape::SHP_ANY) {
      out_w =
        static_cast<int64_t>(std::ceil(((x_w * 1.0) - static_cast<double>(dilation[1] * (kernel[1] - 1))) / stride[1]));
      if (is_min_shape && out_w < 1) {
        out_w = 1L;
      }
    }
    output_hw->push_back(out_h);
    output_hw->push_back(out_w);
    constexpr size_t pad_size = 4;
    (void)pad_list->insert(pad_list->begin(), pad_size, 0);
  } else if (pad_mode == PadMode::SAME) {
    if (x_h == Shape::SHP_ANY) {
      output_hw->push_back(Shape::SHP_ANY);
      pad_list->push_back(Shape::SHP_ANY);
      pad_list->push_back(Shape::SHP_ANY);
    } else {
      output_hw->push_back(static_cast<int64_t>(std::ceil((x_h * 1.0) / stride[0])));
      int64_t pad_needed_h = (output_hw->at(0) - 1) * stride[0] + dilation[0] * (kernel[0] - 1) + 1 - x_h;
      pad_needed_h = std::max((int64_t)0, pad_needed_h);
      pad_list->push_back(static_cast<int64_t>(std::floor(pad_needed_h / 2)));
      pad_list->push_back(pad_needed_h - pad_list->at(0));
    }

    if (x_w == Shape::SHP_ANY) {
      output_hw->push_back(Shape::SHP_ANY);
      pad_list->push_back(Shape::SHP_ANY);
      pad_list->push_back(Shape::SHP_ANY);
    } else {
      output_hw->push_back(static_cast<int64_t>(std::ceil((x_w * 1.0) / stride[1])));
      int64_t pad_needed_w = (output_hw->at(1) - 1) * stride[1] + dilation[1] * (kernel[1] - 1) + 1 - x_w;
      pad_needed_w = std::max((int64_t)0, pad_needed_w);
      pad_list->push_back(static_cast<int64_t>(std::floor(pad_needed_w / 2)));
      pad_list->push_back(pad_needed_w - pad_list->at(kInputIndex2));
    }
  } else if (pad_mode == PadMode::PAD) {
    (void)pad_list->insert(pad_list->begin(), padding.begin(), padding.end());
    int64_t out_h = -1;
    int64_t out_w = -1;
    if (x_h != Shape::SHP_ANY) {
      out_h = static_cast<int64_t>(std::floor(
        1 + ((x_h * 1) + pad_list->at(0) + pad_list->at(1) - kernel[0] - ((kernel[0] - 1) * (dilation[0] - 1))) /
              stride[0]));
      if (is_min_shape && out_h < 1) {
        out_h = 1L;
      }
    }
    if (x_w != Shape::SHP_ANY) {
      out_w = static_cast<int64_t>(std::floor(1 + ((x_w * 1) + pad_list->at(kInputIndex2) + pad_list->at(kInputIndex3) -
                                                   kernel[1] - ((kernel[1] - 1) * (dilation[1] - 1))) /
                                                    stride[1]));
      if (is_min_shape && out_w < 1) {
        out_w = 1L;
      }
    }
    output_hw->push_back(out_h);
    output_hw->push_back(out_w);
  }
}

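// Infer the Conv2D output shape. Validates that x and w are 4-D, that the channel and kernel
// dimensions are consistent with the `group`, `out_channel` and `kernel_size` attributes,
// computes the spatial output size for the actual shape as well as the min/max shapes of dynamic
// inputs, and records the resolved padding on the primitive as the "pad_list" attribute.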
abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  auto x_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
  auto w_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape());
  auto x_shape = x_shape_map[kShape];
  auto w_shape = w_shape_map[kShape];
  const int64_t shape_size = 4;
  (void)CheckAndConvertUtils::CheckInteger("x shape size", SizeToLong(x_shape.size()), kEqual, shape_size, prim_name);
  (void)CheckAndConvertUtils::CheckInteger("w shape size", SizeToLong(w_shape.size()), kEqual, shape_size, prim_name);
  auto x_min_shape = x_shape_map[kMinShape];
  auto x_max_shape = x_shape_map[kMaxShape];
  auto w_min_shape = w_shape_map[kMinShape];
  auto w_max_shape = w_shape_map[kMaxShape];
  CheckAndConvertUtils::CheckMinMaxShape(x_shape, &x_min_shape, &x_max_shape);
  CheckAndConvertUtils::CheckMinMaxShape(w_shape, &w_min_shape, &w_max_shape);
  CheckShapeAnyAndPositive(prim_name + " x_shape", x_shape);
  CheckShapeAnyAndPositive(prim_name + " w_shape", w_shape);
  CheckShapeAllPositive(prim_name + " x_min_shape", x_min_shape);
  CheckShapeAllPositive(prim_name + " x_max_shape", x_max_shape);
  CheckShapeAllPositive(prim_name + " w_min_shape", w_min_shape);
  CheckShapeAllPositive(prim_name + " w_max_shape", w_max_shape);
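  // Resolve axis indices for the configured data format: NCHW by default, NHWC moves C to axis 3.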
  const uint64_t n_axis = 0;
  uint64_t c_axis = 1;
  uint64_t h_axis = 2;
  uint64_t w_axis = 3;
  int64_t data_format = CheckAndConvertUtils::GetAndCheckFormat(primitive->GetAttr("format"));
  if (data_format == Format::NHWC) {
    c_axis = 3;
    h_axis = 1;
    w_axis = 2;
  }
  int64_t group = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("group"), "group");
  if ((x_shape[c_axis] != Shape::SHP_ANY) && (w_shape[c_axis] != Shape::SHP_ANY) &&
      ((x_shape[c_axis] / group) != w_shape[c_axis])) {
    MS_LOG(EXCEPTION) << "x_shape[C_in] / group must be equal to w_shape[C_in] = " << w_shape[c_axis] << ", but got "
                      << (x_shape[c_axis] / group);
  }
  int64_t out_channel = CheckAttrPositiveInt64(prim_name, primitive->GetAttr("out_channel"), "out_channel");
  if ((w_shape[n_axis] != Shape::SHP_ANY) && (w_shape[n_axis] != out_channel)) {
    MS_LOG(EXCEPTION) << "w_shape[" << n_axis << "] = " << w_shape[n_axis]
                      << " must be equal to out_channel = " << out_channel;
  }
  constexpr size_t kernel_size_num = 2;
  constexpr size_t stride_num = 2;
  constexpr size_t dilation_num = 2;
  constexpr size_t padding_num = 4;
  constexpr size_t start_index = 2;
  std::vector<int64_t> kernel_size = CheckAttrIntOrTuple(primitive->GetAttr("kernel_size"), 0, kernel_size_num);
  if ((w_shape[h_axis] != Shape::SHP_ANY) && (w_shape[h_axis] != kernel_size[0])) {
    MS_LOG(EXCEPTION) << "weight height = " << w_shape[h_axis]
                      << " must be equal to kernel_size[0] = " << kernel_size[0];
  }
  if ((w_shape[w_axis] != Shape::SHP_ANY) && (w_shape[w_axis] != kernel_size[1])) {
    MS_LOG(EXCEPTION) << "weight width = " << w_shape[w_axis]
                      << " must be equal to kernel_size[1] = " << kernel_size[1];
  }
  std::vector<int64_t> stride = CheckAttrIntOrTuple(primitive->GetAttr("stride"), start_index, stride_num);
  std::vector<int64_t> dilation = CheckAttrIntOrTuple(primitive->GetAttr("dilation"), start_index, dilation_num);
  std::vector<int64_t> padding = CheckAttrIntOrTuple(primitive->GetAttr("pad"), 0, padding_num);
  int64_t pad_mode;
  CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr("pad_mode"), &pad_mode);
  std::vector<int64_t> output_hw;
  std::vector<int64_t> pad_list;
  std::vector<int64_t> output_hw_min;
  std::vector<int64_t> pad_list_min;
  std::vector<int64_t> output_hw_max;
  std::vector<int64_t> pad_list_max;
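  // Compute the spatial output size and padding for the actual shape, and separately for the
  // min/max shapes so dynamic-shape bounds stay consistent.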
  Conv2DPadFunction(&output_hw, &pad_list, x_shape[h_axis], x_shape[w_axis], kernel_size, stride, dilation, pad_mode,
                    padding);
  Conv2DPadFunction(&output_hw_min, &pad_list_min, x_min_shape[h_axis], x_min_shape[w_axis], kernel_size, stride,
                    dilation, pad_mode, padding, true);
  Conv2DPadFunction(&output_hw_max, &pad_list_max, x_max_shape[h_axis], x_max_shape[w_axis], kernel_size, stride,
                    dilation, pad_mode, padding);
  std::vector<ValuePtr> pad_list_val = {MakeValue(pad_list[0]), MakeValue(pad_list[1]), MakeValue(pad_list[2]),
                                        MakeValue(pad_list[3])};
  primitive->set_attr("pad_list", MakeValue(pad_list_val));
  ShapeVector output_shape;
  ShapeVector output_shape_min;
  ShapeVector output_shape_max;
  if (data_format == Format::NHWC) {
    output_shape = {x_shape[n_axis], output_hw[0], output_hw[1], out_channel};
    output_shape_min = {x_min_shape[n_axis], output_hw_min[0], output_hw_min[1], out_channel};
    output_shape_max = {x_max_shape[n_axis], output_hw_max[0], output_hw_max[1], out_channel};
  } else {
    output_shape = {x_shape[n_axis], out_channel, output_hw[0], output_hw[1]};
    output_shape_min = {x_min_shape[n_axis], out_channel, output_hw_min[0], output_hw_min[1]};
    output_shape_max = {x_max_shape[n_axis], out_channel, output_hw_max[0], output_hw_max[1]};
  }
  CheckShapeAnyAndPositive(prim_name + " output_shape", output_shape);
  CheckShapeAllPositive(prim_name + " output_shape_min", output_shape_min);
  CheckShapeAllPositive(prim_name + " output_shape_max", output_shape_max);
  return std::make_shared<abstract::Shape>(output_shape, output_shape_min, output_shape_max);
}

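// Infer the Conv2D output dtype from input x: int8 inputs produce an int32 output; other valid
// types pass through unchanged.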
TypePtr Conv2dInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  const std::set<TypePtr> valid_types = {kInt8, kInt32, kInt64, kFloat16, kFloat32};
  auto out_type = CheckAndConvertUtils::CheckTypeValid("x", input_args[0]->BuildType(), valid_types, prim->name());
  if (out_type->type_id() == TypeId::kNumberTypeInt8) {
    out_type = kInt32;
  }
  return out_type;
}
}  // namespace
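// Initialize all Conv2D attributes in one call; each setter below performs its own validation.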
void Conv2D::Init(int64_t out_channel, const std::vector<int64_t> &kernel_size, int64_t mode, const PadMode &pad_mode,
                  const std::vector<int64_t> &pad, const std::vector<int64_t> &stride,
                  const std::vector<int64_t> &dilation, int64_t group, const Format &format) {
  set_kernel_size(kernel_size);
  set_stride(stride);
  set_dilation(dilation);
  set_pad(pad);
  set_pad_mode(pad_mode);
  set_mode(mode);
  set_out_channel(out_channel);
  set_group(group);
  set_format(format);
}

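// Attribute setters: each validates its argument (where applicable) before storing it as a
// primitive attribute.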
void Conv2D::set_out_channel(int64_t out_channel) {
  (void)AddAttr(kOutChannel,
                MakeValue(CheckAndConvertUtils::CheckInteger(kOutChannel, out_channel, kGreaterThan, 0, name())));
}

void Conv2D::set_kernel_size(const std::vector<int64_t> &kernel_size) {
  (void)AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, name())));
}

void Conv2D::set_stride(const std::vector<int64_t> &stride) {
  (void)AddAttr(kStride, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, name())));
}

void Conv2D::set_dilation(const std::vector<int64_t> &dilation) {
  (void)AddAttr(kDilation, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, name())));
}

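// PAD mode requires every explicit pad value to be non-negative; all other modes require the pad
// attribute to be all zeros.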
void Conv2D::set_pad_mode(const PadMode &pad_mode) {
  std::vector<int64_t> pad = get_pad();
  if (pad_mode == PAD) {
    for (auto item : pad) {
      CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name());
    }
  } else {
    CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name());
  }
  int64_t swi = pad_mode;
  (void)AddAttr(kPadMode, MakeValue(swi));
}

void Conv2D::set_pad(const std::vector<int64_t> &pad) {
  const int64_t pad_size = 4;
  (void)CheckAndConvertUtils::CheckInteger("pad_size", SizeToLong(pad.size()), kEqual, pad_size, name());
  (void)AddAttr(kPad, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, name())));
}

void Conv2D::set_mode(int64_t mode) {
  (void)AddAttr(kMode, MakeValue(CheckAndConvertUtils::CheckInteger(kMode, mode, kEqual, 1, name())));
}

void Conv2D::set_group(int64_t group) {
  (void)AddAttr(kGroup, MakeValue(CheckAndConvertUtils::CheckInteger(kGroup, group, kGreaterThan, 0, name())));
}

void Conv2D::set_format(const Format &format) {
  int64_t f = format;
  (void)AddAttr(kFormat, MakeValue(f));
}

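// Attribute getters: read the stored attribute values back from the primitive.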
int64_t Conv2D::get_out_channel() const {
  auto value_ptr = GetAttr(kOutChannel);
  return GetValue<int64_t>(value_ptr);
}

std::vector<int64_t> Conv2D::get_kernel_size() const {
  auto value_ptr = GetAttr(kKernelSize);
  return GetValue<std::vector<int64_t>>(value_ptr);
}

std::vector<int64_t> Conv2D::get_stride() const {
  auto value_ptr = GetAttr(kStride);
  return GetValue<std::vector<int64_t>>(value_ptr);
}

std::vector<int64_t> Conv2D::get_dilation() const {
  auto value_ptr = GetAttr(kDilation);
  return GetValue<std::vector<int64_t>>(value_ptr);
}

PadMode Conv2D::get_pad_mode() const {
  auto value_ptr = GetAttr(kPadMode);
  return PadMode(GetValue<int64_t>(value_ptr));
}

std::vector<int64_t> Conv2D::get_pad() const {
  auto value_ptr = GetAttr(kPad);
  return GetValue<std::vector<int64_t>>(value_ptr);
}

int64_t Conv2D::get_mode() const {
  auto value_ptr = GetAttr(kMode);
  return GetValue<int64_t>(value_ptr);
}

int64_t Conv2D::get_group() const {
  auto value_ptr = GetAttr(kGroup);
  return GetValue<int64_t>(value_ptr);
}

Format Conv2D::get_format() const {
  auto value_ptr = GetAttr(kFormat);
  return Format(GetValue<int64_t>(value_ptr));
}

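// Entry point used by the inference registry: checks that at least two inputs (x and w) are given
// and that they share a valid tensor type, then combines the shape and type inference above.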
AbstractBasePtr Conv2dInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                            const std::vector<AbstractBasePtr> &input_args) {
  const int64_t input_num = 2;
  (void)CheckAndConvertUtils::CheckInteger("Conv2d infer", SizeToLong(input_args.size()), kGreaterEqual, input_num,
                                           primitive->name());
  const std::set<TypePtr> valid_types = {kInt8, kInt32, kInt64, kFloat16, kFloat32};
  std::map<std::string, TypePtr> types;
  (void)types.emplace("x", input_args[0]->BuildType());
  (void)types.emplace("w", input_args[1]->BuildType());
  (void)CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, primitive->name());
  return abstract::MakeAbstract(Conv2dInferShape(primitive, input_args), Conv2dInferType(primitive, input_args));
}
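// Register Conv2dInfer as the shape/type inference implementation for the Conv2D primitive.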
REGISTER_PRIMITIVE_EVAL_IMPL(Conv2D, prim::kPrimConv2D, Conv2dInfer, nullptr, true);
}  // namespace ops
}  // namespace mindspore