/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <cmath>
#include <map>
#include <vector>
#include "src/delegate/tensorrt/op/resize_tensorrt.h"
#include "nnacl/nnacl_common.h"

namespace mindspore::lite {
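// Checks whether this Resize op can run on TensorRT: the input shape must be
// known, there must be one input tensor (or two, when the output shape comes
// from a second tensor), and exactly one output tensor.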
int ResizeTensorRT::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                              const std::vector<mindspore::MSTensor> &out_tensors) {
  if (!IsShapeKnown()) {
    MS_LOG(ERROR) << "Unsupported input tensor unknown shape: " << op_name_;
    return RET_ERROR;
  }
  if (in_tensors.size() != 1 && in_tensors.size() != INPUT_SIZE2) {
    MS_LOG(ERROR) << "Unsupported input tensor size, size is " << in_tensors.size();
    return RET_ERROR;
  }
  if (out_tensors.size() != 1) {
    MS_LOG(ERROR) << "Unsupported output tensor size, size is " << out_tensors.size();
    return RET_ERROR;
  }
  return RET_OK;
}

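// Builds the TensorRT layers for this op: transposes a 4D NCHW input to NHWC
// if needed, adds an IResizeLayer, and configures its output dims and params.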
int ResizeTensorRT::AddInnerOp(nvinfer1::INetworkDefinition *network) {
  if (network == nullptr) {
    MS_LOG(ERROR) << "network is invalid";
    return RET_ERROR;
  }

  nvinfer1::ITensor *resize_in_tensor = tensorrt_in_tensors_[0].trt_tensor_;
  MS_LOG(DEBUG) << "origin input " << GetTensorFormat(resize_in_tensor, tensorrt_in_tensors_[0].format_);

  if (resize_in_tensor->getDimensions().nbDims == DIMENSION_4D && tensorrt_in_tensors_[0].format_ == Format::NCHW) {
    // NCHW->NHWC
    nvinfer1::IShuffleLayer *transpose_layer = NCHW2NHWC(network, *tensorrt_in_tensors_[0].trt_tensor_);
    if (transpose_layer == nullptr) {
      MS_LOG(ERROR) << "create transpose layer failed for " << op_name_;
      return RET_ERROR;
    }
    transpose_layer->setName((op_name_ + "_transpose_in").c_str());
    resize_in_tensor = transpose_layer->getOutput(0);
  }
  MS_LOG(DEBUG) << "after transpose input " << GetTensorFormat(resize_in_tensor, Format::NHWC);

  nvinfer1::IResizeLayer *resize_layer = network->addResize(*resize_in_tensor);
  if (resize_layer == nullptr) {
    MS_LOG(ERROR) << "create resize layer failed for " << op_name_;
    return RET_ERROR;
  }
  int ret = SetOutputDims(resize_in_tensor, resize_layer);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "SetOutputDims failed for " << op_name_;
    return RET_ERROR;
  }

  ret = SetParams(resize_layer);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "SetParams failed for " << op_name_;
    return RET_ERROR;
  }

  resize_layer->getOutput(0)->setName((op_name_ + "_output").c_str());
  this->AddInnerOutTensors(ITensorHelper{resize_layer->getOutput(0), Format::NHWC});
  MS_LOG(DEBUG) << "output " << GetTensorFormat(resize_layer->getOutput(0), Format::NHWC);
  return RET_OK;
}

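// Resolves the resize output dimensions. With a single input the new
// height/width come from the primitive attributes; with a shape tensor the
// values are either wired in dynamically via setInput, applied as static
// output dimensions, or interpreted as per-axis scale factors.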
int ResizeTensorRT::SetOutputDims(nvinfer1::ITensor *resize_in_tensor, nvinfer1::IResizeLayer *resize_layer) {
  auto resize_op = op_primitive_->value_as_Resize();
  if (resize_op == nullptr) {
    MS_LOG(ERROR) << "convert failed " << op_name_;
    return RET_ERROR;
  }
  if (in_tensors_.size() == 1) {
    nvinfer1::Dims new_dims = resize_in_tensor->getDimensions();  // nhwc
    new_dims.d[1] = resize_op->new_height();
    new_dims.d[2] = resize_op->new_width();
    resize_layer->setOutputDimensions(new_dims);
  } else {
    std::vector<float> out_shape;
    const void *shape_data = in_tensors_[1].Data().get();
    if (shape_data == nullptr) {
      // dynamic output shape
      if (tensorrt_in_tensors_.size() < INPUT_SIZE2) {
        MS_LOG(ERROR) << "no output shape tensor found for " << op_name_;
        return RET_ERROR;
      }
      resize_layer->setInput(1, *tensorrt_in_tensors_[1].trt_tensor_);
    } else {
      if (in_tensors_[1].ElementNum() != resize_in_tensor->getDimensions().nbDims) {
        MS_LOG(ERROR) << "output shape tensor value is invalid for " << op_name_;
        return RET_ERROR;
      }
      switch (in_tensors_[1].DataType()) {
        case DataType::kNumberTypeFloat32: {
          const float *shape_data_fp32 = static_cast<const float *>(shape_data);
          for (int i = 0; i < in_tensors_[1].ElementNum(); i++) {
            out_shape.push_back(*(shape_data_fp32 + i));
          }
          break;
        }
        case DataType::kNumberTypeFloat16: {
          const uint16_t *shape_data_fp16 = static_cast<const uint16_t *>(shape_data);
          for (int i = 0; i < in_tensors_[1].ElementNum(); i++) {
            out_shape.push_back(ShortToFloat32(*(shape_data_fp16 + i)));
          }
          break;
        }
        default:
          MS_LOG(WARNING) << op_name_ << " unsupported data type for output shape tensor: "
                          << static_cast<int>(in_tensors_[1].DataType());
          break;
      }
      if (SameDims(out_shape, out_tensors_[0].Shape())) {
        // static dims
        resize_layer->setOutputDimensions(ConvertCudaDims(out_shape));
      } else if (IsScaleOutputDim(in_tensors_[0].Shape(), out_tensors_[0].Shape(), out_shape)) {
        // scale dims
        if (out_shape.size() > DIMENSION_4D) {
          MS_LOG(ERROR) << "invalid out_shape dims count for " << op_name_;
          return RET_ERROR;
        }
        float scales[DIMENSION_4D]{};
        std::copy(out_shape.begin(), out_shape.end(), scales);
        resize_layer->setScales(scales, out_shape.size());
      } else {
        MS_LOG(ERROR) << "output shape tensor value is invalid for " << op_name_;
        return RET_ERROR;
      }
    }
  }
  return RET_OK;
}

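// Returns true if shape_tensor_val holds per-axis scale factors, i.e.
// in_shape[i] * shape_tensor_val[i] equals out_shape[i] (within 1e-6) for
// every axis.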
bool ResizeTensorRT::IsScaleOutputDim(const std::vector<int64_t> &in_shape, const std::vector<int64_t> &out_shape,
                                      const std::vector<float> &shape_tensor_val) {
  if (in_shape.size() != out_shape.size() || shape_tensor_val.size() != in_shape.size()) {
    MS_LOG(WARNING) << "tensor shapes do not match for " << op_name_;
    return false;
  }
  for (size_t i = 0; i < in_shape.size(); i++) {
    if (std::abs(in_shape[i] * shape_tensor_val[i] - out_shape[i]) > 1e-6) {
      return false;
    }
  }
  return true;
}

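// Maps the schema resize method to the TensorRT resize mode and warns when a
// coordinate transform mode other than ASYMMETRIC is requested, since other
// modes are not handled here on TensorRT 6.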
int ResizeTensorRT::SetParams(nvinfer1::IResizeLayer *resize_layer) {
  auto resize_op = op_primitive_->value_as_Resize();
  if (resize_op == nullptr) {
    MS_LOG(ERROR) << "convert failed " << op_name_;
    return RET_ERROR;
  }

  auto method = resize_op->method();
  std::map<schema::ResizeMethod, nvinfer1::ResizeMode> method_map = {
    {schema::ResizeMethod_LINEAR, nvinfer1::ResizeMode::kLINEAR},
    {schema::ResizeMethod_NEAREST, nvinfer1::ResizeMode::kNEAREST}};
  if (method_map.find(method) == method_map.end()) {
    MS_LOG(ERROR) << op_name_ << " unsupported resize mode " << EnumNameResizeMethod(method);
    return RET_ERROR;
  }
  resize_layer->setResizeMode(method_map.at(method));

  // not supported by TensorRT 6, but supported in later versions
  auto coordinate_transform_mode = resize_op->coordinate_transform_mode();
  if (coordinate_transform_mode != schema::CoordinateTransformMode_ASYMMETRIC) {
    MS_LOG(WARNING) << op_name_ << " has unsupported coordinate_transform_mode: "
                    << EnumNameCoordinateTransformMode(coordinate_transform_mode);
  }
  return RET_OK;
}
}  // namespace mindspore::lite