/**
 * Copyright 2021-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/extendrt/delegate/tensorrt/op/cast_tensorrt.h"
#include <cuda_runtime.h>
#include <numeric>
#include <memory>
#include <functional>
#include "src/extendrt/delegate/tensorrt/op/cast_plugin.h"
#include "ops/auto_generate/gen_lite_ops.h"

namespace mindspore::lite {
int CastTensorRT::IsSupport(const BaseOperatorPtr &base_operator, const std::vector<TensorInfo> &in_tensors,
                            const std::vector<TensorInfo> &out_tensors) {
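  // Cast takes exactly two inputs (the data tensor and a constant destination type) and one output.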
  if (in_tensors.size() != INPUT_SIZE2) {
    MS_LOG(ERROR) << "invalid input tensor size: " << in_tensors.size();
    return RET_ERROR;
  }
  if (out_tensors.size() != 1) {
    MS_LOG(ERROR) << "invalid output tensor size: " << out_tensors.size();
    return RET_ERROR;
  }
  return RET_OK;
}

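// Builds the TensorRT layer that implements Cast: reads the destination data type from the constant
// second input and converts the first input tensor to that type.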
int CastTensorRT::AddInnerOp(TensorRTContext *ctx) {
  // cast to type tensor
  auto type_tensor = in_tensors_[1];
  if (!type_tensor.IsConst()) {
    MS_LOG(ERROR) << "unknown cast type of " << op_name_;
    return RET_ERROR;
  }
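  // The destination type is stored as a single integer enum value inside the constant type tensor.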
  auto type_vec = ConvertTensorAsIntVector(type_tensor);
  if (type_vec.size() != 1) {
    MS_LOG(ERROR) << "Failed to get type input, type size " << type_vec.size() << ", node: " << op_name_;
    return RET_ERROR;
  }
  DataType data_type = static_cast<DataType>(type_vec[0]);
  MS_LOG(DEBUG) << op_name_ << " cast to data type (43 = float): " << type_vec[0];
  nvinfer1::DataType dest_datatype = ConvertDataType(data_type);
  auto trt_tensor = input(ctx, 0).trt_tensor_;

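  // TensorRT 7.2+ can cast via an identity layer whose output type is set to the destination type
  // (bool is mapped to int32); older versions fall back to the custom CastPlugin implementation.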
#if TRT_VERSION_GE(7, 2)
  dest_datatype = (dest_datatype == nvinfer1::DataType::kBOOL ? nvinfer1::DataType::kINT32 : dest_datatype);
  auto cast_layer = ctx->network()->addIdentity(*trt_tensor);
#else
  auto plugin = std::make_shared<CastPlugin>(op_name_, dest_datatype);
  nvinfer1::ITensor *inputTensors[] = {trt_tensor};
  nvinfer1::IPluginV2Layer *cast_layer = ctx->network()->addPluginV2(inputTensors, 1, *plugin);
#endif
  if (cast_layer == nullptr) {
    MS_LOG(ERROR) << "create cast layer failed for: " << op_name_;
    return RET_ERROR;
  }
#if TRT_VERSION_GE(7, 2)
  cast_layer->setOutputType(0, dest_datatype);
#endif
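  // Name the layer and publish its output under the op's output tensor name, keeping the input's
  // format information so downstream ops see a consistent layout.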
  cast_layer->setName(op_name_.c_str());
  nvinfer1::ITensor *cast_out = cast_layer->getOutput(0);
  ctx->RegisterTensor(ITensorHelper{cast_out, input(ctx, 0).format_, input(ctx, 0).same_format_},
                      out_tensors_[0].Name());
  this->layer_ = cast_layer;
  return RET_OK;
}
REGISTER_TENSORRT_CREATOR(ops::kNameCast, CastTensorRT)
}  // namespace mindspore::lite