/**
 * Copyright 2021-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnacl/infer/shape_fusion_infer.h"
#include "nnacl/infer/infer_register.h"

int CalculateOutput(const TensorC *in_tensor, const TensorC *matrix_tensor, TensorC *out_tensor, size_t input_len,
                    size_t origin_out_size) {
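  // Determine the current output length (a scalar output counts as one element); if it no longer matches
  // the previously inferred length, release the stale buffer so it is re-allocated below.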
  size_t out_size = out_tensor->shape_size_ == 0 ? 1 : (size_t)(out_tensor->shape_[0]);
  if (out_size != origin_out_size && out_tensor->data_ != NULL) {
    free(out_tensor->data_);
    out_tensor->data_ = NULL;
  }
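  // Stage the fusion matrix as float data, converting element-wise when it is stored as float16.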
  size_t matrix_data_size = input_len * out_size * sizeof(float);
  float *matrix_data = (float *)(malloc(matrix_data_size));
  NNACL_CHECK_NULL_RETURN_ERR(matrix_data);
  if (matrix_tensor->data_type_ == kNumberTypeFloat32 || matrix_tensor->data_type_ == kNumberTypeFloat) {
    memcpy(matrix_data, matrix_tensor->data_, matrix_data_size);
#ifdef ENABLE_FP16
  } else if (matrix_tensor->data_type_ == kNumberTypeFloat16) {
    for (size_t i = 0; i < input_len * out_size; i++) {
      matrix_data[i] = (float)(((float16_t *)(matrix_tensor->data_))[i]);
    }
#endif
  } else {
    free(matrix_data);
    return NNACL_ERR;
  }
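  // (Re)allocate the int32 output buffer if needed and zero it before accumulation.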
  if (out_tensor->data_ == NULL) {
    out_tensor->data_ = malloc(out_size * sizeof(int));
  }
  int *data = (int *)out_tensor->data_;
  if (data == NULL) {
    free(matrix_data);
    return NNACL_ERR;
  }
  memset(data, 0, out_size * sizeof(int));
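  // Each output element is an affine combination of the input dimensions:
  // data[i] = sum_j(in_shape[j] * matrix[i][j]) + matrix[i][input_len - 1], the last column being the constant term.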
  for (size_t i = 0; i < out_size; i++) {
    for (size_t j = 0; j < input_len - 1; j++) {
      data[i] += (int)(in_tensor->shape_[j] * matrix_data[i * input_len + j]);
    }
    data[i] += (int)(matrix_data[i * input_len + input_len - 1]);
  }
  free(matrix_data);
  return NNACL_OK;
}

int ShapeFusionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                          OpParameter *parameter) {
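  // inputs[0] is the tensor whose shape is fused; each remaining input carries the fusion matrix for the
  // corresponding output, hence inputs_size == outputs_size + 1.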
  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
  if (check_ret != NNACL_OK) {
    return check_ret;
  }
  NNACL_CHECK_TRUE_RET(inputs_size == outputs_size + 1, NNACL_INPUT_TENSOR_ERROR);
  const TensorC *in_tensor = inputs[0];
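  // Each fusion-matrix row has one column per input dimension plus one for the constant term.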
  size_t input_len = in_tensor->shape_size_ + 1;
  for (size_t out_idx = 0; out_idx < outputs_size; out_idx++) {
    TensorC *out_tensor = outputs[out_idx];
    size_t origin_out_size =
      out_tensor->data_ == NULL ? 0 : (out_tensor->shape_size_ == 0 ? 1 : (size_t)out_tensor->shape_[0]);
    out_tensor->data_type_ = kNumberTypeInt32;
    out_tensor->format_ = in_tensor->format_;
    if (!InferFlag(inputs, inputs_size)) {
      return NNACL_INFER_INVALID;
    }

    // calculate output tensor shape.
    const TensorC *matrix_tensor = inputs[out_idx + 1];
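    // A rank-1 fusion matrix yields a scalar output; otherwise the output is a 1-D tensor with one element
    // per matrix row.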
    if (matrix_tensor->shape_size_ == 1) {
      out_tensor->shape_size_ = 0;
      out_tensor->shape_[0] = 0;
    } else {
      out_tensor->shape_size_ = 1;
      out_tensor->shape_[0] = (int)(matrix_tensor->shape_[0]);
    }
    int ret = CalculateOutput(in_tensor, matrix_tensor, out_tensor, input_len, origin_out_size);
    if (ret != NNACL_OK) {
      return ret;
    }
  }
  return NNACL_OK;
}

REG_INFER(ShapeFusion, PrimType_Inner_ShapeFusion, ShapeFusionInferShape)