1 /**
2 * Copyright 2021 Huawei Technologies Co., Ltd
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "nnacl/infer/where_infer.h"
18 #include "nnacl/infer/infer_register.h"
19 #include "nnacl/tensor_c_utils.h"
20 #include "nnacl/infer/broadcast_to_infer.h"
21
WhereBroadCastInferShape(const int input_shape0_size,const int input_shape1_size,const int * input_shape0,const int * input_shape1,int * ndim,int * in_shape0,int * in_shape1,int * out_shape,bool * has_broad_cast)22 int WhereBroadCastInferShape(const int input_shape0_size, const int input_shape1_size, const int *input_shape0,
23 const int *input_shape1, int *ndim, int *in_shape0, int *in_shape1, int *out_shape,
24 bool *has_broad_cast) {
25 if (input_shape0_size > MAX_SHAPE_SIZE || input_shape1_size > MAX_SHAPE_SIZE) {
26 return NNACL_ERR;
27 }
28 MakeUpInputShapes(input_shape0_size, input_shape1_size, input_shape0, input_shape1, ndim, in_shape0, in_shape1);
29 if (*ndim >= MAX_SHAPE_SIZE) {
30 return NNACL_INFER_INVALID;
31 }
32 return BroadCastOutputShape(in_shape0, in_shape1, *ndim, out_shape, has_broad_cast);
33 }
34
/**
 * Shape inference for the Where operator.
 *
 * Two modes:
 *  - 1 input (condition only): the output is a tensor of int32 index
 *    coordinates whose shape depends on runtime data, so only dtype/format
 *    are set and NNACL_INFER_INVALID is returned (resolved at runtime).
 *  - 3 inputs (condition, x, y): the output shape is the broadcast of all
 *    three input shapes; dtype/format follow x (inputs[1]).
 *
 * @return NNACL_OK on success; NNACL_INFER_INVALID when the shape cannot be
 *         determined statically; NNACL_ERR / NNACL_INPUT_TENSOR_ERROR on
 *         invalid arguments.
 */
int WhereInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                    OpParameter *parameter) {
  int ret = CheckAugmentWithMinSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1);
  if (ret != NNACL_OK) {
    return ret;
  }

  TensorC *out = outputs[0];

  // Condition-only form: output holds indices, allocated dynamically at runtime.
  if (inputs_size == 1) {
    out->data_type_ = kNumberTypeInt32;
    out->format_ = inputs[0]->format_;
    return NNACL_INFER_INVALID;
  }

  if (inputs_size < 3 || outputs_size != 1) {
    return NNACL_INPUT_TENSOR_ERROR;
  }

  const TensorC *cond = inputs[0];
  const TensorC *x = inputs[1];
  const TensorC *y = inputs[2];
  SetDataTypeFormat(out, x);
  if (!InferFlag(inputs, inputs_size)) {
    return NNACL_INFER_INVALID;
  }

  int aligned_shape0[MAX_SHAPE_SIZE] = {0};
  int aligned_shape1[MAX_SHAPE_SIZE] = {0};
  int aligned_shape2[MAX_SHAPE_SIZE] = {0};
  int out_shape[MAX_SHAPE_SIZE] = {0};
  bool broadcast_cond_x = false;
  bool broadcast_with_y = false;
  int ndim = (int)cond->shape_size_;

  // First merge condition with x, then merge that intermediate result with y.
  if (WhereBroadCastInferShape((int)cond->shape_size_, (int)x->shape_size_, cond->shape_, x->shape_, &ndim,
                               aligned_shape0, aligned_shape1, out_shape, &broadcast_cond_x) != NNACL_OK) {
    return NNACL_ERR;
  }
  if (WhereBroadCastInferShape(ndim, (int)y->shape_size_, out_shape, y->shape_, &ndim, aligned_shape0, aligned_shape2,
                               out_shape, &broadcast_with_y) != NNACL_OK) {
    return NNACL_ERR;
  }
  ShapeSet(out->shape_, &out->shape_size_, out_shape, ndim);
  return NNACL_OK;
}
87
// Register WhereInferShape as the shape-inference handler for PrimType_Where.
REG_INFER(Where, PrimType_Where, WhereInferShape)
89