/**
 * Copyright 2020-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "minddata/dataset/kernels/tensor_op.h"

#include <memory>
#include <vector>

namespace mindspore {
namespace dataset {
// Name: Compute()
// Description: This Compute() takes one Tensor and produces one Tensor.
//              The derived class should override this function; otherwise an error is returned.
Status TensorOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
  IO_CHECK(input, output);
  if (!OneToOne()) {
    RETURN_STATUS_UNEXPECTED("Wrong Compute() function is called. This is not a 1-to-1 TensorOp.");
  } else {
    RETURN_STATUS_UNEXPECTED("Is this TensorOp 1-to-1? If yes, please implement this Compute() in the derived class.");
  }
}
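
// A minimal sketch of how a derived one-to-one op would override the Compute() above. "IdentityOp"
// is a hypothetical name used only for illustration; it forwards the input tensor unchanged.
//
//   class IdentityOp : public TensorOp {
//    public:
//     Status Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) override {
//       IO_CHECK(input, output);
//       *output = input;  // share the same underlying tensor; no copy is made
//       return Status::OK();
//     }
//     std::string Name() const override { return "IdentityOp"; }
//   };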

// Name: Compute()
// Description: This Compute() takes multiple Tensors from different columns and produces multiple Tensors.
//              The derived class should override this function; otherwise an error is returned.
Status TensorOp::Compute(const TensorRow &input, TensorRow *output) {
  IO_CHECK_VECTOR(input, output);
  if (OneToOne()) {
    CHECK_FAIL_RETURN_UNEXPECTED(input.size() == 1, "The op is one-to-one and can only accept one tensor as input.");
    output->resize(1);
    return Compute(input[0], &(*output)[0]);
  }

  RETURN_STATUS_UNEXPECTED(
    "Is this TensorOp one-to-one? If not, please implement this Compute() in the derived class.");
}
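
// A sketch of a row-level op that consumes every column and overrides this Compute() directly.
// "PassThroughRowOp" is a hypothetical name used only for illustration; it forwards the whole row.
//
//   class PassThroughRowOp : public TensorOp {
//    public:
//     Status Compute(const TensorRow &input, TensorRow *output) override {
//       IO_CHECK_VECTOR(input, output);
//       *output = input;  // forward all columns unchanged
//       return Status::OK();
//     }
//     std::string Name() const override { return "PassThroughRowOp"; }
//   };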

Status TensorOp::Compute(const std::shared_ptr<DeviceTensor> &input, std::shared_ptr<DeviceTensor> *output) {
  IO_CHECK(input, output);
  RETURN_STATUS_UNEXPECTED(
    "Wrong Compute() function is called. This Compute() is for operators that can be executed on the"
    " Ascend310 device. If this op supports Ascend310, please implement this Compute() in the derived class.");
}

#if !defined(BUILD_LITE) && defined(ENABLE_D)
Status TensorOp::Compute(const std::vector<std::shared_ptr<DeviceTensorAscend910B>> &input,
                         std::vector<std::shared_ptr<DeviceTensorAscend910B>> *output) {
  IO_CHECK_VECTOR(input, output);
  if (OneToOne()) {
    CHECK_FAIL_RETURN_UNEXPECTED(input.size() == 1, "The op is one-to-one and can only accept one tensor as input.");
    output->resize(1);
    return Compute(input[0], &(*output)[0]);
  }

  RETURN_STATUS_UNEXPECTED(
    "Is this TensorOp one-to-one? If not, please implement this Compute() in the derived class.");
}

Status TensorOp::Compute(const std::shared_ptr<DeviceTensorAscend910B> &input,
                         std::shared_ptr<DeviceTensorAscend910B> *output) {
  IO_CHECK(input, output);
  RETURN_STATUS_UNEXPECTED(
    "Wrong Compute() function is called. This Compute() is for operators that can be executed on the"
    " Ascend910B device. If this op supports Ascend910B, please implement this Compute() in the derived class.");
}
#endif

Status TensorOp::OutputShape(const std::vector<TensorShape> &inputs, std::vector<TensorShape> &outputs) {
  if (inputs.size() != NumInput()) {
    RETURN_STATUS_UNEXPECTED("The size of the input argument vector does not match the number of inputs");
  }
  outputs = inputs;
  return Status::OK();
}

Status TensorOp::OutputType(const std::vector<DataType> &inputs, std::vector<DataType> &outputs) {
  if (inputs.size() != NumInput()) {
    RETURN_STATUS_UNEXPECTED("The size of the input argument vector does not match the number of inputs");
  }
  outputs = inputs;
  return Status::OK();
}
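
// A sketch of how a derived op that changes the element type would override OutputType() so the
// pipeline can infer output types without running the op. "ToFloat32Op" is a hypothetical name.
//
//   Status ToFloat32Op::OutputType(const std::vector<DataType> &inputs, std::vector<DataType> &outputs) {
//     RETURN_IF_NOT_OK(TensorOp::OutputType(inputs, outputs));  // validates inputs.size() == NumInput()
//     outputs[0] = DataType(DataType::DE_FLOAT32);
//     return Status::OK();
//   }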

Status TensorOp::SetAscendResource(const std::shared_ptr<DeviceResource> &resource) {
  RETURN_STATUS_UNEXPECTED("This is a CPU operator which doesn't have an Ascend resource. Please verify your context.");
}

RandomTensorOp::RandomTensorOp() {
  is_deterministic_ = false;
  random_generator_.seed(GetSeed());
}

void RandomTensorOp::SetSeed(uint32_t seed) { random_generator_.seed(seed); }
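
// Usage sketch inside a derived random op's Compute(): draw from random_generator_ (the member
// seeded above), e.g. to decide whether to apply an augmentation. "prob_" is a hypothetical member
// used only for illustration.
//
//   std::uniform_real_distribution<double> dist(0.0, 1.0);
//   bool apply = dist(random_generator_) < prob_;  // the same seed yields the same decision sequence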
}  // namespace dataset
}  // namespace mindspore