/**
 * Copyright 2020-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "include/api/context.h"
#include "include/api/types.h"
#include "include/dataset/constants.h"
#include "include/dataset/transforms.h"
#if !defined(BUILD_LITE) && defined(ENABLE_D)
#include "runtime/hardware/device_context.h"
#include "runtime/hardware/device_context_manager.h"
#endif

namespace mindspore {
namespace dataset {
class DeviceResource;
class Tensor;
class TensorOp;

// Class to run tensor operations in eager mode.
class DATASET_API Execute {
 public:
  /// \brief Constructor.
  /// \param[in] op TensorOperation to be applied in eager mode, passed as a shared pointer.
  /// \param[in] device_type Target device environment to perform the operation; can be kCPU/kGPU/kAscend310
  ///     (default=kCPU).
  /// \param[in] device_id Target device ID; only valid when device_type=kAscend310 (default=0).
  explicit Execute(const std::shared_ptr<TensorOperation> &op, MapTargetDevice device_type = MapTargetDevice::kCpu,
                   uint32_t device_id = 0);

  /// \brief Constructor.
  /// \param[in] op TensorTransform to be applied in eager mode, passed as a shared pointer.
  /// \param[in] device_type Target device environment to perform the operation; can be kCPU/kGPU/kAscend310
  ///     (default=kCPU).
  /// \param[in] device_id Target device ID; only valid when device_type=kAscend310 (default=0).
  explicit Execute(const std::shared_ptr<TensorTransform> &op, MapTargetDevice device_type = MapTargetDevice::kCpu,
                   uint32_t device_id = 0);

  /// \brief Constructor.
  /// \param[in] op TensorTransform to be applied in eager mode, passed as a std::reference_wrapper.
  /// \param[in] device_type Target device environment to perform the operation; can be kCPU/kGPU/kAscend310
  ///     (default=kCPU).
  /// \param[in] device_id Target device ID; only valid when device_type=kAscend310 (default=0).
  explicit Execute(const std::reference_wrapper<TensorTransform> &op,
                   MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);

  /// \brief Constructor.
  /// \param[in] op TensorTransform to be applied in eager mode, passed as a raw pointer.
  /// \param[in] device_type Target device environment to perform the operation; can be kCPU/kGPU/kAscend310
  ///     (default=kCPU).
  /// \param[in] device_id Target device ID; only valid when device_type=kAscend310 (default=0).
  explicit Execute(TensorTransform *op, MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);

  /// \brief Constructor.
  /// \param[in] ops A vector of TensorOperations to be applied in eager mode, passed as shared pointers.
  /// \param[in] device_type Target device environment to perform the operations; can be kCPU/kGPU/kAscend310
  ///     (default=kCPU).
  /// \param[in] device_id Target device ID; only valid when device_type=kAscend310 (default=0).
  explicit Execute(const std::vector<std::shared_ptr<TensorOperation>> &ops,
                   MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);

  /// \brief Constructor.
  /// \param[in] ops A vector of TensorTransforms to be applied in eager mode, passed as shared pointers.
  /// \param[in] device_type Target device environment to perform the operations; can be kCPU/kGPU/kAscend310
  ///     (default=kCPU).
  /// \param[in] device_id Target device ID; only valid when device_type=kAscend310 (default=0).
  explicit Execute(const std::vector<std::shared_ptr<TensorTransform>> &ops,
                   MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);

  /// \brief Constructor.
  /// \param[in] ops A vector of TensorTransforms to be applied in eager mode, passed as std::reference_wrappers.
  /// \param[in] device_type Target device environment to perform the operations; can be kCPU/kGPU/kAscend310
  ///     (default=kCPU).
  /// \param[in] device_id Target device ID; only valid when device_type=kAscend310 (default=0).
  explicit Execute(const std::vector<std::reference_wrapper<TensorTransform>> &ops,
                   MapTargetDevice device_type = MapTargetDevice::kCpu, uint32_t device_id = 0);

  /// \brief Constructor.
  /// \param[in] ops A vector of TensorTransforms to be applied in eager mode, passed as raw pointers.
  /// \param[in] device_type Target device environment to perform the operations; can be kCPU/kGPU/kAscend310
  ///     (default=kCPU).
  /// \param[in] device_id Target device ID; only valid when device_type=kAscend310 (default=0).
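  /// \par Example
  /// \code
  ///     /* Illustrative sketch only: vision::Decode and vision::Resize are assumed to be available from the
  ///        vision transforms header; the transform objects stay owned by the caller. */
  ///     vision::Decode decode;
  ///     vision::Resize resize({224, 224});
  ///     std::vector<TensorTransform *> ops = {&decode, &resize};
  ///     mindspore::dataset::Execute transform(ops);
  /// \endcode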
  explicit Execute(const std::vector<TensorTransform *> &ops, MapTargetDevice device_type = MapTargetDevice::kCpu,
                   uint32_t device_id = 0);

  /// \brief Destructor.
  ~Execute();

  /// \brief Update the TensorOperation applied by this Execute object.
  /// \param[in] op The new TensorOperation, passed as a shared pointer.
  /// \return Status error code, returns OK if no error encountered.
  Status UpdateOperation(const std::shared_ptr<TensorOperation> &op);

  /// \brief Callable function to execute the TensorTransform in eager mode.
  /// \param[in] input Tensor to be transformed.
  /// \param[out] output Transformed tensor.
  /// \return Status error code, returns OK if no error encountered.
  /// \par Example
  /// \code
  ///     /* Usage of Execute */
  ///     std::shared_ptr<TensorTransform> decode = std::make_shared<vision::Decode>();
  ///     std::shared_ptr<TensorTransform> center_crop(new vision::CenterCrop({30}));
  ///     std::shared_ptr<TensorTransform> rescale = std::make_shared<vision::Rescale>(1. / 3, 0.5);
  ///     mindspore::dataset::Execute transform = Execute({decode, center_crop, rescale});
  ///
  ///     /* Apply transforms */
  ///     mindspore::MSTensor image = ReadFileToTensor("apple.jpg");
  ///     Status rc = transform(image, &image);
  /// \endcode
  Status operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output);

  /// \brief Callable function to execute the TensorTransform in eager mode.
  /// \param[in] input_tensor_list List of Tensors to be transformed.
  /// \param[out] out Result tensors after transform.
  /// \return Status error code, returns OK if no error encountered.
  /// \par Example
  /// \code
  ///     /* Usage of Execute */
  ///     auto tokenizer = text::BasicTokenizer();
  ///     mindspore::dataset::Execute transform = Execute({tokenizer});
  ///
  ///     /* Apply transforms */
  ///     std::vector<mindspore::MSTensor> txt = ReadTextToTensor("demo.txt");
  ///     std::vector<mindspore::MSTensor> txt_result;
  ///     Status rc = transform(txt, &txt_result);
  /// \endcode
  Status operator()(const std::vector<mindspore::MSTensor> &input_tensor_list, std::vector<mindspore::MSTensor> *out);

  /// \brief Given a list of Execute objects, apply them in sequence to the input tensors.
  /// \param[in] data_graph The Execute objects to be applied, in order.
  /// \param[in] inputs Tensors to be transformed.
  /// \param[out] outputs Result tensors after all transforms have been applied.
  /// \return Status error code, returns OK if no error encountered.
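  /// \par Example
  /// \code
  ///     /* Illustrative sketch only: vision::Decode and vision::Resize are assumed to be available from the
  ///        vision transforms header, and ReadFileToTensor is the same helper used in the examples above. */
  ///     auto decode_exec = std::make_shared<mindspore::dataset::Execute>(std::make_shared<vision::Decode>());
  ///     auto resize_exec = std::make_shared<mindspore::dataset::Execute>(
  ///         std::make_shared<vision::Resize>(std::vector<int32_t>{224, 224}));
  ///
  ///     std::vector<mindspore::MSTensor> inputs = {ReadFileToTensor("apple.jpg")};
  ///     std::vector<mindspore::MSTensor> outputs;
  ///     Status rc = mindspore::dataset::Execute::Run({decode_exec, resize_exec}, inputs, &outputs);
  /// \endcode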
  static Status Run(const std::vector<std::shared_ptr<dataset::Execute>> &data_graph,
                    const std::vector<mindspore::MSTensor> &inputs, std::vector<mindspore::MSTensor> *outputs);

  /// \brief Release the device memory used on Ascend310.
  Status DeviceMemoryRelease();

  /// \brief Generate the AIPP configuration.
  std::string AippCfgGenerator();

 protected:
  /// \brief Convert the TensorTransforms into TensorOperations and then build the TensorOps.
  Status BuildTransforms(std::vector<std::shared_ptr<TensorOp>> *transforms_rt);

  /// \brief Convert each TensorTransform object into a TensorOperation object.
  Status ParseTransforms();

  /// \brief Validate whether the target device setting is valid.
  Status ValidateDevice();

  /// \brief Initialize the Ascend310 resource.
  Status InitResource(MapTargetDevice device_type, uint32_t device_id = 0);

  std::vector<std::shared_ptr<TensorTransform>> transforms_;
  std::vector<std::shared_ptr<TensorOperation>> ops_;
  MapTargetDevice device_type_;

  // Ascend310
  std::shared_ptr<DeviceResource> device_resource_ = nullptr;
  struct ExtraInfo;
  std::shared_ptr<ExtraInfo> info_;

#if !defined(BUILD_LITE) && defined(ENABLE_D)
  // Ascend910B
  device::DeviceContext *device_context_ = nullptr;
  size_t stream_id_;
#endif
};

class PyExecute : public Execute {
 public:
  // Inherit the base class constructors.
  using Execute::Execute;

  /// \brief Callable function to execute the TensorTransform in eager mode (CPU only).
  /// \param[in] input_tensor_list List of Tensors to be transformed.
  /// \param[out] out Result tensor list after transform.
  /// \return Status error code, returns OK if no error encountered.
  Status operator()(const std::vector<std::shared_ptr<Tensor>> &input_tensor_list,
                    std::vector<std::shared_ptr<Tensor>> *out);
};
}  // namespace dataset
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_