/third_party/mindspore/mindspore/lite/src/delegate/npu/op/ |
D | npu_op.h |
     42  NPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in NPUOp() argument
     44      : inputs_(std::move(in_tensors)), outputs_(std::move(out_tensors)), name_(name) {  in NPUOp()
     52  …t IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     57  …al int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     62  virtual int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
     68  virtual int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
     73      return SetNPUInputs(in_tensors, out_tensors, npu_inputs);  in SetNPUInputs()
     81  …void set_inputs(const std::vector<mindspore::MSTensor> &in_tensors) { this->inputs_ = in_tensors; }  in set_inputs() argument
    117  …(*NPUGetOp)(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
    121  …p *GetNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in GetNPUOp() argument
         [all …]
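Taken together, these hits outline the NPU delegate's op base class: the constructor captures in_tensors/out_tensors (note that the std::move on a const reference at line 44 degrades to a copy), IsSupport() screens an op before it is built, Init() parses the primitive's attributes, and SetNPUInputs() wires the op to the already-converted upstream NPU operators. Below is a minimal sketch of that interface reconstructed only from the lines above; the ge::Operator type of npu_inputs, the omitted bodies, and the member layout are assumptions, and the second SetNPUInputs overload (lines 68-73), which simply forwards to the first, is left out.

```cpp
// Sketch of the interface implied by npu_op.h lines 42-121. Assumes the MindSpore
// Lite and HiAI DDK headers for schema::Primitive, mindspore::MSTensor and
// ge::Operator; only the in_tensors/out_tensors signatures are confirmed above.
#include <string>
#include <utility>
#include <vector>

class NPUOp {
 public:
  NPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
        const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : inputs_(in_tensors), outputs_(out_tensors), name_(std::move(name)) {}
  virtual ~NPUOp() = default;

  // Static capability check: called before the op object is created.
  static int IsSupport(const schema::Primitive *primitive,
                       const std::vector<mindspore::MSTensor> &in_tensors,
                       const std::vector<mindspore::MSTensor> &out_tensors);

  // Parses attributes out of the flatbuffer primitive (body omitted in this sketch).
  virtual int Init(const schema::Primitive *primitive,
                   const std::vector<mindspore::MSTensor> &in_tensors,
                   const std::vector<mindspore::MSTensor> &out_tensors);

  // Connects this op to the upstream NPU operators produced for its inputs.
  virtual int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
                           const std::vector<mindspore::MSTensor> &out_tensors,
                           const std::vector<ge::Operator *> &npu_inputs);

  void set_inputs(const std::vector<mindspore::MSTensor> &in_tensors) { inputs_ = in_tensors; }

 protected:
  std::vector<mindspore::MSTensor> inputs_;
  std::vector<mindspore::MSTensor> outputs_;
  std::string name_;
};
```

GetNPUOp() (line 121) and the NPUGetOp function-pointer type (line 117) act as the per-op factory hook that first calls IsSupport() and only then constructs the concrete op.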
|
D | convolution_npu.cc |
     21  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     30  auto in_shape = in_tensors[0].Shape();  // default format: nhwc, RunPass not called  in IsSupport()
     65  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     94  int ConvolutionNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
     97  auto ret = InitWeightConst(in_tensors);  in SetNPUInputs()
    103  if (in_tensors.size() == CONV_INPUT_SIZE) {  in SetNPUInputs()
    104  ret = InitBiasConst(in_tensors);  in SetNPUInputs()
    116  …const std::vector<mindspore::MSTensor> &in_tensors, const std::vector<mindspore::MSTensor> &out_te…  in SetNPUInputs() argument
    119  auto ret = InitWeightConst(in_tensors);  in SetNPUInputs()
    125  if (in_tensors.size() == CONV_INPUT_SIZE) {  in SetNPUInputs()
         [all …]
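The SetNPUInputs hits show the convolution wiring pattern: the weight tensor is always materialized as an NPU constant, while a bias constant is created only when a third input is present. A hedged reconstruction of that control flow follows; CONV_INPUT_SIZE, RET_OK/RET_ERROR, the log text, and the InitWeightConst/InitBiasConst helper bodies are taken as given and are not part of the listing.

```cpp
// Reconstruction of the bias-optional wiring at convolution_npu.cc:94-125
// (a sketch, not the verbatim source; the x-input and activation wiring are omitted).
int ConvolutionNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
                                   const std::vector<mindspore::MSTensor> &out_tensors,
                                   const std::vector<ge::Operator *> &npu_inputs) {
  auto ret = InitWeightConst(in_tensors);      // weight (in_tensors[1]) -> NPU const op
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Set weight for convolution op " << name_ << " failed.";  // illustrative text
    return RET_ERROR;
  }
  if (in_tensors.size() == CONV_INPUT_SIZE) {  // bias exists only for the 3-input form
    ret = InitBiasConst(in_tensors);
    if (ret != RET_OK) {
      return RET_ERROR;
    }
  }
  return RET_OK;
}
```

The comment preserved at line 30 ("default format: nhwc, RunPass not called") explains why IsSupport() can read the NHWC shape directly from in_tensors[0].Shape() before any layout pass has run.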
|
D | cast_npu.cc |
     21  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     23  CHECK_LESS_RETURN(in_tensors.size(), C2NUM);  in IsSupport()
     24  auto in_tensor = in_tensors[1];  in IsSupport()
     28  if (in_tensors.size() >= C2NUM && in_tensor.ElementNum() == 1) {  in IsSupport()
     37  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     39  CHECK_LESS_RETURN(in_tensors.size(), 1);  in Init()
     40  CHECK_NULL_RETURN(in_tensors[0]);  in Init()
     49  …cast_->set_attr_src_dtype(ConverterToNPUDataType(static_cast<DataType>(in_tensors[0].DataType())));  in Init()
     53  int CastNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
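Cast is only delegated when its destination-type operand (the second input) is a constant scalar; a sketch of that guard, reconstructed from lines 21-28, is below. The return values and the log message are assumptions; only the checks themselves come from the listing.

```cpp
// Sketch of the IsSupport() guard at cast_npu.cc:21-28.
int CastNPUOp::IsSupport(const schema::Primitive *primitive,
                         const std::vector<mindspore::MSTensor> &in_tensors,
                         const std::vector<mindspore::MSTensor> &out_tensors) {
  CHECK_LESS_RETURN(in_tensors.size(), C2NUM);
  auto in_tensor = in_tensors[1];
  // The second input carries the target data type as a single element.
  if (in_tensors.size() >= C2NUM && in_tensor.ElementNum() == 1) {
    return RET_OK;
  }
  MS_LOG(WARNING) << "NPU cast needs a scalar dtype operand; falling back to CPU.";  // assumed text
  return RET_NOT_SUPPORT;
}
```

Init() then records the source type directly from in_tensors[0].DataType() via set_attr_src_dtype (line 49).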
|
D | gather_npu.cc |
     23  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     25  if (in_tensors[1].DataType() != DataType::kNumberTypeInt32) {  in IsSupport()
     29  if (in_tensors.size() >= GATHER_INPUT_SIZE && in_tensors[AXIS_INDEX].ElementNum() == 1) {  in IsSupport()
     30  MS_ASSERT(in_tensors[AXIS_INDEX].Data());  in IsSupport()
     31  axis_ = static_cast<const int *>(in_tensors[AXIS_INDEX].Data().get())[0];  in IsSupport()
     39  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     50  int GatherNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
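Lines 29-31 show the recurring pattern of this directory: an op attribute such as the gather axis travels as a small constant input tensor, so IsSupport() reads it out before the NPU op is built. A generic form of that read, with a hypothetical helper name, is:

```cpp
// Generic version of the scalar-attribute read at gather_npu.cc:29-31 (the same
// idea appears in cast_npu.cc and the TensorRT gather op). ReadScalarAttr is a
// hypothetical helper written for this sketch; mindspore::MSTensor comes from
// the MindSpore Lite C++ API.
#include <vector>

template <typename T>
bool ReadScalarAttr(const mindspore::MSTensor &tensor, T *out) {
  if (tensor.Data() == nullptr || tensor.ElementNum() != 1) {
    return false;  // not a constant scalar: leave the op on the CPU path
  }
  *out = static_cast<const T *>(tensor.Data().get())[0];
  return true;
}
```

The int32 check at line 25 additionally rejects gathers whose indices tensor is not kNumberTypeInt32, since that is the only index type the NPU path accepts here.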
|
D | tile_npu.cc |
     22  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     24  if (in_tensors.size() != 2) {  in IsSupport()
     27  auto multiple_tensor = in_tensors[1];  in IsSupport()
     34  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     44  int TileNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
     50  if (in_tensors[1].Data() == nullptr) {  in SetNPUInputs()
     53  auto multiple_data = reinterpret_cast<const int *>(in_tensors[1].Data().get());  in SetNPUInputs()
     54  for (int i = 0; i < in_tensors[1].ElementNum(); ++i) {  in SetNPUInputs()
|
D | scale_npu.cc |
     26  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     35  axis_ = axis_ + in_tensors[INPUT_INDEX].Shape().size();  in IsSupport()
     38  if (in_tensors.size() <= BIAS_INDEX) {  in IsSupport()
     49  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     78  int ScaleNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
     81  MS_ASSERT(in_tensors.size() > SCALE_INDEX);  in SetNPUInputs()
     83  auto ret = ConvertScaleToMul(npu_inputs, op_, in_tensors);  in SetNPUInputs()
     93  if (in_tensors.size() > BIAS_INDEX && in_tensors[BIAS_INDEX] != nullptr) {  in SetNPUInputs()
    125  const std::vector<mindspore::MSTensor> &in_tensors) {  in ConvertScaleToMul() argument
    126  auto input_shape = in_tensors[INPUT_INDEX].Shape();  in ConvertScaleToMul()
         [all …]
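Line 35 is the usual negative-axis fold: a negative scale axis counts from the back of the input shape and is normalized into [0, rank) before the op is checked. Written out as a standalone helper (the name NormalizeAxis is hypothetical; in the op it is done inline on the member axis_):

```cpp
// The negative-axis normalization visible at scale_npu.cc:35, e.g. axis -1 on a
// 4-D input becomes 3. MSTensor::Shape() returns std::vector<int64_t>.
#include <cstdint>
#include <vector>

inline int NormalizeAxis(int axis, const std::vector<int64_t> &input_shape) {
  return axis < 0 ? axis + static_cast<int>(input_shape.size()) : axis;
}
```

When the scale operands cannot be expressed with the native Scale operator, SetNPUInputs() falls back to ConvertScaleToMul() (line 83), which rebuilds the computation as a broadcast multiply against the shape taken from in_tensors[INPUT_INDEX] (lines 125-126); the bias input is attached only when in_tensors.size() > BIAS_INDEX (line 93).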
|
D | matmul_npu.cc |
     24  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     26  if (in_tensors.size() == MATMUL_INPUT_SIZE) {  in IsSupport()
     27  if (in_tensors[BIAS_INDEX].Shape().size() != 1) {  in IsSupport()
     34  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     41  if (in_tensors.size() == MATMUL_INPUT_SIZE) {  in Init()
     54  int MatMulNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
     66  auto bias_shape = in_tensors[BIAS_INDEX].Shape();  in SetNPUInputs()
     67  auto bias_tensor = ConverterToNPUTensor(in_tensors[BIAS_INDEX]);  in SetNPUInputs()
|
D | convolution_npu.h |
     28  …lutionNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in ConvolutionNPUOp() argument
     30  : ConvolutionBaseNPUOp(primitive, in_tensors, out_tensors, name) {}  in ConvolutionNPUOp()
     34  …t IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     37  int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     40  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
     44  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
     56  …etNPUConvOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
|
D | transpose_npu.h |
     26  TransposeNPUOp(const std::vector<mindspore::MSTensor> &in_tensors,  in TransposeNPUOp() argument
     28  : NPUOp(nullptr, in_tensors, out_tensors, name) {  in TransposeNPUOp()
     33  …nsposeNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in TransposeNPUOp() argument
     35  : NPUOp(primitive, in_tensors, out_tensors, name) {}  in TransposeNPUOp()
     39  …t IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     42  int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
|
D | strided_slice_npu.cc |
     22  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     25  if (in_tensors.size() == ONNX_INPUT_SIZE) {  in IsSupport()
     27  size_t size = in_tensors[STRIDE_INDEX].Shape()[0];  in IsSupport()
     29  MS_ASSERT(in_tensors[STRIDE_INDEX].Data());  in IsSupport()
     30  memcpy(axes.data(), in_tensors[STRIDE_INDEX].Data().get(), sizeof(int) * size);  in IsSupport()
     38  auto input_x = in_tensors.at(0);  in IsSupport()
     46  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     66  int StridedSliceNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
     76  auto ret = SetCast(npu_inputs[0], strided_slice_, in_tensors[0], out_tensors[0]);  in SetNPUInputs()
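Lines 25-30 handle the ONNX-style strided slice, whose begin/end/axes/stride operands arrive as extra constant input tensors; each is copied into a host-side std::vector<int> before the NPU op is built. A generic, hedged version of that copy (CopyIntTensor is a hypothetical helper; the same pattern appears in tile_npu.cc and pad_npu.cc):

```cpp
// Generic form of the constant-vector copy at strided_slice_npu.cc:27-30.
#include <cstring>
#include <vector>

bool CopyIntTensor(const mindspore::MSTensor &tensor, std::vector<int> *out) {
  if (tensor.Data() == nullptr || tensor.ElementNum() <= 0) {
    return false;  // operand is not a known constant: the op stays on CPU
  }
  out->resize(static_cast<size_t>(tensor.ElementNum()));
  std::memcpy(out->data(), tensor.Data().get(), sizeof(int) * out->size());
  return true;
}
```

SetCast at line 76 additionally wraps the sliced output in a cast when the input and output tensor types differ.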
|
D | concat_npu.h |
     28  …ConcatNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in ConcatNPUOp() argument
     30  : NPUOp(primitive, in_tensors, out_tensors, name) {}  in ConcatNPUOp()
     34  …t IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     39  int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     42  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
     46  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
|
D | activation_npu.h |
     28  …vationNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in ActivationNPUOp() argument
     30  : NPUOp(primitive, in_tensors, out_tensors, name) {}  in ActivationNPUOp()
     34  …t IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     37  int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     40  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
     44  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
|
D | reshape_npu.h |
     28  …eshapeNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in ReshapeNPUOp() argument
     30  : NPUOp(primitive, in_tensors, out_tensors, name) {}  in ReshapeNPUOp()
     34  int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     37  …t IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     40  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
     44  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
|
D | transpose_npu.cc |
     19  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     21  if (in_tensors.size() < 2) {  in IsSupport()
     25  auto perm_num = in_tensors.at(1).ElementNum();  in IsSupport()
     26  if (in_tensors.at(1).Data() == nullptr) {  in IsSupport()
     30  auto perm_data = reinterpret_cast<const int *>(in_tensors.at(1).Data().get());  in IsSupport()
|
D | arithmetic_npu.cc |
     21  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     23  if (in_tensors[0].Shape().size() != 0 && in_tensors[1].Shape().size() != 0 &&  in IsSupport()
     24  in_tensors[0].Shape().size() != in_tensors[1].Shape().size()) {  in IsSupport()
     26  … << " size 1 is:" << in_tensors[0].Shape().size() << " size 2 is:" << in_tensors[1].Shape().size();  in IsSupport()
     30  if (type == mindspore::schema::PrimitiveType_Less && in_tensors[0].Shape().size() == 1) {  in IsSupport()
     34  …if (type == mindspore::schema::PrimitiveType_Equal && in_tensors[0].Shape().size() == ARITHMETIC_I…  in IsSupport()
     51  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
    144  int ArithmeticNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
    204  …const std::vector<mindspore::MSTensor> &in_tensors, const std::vector<mindspore::MSTensor> &out_te…  in SetNPUInputs() argument
    207  auto ret = SetNPUInputs(in_tensors, out_tensors, npu_inputs);  in SetNPUInputs()
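Lines 23-26 encode the broadcast restriction for element-wise ops on the NPU: both operands must have the same rank unless one of them is a scalar (rank 0), with further per-type exceptions for Less and Equal (lines 30 and 34). The core check, written here as a free predicate with a hypothetical name:

```cpp
// The rank-compatibility rule behind arithmetic_npu.cc:23-26.
bool RanksCompatible(const mindspore::MSTensor &a, const mindspore::MSTensor &b) {
  auto rank_a = a.Shape().size();
  auto rank_b = b.Shape().size();
  // A rank-0 (scalar) operand always broadcasts; otherwise the ranks must match.
  return rank_a == 0 || rank_b == 0 || rank_a == rank_b;
}
```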
|
D | arithmetic_npu.h |
     28  …hmeticNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in ArithmeticNPUOp() argument
     30  : NPUOp(primitive, in_tensors, out_tensors, name) {}  in ArithmeticNPUOp()
     34  …t IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     37  int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     40  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
     44  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
|
D | pad_npu.cc |
     27  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     41  if (in_tensors.size() >= PAD_INPUT_SIZE && in_tensors[1].Data() != nullptr) {  in IsSupport()
     48  …NPUOp::Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in Init() argument
     75  } else if (in_tensors.size() >= PAD_INPUT_SIZE && in_tensors[1].Data() != nullptr) {  in Init()
     76  for (int i = 0; i < in_tensors[1].ElementNum(); i++) {  in Init()
     77  paddings_vec_.push_back(static_cast<const int *>(in_tensors[1].Data().get())[i]);  in Init()
     98  int PadNPUOp::SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,  in SetNPUInputs() argument
|
D | scale_npu.h |
     28  ScaleNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in ScaleNPUOp() argument
     30  : NPUOp(primitive, in_tensors, out_tensors, name) {}  in ScaleNPUOp()
     34  …t IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     37  int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
     40  int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
     52  const std::vector<mindspore::MSTensor> &in_tensors);
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/opencl/ |
D | opencl_subgraph.cc |
     53  auto tensors = (mem_type == MemType::IMG) ? iv->in_tensors() : iv->out_tensors();  in ReplaceOutTensorAndKernelToConvert()
     72  int OpenCLSubGraph::GenToFormatOp(const std::vector<lite::Tensor *> &in_tensors,  in GenToFormatOp() argument
     85  GetKernelFromToTensor(in_tensors, nodes_, &loop_kernels, true);  in GenToFormatOp()
     88  for (size_t i = 0; i < in_tensors.size(); ++i) {  in GenToFormatOp()
     89  auto *in_tensor = in_tensors.at(i);  in GenToFormatOp()
    153  auto tensors = iv->in_tensors();  in GenToFormatOp()
    154  auto jv = std::find(tensors.begin(), tensors.end(), in_tensors.at(i));  in GenToFormatOp()
    171  GetKernelFromToTensor(in_tensors(), in_nodes_, &from_kernels_, true);  in InsertOpsPass()
    173  …GenToFormatOp(in_tensors(), from_kernels_, &in_convert_tensors_, &in_parameters_, &in_convert_ops_…  in InsertOpsPass()
    194  …if (in_tensors()[0]->data_type() == kNumberTypeFloat32 || in_tensors()[0]->data_type() == kNumberT…  in Init()
         [all …]
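GenToFormatOp() walks the subgraph's boundary tensors (lines 88-89), creates a converted tensor plus a to-format kernel for each one, and then rewires every interior kernel that still consumed the original tensor (lines 153-154). A distilled version of that rewiring step is below; RewireConsumers is a hypothetical helper written for this sketch, and set_in_tensors() is assumed to be the usual kernel setter rather than confirmed by the listing.

```cpp
// Simplified consumer rewiring behind opencl_subgraph.cc:153-154.
#include <algorithm>
#include <vector>

void RewireConsumers(const std::vector<kernel::LiteKernel *> &nodes,
                     lite::Tensor *original, lite::Tensor *converted) {
  for (auto *node : nodes) {
    auto tensors = node->in_tensors();                 // copy of the node's input list
    auto it = std::find(tensors.begin(), tensors.end(), original);
    if (it != tensors.end()) {
      *it = converted;                                 // consume the converted copy instead
      node->set_in_tensors(tensors);
    }
  }
}
```

InsertOpsPass() (lines 171-173) drives this for the subgraph inputs, and the check at line 194 decides the image data type from the first input tensor's float precision.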
|
D | opencl_fusion.cc |
     81  if (node->in_tensors().empty() || node->out_tensors().empty()) {  in NC_N11C()
     84  MS_ASSERT(node->in_tensors().front());  in NC_N11C()
     86  auto input_shape = node->in_tensors().front()->shape();  in NC_N11C()
     95  if (node->in_tensors().empty() || node->out_tensors().empty()) {  in N11C_NC()
     98  MS_ASSERT(node->in_tensors().front());  in N11C_NC()
    100  auto input_shape = node->in_tensors().front()->shape();  in N11C_NC()
    108  if (node->in_tensors().empty() || node->out_tensors().empty()) {  in NC11_NC()
    111  MS_ASSERT(node->in_tensors().front());  in NC11_NC()
    113  auto input_shape = node->in_tensors().front()->shape();  in NC11_NC()
    147  auto b_in_tensors = b->in_tensors();  in Merge()
         [all …]
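NC_N11C(), N11C_NC() and NC11_NC() are shape-pattern predicates that gate reshape-style fusions; each one first verifies that the node has inputs and outputs and then inspects the first input's shape. Their common guard structure, with the pattern-specific comparison deliberately left abstract (ShapePatternGuard is a hypothetical name, and LiteKernel as the node type is an assumption):

```cpp
// Common guard shared by the predicates at opencl_fusion.cc:81-113.
bool ShapePatternGuard(const kernel::LiteKernel *node) {
  if (node->in_tensors().empty() || node->out_tensors().empty()) {
    return false;
  }
  MS_ASSERT(node->in_tensors().front());
  auto input_shape = node->in_tensors().front()->shape();
  // ...each predicate then matches input_shape (and the output shape) against
  //    its namesake layout pattern; that comparison is not reproduced here...
  return !input_shape.empty();
}
```

Merge() (line 147) then splices the matched nodes by concatenating the survivor's input list from b->in_tensors().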
|
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/nnacl/fp32/ |
D | conv2d_delegate_fp32_coder.cc |
     64  …d::unique_ptr<OperatorCoder> CPUConvolutionFP32CoderSelect(const std::vector<Tensor *> &in_tensors,  in CPUConvolutionFP32CoderSelect() argument
     79  conv_param->input_h_ = in_tensors.at(kInputIndex)->Height();  in CPUConvolutionFP32CoderSelect()
     80  conv_param->input_w_ = in_tensors.at(kInputIndex)->Width();  in CPUConvolutionFP32CoderSelect()
     81  conv_param->input_channel_ = in_tensors.at(kInputIndex)->Channel();  in CPUConvolutionFP32CoderSelect()
     92  …coder = CPUOpCoderCreator<ConvolutionFP32Coder>(in_tensors, out_tensors, node, node_index, target,…  in CPUConvolutionFP32CoderSelect()
     95  …coder = std::make_unique<ConvolutionWinogradFP32Coder>(in_tensors, out_tensors, node, node_index, …  in CPUConvolutionFP32CoderSelect()
     98  …coder = CPUOpCoderCreator<ConvolutionFP32Coder>(in_tensors, out_tensors, node, node_index, target,…  in CPUConvolutionFP32CoderSelect()
    103  std::unique_ptr<OperatorCoder> CreateDelegateConv(const std::vector<Tensor *> &in_tensors,  in CreateDelegateConv()  argument
    106  …return CPUOpCoderCreator<ConvDelegateCoder>(in_tensors, out_tensors, node, node_index, target, sch…  in CreateDelegateConv()
    109  std::unique_ptr<OperatorCoder> CPUConvDwFp32CoderCreator(const std::vector<Tensor *> &in_tensors,  in CPUConvDwFp32CoderCreator() argument
         [all …]
|
D | conv2d_delegate_fp32_coder.h |
     26  …ConvDelegateCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensor…  in ConvDelegateCoder() argument
     28  : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}  in ConvDelegateCoder()
     39  …d::unique_ptr<OperatorCoder> CPUConvolutionFP32CoderSelect(const std::vector<Tensor *> &in_tensors,
     44  std::unique_ptr<OperatorCoder> CreateDelegateConv(const std::vector<Tensor *> &in_tensors,
     48  std::unique_ptr<OperatorCoder> CPUConvDwFp32CoderCreator(const std::vector<Tensor *> &in_tensors,
     53  …:unique_ptr<OperatorCoder> CPUConv2DFusionFP32CoderCreator(const std::vector<Tensor *> &in_tensors,
|
/third_party/mindspore/mindspore/lite/src/ |
D | lite_kernel.cc |
     29  auto &in_tensors = this->in_tensors();  in IsReady() local
     30  return std::all_of(in_tensors.begin(), in_tensors.end(), [&](lite::Tensor *in_tensor) {  in IsReady()
     46  auto &post_in_tensors = post_kernel->in_tensors();  in InitOutTensorInitRefCount()
     60  oss << ", " << this->in_tensors().size() << " InputTensors:";  in ToString()
     61  for (auto tensor : in_tensors()) {  in ToString()
     86  for (auto &in_tensor : in_tensors()) {  in DoExecute()
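IsReady() (lines 29-30) declares a kernel runnable only once every input tensor satisfies a per-tensor predicate; the predicate body is truncated in the listing, so it is left abstract in the sketch below, which is written as a free function rather than the member it really is.

```cpp
// Shape of the readiness check at lite_kernel.cc:29-30.
#include <algorithm>
#include <functional>
#include <vector>

bool IsReady(const std::vector<lite::Tensor *> &in_tensors,
             const std::function<bool(lite::Tensor *)> &tensor_ready) {
  // Every input must be ready (constant, graph input, or already produced).
  return std::all_of(in_tensors.begin(), in_tensors.end(),
                     [&](lite::Tensor *in_tensor) { return tensor_ready(in_tensor); });
}
```

The remaining hits are bookkeeping: InitOutTensorInitRefCount() seeds reference counts from each consumer's in_tensors() (line 46), ToString() prints the input count (lines 60-61), and DoExecute() iterates the inputs before launching the kernel (line 86).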
|
/third_party/mindspore/mindspore/lite/src/delegate/tensorrt/op/ |
D | gather_tensorrt.cc |
     23  …::IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,  in IsSupport() argument
     29  if (in_tensors.size() != INPUT_SIZE3) {  in IsSupport()
     30  MS_LOG(ERROR) << "invalid input tensor size: " << in_tensors.size();  in IsSupport()
     37  if (in_tensors[1].DataType() != DataType::kNumberTypeInt32) {  in IsSupport()
     41  if (in_tensors[AXIS_INDEX].ElementNum() == 1) {  in IsSupport()
     42  MS_ASSERT(in_tensors[AXIS_INDEX].Data().get());  in IsSupport()
     43  axis_ = static_cast<const int *>(in_tensors[AXIS_INDEX].Data().get())[0];  in IsSupport()
|
/third_party/mindspore/mindspore/lite/src/runtime/ |
D | infer_manager.cc |
     67  std::vector<mindspore::MSTensor> in_tensors;  in KernelInferShape() local
     68  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_tensors),  in KernelInferShape()
     73  …auto ret = kernel_interface->Infer(&in_tensors, &out_tensors, static_cast<const schema::Primitive …  in KernelInferShape()
    132  std::vector<TensorC *> in_tensors;  in KernelInferShape() local
    140  int ret = GenerateInTensorC(parameter, inputs, outputs, &in_tensors);  in KernelInferShape()
    142  FreeAllTensorC(&in_tensors);  in KernelInferShape()
    147  FreeAllTensorC(&in_tensors);  in KernelInferShape()
    156  …ret = infer_shape_func(static_cast<TensorC **>(in_tensors.data()), in_tensors.size(), out_tensors.…  in KernelInferShape()
    192  FreeAllTensorC(&in_tensors);  in KernelInferShape()
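KernelInferShape() has two paths. The custom-kernel path (lines 67-73) wraps the runtime's lite::Tensor inputs into mindspore::MSTensor handles with std::transform before calling the registered kernel interface's Infer(). A hedged sketch of that wrapping step, with a stand-in for the conversion expression the listing truncates:

```cpp
// The input-wrapping step behind infer_manager.cc:67-73.
#include <algorithm>
#include <iterator>
#include <vector>

mindspore::MSTensor WrapAsMSTensor(lite::Tensor *tensor);  // hypothetical: real expression is truncated

std::vector<mindspore::MSTensor> WrapInputs(const std::vector<lite::Tensor *> &inputs) {
  std::vector<mindspore::MSTensor> in_tensors;
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_tensors),
                 [](lite::Tensor *t) { return WrapAsMSTensor(t); });
  return in_tensors;
}
```

The built-in path (lines 132-192) performs the analogous conversion to the C structs with GenerateInTensorC, calls infer_shape_func on the raw TensorC** array (line 156), and releases the temporaries with FreeAllTensorC on every exit path, including the error branches at lines 142 and 147.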
|