/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/

  slice_int8_tests.cc
      32   lite::Tensor in_tensor(kNumberTypeInt8, {1, 3, 2, 3});  in TEST_F() local
      37   in_tensor.set_data(input_data);  in TEST_F()
      48   in_tensor.AddQuantParam(quant_in0);  in TEST_F()
      51   std::vector<lite::Tensor *> inputs = {&in_tensor, &begin_tensor, &size_tensor};  in TEST_F()
      75   in_tensor.set_data(nullptr);  in TEST_F()
      83   lite::Tensor in_tensor(kNumberTypeInt8, {2, 1, 3, 2, 3});  in TEST_F() local
      89   in_tensor.set_data(input_data);  in TEST_F()
     100   in_tensor.AddQuantParam(quant_in0);  in TEST_F()
     103   std::vector<lite::Tensor *> inputs = {&in_tensor, &begin_tensor, &size_tensor};  in TEST_F()
     127   in_tensor.set_data(nullptr);  in TEST_F()
     [all …]

  l2_norm_int8_tests.cc
      32   lite::Tensor in_tensor(kNumberTypeInt8, {2, 1, 1, 5});  in TEST_F() local
      37   in_tensor.set_data(input_data);  in TEST_F()
      42   in_tensor.AddQuantParam(quant_in);  in TEST_F()
      45   std::vector<lite::Tensor *> inputs = {&in_tensor};  in TEST_F()
      72   in_tensor.set_data(nullptr);  in TEST_F()
      78   lite::Tensor in_tensor(kNumberTypeInt8, {1, 1, 1, 51});  in TEST_F() local
      84   in_tensor.set_data(input_data);  in TEST_F()
      89   in_tensor.AddQuantParam(quant_in);  in TEST_F()
      92   std::vector<lite::Tensor *> inputs = {&in_tensor};  in TEST_F()
     118   in_tensor.set_data(nullptr);  in TEST_F()

  relux_int8_tests.cc
      32   lite::Tensor in_tensor(kNumberTypeInt8, {2, 2});  in TEST_F() local
      37   in_tensor.set_data(input_data);  in TEST_F()
      42   in_tensor.AddQuantParam(quant_in);  in TEST_F()
      45   std::vector<lite::Tensor *> inputs = {&in_tensor};  in TEST_F()
      72   in_tensor.set_data(nullptr);  in TEST_F()
      78   lite::Tensor in_tensor(kNumberTypeInt8, {2, 4});  in TEST_F() local
      84   in_tensor.set_data(input_data);  in TEST_F()
      89   in_tensor.AddQuantParam(quant_in);  in TEST_F()
      92   std::vector<lite::Tensor *> inputs = {&in_tensor};  in TEST_F()
     120   in_tensor.set_data(nullptr);  in TEST_F()

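The three int8 test files above share one setup pattern for in_tensor: construct a lite::Tensor with a dtype and shape, attach a stack buffer with set_data(), register a quantization parameter, and detach the buffer again before the tensor is destroyed. A minimal sketch of that pattern follows, assuming the MindSpore Lite test headers; the buffer contents, quantization values, and include path are placeholders, and the kernel creation and output checks are omitted:

    #include <cstdint>
    #include <vector>
    #include "src/tensor.h"  // assumed include path for lite::Tensor / LiteQuantParam

    void BuildInt8TestInput() {
      lite::Tensor in_tensor(kNumberTypeInt8, {1, 3, 2, 3});
      int8_t input_data[18] = {0};                 // placeholder values
      in_tensor.set_data(input_data);              // tensor borrows the stack buffer
      lite::LiteQuantParam quant_in0;
      quant_in0.scale = 1.0;                       // illustrative quantization parameters
      quant_in0.zeroPoint = 0;
      in_tensor.AddQuantParam(quant_in0);
      std::vector<lite::Tensor *> inputs = {&in_tensor};
      // ... build the kernel, call Prepare()/Run(), compare against expected output ...
      in_tensor.set_data(nullptr);                 // detach so the destructor does not free stack memory
    }
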
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/base/

  reshape_base.cc
      41   auto in_tensor = in_tensors().front();  in Run() local
      42   CHECK_NULL_RETURN(in_tensor);  in Run()
      45   if (in_tensor->data_type() != out_tensor->data_type() || in_tensor->data() == nullptr ||  in Run()
      46   in_tensor->Size() != out_tensor->Size()) {  in Run()
      51   if (in_tensor->allocator() == nullptr || in_tensor->allocator() != out_tensor->allocator() ||  in Run()
      52   in_tensor->allocator() != ms_context_->allocator || /* runtime allocator */  in Run()
      55   CHECK_NULL_RETURN(in_tensor->data());  in Run()
      56   MS_CHECK_FALSE(in_tensor->Size() == 0, RET_ERROR);  in Run()
      57   auto size = in_tensor->Size();  in Run()
      64   auto in_data = static_cast<const uint8_t *>(in_tensor->data());  in Run()
     [all …]

/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp16/

  concat_fp16.cc
      44   for (const auto &in_tensor : in_tensors_) {  in MallocTmpBuffer() local
      46   … if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {  in MallocTmpBuffer()
      47   …et_cast<float16_t *>(ms_context_->allocator->Malloc(sizeof(float16_t) * in_tensor->ElementsNum()));  in MallocTmpBuffer()
      70   auto &in_tensor = in_tensors_.at(i);  in FreeTmpBuffer() local
      72   … if (in_tensor->data_type() == kNumberTypeFloat32 || in_tensor->data_type() == kNumberTypeFloat) {  in FreeTmpBuffer()
     102   const auto in_tensor = in_tensors_.at(i);  in Run() local
     103   CHECK_NULL_RETURN(in_tensor);  in Run()
     104   … if (in_tensor->data_type() == kNumberTypeFloat || in_tensor->data_type() == kNumberTypeFloat32) {  in Run()
     105   auto in_tensor_data = reinterpret_cast<float *>(in_tensor->data());  in Run()
     107   Float32ToFloat16(in_tensor_data, fp16_inputs_[i], in_tensor->ElementsNum());  in Run()
     [all …]

  quant_dtype_cast_fp16.cc
      35   auto in_tensor = in_tensors_.front();  in Init() local
      37   CHECK_NULL_RETURN(in_tensor);  in Init()
      42   … if (in_tensor->data_type() != kNumberTypeFloat16 || out_tensor->data_type() != kNumberTypeInt8) {  in Init()
      49   … if (in_tensor->data_type() != kNumberTypeInt8 || out_tensor->data_type() != kNumberTypeFloat16) {  in Init()
      56   … if (in_tensor->data_type() != kNumberTypeFloat16 || out_tensor->data_type() != kNumberTypeUInt8) {  in Init()
      63   … if (in_tensor->data_type() != kNumberTypeUInt8 || out_tensor->data_type() != kNumberTypeFloat16) {  in Init()
      82   auto in_tensor = in_tensors_.front();  in ReSize() local
      83   num_unit_ = static_cast<int>(in_tensor->ElementsNum());  in ReSize()
     185   auto in_tensor = inputs.front();  in CpuQuantDTypeCastFp16KernelCreator() local
     189   … if (in_tensor->data_type() != kNumberTypeFloat16 || out_tensor->data_type() != kNumberTypeInt8) {  in CpuQuantDTypeCastFp16KernelCreator()
     [all …]

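In concat_fp16.cc, any float32 input is staged into a temporary float16 buffer before the concat kernel runs. A rough sketch of that staging step is below, assuming the nnacl Float32ToFloat16 cast helper, the lite::Tensor interface from the excerpt, and an allocator with a Malloc interface like ms_context_->allocator; allocation-failure handling in the real kernel is more involved:

    #include "nnacl/fp16/cast_fp16.h"  // assumed header providing Float32ToFloat16 and float16_t

    // Returns a float16 view of the tensor: either its own data (already fp16)
    // or a freshly allocated converted copy that the caller must free later.
    float16_t *StageAsFloat16(lite::Tensor *in_tensor, mindspore::Allocator *allocator) {
      if (in_tensor->data_type() == kNumberTypeFloat16) {
        return reinterpret_cast<float16_t *>(in_tensor->data());
      }
      auto *fp16_buf = reinterpret_cast<float16_t *>(
          allocator->Malloc(sizeof(float16_t) * in_tensor->ElementsNum()));
      if (fp16_buf == nullptr) {
        return nullptr;  // allocation failed
      }
      auto *fp32_data = reinterpret_cast<float *>(in_tensor->data());
      Float32ToFloat16(fp32_data, fp16_buf, in_tensor->ElementsNum());
      return fp16_buf;
    }
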
/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/arm/common/

  strided_slice_tests.cc
      29   void InitStridedSliceParam(StridedSliceParameter *param, const lite::Tensor *in_tensor,  in InitStridedSliceParam() argument
      33   auto input_shape = in_tensor->shape();  in InitStridedSliceParam()
      54   lite::Tensor in_tensor(kNumberTypeFloat32, {1, 2, 4});  in TEST_F() local
      58   in_tensor.set_data(input_data);  in TEST_F()
      69   std::vector<lite::Tensor *> inputs = {&in_tensor, &begins_tensor, &ends_tensor, &strides_tensor};  in TEST_F()
      73   InitStridedSliceParam(parameter, &in_tensor, &begins_tensor, &ends_tensor, &strides_tensor);  in TEST_F()
      88   in_tensor.set_data(nullptr);  in TEST_F()
      98   lite::Tensor in_tensor(kNumberTypeFloat32, {1, 2, 4, 1, 1, 1, 1});  variable
     102   in_tensor.set_data(input_data);
     113   std::vector<lite::Tensor *> inputs = {&in_tensor, &begins_tensor, &ends_tensor, &strides_tensor};
     [all …]

/third_party/mindspore/mindspore/ccsrc/minddata/dataset/kernels/data/

  unique_op.cc
      30   auto in_tensor = input[0];  in Compute() local
      31   auto in_tensor_shape = in_tensor->shape();  in Compute()
      32   auto in_tensor_type = in_tensor->type();  in Compute()
      39   CHECK_FAIL_RETURN_UNEXPECTED(in_tensor->Size() <= std::numeric_limits<int32_t>::max(),  in Compute()
      42   ", got:" + std::to_string(in_tensor->Size()));  in Compute()
      44   RETURN_IF_NOT_OK(in_tensor->Reshape(TensorShape({in_tensor->Size()})));  in Compute()
      50   RETURN_IF_NOT_OK(Unique(in_tensor, &out, &out_idx, &out_cnt));  in Compute()

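unique_op.cc first checks that the element count fits in int32, flattens the input with Reshape, and then calls Unique to get the distinct values plus an index map and per-value counts. The block below is a standalone illustration of those three outputs using standard containers; it is not the MindData implementation:

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // 'values' lists each distinct element once (in first-seen order), 'indices'
    // maps every input element to its position in 'values', and 'counts' records
    // how often each distinct value occurred.
    void UniqueWithIndexAndCount(const std::vector<int32_t> &flat_input,
                                 std::vector<int32_t> *values,
                                 std::vector<int32_t> *indices,
                                 std::vector<int32_t> *counts) {
      std::unordered_map<int32_t, int32_t> position;  // value -> index in 'values'
      for (int32_t v : flat_input) {
        auto it = position.find(v);
        if (it == position.end()) {
          it = position.emplace(v, static_cast<int32_t>(values->size())).first;
          values->push_back(v);
          counts->push_back(0);
        }
        indices->push_back(it->second);
        (*counts)[it->second] += 1;
      }
    }
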
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/nnacl/int8/

  pooling_int8_coder.cc
      33   Tensor *in_tensor = input_tensors_.at(kInputIndex);  in DoCode() local
      35   MS_CHECK_PTR(in_tensor);  in DoCode()
      37   pooling_parameter->input_batch_ = in_tensor->Batch();  in DoCode()
      38   pooling_parameter->input_channel_ = in_tensor->Channel();  in DoCode()
      39   pooling_parameter->input_h_ = in_tensor->Height();  in DoCode()
      40   pooling_parameter->input_w_ = in_tensor->Width();  in DoCode()
      47   std::vector<LiteQuantParam> in_quant_args = in_tensor->quant_params();  in DoCode()
      67   …code.CodeFunction("MaxPoolingInt8", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter…  in DoCode()
      70   …code.CodeFunction("AvgPoolingInt8", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter…  in DoCode()

/third_party/mindspore/mindspore/lite/src/runtime/kernel/opencl/kernel/

  concat.cc
      91   for (auto &in_tensor : in_tensors_) {  in CheckSpecs() local
      92   auto in_tensors_shape_size = in_tensor->shape().size();  in CheckSpecs()
     134   for (auto &in_tensor : in_tensors_) {  in SetConstArgs() local
     136   for (int j = 0; j < in_tensor->shape().size(); ++j) {  in SetConstArgs()
     137   temp.s[j] = in_tensor->shape()[j];  in SetConstArgs()
     139   Broadcast2GpuShape(in_shape_.s, temp.s, in_tensor->shape().size(), 1);  in SetConstArgs()
     150   for (auto &in_tensor : in_tensors_) {  in SetConstArgs() local
     152   for (int j = 0; j < in_tensor->shape().size(); ++j) {  in SetConstArgs()
     153   temp.s[j] = in_tensor->shape()[j];  in SetConstArgs()
     155   Broadcast2GpuShape(in_shape_.s, temp.s, in_tensor->shape().size(), 1);  in SetConstArgs()
     [all …]

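SetConstArgs copies each input's dimensions into a small fixed-size vector and then broadcasts it to the 4-slot shape the GPU kernel expects. A standalone sketch of that padding idea follows; GpuShape4 and PadShapeToNHWC are illustrative stand-ins, and the assumption that lower-rank shapes map onto the trailing NHWC slots may not match the real Broadcast2GpuShape exactly:

    #include <vector>

    struct GpuShape4 {
      int s[4];
    };

    // Right-align the real dimensions into {N, H, W, C} and fill the leading
    // slots with a default value, e.g. {H, W, C} -> {1, H, W, C}.
    GpuShape4 PadShapeToNHWC(const std::vector<int> &shape, int default_value = 1) {
      GpuShape4 out = {{default_value, default_value, default_value, default_value}};
      size_t n = shape.size() < 4 ? shape.size() : 4;
      for (size_t j = 0; j < n; ++j) {
        out.s[4 - n + j] = shape[j];
      }
      return out;
    }
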
/third_party/mindspore/mindspore/lite/src/delegate/tensorrt/

  tensorrt_subgraph.cc
     141   nvinfer1::ITensor *TensorRTSubGraph::SetTensorRTNetworkInput(const mindspore::MSTensor &in_tensor) {  in SetTensorRTNetworkInput() argument
     143   if (in_tensor.Name().compare(this->network_->getInput(i)->getName()) == 0) {  in SetTensorRTNetworkInput()
     144   MS_LOG(INFO) << "input tensor is already added in network: " << in_tensor.Name();  in SetTensorRTNetworkInput()
     149   auto cuda_dtype = ConvertDataType(in_tensor.DataType());  in SetTensorRTNetworkInput()
     151   MS_LOG(ERROR) << "Unsupported input data type " << static_cast<int>(in_tensor.DataType());  in SetTensorRTNetworkInput()
     154   nvinfer1::Dims input_dims = ConvertCudaDims(in_tensor.Shape());  in SetTensorRTNetworkInput()
     172   MS_LOG(INFO) << "input tensor format is (NHWC:1, NCHW:0): " << in_tensor.format();  in SetTensorRTNetworkInput()
     173   input_hw_index_ = in_tensor.format() == Format::NHWC ? 1 : 2;  // NCHW is 2  in SetTensorRTNetworkInput()
     178   nvinfer1::Dims input_dims_min = ConvertCudaDims(in_tensor.Shape());  in SetTensorRTNetworkInput()
     184   …if (!profile_->setDimensions(in_tensor.Name().c_str(), nvinfer1::OptProfileSelector::kMIN, input_d…  in SetTensorRTNetworkInput()
     [all …]

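SetTensorRTNetworkInput converts the MSTensor dtype and shape into TensorRT types, registers the input on the network, and records shape ranges on the optimization profile. A condensed sketch of that flow against the public nvinfer1 API is below; the dtype and dims values stand in for what ConvertDataType/ConvertCudaDims would return, and the dynamic-dimension handling and error logging of the real code are omitted:

    #include <NvInfer.h>

    // Register a graph input with TensorRT and pin its profile dimensions.
    // For a static shape min == opt == max; dynamic dims would differ per selector.
    nvinfer1::ITensor *AddNetworkInput(nvinfer1::INetworkDefinition *network,
                                       nvinfer1::IOptimizationProfile *profile,
                                       const char *name, nvinfer1::DataType dtype,
                                       const nvinfer1::Dims &dims) {
      nvinfer1::ITensor *input = network->addInput(name, dtype, dims);
      if (input == nullptr) {
        return nullptr;
      }
      profile->setDimensions(name, nvinfer1::OptProfileSelector::kMIN, dims);
      profile->setDimensions(name, nvinfer1::OptProfileSelector::kOPT, dims);
      profile->setDimensions(name, nvinfer1::OptProfileSelector::kMAX, dims);
      return input;
    }
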
/third_party/mindspore/mindspore/lite/src/delegate/

  delegate_utils.h
      41   for (auto in_tensor : op->inputs()) {  in GetGraphInTensors() local
      42   if (in_tensor.Data() == nullptr && !is_op_output(in_tensor)) {  in GetGraphInTensors()
      43   inputs.push_back(in_tensor);  in GetGraphInTensors()
      95   for (auto in_tensor : op->inputs()) {  in GraphInTensors() local
      96   …if (in_tensor.Data() != nullptr && find(in_tensors.begin(), in_tensors.end(), in_tensor) == in_ten…  in GraphInTensors()
      97   all_in_tensors.push_back(in_tensor);  in GraphInTensors()
     134   for (auto in_tensor : (*iter)->inputs()) {  in GraphOutTensors()
     135   … if (find(all_out_tensors.begin(), all_out_tensors.end(), in_tensor) != all_out_tensors.end()) {  in GraphOutTensors()
     136   out_tensors.push_back(in_tensor);  in GraphOutTensors()
     146   for (auto in_tensor : cur_op->inputs()) {  in FindPreOps() local
     [all …]

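GetGraphInTensors treats a tensor as a subgraph input when it carries no constant data and is not produced by any op inside the subgraph. A self-contained sketch of that rule over simplified stand-in types is below; the real helper works on mindspore::MSTensor and delegate ops, and it also deduplicates the result, which this sketch skips:

    #include <algorithm>
    #include <string>
    #include <vector>

    struct TensorRef {
      std::string name;
      const void *data = nullptr;  // non-null means a constant/weight tensor
      bool operator==(const TensorRef &other) const { return name == other.name; }
    };

    struct Op {
      std::vector<TensorRef> inputs;
      std::vector<TensorRef> outputs;
    };

    std::vector<TensorRef> GetGraphInTensors(const std::vector<Op> &ops) {
      // A tensor is an op output if any op in the subgraph produces it.
      auto is_op_output = [&ops](const TensorRef &t) {
        return std::any_of(ops.begin(), ops.end(), [&t](const Op &op) {
          return std::find(op.outputs.begin(), op.outputs.end(), t) != op.outputs.end();
        });
      };
      std::vector<TensorRef> inputs;
      for (const auto &op : ops) {
        for (const auto &in_tensor : op.inputs) {
          if (in_tensor.data == nullptr && !is_op_output(in_tensor)) {
            inputs.push_back(in_tensor);  // fed from outside the subgraph
          }
        }
      }
      return inputs;
    }
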
/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/

  tile_fp32_tests.cc
      29   lite::Tensor in_tensor(kNumberTypeFloat32, {2, 2});  in TEST_F() local
      33   in_tensor.set_data(input_data);  in TEST_F()
      35   std::vector<lite::Tensor *> inputs = {&in_tensor};  in TEST_F()
      69   in_tensor.set_data(nullptr);  in TEST_F()
      75   lite::Tensor in_tensor(kNumberTypeFloat32, {2, 2});  in TEST_F() local
      79   in_tensor.set_data(input_data);  in TEST_F()
      81   std::vector<lite::Tensor *> inputs = {&in_tensor};  in TEST_F()
     117   in_tensor.set_data(nullptr);  in TEST_F()
     123   lite::Tensor in_tensor(kNumberTypeFloat32, {2, 2});  in TEST_F() local
     127   in_tensor.set_data(input_data);  in TEST_F()
     [all …]

  unstack_fp32_tests.cc
      30   lite::Tensor in_tensor(kNumberTypeFloat32, {3, 4, 2});  in TEST_F() local
      40   in_tensor.set_data(input_data);  in TEST_F()
      45   std::vector<lite::Tensor *> inputs = {&in_tensor};  in TEST_F()
      75   in_tensor.set_data(nullptr);  in TEST_F()
      84   lite::Tensor in_tensor(kNumberTypeFloat32, {3, 4, 2});  in TEST_F() local
      92   in_tensor.set_data(input_data);  in TEST_F()
      96   std::vector<lite::Tensor *> inputs = {&in_tensor};  in TEST_F()
     124   in_tensor.set_data(nullptr);  in TEST_F()

/third_party/mindspore/tests/st/ops/cpu/

  test_dropout_grad_op.py
      42   in_tensor = Tensor(np.array([[[3., 1., 2.]], \
      46   output = dropout_grad(in_tensor, in_mask)
      60   in_tensor = Tensor(np.array([[[3., 1., 2.]], [[4., 1., 4.]]]), mstype.float16)
      63   output = dropout_grad(in_tensor, in_mask)
      77   in_tensor = Tensor(np.array([[[3., 1., 2.], [3., 1., 2.]], \
      82   output = dropout_grad(in_tensor, in_mask)
      97   in_tensor = Tensor(np.array([[6.]]), mstype.float32)
     100   output = dropout_grad(in_tensor, in_mask)
     115   in_tensor = Tensor(np.array([[]]), mstype.float32)
     118   output = dropout_grad(in_tensor, in_mask)
     [all …]

/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/

  constant_of_shape_infer.c
      27   const TensorC *in_tensor = inputs[0];  in ConstantOfShapeInferShape() local
      31   out_tensor->format_ = in_tensor->format_;  in ConstantOfShapeInferShape()
      32   if (!InferFlag(inputs, inputs_size) || in_tensor->data_ == NULL) {  in ConstantOfShapeInferShape()
      35   int size = GetElementNum(in_tensor);  in ConstantOfShapeInferShape()
      41   switch (in_tensor->data_type_) {  in ConstantOfShapeInferShape()
      43   int32_t *in_data = (int32_t *)(in_tensor->data_);  in ConstantOfShapeInferShape()
      53   int64_t *in_data = (int64_t *)(in_tensor->data_);  in ConstantOfShapeInferShape()

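ConstantOfShapeInferShape reads the requested output shape from the input tensor's data, switching on whether that data is stored as int32 or int64. A small sketch of that dispatch is below; the enum and vector types are stand-ins for the nnacl TensorC fields, and the surrounding infer-flag and null checks from the excerpt are left out:

    #include <cstdint>
    #include <vector>

    enum class ShapeDType { kInt32, kInt64 };

    // Convert the raw shape payload of a ConstantOfShape input into output dims.
    std::vector<int> ReadConstantOfShapeDims(const void *data, int element_num, ShapeDType dtype) {
      std::vector<int> out_shape;
      out_shape.reserve(element_num);
      if (dtype == ShapeDType::kInt32) {
        const int32_t *in_data = static_cast<const int32_t *>(data);
        for (int i = 0; i < element_num; ++i) out_shape.push_back(static_cast<int>(in_data[i]));
      } else {
        const int64_t *in_data = static_cast<const int64_t *>(data);
        for (int i = 0; i < element_num; ++i) out_shape.push_back(static_cast<int>(in_data[i]));
      }
      return out_shape;
    }
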
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/int8/

  transpose_int8.cc
      49   auto in_tensor = in_tensors_.front();  in ReSize() local
      51   auto in_shape = in_tensor->shape();  in ReSize()
      54   transpose_param_->data_num_ = in_tensor->ElementsNum();  in ReSize()
      84   void TransposeInt8CPUKernel::GetNHNCTransposeFunc(const lite::Tensor *in_tensor, const lite::Tensor…  in GetNHNCTransposeFunc() argument
      87   if (in_tensor->shape().size() == DIMENSION_4D && param->perm_[0] == 0 && param->perm_[1] == 2 &&  in GetNHNCTransposeFunc()
      94   if (in_tensor->shape().size() == DIMENSION_4D && param->perm_[0] == 0 && param->perm_[1] == 3 &&  in GetNHNCTransposeFunc()
     104   auto in_tensor = in_tensors_.front();  in Run() local
     107   auto in_dims = in_tensor->shape();  in Run()
     110   in_ptr_ = reinterpret_cast<int8_t *>(in_tensor->data());  in Run()
     114   GetNHNCTransposeFunc(in_tensor, out_tensor, transpose_param_);  in Run()

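GetNHNCTransposeFunc (here and in transpose_fp32.cc further down) only takes the fast path when a 4-D permutation is exactly the NCHW-to-NHWC or NHWC-to-NCHW pattern; anything else falls back to the generic transpose. A minimal sketch of that check, with the perm values taken from the excerpts:

    #include <vector>

    enum class FastTranspose { kNone, kNchw2Nhwc, kNhwc2Nchw };

    // perm {0, 2, 3, 1} maps NCHW input to NHWC output;
    // perm {0, 3, 1, 2} maps NHWC input to NCHW output.
    FastTranspose DetectFastTranspose(const std::vector<int> &shape, const std::vector<int> &perm) {
      if (shape.size() != 4 || perm.size() != 4) {
        return FastTranspose::kNone;
      }
      if (perm[0] == 0 && perm[1] == 2 && perm[2] == 3 && perm[3] == 1) {
        return FastTranspose::kNchw2Nhwc;
      }
      if (perm[0] == 0 && perm[1] == 3 && perm[2] == 1 && perm[3] == 2) {
        return FastTranspose::kNhwc2Nchw;
      }
      return FastTranspose::kNone;
    }
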
/third_party/mindspore/mindspore/lite/tools/converter/quantizer/quant_helper/

  carry_data_quant_param_propogator.cc
      40   auto &in_tensor = graph->allTensors.at(node.inputIndex.at(0));  in PropogateQuantParams() local
      41   MS_ASSERT(in_tensor != nullptr);  in PropogateQuantParams()
      42   auto in_quant_param = GetTensorQuantParam(in_tensor);  in PropogateQuantParams()
      44   in_tensor->quantParams.front() = std::move(out_quant_param);  in PropogateQuantParams()
      52   auto &in_tensor = graph->allTensors.at(node.inputIndex.at(0));  in PropogateQuantParams() local
      53   MS_ASSERT(in_tensor != nullptr);  in PropogateQuantParams()
      54   auto in_quant_param = GetTensorQuantParam(in_tensor);  in PropogateQuantParams()

/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32/

  nonzero_fp32.cc
      41   auto in_tensor = in_tensors_.front();  in Run() local
      43   auto input_data = reinterpret_cast<bool *>(in_tensor->MutableData());  in Run()
      45   auto input_dim_size = in_tensor->shape().size();  in Run()
      52   std::vector<int> coordiate_values(in_tensor->shape().size(), 0);  in Run()
      53   for (int i = 0; i < in_tensor->ElementsNum(); i += 1) {  in Run()
      61   if (coordiate_values[idx - 1] != in_tensor->shape()[idx - 1] - 1) {  in Run()

  scatter_nd_update_fp32.cc
     139   auto in_tensor = in_tensors().front();  in Run() local
     141   if (in_tensor->allocator() == nullptr || in_tensor->allocator() != out_tensor->allocator() ||  in Run()
     143   memcpy(out_tensor->data(), in_tensor->data(), in_tensor->Size());  in Run()
     147   in_tensor->allocator()->IncRefCount(in_tensor->data(), out_tensor->ref_count());  in Run()
     148   out_tensor->set_data(in_tensor->data());  in Run()
     149   out_tensor->set_own_data(in_tensor->own_data());  in Run()

  transpose_fp32.cc
     121   int TransposeCPUKernel::GetNHNCTransposeFunc(const lite::Tensor *in_tensor, const lite::Tensor *out…  in GetNHNCTransposeFunc() argument
     122   if (in_tensor->shape().size() != 4) {  in GetNHNCTransposeFunc()
     131   GetNchwToNhwcFunc(in_tensor->data_type());  in GetNHNCTransposeFunc()
     138   GetNhwcToNchwFunc(in_tensor->data_type());  in GetNHNCTransposeFunc()
     165   auto &in_tensor = in_tensors_.front();  in Run() local
     167   if (in_tensor == nullptr || out_tensor == nullptr) {  in Run()
     171   in_data_ = in_tensor->data();  in Run()
     176   if (in_tensor->shape().size() != static_cast<size_t>(param_->num_axes_)) {  in Run()
     177   memcpy(out_data_, in_data_, in_tensor->Size());  in Run()
     180   if (GetNHNCTransposeFunc(in_tensor, out_tensor) != RET_OK) {  in Run()

  shape_fp32.cc
      39   auto in_tensor = in_tensors_.front();  in Run() local
      40   if (in_tensor == nullptr || out_tensor == nullptr) {  in Run()
      44   if (in_tensor->MutableData() == nullptr || out_tensor->MutableData() == nullptr) {  in Run()
      49   for (size_t i = 0; i < in_tensor->shape().size(); i++) {  in Run()
      50   reinterpret_cast<int *>(out_tensor->MutableData())[i] = in_tensor->shape().at(i);  in Run()

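scatter_nd_update_fp32.cc (like reshape_base.cc earlier) either copies the input bytes into the output or lets the output tensor share the input buffer, depending on whether both tensors come from the same allocator. A hedged sketch of that decision, using only the lite::Tensor calls shown in the excerpts; the real kernels additionally require that the shared allocator is the runtime allocator (ms_context_->allocator):

    #include <cstring>

    // Forward the input buffer to the output when both tensors share an allocator,
    // otherwise fall back to a plain byte copy.
    int ForwardInputToOutput(lite::Tensor *in_tensor, lite::Tensor *out_tensor) {
      if (in_tensor->allocator() == nullptr || in_tensor->allocator() != out_tensor->allocator()) {
        std::memcpy(out_tensor->data(), in_tensor->data(), in_tensor->Size());
        return 0;  // RET_OK in the real code
      }
      // Same allocator: hand the buffer over and bump its reference count instead of copying.
      in_tensor->allocator()->IncRefCount(in_tensor->data(), out_tensor->ref_count());
      out_tensor->set_data(in_tensor->data());
      out_tensor->set_own_data(in_tensor->own_data());
      return 0;
    }
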
/third_party/mindspore/mindspore/lite/src/delegate/npu/

  npu_subgraph.cc
      49   void NPUSubGraph::set_input(mindspore::MSTensor in_tensor, int index) {  in set_input() argument
      56   op->set_input(in_tensor, i);  in set_input()
      60   this->inputs_[index] = in_tensor;  in set_input()
      75   for (auto in_tensor : this->inputs()) {  in GetGraphInOutOps()
      77   if (find(op->inputs().begin(), op->inputs().end(), in_tensor) != op->inputs().end() &&  in GetGraphInOutOps()
     105   for (auto in_tensor : cur_op->inputs()) {  in FindPreOps() local
     107   if (find(op->outputs().begin(), op->outputs().end(), in_tensor) != op->outputs().end()) {  in FindPreOps()
     159   auto in_tensor = op->inputs()[i];  in BuildNPUInputOp() local
     160   if (IsSubGraphInputTensor(in_tensor)) {  in BuildNPUInputOp()
     163   data = ConverterToNPUData(in_tensor, tensor_name);  in BuildNPUInputOp()
     [all …]

/third_party/mindspore/mindspore/lite/src/

  lite_kernel.cc
      30   return std::all_of(in_tensors.begin(), in_tensors.end(), [&](lite::Tensor *in_tensor) {  in IsReady() argument
      31   if (IsContain(scope_tensors, in_tensor)) {  in IsReady()
      32   return in_tensor->IsReady();  in IsReady()
      86   for (auto &in_tensor : in_tensors()) {  in DoExecute() local
      87   MS_ASSERT(in_tensor != nullptr);  in DoExecute()
      88   in_tensor->DecRefCount();  in DoExecute()

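lite_kernel.cc uses std::all_of to decide whether every in-scope input tensor is ready before a kernel may run, and DecRefCount to release inputs after execution. A small sketch of the readiness check over a stand-in tensor interface; the real code uses lite::Tensor and the IsContain() helper:

    #include <algorithm>
    #include <vector>

    struct TensorLike {
      bool ready = false;
      bool IsReady() const { return ready; }
    };

    // The kernel is ready only if every input tensor that belongs to the current
    // scope reports IsReady(); tensors outside the scope are treated as ready.
    bool AllInputsReady(const std::vector<TensorLike *> &in_tensors,
                        const std::vector<TensorLike *> &scope_tensors) {
      return std::all_of(in_tensors.begin(), in_tensors.end(), [&](TensorLike *in_tensor) {
        bool in_scope = std::find(scope_tensors.begin(), scope_tensors.end(), in_tensor) !=
                        scope_tensors.end();
        return in_scope ? in_tensor->IsReady() : true;
      });
    }
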
/third_party/mindspore/mindspore/lite/tools/converter/parser/tflite/

  tflite_dequantize_parser.cc
      33   const auto &in_tensor = tflite_subgraph->tensors[tflite_op->inputs.at(0)];  in Parse() local
      34   if (in_tensor == nullptr) {  in Parse()
      43   if ((GetTfliteDataType(in_tensor->type) == kNumberTypeInt8 ||  in Parse()
      44   GetTfliteDataType(in_tensor->type) == kNumberTypeUInt8)) {  in Parse()
      47   prim->set_src_t(GetTfliteDataType(in_tensor->type));  in Parse()