/third_party/mindspore/mindspore/lite/micro/coder/wrapper/int8/
D | conv1x1_init_int8_wrapper.c |
    21  int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int32_t input_channel,   in Conv1x1Init() argument
    30  size_t size = UP_ROUND(input_channel, C16NUM) * UP_ROUND(output_channel, C2NUM) * sizeof(int8_t);   in Conv1x1Init()
    36  RowMajor2Row2x16MajorInt8(src_weight, packed_weight_, output_channel, input_channel);   in Conv1x1Init()
    51  …size_t size = support_optimize ? UP_ROUND(input_channel, C4NUM) * UP_ROUND(output_channel, C16NUM)…   in Conv1x1Init()
    52  … : UP_ROUND(input_channel, C16NUM) * UP_ROUND(output_channel, C4NUM) * sizeof(int8_t);   in Conv1x1Init()
    59  RowMajor2Row4x16MajorInt8(src_weight, packed_weight_, output_channel, input_channel);   in Conv1x1Init()
    61  RowMajor2Row16x4MajorInt8(src_weight, packed_weight_, output_channel, input_channel);   in Conv1x1Init()
    81  for (int ic = 0; ic < input_channel; ic++) {   in Conv1x1Init()
    82  weight_sum_value += src_weight[oc * input_channel + ic];   in Conv1x1Init()
    84  bias_data_[oc] += filter_zp * input_zp * input_channel - weight_sum_value * input_zp;   in Conv1x1Init()
|
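The Conv1x1Init matches above do two things with input_channel: they pad the packed weight buffer to tile multiples with UP_ROUND, and they fold the filter and input zero points into the bias (lines 81-84). Below is a minimal sketch of that bias-folding step; it is not the MindSpore code, and the names FoldZeroPointsIntoBias and filter_per_channel are illustrative.

#include <stdint.h>
#include <stdbool.h>

/* Minimal sketch of the zero-point folding shown at lines 81-84 above; not the
 * MindSpore implementation. For int8 quantization, (w - w_zp) * (x - x_zp)
 * expands to w*x - w*x_zp - w_zp*x + w_zp*x_zp; the two terms that do not
 * depend on the input x are constant per output channel and are added to the
 * bias here, while the -w_zp*sum(x) term is handled at run time via packed
 * input sums. */
static void FoldZeroPointsIntoBias(const int8_t *src_weight, int32_t *bias_data,
                                   const int32_t *filter_zps, int32_t input_zp,
                                   int input_channel, int output_channel,
                                   bool filter_per_channel) {
  for (int oc = 0; oc < output_channel; ++oc) {
    int32_t filter_zp = filter_per_channel ? filter_zps[oc] : filter_zps[0];
    int32_t weight_sum_value = 0;
    for (int ic = 0; ic < input_channel; ++ic) {
      weight_sum_value += src_weight[oc * input_channel + ic];
    }
    bias_data[oc] += filter_zp * input_zp * input_channel - weight_sum_value * input_zp;
  }
}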
D | conv_init_int8_wrapper.c |
    23  … int kernel_w, int input_channel, int output_channel, int32_t input_zp, bool filter_peroc,   in ConvInit() argument
    32  up_round_deep = UP_ROUND(kernel_plane * input_channel, C16NUM);   in ConvInit()
    36  up_round_deep = UP_ROUND(kernel_plane * input_channel, C4NUM);   in ConvInit()
    39  up_round_deep = UP_ROUND(kernel_plane * input_channel, C16NUM);   in ConvInit()
    52  …RowMajor2Row2x16MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_pl…   in ConvInit()
    55  …RowMajor2Row8x4MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_pla…   in ConvInit()
    57  …RowMajor2Row16x4MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_pl…   in ConvInit()
    78  for (int i = 0; i < kernel_plane * input_channel; i++) {   in ConvInit()
    79  weight_sum_value += origin_weight[oc * kernel_plane * input_channel + i] - filter_zp;   in ConvInit()
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/ |
D | pack_int8.c |
    21  … size_t plane_size, size_t input_channel, size_t output_channel) {   in PackInputSum16x4PerChannelArm32() argument
    23  size_t ic16 = UP_ROUND(input_channel, C16NUM);   in PackInputSum16x4PerChannelArm32()
    37  for (int di = 0; di < input_channel; di++) {   in PackInputSum16x4PerChannelArm32()
    52  size_t plane_size, size_t input_channel, size_t output_channel) {   in PackInputSum16x4PerChannel() argument
    54  size_t ic16 = UP_ROUND(input_channel, C16NUM);   in PackInputSum16x4PerChannel()
    68  for (int di = 0; di < input_channel; di++) {   in PackInputSum16x4PerChannel()
    81  …reOptPeroc(const int8_t *src_input, int8_t *packed_input, int32_t *input_sum, size_t input_channel,   in Conv1x1PreOptPeroc() argument
    83  int ic4 = UP_ROUND(input_channel, C4NUM);   in Conv1x1PreOptPeroc()
    89  size_t ic_4div = input_channel / C4NUM * C4NUM;   in Conv1x1PreOptPeroc()
   100  size_t src_stride = input_channel;   in Conv1x1PreOptPeroc()
    [all …]
|
D | matmul_int8.c |
   436  size_t input_channel, size_t plane_size, int32_t filter_zp) {   in PackInput4x4AndInputSumPert() argument
   437  int ic4 = UP_ROUND(input_channel, C4NUM);   in PackInput4x4AndInputSumPert()
   440  size_t ic_4div = input_channel / C4NUM * C4NUM;   in PackInput4x4AndInputSumPert()
   450  size_t src_stride = input_channel;   in PackInput4x4AndInputSumPert()
   451  size_t ic_4res = input_channel - ic_4div;   in PackInput4x4AndInputSumPert()
   457  tmp_sum_value[i] += src_ic[0 + i * input_channel];   in PackInput4x4AndInputSumPert()
   458  tmp_sum_value[i] += src_ic[1 + i * input_channel];   in PackInput4x4AndInputSumPert()
   459  tmp_sum_value[i] += src_ic[2 + i * input_channel];   in PackInput4x4AndInputSumPert()
   460  tmp_sum_value[i] += src_ic[3 + i * input_channel];   in PackInput4x4AndInputSumPert()
   461  pack_ic[0 + i * C4NUM] = src_ic[0 + i * input_channel];   in PackInput4x4AndInputSumPert()
    [all …]
|
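PackInput4x4AndInputSumPert above retiles the int8 input into 4x4 blocks while accumulating the per-row channel sums that the bias folding shown earlier leaves to run time. A simplified, untiled sketch of the sum part follows; InputSumPerTensor is an illustrative name, and the C4 remainder handling and assembly paths of the real routine are omitted.

#include <stdint.h>
#include <stddef.h>

/* Simplified sketch of the input-sum side of PackInput4x4AndInputSumPert (the
 * 4x4 retiling is left out). With a per-tensor filter zero point, each row's
 * channel sum is pre-multiplied by filter_zp so the matmul kernel can subtract
 * it from the int32 accumulator directly. */
static void InputSumPerTensor(const int8_t *input, int32_t *input_sum,
                              size_t plane_size, size_t input_channel,
                              int32_t filter_zp) {
  for (size_t r = 0; r < plane_size; ++r) {
    int32_t sum = 0;
    for (size_t ic = 0; ic < input_channel; ++ic) {
      sum += input[r * input_channel + ic];
    }
    input_sum[r] = sum * filter_zp;
  }
}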
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/int8/ |
D | convolution_int8.cc |
    52  auto input_channel = filter_tensor->Channel();   in InitWeightBias() local
    55  conv_param_->input_channel_ = input_channel;   in InitWeightBias()
    61  up_round_deep = UP_ROUND(kernel_plane * input_channel, C16NUM);   in InitWeightBias()
    65  up_round_deep = UP_ROUND(kernel_plane * input_channel, C4NUM);   in InitWeightBias()
    68  up_round_deep = UP_ROUND(kernel_plane * input_channel, C16NUM);   in InitWeightBias()
    85  …RowMajor2Row2x16MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_pl…   in InitWeightBias()
    88  …RowMajor2Row8x4MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_pla…   in InitWeightBias()
    90  …RowMajor2Row16x4MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_pl…   in InitWeightBias()
   124  for (int i = 0; i < kernel_plane * input_channel; i++) {   in InitWeightBias()
   125  weight_sum_value += origin_weight[oc * kernel_plane * input_channel + i] - filter_zp;   in InitWeightBias()
|
D | convolution_1x1_int8.cc |
   154  int Convolution1x1Int8CPUKernel::InitBiasByzp(const void *src_weight, int input_channel, int output…   in InitBiasByzp() argument
   166  for (int ic = 0; ic < input_channel; ic++) {   in InitBiasByzp()
   167  weight_sum_value += weight[oc * input_channel + ic];   in InitBiasByzp()
   169  bias_data[oc] += filter_zp * input_zp * input_channel - weight_sum_value * input_zp;   in InitBiasByzp()
   216  auto input_channel = filter_tensor->Channel();   in InitWeightBias() local
   217  if (input_channel < 0) {   in InitWeightBias()
   227  …size_t size = support_optimize_ ? UP_ROUND(input_channel, C4NUM) * UP_ROUND(output_channel, C16NUM…   in InitWeightBias()
   228  … : UP_ROUND(input_channel, C16NUM) * UP_ROUND(output_channel, C4NUM) * sizeof(int8_t);   in InitWeightBias()
   238  input_channel);   in InitWeightBias()
   241  input_channel);   in InitWeightBias()
    [all …]
|
D | convolution_3x3_int8.cc |
    31  auto input_channel = conv_param->input_channel_;   in ProcessFilterUint8() local
    34  int iC8 = UP_DIV(input_channel, C8NUM);   in ProcessFilterUint8()
    83  auto input_channel = filter_tensor->Channel();   in InitWeightBias() local
    84  if (input_channel < 0) {   in InitWeightBias()
    93  conv_param_->input_channel_ = input_channel;   in InitWeightBias()
    95  int iC8 = UP_DIV(input_channel, C8NUM);   in InitWeightBias()
|
/third_party/mindspore/tests/ut/python/communication/ |
D | test_comm.py |
    48  def __init__(self, input_channel, out_channel, op):   argument
    50  self.dense = Dense(input_channel, out_channel)
    63  def __init__(self, input_channel, out_channel):   argument
    65  self.dense = Dense(input_channel, out_channel)
    77  def __init__(self, input_channel, out_channel):   argument
    79  self.dense = Dense(input_channel, out_channel)
    98  def __init__(self, input_channel, out_channel, op):   argument
   100  self.dense = Dense(input_channel, out_channel)
   113  def __init__(self, input_channel, out_channel):   argument
   115  self.dense = Dense(input_channel, out_channel)
    [all …]
|
/third_party/mindspore/tests/st/fusion/ |
D | test_conv_bn1_fusion.py |
    25  input_channel = 2048   variable
    60  self.conv = nn.Conv2d(input_channel, output_channel,
    62  self.conv1 = nn.Conv2d(input_channel, output_channel,
    82  input_np = np.ones([batch_size, input_channel, 7, 7]).astype(np.float32) * 0.01
    91  self.conv = nn.Conv2d(input_channel, output_channel,
   109  input_np = np.ones([batch_size, input_channel, 7, 7]).astype(np.float32) * 0.01
   118  self.conv = nn.Conv2d(input_channel, output_channel,
   134  input_np = np.ones([batch_size, input_channel, 7, 7]).astype(np.float32) * 0.01
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/ |
D | pack_fp32.c |
   162  …WCToNXHWCXFp32(int kernel_h, int kernel_w, int output_channel, int oc_block_num, int input_channel,   in PackNHWCToNXHWCXFp32() argument
   166  int ic8 = DOWN_ROUND(input_channel, C8NUM);   in PackNHWCToNXHWCXFp32()
   182  … Transpose8X8Fp32Avx(src + ic, tmp_weight + ic * oc_block + oc_tmp, input_channel, oc_block);   in PackNHWCToNXHWCXFp32()
   184  for (; ic < input_channel; ++ic) {   in PackNHWCToNXHWCXFp32()
   186  tmp_weight[ic * oc_block + oc_tmp + j] = src[ic + input_channel * j];   in PackNHWCToNXHWCXFp32()
   189  src += C8NUM * input_channel;   in PackNHWCToNXHWCXFp32()
   191  tmp_weight += oc_block * input_channel;   in PackNHWCToNXHWCXFp32()
   195  for (int ic = 0; ic < input_channel; ++ic) {   in PackNHWCToNXHWCXFp32()
   196  tmp_weight[oc_remainder + oc_remainder_step * ic] = src[ic + oc_remainder * input_channel];   in PackNHWCToNXHWCXFp32()
   206  Transpose8X8Fp32Avx(src + hw * input_channel + ic,   in PackNHWCToNXHWCXFp32()
    [all …]
|
D | deconv_fp32.c |
    19  void PackDeConvWeightFp32(const float *weight, float *dst, int input_channel, int output_channel, i…   in PackDeConvWeightFp32() argument
    21  int ic_up4 = UP_ROUND(input_channel, C4NUM);   in PackDeConvWeightFp32()
    25  for (int ic = 0; ic < input_channel; ic++) {   in PackDeConvWeightFp32()
|
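UP_ROUND, UP_DIV, and DOWN_ROUND appear in almost every entry above; they pad or truncate a channel count to the tile size a kernel expects (C4NUM, C8NUM, C16NUM, ...). The definitions below are stand-ins for illustration, assuming the conventional rounding formulas rather than quoting the NNACL headers.

/* Stand-in definitions for illustration; assumed conventional forms, not
 * copied from the NNACL headers. */
#define UP_DIV(x, y)     (((x) + (y) - 1) / (y))   /* whole tiles needed          */
#define UP_ROUND(x, y)   (UP_DIV(x, y) * (y))      /* pad up to a tile multiple   */
#define DOWN_ROUND(x, y) ((x) / (y) * (y))         /* truncate to a tile multiple */

/* Example: input_channel = 13, C4NUM = 4
 *   UP_DIV(13, 4)     == 4
 *   UP_ROUND(13, 4)   == 16   -> ic_up4 in PackDeConvWeightFp32 above
 *   DOWN_ROUND(13, 4) == 12
 * so the packed deconv weight reserves 16 input-channel slots per kernel tap,
 * while the ic loop at line 25 writes only the 13 real channels. */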
D | pack_fp32.h |
    64  …WCToNXHWCXFp32(int kernel_h, int kernel_w, int output_channel, int oc_block_num, int input_channel,
    67  …WCToNXHWCXFp32(int kernel_h, int kernel_w, int output_channel, int oc_block_num, int input_channel,
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32/ |
D | convolution_slidewindow_fp32.cc |
    42  auto input_channel = filter_tensor->Channel();   in Init() local
    48  int pack_weight_size = oc_block_num * oc_tile_ * input_channel * kernel_plane;   in Init()
   190  auto input_channel = filter_tensor->Channel();   in PackWeight() local
   197  PackNHWCToNXHWCXFp32(kernel_h, kernel_w, output_channel, oc_block_num, input_channel,   in PackWeight()
   203  auto input_channel = filter_tensor->Channel();   in MallocWeightBiasData() local
   207  conv_param_->input_channel_ = input_channel;   in MallocWeightBiasData()
   211  int pack_weight_size = oc_block_num * oc_tile_ * input_channel * kernel_plane;   in MallocWeightBiasData()
|
D | convolution_1x1_fp32.cc |
   122  auto input_channel = filter_tensor->Channel();   in Init() local
   124  int size = input_channel * UP_ROUND(output_channel, col_tile_) * sizeof(float);   in Init()
   273  auto input_channel = filter_tensor->Channel();   in PackWeight() local
   274  if (input_channel < 0) {   in PackWeight()
   288  output_channel, input_channel);   in PackWeight()
   291  output_channel, input_channel);   in PackWeight()
   294  output_channel, input_channel);   in PackWeight()
   300  auto input_channel = filter_tensor->Channel();   in MallocWeightBiasData() local
   302  int size = input_channel * UP_ROUND(output_channel, col_tile_) * sizeof(float);   in MallocWeightBiasData()
|
D | deconvolution_fp32.cc |
    58  auto input_channel = weight_tensor->Batch();   in MallocWeightBiasData() local
    63  …size_t pack_weight_size = input_channel * kernel_w_ * kernel_h_ * output_aligned_size * sizeof(flo…   in MallocWeightBiasData()
    83  auto input_channel = weight_tensor->Batch();   in PackWeight() local
    91  input_channel, kernel_w * kernel_h, output_channel);   in PackWeight()
    94  input_channel, kernel_w * kernel_h, output_channel);   in PackWeight()
   178  auto input_channel = weight_tensor->Batch();   in Init() local
   183  …size_t pack_weight_size = input_channel * kernel_w_ * kernel_h_ * output_aligned_size * sizeof(flo…   in Init()
|
/third_party/mindspore/tests/st/quantization/mobilenetv2_quant/ |
D | mobilenetV2.py |
   158  input_channel = 32
   175  input_channel = _make_divisible(
   176  input_channel * width_mult, round_nearest)
   180  features = [ConvBNReLU(3, input_channel, stride=2)]
   187  block(input_channel, output_channel, stride, expand_ratio=t))
   188  input_channel = output_channel
   191  input_channel, self.out_channels, kernel_size=1))
|
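In mobilenetV2.py the stem width is scaled by width_mult and then re-rounded with _make_divisible (lines 175-176) so each layer's input_channel stays a multiple of round_nearest. The helper's body is not shown in the listing; the sketch below assumes the widely used MobileNetV2 rounding rule and is written in C for consistency with the other sketches.

/* Assumed form of the _make_divisible helper used at lines 175-176 above; this
 * is the commonly used MobileNetV2 rounding rule, not a quote from
 * mobilenetV2.py. */
static int make_divisible(double v, int divisor, int min_value) {
  if (min_value <= 0) min_value = divisor;
  int new_v = (int)(v + divisor / 2.0) / divisor * divisor;  /* round to nearest multiple */
  if (new_v < min_value) new_v = min_value;
  if ((double)new_v < 0.9 * v) new_v += divisor;  /* never shrink by more than ~10% */
  return new_v;
}

/* e.g. width_mult = 0.75: make_divisible(32 * 0.75, 8, 0) == 24,
 *      width_mult = 1.0 : make_divisible(32 * 1.0,  8, 0) == 32 */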
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/ |
D | space_to_depth_split.cc |
    38  int64_t input_channel = SizeToLong(x_shape[kDim1]);   in CreateTensor() local
    40  …std::vector<int64_t> assist_input_shape = {assist_input_channel, input_channel, block_size, block_…   in CreateTensor()
    41  int64_t dest_size = assist_input_channel * input_channel * block_size * block_size;   in CreateTensor()
    42  …<< "For SpaceToDepth op, assist input shape is: (" << assist_input_channel << ", " << input_channel   in CreateTensor()
    51  int64_t channel_size = input_channel;   in CreateTensor()
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp16/ |
D | convolution_1x1_fp16.cc |
    85  auto input_channel = weight_tensor->Channel();   in MallocWeightBiasData() local
    88  size_t size = input_channel * UP_ROUND(output_channel, col_tile_) * sizeof(float16_t);   in MallocWeightBiasData()
   116  auto input_channel = weight_tensor->Channel();   in PackWeight() local
   122  …jorFp16(weight_origin, reinterpret_cast<float16_t *>(packed_weight_), input_channel, output_channe…   in PackWeight()
   126  … reinterpret_cast<float16_t *>(packed_weight_), output_channel, input_channel);   in PackWeight()
   129  …jorFp16(weight_origin, reinterpret_cast<float16_t *>(packed_weight_), input_channel, output_channe…   in PackWeight()
   153  auto input_channel = weight_tensor->Channel();   in Init() local
   155  size_t size = input_channel * UP_ROUND(output_channel, col_tile_) * sizeof(float16_t);   in Init()
|
D | deconvolution_fp16.cc |
    58  auto input_channel = weight_tensor->Batch();   in PackWeight() local
    65  input_channel, kernel_w * kernel_h, output_channel);   in PackWeight()
    70  auto input_channel = weight_tensor->Batch();   in MallocWeightBiasData() local
    74  …size_t weight_pack_size = input_channel * kernel_w * kernel_h * UP_ROUND(output_channel, C8NUM) * …   in MallocWeightBiasData()
   192  auto input_channel = weight_tensor->Batch();   in Init() local
   196  …size_t weight_pack_size = input_channel * kernel_w * kernel_h * UP_ROUND(output_channel, C8NUM) * …   in Init()
|
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/nnacl/int8/ |
D | conv2d_3x3_int8_coder.cc |
    28  int input_channel = conv_param->input_channel_;   in ProcessFilterUint8() local
    31  int iC8 = UP_DIV(input_channel, C8NUM);   in ProcessFilterUint8()
    48  int input_channel = conv_param_->input_channel_;   in InitWeightBias() local
    50  MS_CHECK_TRUE(input_channel > 0, "invalid input_channel");   in InitWeightBias()
    52  int iC8 = UP_DIV(input_channel, C8NUM);   in InitWeightBias()
|
D | conv2d_1x1_int8_coder.cc |
   152  int32_t input_channel = filter_tensor_->Channel();   in InitWeightBias() local
   155  MS_CHECK_TRUE(input_channel > 0, "input_channel should be positive");   in InitWeightBias()
   176  …de.CodeFunctionWithCheck("Conv1x1Init", filter_tensor_, bias_tensor_, filter_zp_str, input_channel,   in InitWeightBias()
   180  …de.CodeFunctionWithCheck("Conv1x1Init", filter_tensor_, bias_tensor_, filter_zp_str, input_channel,   in InitWeightBias()
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/ |
D | depthwise_conv2d_infer.c |
    39  int input_channel = input->shape_[3];   in DepthwiseConv2dInferShape() local
    41  param->input_channel_ = input_channel;   in DepthwiseConv2dInferShape()
    77  out_shape[3] = input_channel;  // in_channel * out_channel   in DepthwiseConv2dInferShape()
|
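DepthwiseConv2dInferShape reads input_channel from shape_[3], the NHWC channel axis, and copies it straight into out_shape[3]: in a depthwise convolution each input channel is filtered independently, so the inferred output channel count equals input_channel. A compact sketch of just that channel handling; the H/W arithmetic is left out and the function name is illustrative.

/* Sketch of the channel handling shown above; out_h and out_w are assumed to
 * be computed elsewhere from stride/pad/kernel. Layout is NHWC. */
static void DepthwiseInferShapeNHWC(const int in_shape[4], int out_shape[4],
                                    int out_h, int out_w) {
  out_shape[0] = in_shape[0];  /* batch                                        */
  out_shape[1] = out_h;
  out_shape[2] = out_w;
  out_shape[3] = in_shape[3];  /* depthwise: output channels == input_channel  */
}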
/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/opencl/ |
D | depthwise_conv2d_tests.cc |
    27  … int pad_r, int dilation_h, int dilation_w, ActType act_type, int input_channel) {   in CreateParameter() argument
    37  param->input_channel_ = input_channel;   in CreateParameter()
    38  param->output_channel_ = input_channel;   in CreateParameter()
    39  param->group_ = input_channel;   in CreateParameter()
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/ |
D | pack_fp16.c |
    98  int input_channel = conv_param->input_channel_;   in PackWeightToC8Fp16() local
    99  int ic8 = UP_DIV(input_channel, C8NUM);   in PackWeightToC8Fp16()
   104  int src_kernel_offset = k * input_channel;   in PackWeightToC8Fp16()
   107  int src_oc_offset = src_kernel_offset + o * kernel_plane * input_channel;   in PackWeightToC8Fp16()
   109  for (int i = 0; i < input_channel; i++) {   in PackWeightToC8Fp16()
   123  int input_channel = conv_param->input_channel_;   in PackWeightToC4Fp16() local
   124  int ic8 = UP_DIV(input_channel, C8NUM);   in PackWeightToC4Fp16()
   130  int src_kernel_offset = k * input_channel;   in PackWeightToC4Fp16()
   133  int src_oc_offset = src_kernel_offset + o * kernel_plane * input_channel;   in PackWeightToC4Fp16()
   135  for (int i = 0; i < input_channel; i++) {   in PackWeightToC4Fp16()
|
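PackWeightToC8Fp16 and PackWeightToC4Fp16 above regroup the OHWI weight so that input channels sit in contiguous blocks of C8NUM lanes, which is the layout the fp16 conv kernels consume. The sketch below shows the idea with float data and an assumed destination index order; it is a simplified stand-in, not the NNACL routine.

#include <string.h>

#define C8NUM 8

/* Simplified stand-in for the C8 weight packing above: regroup OHWI weights so
 * input channels come in blocks of C8NUM lanes, zero-padding the last block.
 * Uses float instead of float16_t, and the destination index order
 * [o][plane][ic_block][lane] is an assumption for illustration. */
static void PackOHWIToC8(const float *src, float *dst, int output_channel,
                         int kernel_plane, int input_channel) {
  int ic8 = (input_channel + C8NUM - 1) / C8NUM;  /* UP_DIV(input_channel, C8NUM) */
  memset(dst, 0, (size_t)output_channel * kernel_plane * ic8 * C8NUM * sizeof(float));
  for (int o = 0; o < output_channel; ++o) {
    for (int k = 0; k < kernel_plane; ++k) {
      for (int i = 0; i < input_channel; ++i) {
        int block = i / C8NUM;
        int lane = i % C8NUM;
        dst[((o * kernel_plane + k) * ic8 + block) * C8NUM + lane] =
            src[(o * kernel_plane + k) * input_channel + i];
      }
    }
  }
}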
/third_party/mindspore/tests/ut/python/parallel/ |
D | test_optimizer.py |
    30  def __init__(self, input_channel, out_channel):   argument
    37  self.dense = Dense(input_channel, out_channel)
|