/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/ |
D | matmul_cpu_kernel.cc |
    50  const size_t size = param_.batch * param_.row_align_ * param_.deep_;  in InitMatrixA()
    65  for (int i = 0; i < param_.batch; i++) {  in InitMatrixA()
    66  const float *src = src_ptr + i * param_.row_ * param_.deep_;  in InitMatrixA()
    67  float *dst = a_pack_ptr_ + i * param_.row_align_ * param_.deep_;  in InitMatrixA()
    69  if (param_.a_transpose_) {  in InitMatrixA()
    70  RowMajor2Row6Major(src, dst, param_.deep_, param_.row_);  in InitMatrixA()
    72  RowMajor2Col6Major(src, dst, param_.row_, param_.deep_);  in InitMatrixA()
    75  if (param_.a_transpose_) {  in InitMatrixA()
    76  RowMajor2Row4Major(src, dst, param_.deep_, param_.row_);  in InitMatrixA()
    78  RowMajor2Col4Major(src, dst, param_.row_, param_.deep_);  in InitMatrixA()
    [all …]
|
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/nnacl/int8/ |
D | transpose_int8_coder.cc |
    31  param_ = reinterpret_cast<TransposeParameter *>(parameter_);  in Prepare()
    32  param_->data_num_ = in_tensor->ElementsNum();  in Prepare()
    37  param_->num_axes_ = perm_tensor->ElementsNum();  in Prepare()
    38  for (int i = 0; i < param_->num_axes_; ++i) {  in Prepare()
    39  param_->perm_[i] = perm_data[i];  in Prepare()
    41  param_->strides_[param_->num_axes_ - 1] = 1;  in Prepare()
    42  param_->out_strides_[param_->num_axes_ - 1] = 1;  in Prepare()
    43  for (int i = param_->num_axes_ - 2; i >= 0; i--) {  in Prepare()
    44  param_->strides_[i] = in_shape.at(i + 1) * param_->strides_[i + 1];  in Prepare()
    45  param_->out_strides_[i] = out_shape.at(i + 1) * param_->out_strides_[i + 1];  in Prepare()
    [all …]
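Note: lines 41-45 are the standard row-major stride recurrence: the innermost axis gets stride 1 and each outer axis strides over the product of all dimensions inside it, computed once for the input shape and once for the output shape. A minimal standalone sketch (hypothetical helper, not the coder's API):

    #include <vector>

    // Row-major strides: innermost axis has stride 1; each outer axis
    // strides over the product of all dimensions inside it.
    std::vector<int> ComputeRowMajorStrides(const std::vector<int> &shape) {
      std::vector<int> strides(shape.size(), 1);
      for (int i = static_cast<int>(shape.size()) - 2; i >= 0; i--) {
        strides[i] = shape[i + 1] * strides[i + 1];
      }
      return strides;
    }
    // e.g. shape {2, 3, 4} -> strides {12, 4, 1}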
|
D | sub_int8_coder.cc |
    37  param_.in0_args_.scale_ = input0->quant_params().front().scale;  in Prepare()
    38  param_.in0_args_.zp_ = -input0->quant_params().front().zeroPoint;  in Prepare()
    39  param_.in1_args_.scale_ = input1->quant_params().front().scale;  in Prepare()
    40  param_.in1_args_.zp_ = -input1->quant_params().front().zeroPoint;  in Prepare()
    41  param_.out_args_.scale_ = output_tensor_->quant_params().front().scale;  in Prepare()
    42  param_.out_args_.zp_ = output_tensor_->quant_params().front().zeroPoint;  in Prepare()
    45  …const double twice_max_input_scale = 2 * std::max(param_.in0_args_.scale_, param_.in1_args_.scale_…  in Prepare()
    46  const double real_input0_multiplier = param_.in0_args_.scale_ / twice_max_input_scale;  in Prepare()
    47  const double real_input1_multiplier = param_.in1_args_.scale_ / twice_max_input_scale;  in Prepare()
    48  …const double real_output_multiplier = twice_max_input_scale / ((1 << left_shift) * param_.out_args…  in Prepare()
    [all …]
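Note: lines 45-48 set up the gemmlowp/TFLite-style quantized subtraction: both inputs are rescaled to the common scale 2 * max(s0, s1), an extra left shift adds precision headroom for the integer arithmetic, and the output multiplier folds the shift back out while mapping to the output scale. A sketch of just that arithmetic (all names here are illustrative, not the MindSpore API):

    #include <algorithm>

    struct SubMultipliers {
      double in0, in1, out;
    };

    // Common-scale rescaling for quantized a - b with input scales s0, s1 and
    // output scale s_out; left_shift is the precision headroom (e.g. 20).
    SubMultipliers MakeSubMultipliers(double s0, double s1, double s_out, int left_shift) {
      const double twice_max_input_scale = 2 * std::max(s0, s1);
      return {s0 / twice_max_input_scale,                           // rescale input 0
              s1 / twice_max_input_scale,                           // rescale input 1
              twice_max_input_scale / ((1 << left_shift) * s_out)}; // undo shift, hit output scale
    }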
|
D | matmul_base_int8_coder.cc |
    34  a_pack_ptr_size_ = param_->row_align_ * param_->deep_16_ * sizeof(int8_t);  in InitTmpBuffer()
    37  b_pack_ptr_size_ = param_->batch * param_->col_align_ * param_->deep_16_ * sizeof(int8_t);  in InitTmpBuffer()
    38  if (param_->b_const_) {  in InitTmpBuffer()
    44  input_sums_size_ = static_cast<size_t>(param_->row_align_ * sizeof(int));  in InitTmpBuffer()
    47  weight_bias_sums_size_ = static_cast<size_t>(param_->batch * param_->col_align_ * sizeof(int));  in InitTmpBuffer()
    48  if (param_->b_const_) {  in InitTmpBuffer()
    61  param_->row_align_ = UP_ROUND(param_->row_, row_tile_);  in ResizeParameter()
    62  param_->col_align_ = UP_ROUND(param_->col_, col_tile_);  in ResizeParameter()
    63  param_->deep_16_ = UP_ROUND(param_->deep_, C16NUM);  in ResizeParameter()
    65  thread_count_ = MSMIN(kDefaultThreadNum, UP_DIV(param_->col_align_, col_tile_));  in ResizeParameter()
    [all …]
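Note: ResizeParameter() rounds every GEMM dimension up to its tile multiple so the packed buffers sized in InitTmpBuffer() tile evenly, and the thread count is capped by the number of column tiles. The nnacl alignment macros have the usual ceiling semantics, shown here as plain functions:

    // UP_DIV(x, y)  : ceil(x / y)
    // UP_ROUND(x, y): smallest multiple of y that is >= x
    int UpDiv(int x, int y) { return (x + y - 1) / y; }
    int UpRound(int x, int y) { return UpDiv(x, y) * y; }
    // e.g. col_ = 10, col_tile_ = 4 -> col_align_ = 12, and UpDiv(12, 4) = 3
    // column tiles are available to spread across worker threads.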
|
D | div_int8_coder.cc |
    35  param_.in0_args_.scale_ = input0->quant_params().front().scale;  in Prepare()
    36  param_.in0_args_.zp_ = -input0->quant_params().front().zeroPoint;  in Prepare()
    37  param_.in1_args_.scale_ = input1->quant_params().front().scale;  in Prepare()
    38  param_.in1_args_.zp_ = -input1->quant_params().front().zeroPoint;  in Prepare()
    39  param_.out_args_.scale_ = output_tensor_->quant_params().front().scale;  in Prepare()
    40  param_.out_args_.zp_ = output_tensor_->quant_params().front().zeroPoint;  in Prepare()
    42  …const double real_multiplier = param_.in0_args_.scale_ / (param_.in1_args_.scale_ * param_.out_arg…  in Prepare()
    44  QuantizeMultiplier(real_multiplier, &param_.output_multiplier_, &param_.output_shift_);  in Prepare()
    46  param_.output_activation_min_ = std::numeric_limits<int8_t>::min();  in Prepare()
    47  param_.output_activation_max_ = std::numeric_limits<int8_t>::max();  in Prepare()
    [all …]
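Note: line 42 computes the single real multiplier s_in0 / (s_in1 * s_out) that maps the integer quotient back into the output's quantized domain, and QuantizeMultiplier() on line 44 decomposes it into an int32 fixed-point multiplier plus a power-of-two shift so the runtime kernel stays integer-only. A sketch of that decomposition in the common gemmlowp/TFLite convention (signature and rounding details are an assumption, not the nnacl source):

    #include <cmath>
    #include <cstdint>

    // Express multiplier as q * 2^shift with q a Q31 fixed-point value in [0.5, 1).
    void QuantizeMultiplierSketch(double multiplier, int32_t *q_fixed, int *shift) {
      if (multiplier == 0.0) {
        *q_fixed = 0;
        *shift = 0;
        return;
      }
      const double q = std::frexp(multiplier, shift);  // q in [0.5, 1)
      int64_t fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
      if (fixed == (1LL << 31)) {  // rounding overflow: q became exactly 1.0
        fixed /= 2;
        ++*shift;
      }
      *q_fixed = static_cast<int32_t>(fixed);
    }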
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/int8/ |
D | matmul_base_int8.cc |
    38  int res_stride = param_->col_ - cur_stride;  in RunImpl()
    50  …ulInt8Opt(pack_a_ptr_, batch_b_ptr_ + cur_stride * param_->deep_16_, batch_c_ptr_ + cur_stride, pa…  in RunImpl()
    51  … cur_oc, param_->deep_16_, input_sums_, weight_bias_sums_ + cur_stride, quant_param_->out_act_min_,  in RunImpl()
    52  … quant_param_->out_act_max_, quant_param_->output_.zp_, cur_mul, cur_left, cur_right, param_->col_,  in RunImpl()
    159  …CalculateActivationRangeQuantized(param_->act_type_ == ActType_Relu, param_->act_type_ == ActType_…  in InitQuantParam()
    165  param_->a_const_ = (in_tensors_[0]->data() != nullptr);  in InitParameter()
    166  param_->b_const_ = (in_tensors_[1]->data() != nullptr);  in InitParameter()
    178  param_->row_align_ = UP_ROUND(param_->row_, row_tile_);  in ResizeParameter()
    179  param_->col_align_ = UP_ROUND(param_->col_, col_tile_);  in ResizeParameter()
    180  param_->deep_16_ = UP_ROUND(param_->deep_, C16NUM);  in ResizeParameter()
    [all …]
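Note: RunImpl() splits the output columns into per-task bands: res_stride on line 38 is how many columns remain from this task's offset, and the truncated MatMul call on line 50 then covers at most one nominal stride of them. A sketch of the partitioning as the excerpt suggests it (names are hypothetical):

    #include <algorithm>

    // Each task owns a contiguous band of output columns; the last band
    // may be narrower than the nominal stride, and late tasks may be idle.
    void ColumnBand(int col, int stride, int task_id, int *offset, int *width) {
      *offset = task_id * stride;
      *width = std::max(0, std::min(stride, col - *offset));
    }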
|
D | layer_norm_int8.cc |
    99  CHECK_NULL_RETURN(param_);  in Init()
    110  param_->begin_norm_axis_ =  in ReSize()
    111  … param_->begin_norm_axis_ > 0 ? param_->begin_norm_axis_ : param_->begin_norm_axis_ + shape.size();  in ReSize()
    112  param_->begin_params_axis_ =  in ReSize()
    113  …param_->begin_params_axis_ > 0 ? param_->begin_params_axis_ : param_->begin_params_axis_ + shape.s…  in ReSize()
    115  param_->norm_outer_size_ = 1;  in ReSize()
    116  for (int i = 0; i < param_->begin_norm_axis_; ++i) {  in ReSize()
    117  param_->norm_outer_size_ *= shape.at(i);  in ReSize()
    119  param_->norm_inner_size_ = 1;  in ReSize()
    120  for (size_t i = param_->begin_norm_axis_; i < shape.size(); ++i) {  in ReSize()
    [all …]
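Note: ReSize() first wraps non-positive axes (a begin_norm_axis_ of -1 means the last dimension), then views the tensor as [norm_outer_size_, norm_inner_size_] with mean and variance taken over the inner block. The same bookkeeping recurs in the fp32 and fp16 kernels below. A standalone sketch (hypothetical helper):

    #include <vector>

    void SplitAtNormAxis(const std::vector<int> &shape, int begin_norm_axis,
                         int *outer, int *inner) {
      // Non-positive axes count back from the end of the shape.
      const int axis = begin_norm_axis > 0
                           ? begin_norm_axis
                           : begin_norm_axis + static_cast<int>(shape.size());
      *outer = 1;
      *inner = 1;
      for (int i = 0; i < axis; ++i) *outer *= shape[i];
      for (int i = axis; i < static_cast<int>(shape.size()); ++i) *inner *= shape[i];
    }
    // e.g. shape {2, 4, 8}, begin_norm_axis = -1 -> outer = 8, inner = 8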
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32/ |
D | arithmetic_fp32.cc |
    30  auto primitive_type = param_->op_parameter_.type_;  in Init()
    32  switch (param_->eltwise_mode_) {  in Init()
    43  MS_LOG(ERROR) << "Eltwise mode not support, mode:" << param_->eltwise_mode_;  in Init()
    55  CalcMultiplesAndStrides(param_);  in ReSize()
    56  if (param_->broadcasting_) {  in ReSize()
    58  …for (int i = static_cast<int>(param_->ndim_) - 1; i >= 0 && i < ARITHMETIC_SUPPORT_DIMS_NUM; --i) {  in ReSize()
    59  if (param_->in_shape0_[i] != param_->in_shape1_[i]) {  in ReSize()
    63  outside_ *= param_->out_shape_[i];  in ReSize()
    89  …if ((param_->in_elements_num0_ == 1 || param_->in_elements_num1_ == 1) && (arithmetic_opt_run_ != …  in IsScalarClac()
    101  for (size_t i = 0; i < param_->ndim_; i++) {  in IsBatchScalarCalc()
    [all …]
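Note: the ReSize() loop scans from the innermost axis outward, multiplying up the trailing axes on which the two input shapes agree and stopping at the first mismatch (the broadcast break point); the matching trailing block can then be handled by one vectorized call per outer iteration. A sketch of the same scan as a free function (hypothetical form of the member loop; the kernel keeps the accumulated product in outside_):

    // Reports the innermost axis where the shapes differ (-1 if none) and the
    // element count of the trailing block on which both inputs agree.
    void FindBroadcastBreak(const int *shape0, const int *shape1, const int *out_shape,
                            int ndim, int *break_pos, int *matched_count) {
      *break_pos = -1;
      *matched_count = 1;
      for (int i = ndim - 1; i >= 0; --i) {
        if (shape0[i] != shape1[i]) {
          *break_pos = i;
          break;
        }
        *matched_count *= out_shape[i];
      }
    }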
|
D | layer_norm_fp32.cc |
    32  CHECK_NULL_RETURN(param_);  in Init()
    43  param_->begin_norm_axis_ =  in ReSize()
    44  … param_->begin_norm_axis_ > 0 ? param_->begin_norm_axis_ : param_->begin_norm_axis_ + shape.size();  in ReSize()
    45  param_->begin_params_axis_ =  in ReSize()
    46  …param_->begin_params_axis_ > 0 ? param_->begin_params_axis_ : param_->begin_params_axis_ + shape.s…  in ReSize()
    48  param_->norm_outer_size_ = 1;  in ReSize()
    49  for (int i = 0; i < param_->begin_norm_axis_; ++i) {  in ReSize()
    50  param_->norm_outer_size_ *= shape.at(i);  in ReSize()
    52  param_->norm_inner_size_ = 1;  in ReSize()
    53  for (size_t i = param_->begin_norm_axis_; i < shape.size(); ++i) {  in ReSize()
    [all …]
|
D | transpose_fp32.cc |
    40  param_->num_axes_ = in_tensors_.at(1)->ElementsNum();  in ReSize()
    45  if (input_tensor->shape().size() != static_cast<size_t>(param_->num_axes_)) {  in ReSize()
    46  if (input_tensor->shape().size() == 3 && param_->num_axes_ == 4) {  in ReSize()
    47  param_->num_axes_ = 3;  in ReSize()
    58  if (param_->num_axes_ > MAX_TRANSPOSE_DIM_SIZE || param_->num_axes_ < 0) {  in ReSize()
    59  MS_LOG(ERROR) << "num_axes_ " << param_->num_axes_ << "is invalid.";  in ReSize()
    62  for (int i = 0; i < param_->num_axes_; ++i) {  in ReSize()
    63  param_->perm_[i] = perm_data[i];  in ReSize()
    69  param_->strides_[param_->num_axes_ - 1] = 1;  in ReSize()
    70  param_->out_strides_[param_->num_axes_ - 1] = 1;  in ReSize()
    [all …]
|
D | relative_position_attention_fp32.cc |
    166  param_->use_bias_ = true;  in CheckBiases()
    168  if (!param_->use_bias_) {  in CheckBiases()
    221  param_->row_tile_ = C6NUM;  in PrepareParam()
    222  param_->col_tile_ = C16NUM;  in PrepareParam()
    223  param_->bias_tile_ = C16NUM;  in PrepareParam()
    225  param_->row_tile_ = C12NUM;  in PrepareParam()
    226  param_->col_tile_ = C4NUM;  in PrepareParam()
    227  param_->bias_tile_ = C4NUM;  in PrepareParam()
    229  param_->row_tile_ = C4NUM;  in PrepareParam()
    230  param_->col_tile_ = C8NUM;  in PrepareParam()
    [all …]
|
D | roi_pooling_fp32.cc |
    59  param_->ndim_ = ndims;  in ReSize()
    60  param_->input_n_ = in_shape.at(0);  in ReSize()
    61  param_->input_h_ = in_shape.at(1);  in ReSize()
    62  param_->input_w_ = in_shape.at(2);  in ReSize()
    63  param_->input_c_ = in_shape.at(3);  in ReSize()
    64  param_->output_n_ = out_shape.at(0);  in ReSize()
    65  param_->output_h_ = out_shape.at(1);  in ReSize()
    66  param_->output_w_ = out_shape.at(2);  in ReSize()
    67  param_->output_c_ = out_shape.at(3);  in ReSize()
    68  param_->in_strides_[ndims - 1] = 1;  in ReSize()
    [all …]
|
D | space_to_batch_fp32.cc |
    32  param_->input_shape_[i] = input_tensor->shape().at(i);  in ProcessInput()
    33  param_->output_shape_[i] = output_tensor->shape().at(i);  in ProcessInput()
    35  ComputeStrides(param_->input_shape_, param_->in_stride_, DIMENSION_4D);  in ProcessInput()
    36  ComputeStrides(param_->output_shape_, param_->out_stride_, DIMENSION_4D);  in ProcessInput()
    41  param_->block_sizes_[i] = block_shape[i];  in ProcessInput()
    47  param_->paddings_[i] = padding[i];  in ProcessInput()
    76  param_->input_shape_[i] = input_tensor->shape().at(i);  in ReSize()
    77  param_->output_shape_[i] = output_tensor->shape().at(i);  in ReSize()
    80  ComputeStrides(param_->input_shape_, param_->in_stride_, DIMENSION_4D);  in ReSize()
    81  ComputeStrides(param_->output_shape_, param_->out_stride_, DIMENSION_4D);  in ReSize()
    [all …]
|
D | exp_fp32.cc |
    30  float log_base = (param_->base_ == -1) ? 1 : logf(param_->base_);  in Init()
    31  param_->in_scale_ = param_->scale_ * log_base;  in Init()
    32  if (param_->shift_ == 0) {  in Init()
    33  param_->out_scale_ = 1;  in Init()
    36  param_->out_scale_ = expf(param_->shift_);  in Init()
    38  param_->out_scale_ = powf(param_->base_, param_->shift_);  in Init()
    41  param_->op_parameter_.thread_num_ = ms_context_->thread_num_;  in Init()
    49  param_->element_num_ = in_tensors_.front()->ElementsNum();  in ReSize()
    55  …(reinterpret_cast<float *>(input_addr_), reinterpret_cast<float *>(output_addr_), param_, task_id);  in DoExcute()
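Note: Init() folds base b, scale s, and shift t into two constants via the identity

    b^(s*x + t) = b^t * e^(ln(b) * s * x)   =>   in_scale_ = s * ln(b),  out_scale_ = b^t

with base_ == -1 acting as a sentinel for the natural base (so log_base = 1, and the e^shift branch on line 36 applies). Each element of the truncated DoExcute() call then needs only out_scale_ * expf(in_scale_ * x), one multiply on either side of a single expf.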
|
D | embedding_lookup_fp32.cc |
    30  CHECK_NULL_RETURN(param_);  in Init()
    38  param_->ids_size_ = in_tensors_.back()->ElementsNum();  in ReSize()
    39  param_->layer_size_ = 1;  in ReSize()
    42  param_->layer_size_ *= in_shape[i];  in ReSize()
    45  param_->layer_num_ = 0;  in ReSize()
    48  param_->layer_num_ += in_tensors_[i]->shape()[0];  in ReSize()
    58  int error_code = EmbeddingLookup(input_addr_, ids_addr, output_addr, param_, task_id);  in DoExcute()
    79  …_cast<float *>(ms_context_->allocator->Malloc(sizeof(float) * param_->layer_size_ * param_->layer_…  in Run()
    80  param_->is_regulated_ = reinterpret_cast<bool *>(ms_context_->allocator->Malloc(sizeof(bool) * par…  in Run()
    81  if (input_addr_ == nullptr || param_->is_regulated_ == nullptr) {  in Run()
    [all …]
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32_grad/ |
D | strided_slice_grad.cc |
    37  param_ = reinterpret_cast<StridedSliceParameter *>(op_parameter_);  in Init()
    38  CHECK_NULL_RETURN(param_);  in Init()
    47  param_->data_type = kDataTypeFloat;  in Init()
    64  for (i = 0; i < param_->num_axes_; ++i) {  in FillEmptyDims()
    65  begins[i] = param_->begins_[i];  in FillEmptyDims()
    66  ends[i] = MSMIN(param_->ends_[i], param_->in_shape_[i]);  in FillEmptyDims()
    67  strides[i] = param_->strides_[i];  in FillEmptyDims()
    68  input_shape[i] = param_->in_shape_[i];  in FillEmptyDims()
    70  for (i = param_->num_axes_; i < param_->in_shape_length_; ++i) {  in FillEmptyDims()
    71  input_shape[i] = param_->in_shape_[i];  in FillEmptyDims()
    [all …]
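Note: FillEmptyDims() copies the axes the slice spec actually covers (clamping ends to the real extent) and lets every axis beyond num_axes_ default to a full pass-through slice, so the downstream kernel can assume a fully specified rank. The fp16 variant further down is the same routine. A standalone sketch of the idea (hypothetical form; the defaults for the padded axes are an assumption the excerpt truncates):

    #include <algorithm>

    void FillEmptyDimsSketch(const int *spec_begins, const int *spec_ends,
                             const int *spec_strides, int num_axes,
                             const int *in_shape, int in_shape_len,
                             int *begins, int *ends, int *strides) {
      for (int i = 0; i < num_axes; ++i) {
        begins[i] = spec_begins[i];
        ends[i] = std::min(spec_ends[i], in_shape[i]);  // clamp to the real extent
        strides[i] = spec_strides[i];
      }
      for (int i = num_axes; i < in_shape_len; ++i) {
        begins[i] = 0;  // implicit full-range slice on unspecified axes
        ends[i] = in_shape[i];
        strides[i] = 1;
      }
    }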
|
D | softmax_cross_entropy_with_logits.cc |
    36  for (int i = 0; i < param_->batch_size_; ++i) {  in ForwardPostExecute()
    38  for (size_t j = 0; j < param_->number_of_classes_; ++j) {  in ForwardPostExecute()
    40  …-logf(logits[i * param_->number_of_classes_ + j] <= 0.0 ? eps : logits[i * param_->number_of_class…  in ForwardPostExecute()
    41  grads[i * param_->number_of_classes_ + j] =  in ForwardPostExecute()
    42  (logits[i * param_->number_of_classes_ + j] - labels[i * param_->number_of_classes_ + j]);  in ForwardPostExecute()
    43  loss += labels[i * param_->number_of_classes_ + j] * logit;  in ForwardPostExecute()
    48  for (int i = 0; i < param_->batch_size_; ++i) {  in ForwardPostExecute()
    50  for (size_t j = 0; j < param_->number_of_classes_; ++j) {  in ForwardPostExecute()
    52  …-logf(logits[i * param_->number_of_classes_ + j] <= 0.0 ? eps : logits[i * param_->number_of_class…  in ForwardPostExecute()
    53  loss += labels[i * param_->number_of_classes_ + j] * logit;  in ForwardPostExecute()
    [all …]
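Note: both loops accumulate standard cross entropy. With post-softmax probabilities p (the `logits` array here already holds them, clamped to eps before the log so logf never sees a non-positive value) and labels y,

    loss = sum_i sum_j y[i][j] * (-log(p[i][j]))        grad[i][j] = p[i][j] - y[i][j]

where grad = p - y is the usual closed form for the derivative of softmax-plus-cross-entropy with respect to the logits; the second loop is the variant that skips the gradient. Any batch averaging happens outside the excerpt.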
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp16_grad/ |
D | strided_slice_fp16_grad.cc |
    39  param_ = reinterpret_cast<StridedSliceParameter *>(op_parameter_);  in Init()
    40  CHECK_NULL_RETURN(param_);  in Init()
    46  param_->data_type = kDataTypeFloat16;  in Init()
    63  for (i = 0; i < param_->num_axes_; ++i) {  in FillEmptyDims()
    64  begins[i] = param_->begins_[i];  in FillEmptyDims()
    65  ends[i] = MSMIN(param_->ends_[i], param_->in_shape_[i]);  in FillEmptyDims()
    66  strides[i] = param_->strides_[i];  in FillEmptyDims()
    67  input_shape[i] = param_->in_shape_[i];  in FillEmptyDims()
    69  for (i = param_->num_axes_; i < param_->in_shape_length_; ++i) {  in FillEmptyDims()
    70  input_shape[i] = param_->in_shape_[i];  in FillEmptyDims()
    [all …]
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp16/ |
D | arithmetic_compare_fp16.cc |
    78  param_->in_elements_num0_ = in_tensors_.at(0)->ElementsNum();  in ReSize()
    79  param_->in_elements_num1_ = in_tensors_.at(1)->ElementsNum();  in ReSize()
    80  param_->out_elements_num_ = out_tensors_.at(0)->ElementsNum();  in ReSize()
    82  if (param_->in_elements_num0_ == 1 || param_->in_elements_num1_ == 1) {  in ReSize()
    83  param_->broadcasting_ = false;  in ReSize()
    84  …arithmetic_opt_func_ = GetOptimizedArithmeticCompareFun(param_->op_parameter_.type_, param_->activ…  in ReSize()
    86  arithmetic_func_ = GetArithmeticCompareFun(param_->op_parameter_.type_, param_->activation_type_);  in ReSize()
    92  if (param_->broadcasting_) {  in ReSize()
    94  for (int i = param_->ndim_ - 1; i >= 0; --i) {  in ReSize()
    95  if (param_->in_shape0_[i] != param_->in_shape1_[i]) {  in ReSize()
    [all …]
|
D | layer_norm_fp16.cc |
    33  CHECK_NULL_RETURN(param_);  in Init()
    44  param_->begin_norm_axis_ =  in ReSize()
    45  … param_->begin_norm_axis_ > 0 ? param_->begin_norm_axis_ : param_->begin_norm_axis_ + shape.size();  in ReSize()
    46  param_->begin_params_axis_ =  in ReSize()
    47  …param_->begin_params_axis_ > 0 ? param_->begin_params_axis_ : param_->begin_params_axis_ + shape.s…  in ReSize()
    49  param_->norm_outer_size_ = 1;  in ReSize()
    50  for (int i = 0; i < param_->begin_norm_axis_; ++i) {  in ReSize()
    51  param_->norm_outer_size_ *= shape.at(i);  in ReSize()
    53  param_->norm_inner_size_ = 1;  in ReSize()
    54  for (size_t i = param_->begin_norm_axis_; i < shape.size(); ++i) {  in ReSize()
    [all …]
|
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/nnacl/fp32/ |
D | transpose_fp32_coder.cc |
    27  param_->num_axes_ = input_tensors_.at(1)->ElementsNum();  in Resize()
    29  if (input_tensors_.at(kInputIndex)->shape().size() != static_cast<size_t>(param_->num_axes_)) {  in Resize()
    37  for (int i = 0; i < param_->num_axes_; ++i) {  in Resize()
    38  param_->perm_[i] = perm_data[i];  in Resize()
    42  param_->strides_[param_->num_axes_ - 1] = 1;  in Resize()
    43  param_->out_strides_[param_->num_axes_ - 1] = 1;  in Resize()
    44  param_->data_num_ = input_tensor_->ElementsNum();  in Resize()
    45  for (int i = param_->num_axes_ - 2; i >= 0; i--) {  in Resize()
    46  param_->strides_[i] = in_shape.at(i + 1) * param_->strides_[i + 1];  in Resize()
    47  param_->out_strides_[i] = out_shape.at(i + 1) * param_->out_strides_[i + 1];  in Resize()
    [all …]
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/base/ |
D | split_with_over_lap_base.cc |
    34  CHECK_LESS_RETURN(SPLIT_MAX_SLICE_NUM, param_->num_split_ + 1);  in CalculateSplitedShapes()
    35  for (auto i = 0; i < param_->num_split_; i++) {  in CalculateSplitedShapes()
    36  total_block_count += param_->ratio_[i];  in CalculateSplitedShapes()
    38  CHECK_LESS_RETURN(static_cast<int>(shape.size()), param_->split_dim_ + 1);  in CalculateSplitedShapes()
    39  auto split_dim_size = shape[param_->split_dim_];  in CalculateSplitedShapes()
    44  for (auto i = 0; i < param_->num_split_ - 1; i++) {  in CalculateSplitedShapes()
    45  visited_block += param_->ratio_[i];  in CalculateSplitedShapes()
    52  for (auto i = 0; i < param_->num_split_; i++) {  in CalculateSplitedShapes()
    57  start_indices_[i] -= param_->extend_top_[i];  in CalculateSplitedShapes()
    58  end_indices_[i] += param_->extend_bottom_[i];  in CalculateSplitedShapes()
    [all …]
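Note: CalculateSplitedShapes() places the slice boundaries proportionally to the per-slice ratios along split_dim_, then widens each slice by its top/bottom overlap (lines 57-58). A sketch of the whole computation under that reading (the proportional-border formula is an assumption; the excerpt only shows the ratio accumulation and the final widening):

    #include <vector>

    void SplitWithOverlapSketch(int dim_size, const std::vector<int> &ratio,
                                const std::vector<int> &extend_top,
                                const std::vector<int> &extend_bottom,
                                std::vector<int> *starts, std::vector<int> *ends) {
      int total_block_count = 0;
      for (int r : ratio) total_block_count += r;
      const int n = static_cast<int>(ratio.size());
      std::vector<int> borders{0};
      int visited_block = 0;
      for (int i = 0; i < n - 1; ++i) {
        visited_block += ratio[i];
        borders.push_back(dim_size * visited_block / total_block_count);  // proportional cut
      }
      borders.push_back(dim_size);
      for (int i = 0; i < n; ++i) {
        starts->push_back(borders[i] - extend_top[i]);       // widen upward
        ends->push_back(borders[i + 1] + extend_bottom[i]);  // widen downward
      }
    }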
|
D | slice_base.cc |
    55  param_->param_length_ = in_tensor->shape().size();  in ReSize()
    56  if (param_->param_length_ > DIMENSION_8D) {  in ReSize()
    60  for (int i = 0; i < param_->param_length_; ++i) {  in ReSize()
    61  param_->shape_[i] = in_tensor->DimensionSize(i);  in ReSize()
    62  param_->begin_[i] = begin[i];  in ReSize()
    63  param_->size_[i] = size[i] < 0 ? param_->shape_[i] - param_->begin_[i] : size[i];  in ReSize()
    64  param_->end_[i] = param_->begin_[i] + param_->size_[i];  in ReSize()
    66  if (param_->param_length_ < DIMENSION_8D) {  in ReSize()
    67  PadSliceParameterTo8D(param_);  in ReSize()
    87  DoSlice(in_tensors_.at(0)->data(), out_tensors_.at(0)->data(), param_, thread_id,  in SliceParallelRun()
    [all …]
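Note: line 63 applies the usual Slice convention that a negative size means "through the end of the axis": for shape_[i] = 10, begin_[i] = 3, size_[i] = -1 the kernel gets size_ = 10 - 3 = 7 and end_ = 3 + 7 = 10. Padding everything up to 8D afterwards (PadSliceParameterTo8D, line 67) lets a single fixed-rank DoSlice loop serve inputs of any rank.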
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/string/ |
D | lsh_projection.cc |
    63  param_->hash_buff_size_ = sizeof(float) + sizeof(int32_t);  in Run()
    64  param_->feature_num_ = input1_tensor->ElementsNum();  in Run()
    65  param_->hash_shape_[0] = input0_tensor->DimensionSize(0);  in Run()
    66  param_->hash_shape_[1] = input0_tensor->DimensionSize(1);  in Run()
    67  …param_->thread_stride_ = op_parameter_->thread_num_ > 1 ? UP_DIV(param_->hash_shape_[0], op_parame…  in Run()
    68  : param_->hash_shape_[0];  in Run()
    82  param_->hash_buffs_ =  in MallocKeys()
    84  if (param_->hash_buffs_ == nullptr) {  in MallocKeys()
    89  …param_->hash_buffs_[i] = static_cast<char *>(ms_context_->allocator->Malloc(param_->hash_buff_size…  in MallocKeys()
    90  if (param_->hash_buffs_[i] == nullptr) {  in MallocKeys()
    [all …]
|
/third_party/openh264/test/api/ |
D | encode_options_test.cpp |
    45  param_.iPicWidth = WelsClip3 ((((rand() % MAX_WIDTH) >> 1) + 1) << 1, 2, MAX_WIDTH);  in RandomParamExtCombination()
    46  param_.iPicHeight = WelsClip3 ((((rand() % MAX_HEIGHT) >> 1) + 1) << 1, 2, MAX_HEIGHT);  in RandomParamExtCombination()
    48  param_.fMaxFrameRate = rand() % FRAME_RATE_RANGE + 0.5f;  in RandomParamExtCombination()
    49  param_.iUsageType = static_cast<EUsageType> (rand() % 2);  in RandomParamExtCombination()
    50  param_.iTemporalLayerNum = rand() % TEMPORAL_LAYER_NUM_RANGE;  in RandomParamExtCombination()
    51  param_.iSpatialLayerNum = rand() % SPATIAL_LAYER_NUM_RANGE;  in RandomParamExtCombination()
    53  param_.uiIntraPeriod = rand() - 1;  in RandomParamExtCombination()
    54  param_.iNumRefFrame = AUTO_REF_PIC_COUNT;  in RandomParamExtCombination()
    55  param_.iMultipleThreadIdc = rand();  in RandomParamExtCombination()
    60  param_.eSpsPpsIdStrategy = CONSTANT_ID;  in RandomParamExtCombination()
    [all …]
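Note: the width/height expressions on lines 45-46 force even dimensions (as 4:2:0 chroma subsampling requires): `(((rand() % MAX_WIDTH) >> 1) + 1) << 1` clears the low bit and re-doubles, giving an even value of at least 2, and WelsClip3 then clamps into [2, MAX_WIDTH]. A sketch of the clamp, assuming the usual Clip3 semantics:

    // Clip3(v, lo, hi): clamp v into [lo, hi] (what WelsClip3 is used for above).
    static inline int Clip3(int v, int lo, int hi) {
      return v < lo ? lo : (v > hi ? hi : v);
    }
    // e.g. rand() % MAX_WIDTH == 7 -> ((7 >> 1) + 1) << 1 == 8, then clamped.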
|