
Searched refs: row_ (Results 1 – 25 of 42) sorted by relevance

/third_party/mindspore/mindspore/ccsrc/minddata/dataset/core/
tensor_row.cc
27 : id_(kDefaultRowId), path_({}), row_(n, t), tensor_row_flag_(kFlagNone) {} in TensorRow()
30 : id_(kDefaultRowId), path_({}), row_(v), tensor_row_flag_(kFlagNone) {} in TensorRow()
33 : id_(id), path_({}), row_(lst), tensor_row_flag_(kFlagNone) {} in TensorRow()
36 : id_(tr.id_), path_(tr.path_), row_(tr.row_), tensor_row_flag_(tr.tensor_row_flag_) {} in TensorRow()
44 row_ = tr.row_; in operator =()
52 row_ = lst; in operator =()
58 : id_(kDefaultRowId), path_({}), row_(std::move(v)), tensor_row_flag_(kFlagNone) {} in TensorRow()
61 : id_(id), path_({}), row_(std::move(lst)), tensor_row_flag_(kFlagNone) {} in TensorRow()
66 row_ = std::move(tr.row_); in TensorRow()
74 row_ = std::move(tr.row_); in operator =()
[all …]
tensor_row.h
168 const vector_type &getRow() const { return row_; } in getRow()
172 for (auto &it : row_) { in SizeInBytes()
179 void emplace_back(value_type t) { row_.emplace_back(t); } in emplace_back()
181 void push_back(value_type t) { row_.push_back(t); } in push_back()
183 void clear() noexcept { row_.clear(); } in clear()
185 size_type size() const noexcept { return row_.size(); } in size()
187 void reserve(size_type size) { row_.reserve(size); } in reserve()
189 void resize(size_type size) { row_.resize(size); } in resize()
191 bool empty() { return row_.empty(); } in empty()
193 …void insert(iterator position, iterator first, iterator last) { row_.insert(position, first, last)… in insert()
[all …]
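
Note: the tensor_row.h hits above all forward standard container calls to the row_ member. A minimal sketch of that wrapper pattern, with a placeholder element type (illustrative only, not the actual MindSpore TensorRow class):

#include <memory>
#include <utility>
#include <vector>

// Hypothetical element type standing in for a shared tensor pointer.
using value_type = std::shared_ptr<int>;

class RowWrapper {
 public:
  using vector_type = std::vector<value_type>;
  using size_type = vector_type::size_type;
  using iterator = vector_type::iterator;

  const vector_type &getRow() const { return row_; }

  // Every container operation simply forwards to the underlying vector,
  // exactly as the tensor_row.h hits show.
  void emplace_back(value_type t) { row_.emplace_back(std::move(t)); }
  void push_back(value_type t) { row_.push_back(std::move(t)); }
  void clear() noexcept { row_.clear(); }
  size_type size() const noexcept { return row_.size(); }
  void reserve(size_type n) { row_.reserve(n); }
  void resize(size_type n) { row_.resize(n); }
  bool empty() const { return row_.empty(); }
  void insert(iterator pos, iterator first, iterator last) { row_.insert(pos, first, last); }

 private:
  vector_type row_;  // the wrapped storage every hit refers to
};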
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/
attention_fp32.c
31 matrix->row_ = row; in InitMatrix()
43 int real_row = matrix->is_transpose_ ? matrix->col_ : matrix->row_; in LeftMatrixPackElementSize()
44 int deep = matrix->is_transpose_ ? matrix->row_ : matrix->col_; in LeftMatrixPackElementSize()
57 int deep = matrix->is_transpose_ ? matrix->col_ : matrix->row_; in RightMatrixPackElementSize()
58 int real_col = matrix->is_transpose_ ? matrix->row_ : matrix->col_; in RightMatrixPackElementSize()
71 int real_row = matrix->is_transpose_ ? matrix->col_ : matrix->row_; in PackLeftMatrix()
72 int deep = matrix->is_transpose_ ? matrix->row_ : matrix->col_; in PackLeftMatrix()
75 int src_area = matrix->row_ * matrix->col_; in PackLeftMatrix()
133 int deep = matrix->is_transpose_ ? matrix->col_ : matrix->row_; in PackRightMatrix()
134 int real_col = matrix->is_transpose_ ? matrix->row_ : matrix->col_; in PackRightMatrix()
[all …]
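
Note: the attention_fp32.c hits select the effective row and depth dimensions from row_/col_ depending on is_transpose_. A hedged C++ sketch of that selection (struct and function names are made up, not the nnacl API):

#include <cstdio>

// Illustrative stand-in for the nnacl matrix descriptor.
struct Matrix {
  int row_;
  int col_;
  bool is_transpose_;
};

// When the operand is stored transposed, row_ and col_ swap roles:
// the stored rows become the inner (deep) dimension and vice versa.
int EffectiveRows(const Matrix &m) { return m.is_transpose_ ? m.col_ : m.row_; }
int EffectiveDeep(const Matrix &m) { return m.is_transpose_ ? m.row_ : m.col_; }

int main() {
  Matrix m = {4, 16, true};
  std::printf("rows=%d deep=%d\n", EffectiveRows(m), EffectiveDeep(m));  // rows=16 deep=4
  return 0;
}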
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32/
convolution_1x1_fp32.cc
57 matmul_param_->row_ = conv_param_->output_h_ * conv_param_->output_w_; in InitConv1x1MatmulParam()
60 matmul_param_->row_align_ = UP_ROUND(matmul_param_->row_, row_tile_); in InitConv1x1MatmulParam()
66 …if ((matmul_param_->row_ > (row_tile_ * op_parameter_->thread_num_)) && (matmul_param_->row_ > mat… in InitConv1x1Param()
68 thread_count_ = MSMIN(op_parameter_->thread_num_, UP_DIV(matmul_param_->row_, row_tile_)); in InitConv1x1Param()
73 thread_stride_ = UP_DIV(UP_DIV(matmul_param_->row_, row_tile_), thread_count_) * row_tile_; in InitConv1x1Param()
87 …input_ptr_ = reinterpret_cast<float *>(malloc(matmul_param_->row_ * matmul_param_->deep_ * sizeof(… in InitConv1x1Param()
92 memset(input_ptr_, 0, matmul_param_->row_ * matmul_param_->deep_ * sizeof(float)); in InitConv1x1Param()
156 matmul_param_->row_, cur_oc, matmul_param_->col_, OutType_Nhwc); in DoConv1x1()
159 … output_ptr_ + task_id * thread_stride_ * matmul_param_->row_, bias, matmul_param_->act_type_, in DoConv1x1()
160 … matmul_param_->deep_, matmul_param_->row_, cur_oc, matmul_param_->row_, OutType_NC4HW4); in DoConv1x1()
[all …]
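
Note: in the convolution_1x1_fp32.cc hits, row_ is the number of output pixels (output_h_ * output_w_) and the per-thread split works in row_tile_-sized chunks. A small sketch of that arithmetic, assuming rounding helpers in the usual UP_DIV/UP_ROUND style:

#include <algorithm>
#include <cstdio>

// Rounding helpers in the style used throughout the nnacl code.
constexpr int UP_DIV(int x, int y) { return (x + y - 1) / y; }
constexpr int UP_ROUND(int x, int y) { return UP_DIV(x, y) * y; }

int main() {
  const int output_h = 17, output_w = 23, row_tile = 12, thread_num = 4;

  const int row = output_h * output_w;            // matmul rows = output pixels
  const int row_align = UP_ROUND(row, row_tile);  // padded to a multiple of the tile
  const int thread_count = std::min(thread_num, UP_DIV(row, row_tile));
  const int thread_stride = UP_DIV(UP_DIV(row, row_tile), thread_count) * row_tile;

  std::printf("row=%d row_align=%d threads=%d stride=%d\n",
              row, row_align, thread_count, thread_stride);
  return 0;
}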
matmul_fp32_base.cc
56 if (params_->row_ == 1) { in ResizeParameter()
66 params_->row_align_ = UP_ROUND(params_->row_, row_tile_); in ResizeParameter()
178 const float *src = src_ptr + i * params_->deep_ * params_->row_; in InitMatrixA()
181 matrix_a_pack_fun_(src, dst, params_->deep_, params_->row_); in InitMatrixA()
183 matrix_a_pack_fun_(src, dst, params_->row_, params_->deep_); in InitMatrixA()
298 …MatMulOpt(batch_a_ptr_, b, c, bias, params_->act_type_, params_->deep_, params_->row_, cur_oc, par… in FloatRun()
326 params_->row_align_ = UP_ROUND(params_->row_, row_tile_); in init_global_variable()
424 params_->batch * params_->row_ * oc_block_num * col_tile_ * static_cast<int>(sizeof(float)))); in InitTmpOutBuffer()
477 batch_c_ptr_ = output_data_ + i * params_->row_ * params_->col_align_; in Run()
480 batch_c_ptr_ = output_data_ + i * params_->row_ * params_->col_; in Run()
[all …]
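
Note: matmul_fp32_base.cc treats row_ == 1 as a vector-times-matrix special case and addresses each batch's output at a stride of row_ * col_ (or row_ * col_align_ for the packed layout). A hedged sketch of that batch addressing only (simplified, not the real kernel):

#include <cstdio>
#include <vector>

int main() {
  const int batch = 2, row = 3, col = 5, col_align = 8;
  std::vector<float> out(batch * row * col_align, 0.0f);

  const bool vec_matmul = (row == 1);  // row_ == 1 selects the vector path in the hits
  for (int i = 0; i < batch; ++i) {
    // Plain output strides by row * col; the packed layout strides by row * col_align.
    const int plain_offset = i * row * col;
    const int packed_offset = i * row * col_align;
    std::printf("batch %d: vec=%d plain=%d packed=%d (buffer size %zu)\n",
                i, vec_matmul, plain_offset, packed_offset, out.size());
  }
  return 0;
}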
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp16/
convolution_1x1_fp16.cc
31 matmul_param_->row_ = conv_param_->output_h_ * conv_param_->output_w_; in InitMatmulParam()
34 matmul_param_->row_align_ = UP_ROUND(matmul_param_->row_, row_tile_); in InitMatmulParam()
53 …if ((matmul_param_->row_ > (row_tile_ * op_parameter_->thread_num_)) && (matmul_param_->row_ > mat… in InitConv1x1Param()
55 thread_count_ = MSMIN(op_parameter_->thread_num_, UP_DIV(matmul_param_->row_, row_tile_)); in InitConv1x1Param()
60 thread_stride_ = UP_DIV(UP_DIV(matmul_param_->row_, row_tile_), thread_count_) * row_tile_; in InitConv1x1Param()
73 …input_ptr_ = reinterpret_cast<float16_t *>(malloc(matmul_param_->row_ * matmul_param_->deep_ * siz… in InitConv1x1Param()
78 memset(input_ptr_, 0, matmul_param_->row_ * matmul_param_->deep_ * sizeof(float16_t)); in InitConv1x1Param()
217 matmul_param_->row_, cur_oc, matmul_param_->col_, OutType_Nhwc); in RunOc()
222 matmul_param_->row_, cur_oc, matmul_param_->col_, OutType_Nhwc); in RunOc()
234 int res_stride = matmul_param_->row_ - task_id * thread_stride_; in RunHw()
[all …]
matmul_base_fp16.cc
119 if (params_->row_ == 1) { in ResizeParameter()
131 params_->row_align_ = UP_ROUND(params_->row_, row_tile_); in ResizeParameter()
177 …const int8_t *src = int8_src + i * params_->deep_ * params_->row_ * lite::DataTypeSize(src_data_ty… in InitMatrixA()
181 RowMajor2RowNMajorFp16((const float16_t *)src, dst, params_->deep_, params_->row_); in InitMatrixA()
183 …RowMajor2Row12MajorFp16(src, dst, params_->deep_, params_->row_, src_data_type == kNumberTypeFloat… in InitMatrixA()
187 RowMajor2ColNMajorFp16((const float16_t *)src, dst, params_->row_, params_->deep_); in InitMatrixA()
189 …RowMajor2Col12MajorFp16(src, dst, params_->row_, params_->deep_, src_data_type == kNumberTypeFloat… in InitMatrixA()
304 …MatmulBaseFp16Neon(batch_a_ptr_, b, c, bias, params_->act_type_, params_->deep_, params_->row_, cu… in RunImpl()
307 …MatMulFp16(batch_a_ptr_, b, c, bias, params_->act_type_, params_->deep_, params_->row_, cur_oc, pa… in RunImpl()
341 batch_c_ptr_ = c_ptr + i * params_->row_ * params_->col_; in Run()
[all …]
/third_party/boost/boost/qvm/
map_mat_vec.hpp
150 row_ class
152 row_( row_ const & );
153 row_ & operator=( row_ const & );
154 ~row_();
160 row_ &
180 vec_traits< qvm_detail::row_<Row,OriginalMatrix> >
182 typedef qvm_detail::row_<Row,OriginalMatrix> this_vector;
233 deduce_vec<qvm_detail::row_<Row,OriginalMatrix>,D>
240 deduce_vec2<qvm_detail::row_<Row,OriginalMatrix>,qvm_detail::row_<Row,OriginalMatrix>,D>
248 qvm_detail::row_<Row,A> const &>::type
[all …]
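
Note: in boost/qvm's map_mat_vec.hpp, row_ is a non-instantiable proxy class: a row of a matrix is exposed as a vector-like view by reinterpreting a reference to the original matrix, which is why the copy constructor, assignment operator, and destructor in the hits are private and never defined. A rough sketch of that view idiom (illustrative only; strict-aliasing caveats apply):

#include <array>
#include <cstdio>

struct Mat3 {
  std::array<std::array<float, 3>, 3> m{};
};

template <int Row, class OriginalMatrix>
class row_view {
  // Mirrors the private, never-defined members in the qvm hits: the view is
  // never constructed, copied, assigned, or destroyed as a real object.
  row_view(const row_view &) = delete;
  row_view &operator=(const row_view &) = delete;
  ~row_view() = default;

 public:
  float &operator[](int col) {
    return reinterpret_cast<OriginalMatrix *>(this)->m[Row][col];
  }
};

// Expose row R of a matrix as a vector-like view without copying any data.
template <int R>
row_view<R, Mat3> &row(Mat3 &a) {
  return reinterpret_cast<row_view<R, Mat3> &>(a);
}

int main() {
  Mat3 a;
  row<1>(a)[2] = 5.0f;
  std::printf("%g\n", a.m[1][2]);  // 5
  return 0;
}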
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/
matmul_cpu_kernel.cc
66 const float *src = src_ptr + i * param_.row_ * param_.deep_; in InitMatrixA()
70 RowMajor2Row6Major(src, dst, param_.deep_, param_.row_); in InitMatrixA()
72 RowMajor2Col6Major(src, dst, param_.row_, param_.deep_); in InitMatrixA()
76 RowMajor2Row4Major(src, dst, param_.deep_, param_.row_); in InitMatrixA()
78 RowMajor2Col4Major(src, dst, param_.row_, param_.deep_); in InitMatrixA()
82 RowMajor2Row12Major(src, dst, param_.deep_, param_.row_); in InitMatrixA()
84 RowMajor2Col12Major(src, dst, param_.row_, param_.deep_); in InitMatrixA()
145 param_.row_ = SizeToInt(o_shape[rank_ - kIndexOffset]); in InitArmKernel()
148 vec_matmul_ = (param_.row_ == 1); in InitArmKernel()
149 param_.row_align_ = vec_matmul_ ? 1 : UP_ROUND(param_.row_, row_tile_); in InitArmKernel()
[all …]
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/util/
queue_map.h
104 CHECK_FAIL_RETURN_UNEXPECTED(!row_.empty(), "Programming error"); in Wait()
105 *out = std::move(row_.front()); in Wait()
106 row_.pop_front(); in Wait()
112 row_.push_back(std::move(row)); in WakeUpAny()
120 out << "sz:" << rq.row_.size() << ",uc:" << rq.use_count_.Peek();
127 std::deque<T> row_; variable
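
Note: queue_map.h uses row_ as a plain std::deque backing a wait/wake pair: Wait() moves the front element out, WakeUpAny() pushes to the back. A condensed, hypothetical sketch of that shape (the real class adds MindSpore Status handling and its own CondVar):

#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <utility>

template <class T>
class RowQueue {
 public:
  // Producer side: append and wake one waiter (mirrors WakeUpAny in the hits).
  void WakeUpAny(T row) {
    std::lock_guard<std::mutex> lk(mu_);
    row_.push_back(std::move(row));
    cv_.notify_one();
  }

  // Consumer side: block until a row is available, then move it out (mirrors Wait).
  void Wait(T *out) {
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return !row_.empty(); });
    *out = std::move(row_.front());
    row_.pop_front();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<T> row_;  // the member flagged in the search hits
};

int main() {
  RowQueue<int> q;
  q.WakeUpAny(42);
  int v = 0;
  q.Wait(&v);
  std::printf("%d\n", v);  // 42
  return 0;
}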
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/int8/
convolution_1x1_int8.cc
116 …size_t size = support_optimize_ ? UP_ROUND(matmul_param_->row_, C8NUM) * UP_ROUND(matmul_param_->d… in InitRunBuf()
117 … : UP_ROUND(matmul_param_->row_, C4NUM) * UP_ROUND(matmul_param_->deep_, C16NUM); in InitRunBuf()
339 matmul_param_->row_ = conv_param_->output_h_ * conv_param_->output_w_; in InitParam()
342 matmul_param_->row_4_ = UP_ROUND(matmul_param_->row_, C4NUM); in InitParam()
363 input_sum_size_ = UP_ROUND(matmul_param_->row_, row_pack_count); in InitParam()
366 …input_ptr_ = reinterpret_cast<int8_t *>(malloc(matmul_param_->row_ * matmul_param_->deep_ * sizeof… in InitParam()
371 memset(input_ptr_, 0, matmul_param_->row_ * matmul_param_->deep_ * sizeof(int8_t)); in InitParam()
374 int hw_thread_count = UP_DIV(matmul_param_->row_, row_pack_count); in InitParam()
423 int res_stride = matmul_param_->row_ - task_id * thread_stride_hw_ * C4NUM; in RunArmHw()
451 int res_stride = matmul_param_->row_ - task_id * thread_stride_hw_ * C4NUM; in RunArm64OptHw()
[all …]
matmul_base_int8.cc
50 …pack_a_ptr_, batch_b_ptr_ + cur_stride * param_->deep_16_, batch_c_ptr_ + cur_stride, param_->row_, in RunImpl()
178 param_->row_align_ = UP_ROUND(param_->row_, row_tile_); in ResizeParameter()
328 auto current_src_a = a_ptr + i * param_->row_ * param_->deep_; in Run()
330 RowMajor2Col16x4MajorInt8(current_src_a, param_->deep_, param_->row_, pack_a_ptr_); in Run()
331 … CalcInputSums(current_src_a, param_->row_, param_->deep_, tmp_weight_zp, input_sums_, ColMajor); in Run()
333 RowMajor2Row16x4MajorInt8(current_src_a, pack_a_ptr_, param_->row_, param_->deep_); in Run()
334 … CalcInputSums(current_src_a, param_->row_, param_->deep_, tmp_weight_zp, input_sums_, RowMajor); in Run()
339 batch_c_ptr_ = c_ptr + i * param_->row_ * param_->col_; in Run()
deconvolution_int8.cc
132 matmul_param_->row_ = conv_param_->input_h_ * conv_param_->input_w_; in InitParam()
211 size = UP_ROUND(matmul_param_->row_, C4NUM); in InitRunBuf()
264 UP_ROUND(matmul_param_->row_, C4NUM), cur_oc * C4NUM * kernel_plane, in DoDeconv()
290 …RowMajor2Row16x4MajorInt8(src_in + batch_index * matmul_param_->row_ * conv_param_->input_channel_… in Run()
291 matmul_param_->row_, matmul_param_->deep_); in Run()
295 … UP_ROUND(matmul_param_->row_, C4NUM), UP_ROUND(matmul_param_->deep_, C16NUM), support_optimize_); in Run()
/third_party/flutter/skia/third_party/externals/libwebp/src/utils/
quant_levels_dec_utils.c
48 int row_; // current input row being processed member
103 if (p->row_ >= 0 && p->row_ < p->height_ - 1) { in VFilter()
139 const uint8_t* const dither = kOrderedDither[p->row_ % DSIZE]; in ApplyFilter()
246 p->row_ = -radius; in InitParams()
278 for (; p.row_ < p.height_; ++p.row_) { in WebPDequantizeLevels()
282 if (p.row_ >= p.radius_) { in WebPDequantizeLevels()
/third_party/skia/third_party/externals/libwebp/src/utils/
quant_levels_dec_utils.c
48 int row_; // current input row being processed member
103 if (p->row_ >= 0 && p->row_ < p->height_ - 1) { in VFilter()
139 const uint8_t* const dither = kOrderedDither[p->row_ % DSIZE]; in ApplyFilter()
246 p->row_ = -radius; in InitParams()
278 for (; p.row_ < p.height_; ++p.row_) { in WebPDequantizeLevels()
282 if (p.row_ >= p.radius_) { in WebPDequantizeLevels()
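
Note: in libwebp's quant_levels_dec_utils.c (both vendored copies above), row_ tracks the input row currently being processed: it starts at -radius, advances once per row, and gates when vertical filtering, dithering, and output emission kick in. A schematic sketch of that control flow (simplified; not the WebP implementation):

#include <cstdio>

int main() {
  const int height = 8, radius = 2, DSIZE = 4;

  int row = -radius;  // matches "p->row_ = -radius" in InitParams()
  for (; row < height; ++row) {
    // Vertical filtering only once the window is fully inside the image.
    const bool vfilter_ok = (row >= 0 && row < height - 1);
    // The dither pattern index cycles with the row (wrapped to stay non-negative here).
    const int dither_line = ((row % DSIZE) + DSIZE) % DSIZE;
    // Output is emitted only after the look-ahead of `radius` rows is filled.
    const bool emit = (row >= radius);
    std::printf("row=%2d vfilter=%d dither=%d emit=%d\n", row, vfilter_ok, dither_line, emit);
  }
  return 0;
}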
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/include/dataset/
iterator.h
70 MSTensorMapChar row_; in GetNextRow() local
71 row_.clear(); in GetNextRow()
73 Status s = GetNextRowCharIF(&row_); in GetNextRow()
74 TensorMapCharToString(&row_, row); in GetNextRow()
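
Note: in iterator.h, row_ is a local buffer: GetNextRow() fetches a char-keyed map through the ABI-stable interface and converts it to the string-keyed map handed back to the caller. A hedged sketch of that conversion wrapper (all types and helpers here are stand-ins, not the MindSpore API):

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Stand-ins for the char-based ABI map and the user-facing map.
using TensorMapChar = std::map<std::vector<char>, int>;
using TensorMap = std::map<std::string, int>;

// ABI-stable layer: assumed to fill the char-keyed map (hypothetical here).
bool GetNextRowCharIF(TensorMapChar *row) {
  (*row)[std::vector<char>{'c', 'o', 'l'}] = 7;
  return true;
}

void TensorMapCharToString(const TensorMapChar *in, TensorMap *out) {
  for (const auto &kv : *in) {
    (*out)[std::string(kv.first.begin(), kv.first.end())] = kv.second;
  }
}

// Mirrors the GetNextRow() hits: a local row_ buffers the char-keyed result.
bool GetNextRow(TensorMap *row) {
  TensorMapChar row_;
  row_.clear();
  if (!GetNextRowCharIF(&row_)) return false;
  TensorMapCharToString(&row_, row);
  return true;
}

int main() {
  TensorMap row;
  GetNextRow(&row);
  std::printf("col=%d\n", row["col"]);
  return 0;
}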
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/nnacl/int8/
conv2d_1x1_int8_coder.cc
85 std::to_string(matmul_param_->row_ * matmul_param_->col_); in DoCode()
238 matmul_param_->row_ = conv_param_->output_h_ * conv_param_->output_w_; in InitParam()
241 matmul_param_->row_4_ = UP_ROUND(matmul_param_->row_, C4NUM); in InitParam()
247 input_sum_size_ = UP_ROUND(matmul_param_->row_, row_pack_count); in InitParam()
251 …allocator_->Malloc(kNumberTypeInt8, matmul_param_->row_ * matmul_param_->deep_ * sizeof(int8_t), k… in InitParam()
263 size_t size = MSMAX(UP_ROUND(matmul_param_->row_, C8NUM) * UP_ROUND(matmul_param_->deep_, C4NUM), in InitRunBuf()
264 … UP_ROUND(matmul_param_->row_, C4NUM) * UP_ROUND(matmul_param_->deep_, C16NUM)); in InitRunBuf()
matmul_base_int8_coder.cc
61 param_->row_align_ = UP_ROUND(param_->row_, row_tile_); in ResizeParameter()
222 std::string current_src_a = a_ptr_str + "+" + std::to_string(i * param_->row_ * param_->deep_); in DoCode()
224 …code.CodeFunction("RowMajor2Col16x4MajorInt8", current_src_a, param_->deep_, param_->row_, pack_a_… in DoCode()
225 …code.CodeFunction("CalcInputSums", current_src_a, param_->row_, param_->deep_, "tmp_weight_zp", in… in DoCode()
228 …code.CodeFunction("RowMajor2Row16x4MajorInt8", current_src_a, pack_a_ptr_, param_->row_, param_->d… in DoCode()
229 …code.CodeFunction("CalcInputSums", current_src_a, param_->row_, param_->deep_, "tmp_weight_zp", in… in DoCode()
233 std::string batch_c_ptr_str = c_ptr_str + "+" + std::to_string(i * param_->row_ * param_->col_); in DoCode()
261 …ction("MatmulInt8Opt", pack_a_ptr_, batch_b_ptr_str_final, batch_c_ptr_final, param_->row_, cur_oc, in DoCode()
deconvolution_int8_coder.cc
53 matmul_param_->row_ = conv_param_->input_h_ * conv_param_->input_w_; in InitParam()
118 input_sum_size_ = UP_ROUND(matmul_param_->row_, C4NUM) * sizeof(int32_t); in InitRunBuf()
145 …code.CodeFunction("RowMajor2Row16x4MajorInt8", input_tensor_, input_ptr_, matmul_param_->row_, mat… in DoCode()
147 … conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_, UP_ROUND(matmul_param_->row_, C4NUM), in DoCode()
157 UP_ROUND(matmul_param_->row_, C4NUM), cur_oc * C4NUM * kernel_plane, in DoCode()
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/nnacl/fp32/
deconv2d_fp32_coder.cc
58 matmul_param_.row_ = input_plane_; in InitParam()
61 matmul_param_.row_12_ = UP_ROUND(matmul_param_.row_, C12NUM); in InitParam()
62 matmul_param_.row_4_ = UP_ROUND(matmul_param_.row_, C4NUM); in InitParam()
180 …code.CodeFunction("RowMajor2Col4Major", input_ptr_, packed_input_, matmul_param_.row_, matmul_para… in DoCode()
182 …code.CodeFunction("RowMajor2Col12Major", input_ptr_, packed_input_, matmul_param_.row_, matmul_par… in DoCode()
full_connection_fp32_coder.cc
29 params_->row_ = row; in ReSize()
51 params_->row_ = a_shape.at(0); in Init()
64 if (params_->row_ == 1 && !params_->b_const_) { in Init()
matmul_fp32_base_coder.cc
71 if (params_->row_ == 1) { in ResizeParameter()
74 params_->row_align_ = vec_matmul_ ? 1 : UP_ROUND(params_->row_, row_tile_); in ResizeParameter()
241 …code << "\t\tfloat *batch_c_ptr = " << c_str << " + i * " << params_->row_ * params_->col_ << ";\n… in DoCode()
245 …code << "\t\tfloat *batch_c_ptr = " << c_str << " + i * " << params_->row_ * params_->col_ << ";\n… in DoCode()
253 params_->deep_, params_->row_, cur_oc, params_->col_, "OutType_Nhwc"); in DoCode()
/third_party/mindspore/mindspore/lite/micro/coder/wrapper/fp32/
matmul_fp32_wrapper.c
24 const float *src = src_ptr + i * params_->deep_ * params_->row_; in InitMatrixA()
27 RowMajor2Row12Major(src, dst, params_->deep_, params_->row_); in InitMatrixA()
29 RowMajor2Col12Major(src, dst, params_->row_, params_->deep_); in InitMatrixA()
/third_party/mindspore/mindspore/lite/micro/coder/wrapper/int8/
conv1x1_run_int8_wrapper.c
36 int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM; in OcOptPre()
75 … args->matmul_param_->row_, cur_oc, args->matmul_param_->deep_4_, cur_left_shift, cur_right_shift, in RunArm64OptOc()
106 … args->matmul_param_->row_, cur_oc, args->matmul_param_->deep_16_, cur_left_shift, cur_right_shift, in RunArmOc()
114 int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM; in RunArm64OptHw()
140 int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM; in RunArmHw()
180 int hw_thread_count = UP_DIV(args->matmul_param_->row_, row_pack_count); in Conv1x1PreRun()
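
Note: the conv1x1_run_int8_wrapper.c hits compute each task's remaining rows as row_ minus what earlier tasks already covered, then clamp the chunk actually processed. A small sketch of that tail handling (constants are illustrative):

#include <algorithm>
#include <cstdio>

constexpr int UP_DIV(int x, int y) { return (x + y - 1) / y; }

int main() {
  const int C4NUM = 4, row = 37, thread_num = 3;

  const int hw_thread_count = UP_DIV(row, C4NUM);  // row blocks of 4, as in Conv1x1PreRun()
  const int thread_stride_hw = UP_DIV(hw_thread_count, thread_num);

  for (int task_id = 0; task_id < thread_num; ++task_id) {
    // Rows left once the earlier tasks' blocks are accounted for; the last
    // task gets the (possibly short) tail.
    const int res_stride = row - task_id * thread_stride_hw * C4NUM;
    const int cur_hw = std::min(thread_stride_hw * C4NUM, res_stride);
    std::printf("task %d: res=%d cur=%d\n", task_id, res_stride, std::max(cur_hw, 0));
  }
  return 0;
}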
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/
conv_fp16.c
122 int input_block = UP_DIV(param->row_, tile_n); in Conv1x1OutNc8hw8MultiThreadByInputFp16()
134 int cur_row_cnt = MSMIN(block_per_thread * tile_n, param->row_ - input_start_block * tile_n); in Conv1x1OutNc8hw8MultiThreadByInputFp16()
141 int real_in_row = (i != input_block - 1) ? tile_n : param->row_ - i * tile_n; in Conv1x1OutNc8hw8MultiThreadByInputFp16()
146 int out_offset = j * param->row_ * C8NUM + i * tile_n * real_weight_row; in Conv1x1OutNc8hw8MultiThreadByInputFp16()
164 int input_block = UP_DIV(param->row_, tile_n); in Conv1x1OutNc8hw8MultiThreadByWeightFp16()
174 int real_in_row = (i != input_block - 1) ? tile_n : param->row_ - i * tile_n; in Conv1x1OutNc8hw8MultiThreadByWeightFp16()
179 int out_offset = j * param->row_ * C8NUM + i * tile_n * real_weight_row; in Conv1x1OutNc8hw8MultiThreadByWeightFp16()
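
Note: conv_fp16.c splits row_ into tile_n-sized input blocks and gives the last block only the leftover rows. A hedged sketch of that blocking (values are made up):

#include <cstdio>

constexpr int UP_DIV(int x, int y) { return (x + y - 1) / y; }

int main() {
  const int row = 50, tile_n = 16;

  const int input_block = UP_DIV(row, tile_n);  // number of row blocks, as in the hits
  for (int i = 0; i < input_block; ++i) {
    // Every block spans tile_n rows except the last, which takes the remainder.
    const int real_in_row = (i != input_block - 1) ? tile_n : row - i * tile_n;
    std::printf("block %d: rows=%d\n", i, real_in_row);
  }
  return 0;
}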
