
Searched refs: width_ (Results 1 – 25 of 135), sorted by relevance

/third_party/mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/lite_cv/
canny.cc
89 …if (dst.IsEmpty() || dst.width_ != src.width_ || dst.height_ != src.height_ || dst.channel_ != src… in Sobel()
91 dst.Init(src.width_, src.height_, src.channel_, LDataType::FLOAT32); in Sobel()
131 edges.Init(gx.width_, gx.height_, gx.channel_, gx.data_type_); in NonMaximumSuppression()
137 int size = gx.height_ * gx.width_; in NonMaximumSuppression()
150 for (int x = 0; x < gx.width_; x++) { in NonMaximumSuppression()
151 float gx_value = Round(gx_ptr[y * gx.width_ + x]); in NonMaximumSuppression()
152 float gy_value = Round(gy_ptr[y * gx.width_ + x]); in NonMaximumSuppression()
157 float edge_value = temp[y * gx.width_ + x]; in NonMaximumSuppression()
161 edge_pre = GetEdge(temp, gx.width_, gx.height_, x - 1, y); in NonMaximumSuppression()
162 edge_nex = GetEdge(temp, gx.width_, gx.height_, x + 1, y); in NonMaximumSuppression()
[all …]
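
The canny.cc matches index flat image buffers row-major as ptr[y * width_ + x] and read neighbours through a bounds-checked helper. A minimal sketch of that access pattern, assuming a plain float buffer (GetEdgeValue is a hypothetical stand-in, not the library's GetEdge):

// Hypothetical bounds-checked lookup into a row-major float image of width x height,
// mirroring the temp[y * gx.width_ + x] and GetEdge(...) usage in the matches above.
inline float GetEdgeValue(const float *data, int width, int height, int x, int y) {
  if (x < 0 || x >= width || y < 0 || y >= height) return 0.0f;  // out of bounds: no edge
  return data[y * width + x];
}
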
lite_mat.cc
31 width_ = 0; in LiteMat()
46 width_ = 0; in LiteMat()
62 width_ = 0; in LiteMat()
78 width_ = 0; in LiteMat()
94 width_ = 0; in LiteMat()
110 width_ = 0; in LiteMat()
134 width_ = m.width_; in LiteMat()
167 width_ = m.width_; in operator =()
183 width_ = width; in Init()
203 width_ = width; in Init()
[all …]
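
The lite_mat.cc matches show the LiteMat lifecycle around width_: every constructor path resets it to 0, and Init() records the requested geometry before allocating storage. A stripped-down sketch of that pattern; the member names come from the snippets, everything else (allocation, type name) is assumed for illustration:

#include <cstdlib>

// Illustrative stand-in for the LiteMat pattern above; the real class also tracks
// LDataType, element size, and reference-counted storage.
struct MiniMat {
  int width_ = 0;   // constructors leave the mat empty: width_ = 0
  int height_ = 0;
  int channel_ = 0;
  unsigned char *data_ = nullptr;

  MiniMat() = default;
  MiniMat(const MiniMat &) = delete;             // copying elided in this sketch
  MiniMat &operator=(const MiniMat &) = delete;
  ~MiniMat() { std::free(data_); }

  // Init() records the geometry, then allocates width_ * height_ * channel_ bytes.
  bool Init(int width, int height, int channel) {
    width_ = width;
    height_ = height;
    channel_ = channel;
    data_ = static_cast<unsigned char *>(
        std::malloc(static_cast<std::size_t>(width_) * height_ * channel_));
    return data_ != nullptr;
  }
};
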
image_process.cc
253 int border_x = static_cast<int>(kernel.width_ / 2); in Conv2DImplement()
258 if ((border_x > INT_MAX / 2) || (src.width_ > INT_MAX - 2 * border_x)) { in Conv2DImplement()
265 pad_mat.Init(src.width_ + 2 * border_x, src.height_ + 2 * border_y, src.channel_, src.data_type_); in Conv2DImplement()
274 int pad_step = pad_mat.width_ * pad_mat.channel_; in Conv2DImplement()
275 int dst_step = src.width_ * src.channel_; in Conv2DImplement()
279 for (int x = border_x; x < pad_mat.width_ - border_x; x++) { in Conv2DImplement()
282 for (int j = -border_x; j < -border_x + kernel.width_; j++) { in Conv2DImplement()
284 kernel_ptr[(i + border_y) * kernel.width_ + (j + border_x)]; in Conv2DImplement()
296 for (int x = border_x; x < pad_mat.width_ - border_x; x++) { in Conv2DImplement()
301 for (int j = -border_x; j < -border_x + kernel.width_; j++) { in Conv2DImplement()
[all …]
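
The Conv2DImplement() matches use the standard "same"-padding arithmetic: the border is half the kernel width, the padded image is src.width_ + 2 * border_x wide, and row strides are width times channel count. A hedged sketch of just that size math, with names chosen for illustration rather than taken from MindSpore:

#include <climits>

// Compute padded convolution dimensions and row strides the way the snippets above do,
// including the overflow guard from line 258. Returns false if the sizes would overflow int.
struct ConvSizes { int pad_width, pad_height, pad_step, dst_step; };

inline bool PaddedConvSizes(int src_width, int src_height, int channels,
                            int kernel_width, int kernel_height, ConvSizes *out) {
  int border_x = kernel_width / 2;
  int border_y = kernel_height / 2;
  if (border_x > INT_MAX / 2 || src_width > INT_MAX - 2 * border_x) return false;
  out->pad_width = src_width + 2 * border_x;
  out->pad_height = src_height + 2 * border_y;
  out->pad_step = out->pad_width * channels;  // stride of one padded row, in elements
  out->dst_step = src_width * channels;       // stride of one output row, in elements
  return true;
}
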
warp_affine.cc
213 …if (borderType == PADD_BORDER_CONSTANT && (shx >= _src.width_ || shx + 1 < 0 || shy >= _src.height… in RemapBilinearCur1C()
222 sv0 = BorderPolate(shx, _src.width_, borderType); in RemapBilinearCur1C()
223 sv1 = BorderPolate(shx + 1, _src.width_, borderType); in RemapBilinearCur1C()
242 …if (borderType == PADD_BORDER_CONSTANT && (shx >= _src.width_ || shx + 1 < 0 || shy >= _src.height… in RemapBilinearCurMoreC()
250 sv0 = BorderPolate(shx, _src.width_, borderType); in RemapBilinearCurMoreC()
251 sv1 = BorderPolate(shx + 1, _src.width_, borderType); in RemapBilinearCurMoreC()
272 unsigned src_width = std::max(_src.width_ - 1, 0); in RemapBilinear()
282 for (int dx = 0; dx <= _dst.width_; dx++) { in RemapBilinear()
284 …dx < _dst.width_ ? (unsigned)HW[dx * 2] < src_width && (unsigned)HW[dx * 2 + 1] < src_height : !pr… in RemapBilinear()
338 int dst_width = std::min(buf_size / dst_height, dst.width_); in Remap()
[all …]
/third_party/boost/boost/xpressive/detail/dynamic/
sequence.hpp
31 , width_(0) in sequence()
43 , width_(xpr->Matcher::get_width()) in sequence()
55 , width_(0) in sequence()
80 this->width_ += that.width_; in operator +=()
95 this->width_ = that.width_; in operator |=()
100 this->width_ |= that.width_; in operator |=()
129 return this->width_; in width()
147 this->quant_ = (!is_unknown(this->width_) && this->pure_) in set_quant_()
148 ? (!this->width_ ? quant_none : quant_fixed_width) in set_quant_()
153 detail::width width_; member
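
In the xpressive sequence.hpp matches, width_ holds the static match width of a sub-expression: concatenation (operator+=) adds widths, while alternation (operator|=) keeps a fixed width only when both branches agree, otherwise the width becomes unknown and set_quant_() falls back accordingly. A loose model of that bookkeeping, using a plain sentinel value instead of Boost's detail::width type:

#include <cstddef>
#include <limits>

// Simplified stand-in for boost::xpressive::detail::width: SIZE_MAX means "unknown width".
constexpr std::size_t kUnknownWidth = std::numeric_limits<std::size_t>::max();

inline bool is_unknown_width(std::size_t w) { return w == kUnknownWidth; }

// Concatenation: widths add, and unknown propagates (models operator+= above).
inline std::size_t concat_width(std::size_t a, std::size_t b) {
  return (is_unknown_width(a) || is_unknown_width(b)) ? kUnknownWidth : a + b;
}

// Alternation: the width stays fixed only if both branches have the same known width
// (models the width_ |= that.width_ on line 100).
inline std::size_t alt_width(std::size_t a, std::size_t b) {
  return (a == b) ? a : kUnknownWidth;
}
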
/third_party/skia/third_party/externals/tint/src/sem/
vector_type.cc
25 : subtype_(subtype), width_(width) { in Vector()
26 TINT_ASSERT(Semantic, width_ > 1); in Vector()
27 TINT_ASSERT(Semantic, width_ < 5); in Vector()
35 return "__vec_" + std::to_string(width_) + subtype_->type_name(); in type_name()
40 out << "vec" << width_ << "<" << subtype_->FriendlyName(symbols) << ">"; in FriendlyName()
49 return SizeOf(width_); in Size()
53 return AlignOf(width_); in Align()
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/
roi_align_cpu_kernel.cc
42 width_ = SizeToInt(x_shape[WIDTH]); in InitKernel()
77 … height_, width_, pooled_height_, pooled_width_, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, in Launch()
95 …roi::bilinear_interpolate(height_, width_, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, … in Launch()
97 x_low < width_ && x_high < width_) { in Launch()
98 T v1 = input[offset + y_low * width_ + x_low]; in Launch()
99 T v2 = input[offset + y_low * width_ + x_high]; in Launch()
100 T v3 = input[offset + y_high * width_ + x_low]; in Launch()
101 T v4 = input[offset + y_high * width_ + x_high]; in Launch()
roi_align_grad_cpu_kernel.cc
105 width_ = xdiff_shape_[WIDTH]; in InitKernel()
116 int size_init = batch_size_ * channels_ * height_ * width_; in Launch()
139 … height_, width_, pooled_height_, pooled_width_, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, in Launch()
160 …roi::bilinear_interpolate(height_, width_, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, … in Launch()
162 x_low < width_ && x_high < width_) { in Launch()
168 T *dx_1 = dx + offset + y_low * width_ + x_low; in Launch()
169 T *dx_2 = dx + offset + y_low * width_ + x_high; in Launch()
170 T *dx_3 = dx + offset + y_high * width_ + x_low; in Launch()
171 T *dx_4 = dx + offset + y_high * width_ + x_high; in Launch()
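
Both ROIAlign CPU kernels gather the four neighbours of each sampling point with the same offset + y * width_ + x indexing and blend them with the bilinear weights w1..w4. A hedged sketch of that gather-and-blend step (flat NCHW buffer assumed; computing the corners and weights is elided):

// Bilinear blend of four neighbouring samples from a row-major plane starting at
// input + offset, as in the v1..v4 reads of the ROIAlign matches above.
template <typename T>
T BilinearBlend(const T *input, int offset, int width,
                int x_low, int x_high, int y_low, int y_high,
                T w1, T w2, T w3, T w4) {
  T v1 = input[offset + y_low * width + x_low];
  T v2 = input[offset + y_low * width + x_high];
  T v3 = input[offset + y_high * width + x_low];
  T v4 = input[offset + y_high * width + x_high];
  return w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4;
}
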
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/quant/
correction_mul_grad_gpu_kernel.h
30 …onMulGradGpuKernel() : is_null_input_(false), batch_size_(0), channel_(0), height_(0), width_(0) {} in CorrectionMulGradGpuKernel()
50 CalCorrectionMul(d_out, gamma, running_std, batch_size_, channel_, height_, width_, d_weight, in Launch()
52 …CalCorrectionMulGrad(d_out, weight, running_std, batch_size_, channel_, height_, width_, d_gamma, … in Launch()
80 width_ = input_shape[3]; in Init()
88 size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); in InitSizeLists()
105 size_t width_; variable
correction_mul_gpu_kernel.h
30 …CorrectionMulGpuKernel() : is_null_input_(false), batch_size_(0), channel_(0), height_(0), width_(… in CorrectionMulGpuKernel()
47 CalCorrectionMul(weight, gamma, running_std, batch_size_, channel_, height_, width_, output, in Launch()
74 width_ = input_shape[3]; in Init()
82 size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); in InitSizeLists()
97 size_t width_; variable
batchnorm_fold_grad_gpu_kernel.h
44 width_(0) {} in BatchNormFoldGradGpuKernel()
72 …ThrustFillWith(dx, batch_ * channel_ * height_ * width_, 0.f, reinterpret_cast<cudaStream_t>(strea… in Launch()
75 …oldGrad(d_batch_mean, d_batch_std, x, batch_mean, batch_std, batch_, channel_, height_, width_, dx, in Launch()
116 width_ = input_shape[3]; in Init()
118 input_size_ = sizeof(T) * batch_ * channel_ * height_ * width_; in Init()
155 int width_; variable
batchnorm_fold2_grad_gpu_kernel.h
37 width_(0), in BatchNormFold2GradGpuKernel()
71 size_t x_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); in Launch()
82 …old2GradReduce(dout, x, d_beta, tmp, reduce_x, tmp2, tmp_x, batch_size_, channel_, height_, width_, in Launch()
85 …tchNormFold2GradNotFreezeDxMul(batch_std, running_std, d_x, batch_size_, channel_, height_, width_, in Launch()
121 width_ = input_shape[3]; in Init()
134 size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); in InitSizeLists()
166 size_t width_; variable
batchnorm_fold2_gpu_kernel.h
37 width_(0), in BatchNormFold2GpuKernel()
63 freeze_bn_, batch_size_, channel_, height_, width_, in Launch()
93 width_ = input_shape[3]; in Init()
106 size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); in InitSizeLists()
125 size_t width_; variable
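
Every quant GPU kernel in this directory takes width_ from input_shape[3] (NCHW layout) and sizes its buffers as batch * channel * height * width * sizeof(T). A small illustration of that buffer-size calculation; the helper name is made up for the sketch:

#include <cstddef>
#include <vector>

// Bytes needed for an NCHW tensor, matching the input_size computations above;
// element_size stands in for sizeof(T) of the kernel's data type.
inline std::size_t NchwBufferBytes(const std::vector<std::size_t> &shape, std::size_t element_size) {
  // shape = {batch, channel, height, width}; width_ is shape[3] in the kernels above.
  return shape.at(0) * shape.at(1) * shape.at(2) * shape.at(3) * element_size;
}
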
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/
resize_cubic_op.cc
143 normalize_coeff(output.width_, kernel_size, prekk, kk); in ImagingHorizontalInterp()
146 int32_t input_width = input.width_ * 3; in ImagingHorizontalInterp()
147 int32_t output_width = output.width_ * 3; in ImagingHorizontalInterp()
152 for (int xx = 0; xx < output.width_; xx++) { in ImagingHorizontalInterp()
181 const int32_t input_width = input.width_ * 3; in ImagingVerticalInterp()
182 const int32_t output_width = output.width_ * 3; in ImagingVerticalInterp()
190 for (int xx = 0; xx < output.width_; xx++) { in ImagingVerticalInterp()
214 horizontal_interp = x_size != input.width_ || rect[2] != x_size || rect[0]; in ImageInterpolation()
217 …horiz_kernel = calc_coeff(input.width_, x_size, rect[0], rect[2], interp, horiz_region, horiz_coef… in ImageInterpolation()
252 output.Init(input.width_, y_size, 3, LDataType::UINT8, false); in ImageInterpolation()
[all …]
crop_op.cc
35 …CHECK_FAIL_RETURN_UNEXPECTED(x_ + width_ <= input_w, "Crop: Crop width dimension: " + std::to_stri… in Compute()
37 return Crop(input, output, x_, y_, width_, height_); in Compute()
43 TensorShape out = TensorShape{height_, width_}; in OutputShape()
bounding_box.h
44 …out << "Bounding Box with (X,Y,W,H): (" << bbox.x_ << "," << bbox.y_ << "," << bbox.width_ << "," …
52 bbox_float width() { return width_; } in width()
58 void SetWidth(bbox_float w) { width_ = w; } in SetWidth()
131 bbox_float width_; variable
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/
batch_norm_grad_gpu_kernel.h
40 width_(0), in BatchNormGradGpuKernel()
111 width_, reinterpret_cast<cudaStream_t>(stream_ptr)); in Launch()
252 width_ = 1; in SetTensorDescriptor()
257 width_ = SizeToInt(shape[2]); in SetTensorDescriptor()
264 width_ = SizeToInt(shape[3]); in SetTensorDescriptor()
270 …nSetTensor4dDescriptor(x_desc_, cudnn_format, cudnn_data_type_, batch_, channel_, height_, width_), in SetTensorDescriptor()
276 …nSetTensor4dDescriptor(y_desc_, cudnn_format, cudnn_data_type_, batch_, channel_, height_, width_), in SetTensorDescriptor()
282 …SetTensor4dDescriptor(dy_desc_, cudnn_format, cudnn_data_type_, batch_, channel_, height_, width_), in SetTensorDescriptor()
287 …SetTensor4dDescriptor(dx_desc_, cudnn_format, cudnn_data_type_, batch_, channel_, height_, width_), in SetTensorDescriptor()
293 …SetTensor4dDescriptor(dz_desc_, cudnn_format, cudnn_data_type_, batch_, channel_, height_, width_), in SetTensorDescriptor()
[all …]
softmax_gpu_kernel.h
48 width_(0) {} in SoftmaxGpuKernel()
144 SizeToInt(channel_size_), SizeToInt(height_), SizeToInt(width_)), in Init()
149 SizeToInt(channel_size_), SizeToInt(height_), SizeToInt(width_)), in Init()
224 width_ = 1; in InitSizeByAxis2D()
225 input_size_ = sizeof(T) * batch_size_ * channel_size_ * height_ * width_; in InitSizeByAxis2D()
248 width_ = 1; in InitSizeByAxisLastDim()
249 input_size_ = sizeof(T) * batch_size_ * channel_size_ * height_ * width_; in InitSizeByAxisLastDim()
288 width_ = 1; in InitSizeByAxisND()
289 input_size_ = sizeof(T) * batch_size_ * channel_size_ * height_ * width_; in InitSizeByAxisND()
318 size_t width_; variable
roi_align_gpu_kernel.h
41 width_(0), in ROIAlignGpuFwdKernel()
62 width_, pooled_height_, pooled_width_, reinterpret_cast<cudaStream_t>(stream_ptr)); in Launch()
101 width_ = x_shape[3]; in Init()
102 x_shape_ = {batch_N_, channels_, height_, width_}; in Init()
103 x_size_ = batch_N_ * channels_ * height_ * width_ * sizeof(T); in Init()
153 int width_; variable
roi_align_grad_gpu_kernel.h
40 width_(0), in ROIAlignGradGpuFwdKernel()
61 … height_, width_, pooled_height_, pooled_width_, reinterpret_cast<cudaStream_t>(stream_ptr)); in Launch()
130 width_ = xdiff_shape_[3]; in Init()
133 output_shape_ = {batch_size_, channels_, height_, width_}; in Init()
134 output_size_ = batch_size_ * channels_ * height_ * width_ * sizeof(T); in Init()
160 int width_; variable
softmax_cross_entropy_with_logits_gpu_kernel.h
48 width_(0) {} in SoftmaxCrossEntropyWithLogitsGpuKernel()
97 … batch_size_, channel_size_, height_, width_), in Init()
102 channel_size_, height_, width_), in Init()
150 width_ = 1; in InferInputOutputSize()
151 logits_size_ = sizeof(T) * batch_size_ * channel_size_ * height_ * width_; in InferInputOutputSize()
201 size_t width_; variable
sparse_softmax_cross_entropy_with_logits_gpu_kernel.h
48 width_(0) {} in SparseSoftmaxCrossEntropyWithLogitsGpuKernel()
102 … batch_size_, channel_size_, height_, width_), in Init()
107 channel_size_, height_, width_), in Init()
154 width_ = 1; in InferInputOutputSize()
155 logits_size_ = sizeof(T) * batch_size_ * channel_size_ * height_ * width_; in InferInputOutputSize()
200 size_t width_; variable
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/text/kernels/
sliding_window_op.cc
30 RETURN_IF_NOT_OK(SlidingWindowHelper(input, output, output_shape[0], width_, axis_)); in Compute()
41 if (input_shape[axis] >= width_) { in OutputShape()
46 output_shape_initializer.push_back(input_shape[idx] - (width_ - 1)); in OutputShape()
47 output_shape_initializer.push_back(width_); in OutputShape()
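
The sliding_window_op.cc matches compute the output shape along the chosen axis as n - (width_ - 1) windows of width_ elements, provided the input has at least width_ elements. A small sketch of that shape arithmetic (function name is illustrative):

#include <cstdint>
#include <utility>

// Returns {number of windows, window width} for a 1-D sliding window of width w over
// n elements, mirroring the OutputShape() math above; {0, 0} if the input is too short.
inline std::pair<std::int64_t, std::int64_t> SlidingWindowShape(std::int64_t n, std::int64_t w) {
  if (w <= 0 || n < w) return {0, 0};
  return {n - (w - 1), w};
}
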
/third_party/boost/boost/xpressive/detail/core/matcher/
lookbehind_matcher.hpp
40 , width_(wid) in lookbehind_matcher()
42 BOOST_XPR_ENSURE_(!is_unknown(this->width_), regex_constants::error_badlookbehind, in lookbehind_matcher()
64 … if(!detail::advance_to(state.cur_, -static_cast<difference_type>(this->width_), state.begin_)) in match_()
106 … if(!detail::advance_to(state.cur_, -static_cast<difference_type>(this->width_), state.begin_)) in match_()
163 std::size_t width_; member
/third_party/boost/libs/spirit/test/karma/
regression_adapt_adt.cpp
25 int width_; member in data1
30 : width_(400), height_(400) in data1()
34 : width_(width), height_(height) in data1()
37 int width() const { return width_;} in width()
40 void set_width(int width) { width_ = width;} in set_width()
