
Searched refs:out_width (Results 1 – 25 of 155) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/image/
resize_nearest_neighbor_op_gpu.cu.cc
38 const int out_width, const float height_scale, const float width_scale, in ResizeNearestNeighborNHWC() argument
44 int out_x = n % out_width; in ResizeNearestNeighborNHWC()
45 n /= out_width; in ResizeNearestNeighborNHWC()
69 const int out_width, const float height_scale, const float width_scale, in LegacyResizeNearestNeighborNHWC() argument
75 int out_x = n % out_width; in LegacyResizeNearestNeighborNHWC()
76 n /= out_width; in LegacyResizeNearestNeighborNHWC()
98 const int out_width, const float height_scale, const float width_scale, in ResizeNearestNeighborBackwardNHWC() argument
109 T* bottom_diff_n = bottom_diff + n * channels * out_height * out_width; in ResizeNearestNeighborBackwardNHWC()
118 out_width - 1), in ResizeNearestNeighborBackwardNHWC()
120 const int idx = (out_y * out_width + out_x) * channels + c; in ResizeNearestNeighborBackwardNHWC()
[all …]
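
The "out_x = n % out_width; n /= out_width" pattern in the kernels above is the standard way to recover NHWC coordinates from a flattened output index. A minimal standalone sketch of that decomposition (the struct and function names are illustrative, not TensorFlow's):

    // Decompose a flat NHWC index into (batch, y, x, channel), mirroring the
    // modulo/divide chain used by the resize kernels listed above.
    struct NhwcCoords { int n, y, x, c; };

    inline NhwcCoords DecomposeNhwcIndex(int index, int channels,
                                         int out_height, int out_width) {
      NhwcCoords coords;
      coords.c = index % channels;    // innermost dimension: channel
      index /= channels;
      coords.x = index % out_width;   // then output column
      index /= out_width;
      coords.y = index % out_height;  // then output row
      index /= out_height;
      coords.n = index;               // whatever remains is the batch index
      return coords;
    }
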
resize_bilinear_op.cc
138 const float ys_lerp, const int64_t out_width, in ResizeLineChannels() argument
140 for (int64_t x = 0; x < out_width; ++x) { in ResizeLineChannels()
177 const float ys_lerp, const int64_t out_width, in ResizeLine3ChannelsVector() argument
183 for (x = 0; x < out_width - 1; ++x) { in ResizeLine3ChannelsVector()
199 ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs + out_width - 1, in ResizeLine3ChannelsVector()
200 ys_lerp, 1, out_y + (out_width - 1) * 3, 3); in ResizeLine3ChannelsVector()
208 const int64_t out_width, const int channels,
216 const int64_t out_width, const int channels, in resize_image() argument
222 const int64_t out_row_size = out_width * channels; in resize_image()
235 ys[y].lerp, out_width, output_y_ptr); in resize_image()
[all …]
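
ResizeLineChannels and its vectorized three-channel variant produce one output row at a time: each output x blends two neighbouring input columns with a per-x horizontal lerp, and the two bracketing input rows are blended with ys_lerp. A hedged sketch of that per-row interpolation (not the actual TensorFlow helper; xs_lower/xs_upper/xs_lerp stand in for its cached interpolation table):

    #include <cstdint>

    // Bilinear interpolation for one output row. lower_row/upper_row are the
    // two bracketing input rows; xs_lower/xs_upper hold, for each output x,
    // the offsets of the two source pixels (already scaled by channels), and
    // xs_lerp the horizontal blend weight.
    inline void ResizeRowBilinearSketch(const float* lower_row, const float* upper_row,
                                        const int* xs_lower, const int* xs_upper,
                                        const float* xs_lerp, float ys_lerp,
                                        int64_t out_width, int channels, float* out) {
      for (int64_t x = 0; x < out_width; ++x) {
        for (int c = 0; c < channels; ++c) {
          const float top_left  = lower_row[xs_lower[x] + c];
          const float top_right = lower_row[xs_upper[x] + c];
          const float bot_left  = upper_row[xs_lower[x] + c];
          const float bot_right = upper_row[xs_upper[x] + c];
          const float top = top_left + (top_right - top_left) * xs_lerp[x];
          const float bot = bot_left + (bot_right - bot_left) * xs_lerp[x];
          out[x * channels + c] = top + (bot - top) * ys_lerp;
        }
      }
    }
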
resize_bilinear_op_gpu.cu.cc
39 int in_width, int channels, int out_height, int out_width, in ResizeBilinearKernel_faster() argument
43 out_idx < out_width * out_height * num_channel_threads; in ResizeBilinearKernel_faster()
48 const int x = idx % out_width; in ResizeBilinearKernel_faster()
49 idx /= out_width; in ResizeBilinearKernel_faster()
108 output)[(((b * out_height + y) * out_width + x) * channels + c) / in ResizeBilinearKernel_faster()
119 int out_height, int out_width, float* __restrict__ output) { in ResizeBilinearKernel() argument
125 const int x = idx % out_width; in ResizeBilinearKernel()
126 idx /= out_width; in ResizeBilinearKernel()
287 int out_height, int out_width, float* __restrict__ output) { in LegacyResizeBilinearKernel() argument
293 const int x = idx % out_width; in LegacyResizeBilinearKernel()
[all …]
resize_nearest_neighbor_op.cc
144 const Eigen::Index out_width = output.dimension(2); in operator ()() local
160 for (Eigen::Index x = 0; x < out_width; ++x) { in operator ()()
176 Eigen::Index x = b % out_width; in operator ()()
177 Eigen::Index y = (b / out_width) % out_height; in operator ()()
178 Eigen::Index bs = (b / out_width) / out_height; in operator ()()
198 Eigen::Index N = batch_size * out_height * out_width; in operator ()()
257 const int64_t out_width = sizes(1); in Compute() local
263 0, TensorShape({batch_size, out_height, out_width, channels}), in Compute()
275 CalculateResizeScale(out_width, in_width, align_corners_); in Compute()
335 const Eigen::Index out_width = output.dimension(2); in operator ()() local
[all …]
image_ops.cc
67 int32_t out_height, out_width; in DoImageProjectiveTransformOp() local
79 out_width = shape_vec(1); in DoImageProjectiveTransformOp()
80 OP_REQUIRES(ctx, out_height > 0 && out_width > 0, in DoImageProjectiveTransformOp()
85 out_width = images_t.shape().dim_size(2); in DoImageProjectiveTransformOp()
102 out_width, images_t.dim_size(3)}), in DoImageProjectiveTransformOp()
/external/ComputeLibrary/src/core/NEON/kernels/arm_gemm/
gemm_interleaved_pretransposed_2d.hpp
195 unsigned int n_0 = std::min(this->_Nsize, strategy::out_width() * n_start); in execute_pretranspose()
196 unsigned int n_max = std::min(this->_Nsize, strategy::out_width() * n_end); in execute_pretranspose()
221 int bblocks = iceildiv(current.xmax() - current.x0(), strategy::out_width()); in execute_pretranspose()
227 unsigned b_thread_start_offset = iceildiv(current.x0(), strategy::out_width()); in execute_pretranspose()
230 … b_panel = b_panel_start + (b_thread_start_offset * strat.out_width() * kern_k); in execute_pretranspose()
302 b_panel += (bblocks * strat.out_width() * kern_k); in execute_pretranspose()
317 …k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height()))… in get_k_block_size()
356 , _Nround_div ( iceildiv(_Nsize, strategy::out_width()) ) in GemmInterleavedPretransposed2d()
357 , _Nround ( _Nround_div * strategy::out_width() ) in GemmInterleavedPretransposed2d()
368 …_x_block = (((L2_size * 9) / 10) - (_k_block * sizeof(Toi) * (strategy::out_width() + strategy::ou… in GemmInterleavedPretransposed2d()
[all …]
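
In arm_gemm, strategy::out_width() and strategy::out_height() give the dimensions of one micro-kernel output tile, so the N dimension is repeatedly converted into a count of column tiles with iceildiv and padded to a whole number of tiles with roundup. A sketch of those two helpers and the idiom, under that assumption:

    // Ceiling division and round-up-to-multiple, as used to split the GEMM N
    // dimension into blocks of out_width() columns.
    template <typename T>
    constexpr T iceildiv(T a, T b) { return (a + b - 1) / b; }

    template <typename T>
    constexpr T roundup(T a, T b) { return iceildiv(a, b) * b; }

    // Example: with N = 1000 columns and out_width() == 12,
    // iceildiv(1000, 12) == 84 column blocks ("bblocks") and
    // roundup(1000, 12) == 1008 padded columns of pretransposed B storage.
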
gemm_interleaved.hpp
86 const int bblocks = iceildiv(n_max - n_0, strategy::out_width()); in run()
90 …copedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width() * kern_k)); in run()
98 …edProfiler(PROFILE_MERGE, (strategy::out_height() * bblocks * strategy::out_width() * sizeof(Tr))); in run()
183 const int bblocks = iceildiv(n_max - n_0, strategy::out_width()); in run()
187 …copedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width() * kern_k)); in run()
195 …rofiler(PROFILE_QUANTIZE, (strategy::out_height() * bblocks * strategy::out_width() * sizeof(Tr))); in run()
202 unsigned int n_start = n_0 + (strategy::out_width() * i); in run()
203 unsigned int n_end = std::min(n_start + strategy::out_width(), n_max); in run()
209 … c_panel + (i * strategy::out_width() * strategy::out_height()), strategy::out_width(), in run()
423 size_t size_per_buffer = sizeof(Tab) * strategy::out_height() * strategy::out_width(); in get_accumulation_buffer_size()
[all …]
gemm_hybrid.hpp
113 return strategy::out_width() * 3; in compute_n_block()
116 return strategy::out_width(); in compute_n_block()
179 … (multi * roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll())) + in execute()
180 (k0 * roundup(_Nsize, strategy::out_width())) + in execute()
184 …OFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width())); in execute()
215 …return roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll()) * _nmulti * … in get_B_pretransposed_array_size()
231 const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size; in pretranspose_B_array()
251 …nbatches) * args._nmulti * args._Msize * roundup(args._Nsize, strategy::out_width()) * roundup(arg… in estimate_cycles()
259 …if ((args._Nsize < strategy::out_width()) || (args._Nsize > strategy::out_width() && args._Nsize <… in estimate_cycles()
gemm_hybrid_quantized.hpp
93 …unsigned int k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::o… in compute_k_block()
122 …const unsigned int k_block_area = k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_h… in compute_n_block()
126 return strategy::out_width(); in compute_n_block()
132 n_block /= strategy::out_width(); in compute_n_block()
133 n_block = std::max(n_block, 1u) * strategy::out_width(); in compute_n_block()
138 n_block = roundup(n_block, strategy::out_width()); in compute_n_block()
207 … (multi * roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll())) + in execute()
208 (k0 * roundup(_Nsize, strategy::out_width())) + in execute()
213 …opedProfiler(PROFILE_KERNEL, (m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width())); in execute()
263 …return get_col_sum_size() + (roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_… in get_B_pretransposed_array_size()
[all …]
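
The compute_k_block / compute_n_block heuristics above size the blocking from the cache hierarchy: k_block is chosen so that an out_width()-wide B panel and an out_height()-tall A panel of that depth fit in half the L1, and n_block spends roughly 90% of the L2 on further B columns, rounded to a multiple of out_width(). Because the listing truncates the exact expressions, the following is only an illustrative reconstruction of that logic, not the arm_gemm source:

    // Cache-driven block sizes (illustrative reconstruction; the divisor the
    // listing truncates is an assumption).
    unsigned int ComputeKBlockSketch(unsigned int l1_size, unsigned int elem_size,
                                     unsigned int out_width, unsigned int out_height) {
      // Half the L1 holds one out_width x k_block B panel or one
      // out_height x k_block A panel, whichever is wider.
      unsigned int wider = out_width > out_height ? out_width : out_height;
      return (l1_size / 2) / (elem_size * wider);
    }

    unsigned int ComputeNBlockSketch(unsigned int l2_size, unsigned int k_block,
                                     unsigned int elem_size, unsigned int out_width,
                                     unsigned int out_height) {
      // ~90% of the L2, minus the A/B panels k_block already commits, spent on
      // additional columns of B, then rounded down to whole out_width() tiles
      // (never less than one tile). Assumes the L2 budget exceeds the panels.
      unsigned int budget =
          ((l2_size * 9) / 10) - (k_block * elem_size * (out_width + out_height));
      unsigned int n_block = budget / (k_block * elem_size);  // assumed divisor
      n_block /= out_width;
      if (n_block < 1u) n_block = 1u;
      return n_block * out_width;
    }
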
gemm_hybrid_quantized_inline.hpp
92 …unsigned int k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::o… in compute_k_block()
120 …t n_block = (((L2_size * 9) / 10) - (k_block * sizeof(Toi) * (strategy::out_width() + strategy::ou… in compute_n_block()
124 n_block /= strategy::out_width(); in compute_n_block()
125 n_block = std::max(n_block, 1U) * strategy::out_width(); in compute_n_block()
130 n_block = roundup(n_block, strategy::out_width()); in compute_n_block()
191 … (multi * roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll())) + in execute()
192 (k0 * roundup(_Nsize, strategy::out_width())) + in execute()
197 …opedProfiler(PROFILE_KERNEL, (m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width())); in execute()
219 …return get_col_sum_size() + (roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_… in get_B_pretransposed_array_size()
242 const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size; in pretranspose_B_array()
gemm_hybrid_indirect.hpp
78 …prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width())); in run()
95 …prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width())); in run()
122 unsigned int output_width = roundup(N, strategy::out_width()); in run()
128 …prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width())); in run()
260 return roundup(n_block, strategy::out_width());
269 return strategy::out_width() * 3;
272 return strategy::out_width();
397 … (multi * roundup(_args._Nsize, strategy::out_width()) * _Ktotal) + in execute()
398 (k0 * roundup(_args._Nsize, strategy::out_width())) + in execute()
404 …OFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width())); in execute()
[all …]
gemv_pretransposed.hpp
64 … _buffer_per_multi(args._Ksize * roundup(args._Nsize, strategy::out_width())) { in GemvPretransposed()
81 return { iceildiv(_args._Nsize, strategy::out_width()) * _args._nmulti }; in get_window_size()
95 const unsigned int window_per_multi = iceildiv(_args._Nsize, strategy::out_width()); in execute()
100 const unsigned int n_0 = (start - (multi_0 * window_per_multi)) * strategy::out_width(); in execute()
101 const unsigned int n_max = (end - (multi_end * window_per_multi)) * strategy::out_width(); in execute()
121 …ffer_per_multi) + (n * roundup(_args._Ksize, strategy::k_unroll())) + (k0 * strategy::out_width()), in execute()
/external/tensorflow/tensorflow/lite/kernels/
padding.h
65 int filter_width, TfLitePadding padding, int* out_height, int* out_width) { in ComputePaddingHeightWidth() argument
66 *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width, in ComputePaddingHeightWidth()
79 filter_width, *out_width, &offset); in ComputePaddingHeightWidth()
89 int* out_width, int* out_depth) { in ComputePadding3DValues() argument
90 *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width, in ComputePadding3DValues()
109 filter_width, *out_width, &offset); in ComputePadding3DValues()
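
ComputePaddingHeightWidth and ComputePadding3DValues both derive each output extent from the input extent, filter size, stride and dilation, then back out the per-side padding and its odd-pixel offset for SAME padding. A hedged sketch of the standard arithmetic such helpers implement, not the TFLite source itself:

    // Output size for SAME vs. VALID padding, plus the per-side padding and
    // the leftover "offset" pixel. Standard formulas; names are illustrative.
    int OutSizeSketch(bool same_padding, int in_size, int filter_size,
                      int stride, int dilation) {
      const int effective_filter = (filter_size - 1) * dilation + 1;
      if (same_padding) {
        return (in_size + stride - 1) / stride;               // ceil(in / stride)
      }
      return (in_size - effective_filter + stride) / stride;  // VALID
    }

    int PaddingWithOffsetSketch(int stride, int dilation, int in_size,
                                int filter_size, int out_size, int* offset) {
      const int effective_filter = (filter_size - 1) * dilation + 1;
      int total = (out_size - 1) * stride + effective_filter - in_size;
      if (total < 0) total = 0;
      *offset = total % 2;  // the extra pixel when padding cannot split evenly
      return total / 2;     // padding applied on each side
    }
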
/external/libjpeg-turbo/simd/
jsimd.h
361 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
364 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
367 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
370 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
373 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
376 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
379 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
384 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
387 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
390 (JDIMENSION out_width, JSAMPIMAGE input_buf, JDIMENSION input_row,
[all …]
/external/webrtc/test/
test_video_capturer.cc
27 int out_width = 0; in OnFrame() local
34 &cropped_width, &cropped_height, &out_width, &out_height)) { in OnFrame()
39 if (out_height != frame.height() || out_width != frame.width()) { in OnFrame()
44 I420Buffer::Create(out_width, out_height); in OnFrame()
55 out_width, out_height); in OnFrame()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
eigen_spatial_convolutions.h
143 TensorIndex out_width; variable
148 out_width = numext::ceil((InputCols - kernelColsEff + 1.f) /
153 out_width = numext::ceil(InputCols / static_cast<float>(col_stride));
158 out_width = 0;
169 pre_contract_dims[1] = out_height * out_width;
175 pre_contract_dims[0] = out_height * out_width;
191 post_contract_dims[2] = out_width;
198 post_contract_dims[NumDims - 3] = out_width;
/external/tensorflow/tensorflow/core/kernels/
pooling_ops_common.h
73 int64 out_width; member
219 params.out_width * params.out_height * params.tensor_in_batch); in SpatialMaxPool()
245 const int32_t out_width = params.out_width; in SpatialMaxPool() local
250 out_height * out_width * params.depth; in SpatialMaxPool()
271 const int32_t w_end = std::min(wpad / col_stride + 1, out_width); in SpatialMaxPool()
276 (out_offset_batch + ph) * out_width; in SpatialMaxPool()
316 params.depth, params.out_height, params.out_width, params.window_rows,
481 params.out_width * params.out_height * params.tensor_in_batch);
507 const int32_t out_width = params.out_width;
512 out_height * out_width * params.depth;
[all …]
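
SpatialMaxPool walks the input and scatters each element into every output cell whose pooling window covers it, which is where the std::min(wpad / col_stride + 1, out_width) bound above comes from. A sketch of that covered-range computation (wpad is the input column shifted by the left padding; names are illustrative, not TensorFlow's):

    // Range of output columns [begin, end) whose pooling windows contain the
    // padded input column wpad.
    struct OutRange { int begin, end; };

    OutRange CoveredOutputColumns(int wpad, int window_cols, int col_stride,
                                  int out_width) {
      OutRange r;
      r.begin = (wpad < window_cols) ? 0 : (wpad - window_cols) / col_stride + 1;
      r.end = wpad / col_stride + 1;
      if (r.end > out_width) r.end = out_width;  // clamp to the output extent
      return r;
    }
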
quantized_resize_bilinear_op_test.cc
115 const int64_t out_width, const int channels, in CalcReferenceResizedVal() argument
120 half_pixel_centers, out_width, in_width, channels, x, width_scale); in CalcReferenceResizedVal()
149 const int64_t out_height, const int64_t out_width, in CheckTensorValue() argument
154 const int64_t out_row_size = out_width * channels; in CheckTensorValue()
158 CalculateResizeScale(in_width, out_width, align_corners); in CheckTensorValue()
162 for (int64_t x = 0; x < out_width; ++x) { in CheckTensorValue()
166 out_height, out_width, channels, height_scale, width_scale, min, in CheckTensorValue()
300 int out_height, int out_width, int channels, in RunTestResizeBilinearTwoDims() argument
312 {out_height, out_width}, false, 1, min, max, in RunTestResizeBilinearTwoDims()
316 batch_size, in_height, in_width, out_height, out_width, channels, in RunTestResizeBilinearTwoDims()
[all …]
eigen_spatial_convolutions_test.cc
680 const int out_width = in_cols; in TEST() local
685 Tensor<float, 4> result(kern_filters, out_depth, out_height, out_width); in TEST()
695 EXPECT_EQ(result.dimension(3), out_width); in TEST()
704 for (int k = 0; k < out_width; ++k) { in TEST()
741 const int out_width = in_cols; in TEST() local
746 Tensor<float, 4, RowMajor> result(out_width, out_height, out_depth, in TEST()
757 EXPECT_EQ(result.dimension(0), out_width); in TEST()
766 for (int k = 0; k < out_width; ++k) { in TEST()
803 const int out_width = 3; in TEST() local
808 Tensor<float, 4> result(kern_filters, out_depth, out_height, out_width); in TEST()
[all …]
quantized_resize_bilinear_op.cc
472 const int64_t out_width, const int channels, in ResizeImageReference() argument
480 out_width, in_width, width_scale, channels, 0, half_pixel_centers); in ResizeImageReference()
486 const int64_t out_row_size = out_width * channels; in ResizeImageReference()
496 for (int64_t x = 0; x < out_width; ++x) { in ResizeImageReference()
521 const int64_t out_width, const int channels, in ResizeImage() argument
527 out_width, channels, height_scale, width_scale, in ResizeImage()
535 const int64_t out_width, const int channels, in ResizeImage() argument
547 BuildLerpCache<int32>(out_width, in_width, width_scale, channels, in ResizeImage()
554 const int64_t out_row_size = out_width * channels; in ResizeImage()
571 for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { in ResizeImage()
[all …]
maxpooling_op.cc
92 params.out_width * params.out_height * params.tensor_in_batch); in SpatialMaxPoolWithArgMaxHelper()
95 params.out_width * params.out_height * params.tensor_in_batch); in SpatialMaxPoolWithArgMaxHelper()
123 const int32_t out_width = params.out_width; in SpatialMaxPoolWithArgMaxHelper() local
127 const int32_t output_image_size = out_height * out_width * depth; in SpatialMaxPoolWithArgMaxHelper()
149 const int w_end = std::min(wpad / col_stride + 1, out_width); in SpatialMaxPoolWithArgMaxHelper()
153 const int64_t out_index_base = (b * out_height + ph) * out_width; in SpatialMaxPoolWithArgMaxHelper()
193 const int out_size = out_height * out_width * depth; in SpatialMaxPoolWithArgMaxHelper()
565 params.out_width * params.out_height * params.tensor_in_batch); in SpatialMaxPoolGradGrad()
571 params.out_width * params.out_height * params.tensor_in_batch); in SpatialMaxPoolGradGrad()
605 const int32_t out_width = params.out_width; in SpatialMaxPoolGradGrad() local
[all …]
/external/tensorflow/tensorflow/core/util/
image_resizer_state.h
122 out_width = internal::SubtleMustCopy(Svec(1)); in ValidateAndCalculateOutputSize()
123 OP_REQUIRES(context, out_height > 0 && out_width > 0, in ValidateAndCalculateOutputSize()
127 width_scale = CalculateResizeScale(in_width, out_width, align_corners_); in ValidateAndCalculateOutputSize()
137 ceilf((out_width - 1) * width_scale) <= static_cast<float>(INT_MAX), in ValidateAndCalculateOutputSize()
149 0, TensorShape({batch_size, out_height, out_width, channels}), in ValidateAndCreateOutput()
155 int64 out_width; member
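
width_scale here maps output coordinates back into the input; with align_corners the first and last samples of the two grids coincide, otherwise it is the plain size ratio. A sketch of the usual definition behind CalculateResizeScale (hedged; not copied from the TensorFlow header):

    #include <cstdint>

    // Input-to-output scale for image resizing.
    float CalcResizeScaleSketch(int64_t in_size, int64_t out_size,
                                bool align_corners) {
      return (align_corners && out_size > 1)
                 ? (in_size - 1) / static_cast<float>(out_size - 1)
                 : in_size / static_cast<float>(out_size);
    }
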
/external/webp/src/dec/
io_dec.c
295 const int out_width = io->scaled_width; in InitYUVRescaler() local
297 const int uv_out_width = (out_width + 1) >> 1; in InitYUVRescaler()
302 const size_t work_size = 2 * (size_t)out_width; in InitYUVRescaler()
334 buf->y, out_width, out_height, buf->y_stride, 1, in InitYUVRescaler()
348 buf->a, out_width, out_height, buf->a_stride, 1, in InitYUVRescaler()
490 const int out_width = io->scaled_width; in InitRGBRescaler() local
495 const size_t work_size = 2 * (size_t)out_width; in InitRGBRescaler()
504 tmp_size2 = (uint64_t)num_rescalers * out_width; in InitRGBRescaler()
527 tmp + 0 * out_width, out_width, out_height, 0, 1, in InitRGBRescaler()
530 tmp + 1 * out_width, out_width, out_height, 0, 1, in InitRGBRescaler()
[all …]
/external/pdfium/fxbarcode/
cbc_eancode.cpp
28 int32_t out_width = 0; in Encode() local
35 pWriter->Encode(str, format, out_width, out_height)); in Encode()
37 data.get(), out_width); in Encode()
/external/webrtc/media/base/
video_adapter.cc
191 int* out_width, in AdaptFrameResolution() argument
263 *out_width = *cropped_width / scale.denominator * scale.numerator; in AdaptFrameResolution()
265 RTC_DCHECK_EQ(0, *out_width % resolution_alignment_); in AdaptFrameResolution()
273 (previous_width_ != *out_width || previous_height_ != *out_height)) { in AdaptFrameResolution()
280 << scale.denominator << " Output: " << *out_width << "x" in AdaptFrameResolution()
286 previous_width_ = *out_width; in AdaptFrameResolution()
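
AdaptFrameResolution applies a rational scale factor to the cropped size and then checks the result against the configured resolution alignment. A hedged sketch of that arithmetic (the Fraction type and alignment handling are illustrative, not WebRTC's API):

    #include <cassert>

    struct Fraction { int numerator, denominator; };

    // Scale a cropped dimension by numerator/denominator. Dividing first keeps
    // the intermediate value small; when cropped is a multiple of
    // denominator * alignment, the result stays aligned, which is what the
    // RTC_DCHECK in the listing asserts.
    int ApplyScaleSketch(int cropped, Fraction scale, int alignment) {
      int out = cropped / scale.denominator * scale.numerator;
      assert(out % alignment == 0);  // mirrors the RTC_DCHECK in the listing
      return out;
    }
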
