/external/libjpeg-turbo/simd/powerpc/

  jcgryext-altivec.c
    jsimd_rgb_gray_convert_altivec():
       32  int pitch = img_width * RGB_PIXELSIZE, num_cols;   (local)
       68  for (num_cols = pitch; num_cols > 0;
       69  num_cols -= RGB_PIXELSIZE * 16, inptr += RGB_PIXELSIZE * 16,
       77  int bytes = num_cols + offset;
       87  memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
      116  if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
      118  memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
      128  if (num_cols > 16)
      130  if (num_cols > 32)
      133  if (num_cols > 48)
      [all …]

  jdcolext-altivec.c
    jsimd_ycc_rgb_convert_altivec():
       31  int pitch = out_width * RGB_PIXELSIZE, num_cols;   (local)
       78  for (num_cols = pitch; num_cols > 0;
       79  num_cols -= RGB_PIXELSIZE * 16, outptr += RGB_PIXELSIZE * 16,
      203  int bytes = num_cols + offset;
      219  memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
      224  edgeh = vec_ld(min(num_cols - 1, RGB_PIXELSIZE * 16), outptr);
      250  if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
      258  memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
      262  if (num_cols > 16)
      264  if (num_cols > 32)
      [all …]

  jccolext-altivec.c
    jsimd_rgb_ycc_convert_altivec():
       32  int pitch = img_width * RGB_PIXELSIZE, num_cols;   (local)
       74  for (num_cols = pitch; num_cols > 0;
       75  num_cols -= RGB_PIXELSIZE * 16, inptr += RGB_PIXELSIZE * 16,
       83  int bytes = num_cols + offset;
       93  memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
      123  if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
      125  memcpy(tmpbuf, inptr, min(num_cols, RGB_PIXELSIZE * 16));
      135  if (num_cols > 16)
      137  if (num_cols > 32)
      140  if (num_cols > 48)

  jdmrgext-altivec.c
    jsimd_h2v1_merged_upsample_altivec():
       32  int pitch = output_width * RGB_PIXELSIZE, num_cols, yloop;   (local)
       86  for (num_cols = pitch; num_cols > 0; inptr1 += 16, inptr2 += 16) {
      153  for (yloop = 0; yloop < 2 && num_cols > 0; yloop++,
      154  num_cols -= RGB_PIXELSIZE * 16,
      232  int bytes = num_cols + offset;
      248  memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
      253  edgeh = vec_ld(min(num_cols - 1, RGB_PIXELSIZE * 16), outptr);
      279  if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
      287  memcpy(outptr, tmpbuf, min(num_cols, RGB_PIXELSIZE * 16));
      291  if (num_cols > 16)
      [all …]
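
The four AltiVec color-conversion files above share one tail-handling pattern: when fewer than RGB_PIXELSIZE * 16 bytes of the row remain and that remainder is not a whole number of 16-byte vectors, the leftover bytes are staged through an aligned temporary buffer, and the memcpy is bounded by min(num_cols, RGB_PIXELSIZE * 16) so the vector loads and stores never touch memory past the end of the row. The sketch below is a minimal standalone restatement of that idea, not the libjpeg-turbo code itself; the helper name and the GCC-style alignment attribute are assumptions.

#include <string.h>

#define RGB_PIXELSIZE 3                     /* assumed: 3-byte-per-pixel RGB layout */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Staging buffer big enough for one full vector iteration (16 pixels). */
static unsigned char tmpbuf[RGB_PIXELSIZE * 16] __attribute__((aligned(16)));

/* Return a pointer that is safe to feed to 16-byte vector loads for the
 * current iteration; num_cols is the number of row bytes still remaining. */
const unsigned char *safe_row_ptr(const unsigned char *inptr, int num_cols)
{
  if (num_cols < RGB_PIXELSIZE * 16 && (num_cols & 15)) {
    /* Partial tail that is not a whole number of 16-byte vectors: copy only
     * the valid bytes into the aligned staging buffer and read from there. */
    memcpy(tmpbuf, inptr, MIN(num_cols, RGB_PIXELSIZE * 16));
    return tmpbuf;
  }
  return inptr;   /* a full vector's worth of data; load directly from the row */
}

On the compression side (jcgryext, jccolext) the copy goes from inptr into tmpbuf before the vector loads; on the decompression side (jdcolext, jdmrgext) the row is assembled in tmpbuf and the valid bytes are copied back out to outptr, which is what the memcpy(outptr, tmpbuf, ...) hits show.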
/external/tensorflow/tensorflow/python/kernel_tests/

  checkpoint_ops_test.py
      144  num_cols=self.old_num_cols)
      159  num_cols=len(col_remapping))
      174  num_cols=len(col_remapping))
      189  num_cols=2)
      209  num_cols=self.old_num_cols)
      218  num_cols = 4
      219  initializing_values = [42] * num_rows * num_cols
      224  col_remapping=[-1] * num_cols,
      227  num_cols=num_cols)
      230  np.reshape(initializing_values, (num_rows, num_cols)),
      [all …]

  fractional_avg_pool_op_test.py
      152  num_cols = 6
      153  tensor_shape = (1, num_rows, num_cols, 1)
      180  for j in range(num_cols):
      201  num_cols = 30
      204  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
      241  for num_cols in [10, 20, 50]:
      242  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
      257  num_cols = 50
      258  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
      278  num_cols = 50
      [all …]

  fractional_max_pool_op_test.py
      152  num_cols = 6
      153  tensor_shape = (1, num_rows, num_cols, 1)
      182  for j in range(num_cols):
      203  num_cols = 30
      206  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
      219  num_cols = 30
      222  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
      238  for num_cols in [10, 20, 50]:
      239  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
      254  num_cols = 50
      [all …]
/external/tensorflow/tensorflow/python/data/experimental/benchmarks/

  csv_dataset_benchmark.py
       64  def _runBenchmark(self, dataset, num_cols, prefix):   (argument)
       88  name='%s_with_cols_%d' % (prefix, num_cols))
       93  num_cols = self._num_cols[i]
       94  kwargs = {'record_defaults': [[0.0]] * num_cols}
       97  self._runBenchmark(dataset, num_cols, 'csv_float_map_decode_csv')
      103  num_cols = self._num_cols[i]
      104  kwargs = {'record_defaults': [['']] * num_cols}
      107  self._runBenchmark(dataset, num_cols, 'csv_strings_map_decode_csv')
      113  num_cols = self._num_cols[i]
      114  kwargs = {'record_defaults': [[0.0]] * num_cols}
      [all …]
/external/libjpeg-turbo/

  jdcol565.c
    LOCAL():
       29  JDIMENSION num_cols = cinfo->output_width;   (local)
       58  num_cols--;
       60  for (col = 0; col < (num_cols >> 1); col++) {
       82  if (num_cols & 1) {
      108  JDIMENSION num_cols = cinfo->output_width;   (local)
      139  num_cols--;
      141  for (col = 0; col < (num_cols >> 1); col++) {
      167  if (num_cols & 1) {
      192  JDIMENSION num_cols = cinfo->output_width;   (local)
      211  num_cols--;
      [all …]

  jdcolext.c
    LOCAL():
       40  JDIMENSION num_cols = cinfo->output_width;   (local)
       55  for (col = 0; col < num_cols; col++) {
       90  JDIMENSION num_cols = cinfo->output_width;   (local)
       95  for (col = 0; col < num_cols; col++) {
      122  JDIMENSION num_cols = cinfo->output_width;   (local)
      130  for (col = 0; col < num_cols; col++) {

  jccolext.c
    LOCAL():
       42  JDIMENSION num_cols = cinfo->image_width;   (local)
       50  for (col = 0; col < num_cols; col++) {
       96  JDIMENSION num_cols = cinfo->image_width;   (local)
      102  for (col = 0; col < num_cols; col++) {
      129  JDIMENSION num_cols = cinfo->image_width;   (local)
      137  for (col = 0; col < num_cols; col++) {
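
jdcolext.c and jccolext.c are straightforward per-pixel loops over num_cols, while jdcol565.c converts pixels two at a time so that a pair of 16-bit RGB565 values can be written as a single 32-bit store; the num_cols--, num_cols >> 1 and num_cols & 1 hits are the bookkeeping around that pairing. Below is a hedged, self-contained sketch of just the pairing logic: pack_rgb565 is a made-up helper, the leading alignment guard and the little-endian word layout are assumptions, and the real routine derives each pixel from its YCbCr-to-RGB conversion rather than from a prepacked RGB buffer.

#include <stdint.h>

/* Pack one 8-bit-per-channel pixel into RGB565 (5 red, 6 green, 5 blue bits). */
static inline uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
{
  return (uint16_t)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}

/* Convert num_cols packed RGB pixels to RGB565, two pixels per 32-bit store. */
void row_to_rgb565(const uint8_t *rgb, uint8_t *outptr, unsigned int num_cols)
{
  unsigned int col;

  /* Emit one leading pixel if the output pointer is not 32-bit aligned, so
   * the paired stores below are aligned (this guard is an assumption). */
  if (((uintptr_t)outptr & 3) && num_cols > 0) {
    uint16_t px = pack_rgb565(rgb[0], rgb[1], rgb[2]);
    outptr[0] = (uint8_t)(px & 0xFF);
    outptr[1] = (uint8_t)(px >> 8);
    outptr += 2;
    rgb += 3;
    num_cols--;
  }

  /* Main loop: two pixels per iteration, stored as one 32-bit word. */
  for (col = 0; col < (num_cols >> 1); col++) {
    uint32_t two = (uint32_t)pack_rgb565(rgb[0], rgb[1], rgb[2]) |
                   ((uint32_t)pack_rgb565(rgb[3], rgb[4], rgb[5]) << 16);
    *(uint32_t *)outptr = two;              /* little-endian pairing assumed */
    outptr += 4;
    rgb += 6;
  }

  /* A trailing odd pixel, if any. */
  if (num_cols & 1) {
    uint16_t px = pack_rgb565(rgb[0], rgb[1], rgb[2]);
    outptr[0] = (uint8_t)(px & 0xFF);
    outptr[1] = (uint8_t)(px >> 8);
  }
}

Decrementing num_cols after the unpaired leading pixel keeps the >> 1 and & 1 arithmetic correct for the rest of the row.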
/external/tensorflow/tensorflow/core/kernels/

  topk_op_gpu.h
      412  SegmentOffsetCreator(int num_cols) : num_cols_(num_cols) {}
      422  ColumnIndexCreator(int num_cols) : num_cols_(num_cols) {}
      434  int num_cols, int k,
      445  DT_INT32, TensorShape({num_rows, num_cols}), &input_indices));
      448  input_indices_t.generate(ColumnIndexCreator(num_cols));
      453  segment_offsets_t(counting_iter, SegmentOffsetCreator(num_cols));
      459  if (k == num_cols) {
      466  DT_INT32, TensorShape({num_rows, num_cols}), &temp_indices));
      468  TensorShape({num_rows, num_cols}),
      481  /* num_items */ num_cols * num_rows,
      [all …]

  lu_op.cc
    Compute():
       85  const int64 num_cols = input.dim_size(input_rank - 1);   (local)
       87  input_matrix_shape.AppendShape({num_rows, num_cols});
      115  auto shard = [this, &input, &num_rows, &num_cols, &outputs,
      118  ComputeTensorSlice(context, i, input, num_rows, num_cols, outputs,
    ComputeTensorSlice():
      129  const Tensor& input, int64 num_rows, int64 num_cols,   (argument)
      135  input.flat<Scalar>().data() + matrix_index * num_rows * num_cols,
      136  num_rows, num_cols);
      140  outputs[0]->flat<Scalar>().data() + matrix_index * num_rows * num_cols,

  topk_op.cc
    Compute():
       74  const int64 num_cols = input.dimension(1);   (local)
       91  context, sorted_, k, input, num_rows, num_cols, values, indices);
      107  const int64 num_cols, typename TTypes<T, 2>::Tensor values,
      126  for (int c = 0; c < num_cols; ++c) {
      157  if (k == num_cols) {
      182  filter.reserve(num_cols);
      183  for (int32 c = 0; c < num_cols; ++c) {
      214  static_cast<double>(num_cols *
      216  const double sort_cost = (k == num_cols) ? base_cost : 4 * base_cost;
      253  const int64 num_cols, typename TTypes<T, 2>::Tensor values, \
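
Both top-k implementations above special-case k == num_cols as a plain full sort of every row, and the GPU path builds its helper indices from num_cols alone: ColumnIndexCreator yields each element's column index and SegmentOffsetCreator yields the row boundaries handed to the segmented sort. A plain-C restatement of that index arithmetic, with illustrative sizes:

#include <stdio.h>

/* Segment i of a row-major (num_rows x num_cols) matrix covers the
 * half-open range [i * num_cols, (i + 1) * num_cols). */
static long segment_offset(long i, long num_cols) { return i * num_cols; }

/* The column index of flat element idx is its offset within its row. */
static long column_index(long idx, long num_cols) { return idx % num_cols; }

int main(void)
{
  const long num_rows = 3, num_cols = 4;            /* illustrative sizes */

  for (long i = 0; i <= num_rows; ++i)              /* num_rows + 1 boundaries */
    printf("segment boundary %ld: %ld\n", i, segment_offset(i, num_cols));

  printf("flat index 10 is column %ld\n", column_index(10, num_cols));  /* -> 2 */
  return 0;
}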
  reduction_gpu_kernels.cu.h
      232  T in, outT out, int num_rows, int num_cols, Op op,
      242  if (num_cols == 1) {
      251  if (row < num_rows && col < num_cols) {
      252  sum = in[row * num_cols + col];
      254  for (; col < num_cols; col += 32) {
      255  sum = op(sum, in[row * num_cols + col]);
      263  sum = WarpReduce(temp_storage).Reduce(sum, op, min(num_cols, 32));
      299  T in, outT out, int num_rows, int num_cols, Op op,
      302  int rows_per_warp = 32 / num_cols;
      305  const int lane_row = lane / num_cols;
      [all …]

  softmax_op_gpu.cu.cc
    GenerateNormalizedProb():
       74  const int num_rows, const int num_cols,   (argument)
       78  const int row = tid / num_cols;
       79  const int col = tid % num_cols;
       86  if (row < num_rows && col < num_cols) {
    SubtractAndExpFunctor():
      100  const int num_cols)
      101  : logits_(logits), max_logits_(max_logits), num_cols_(num_cols) {}
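
The reduction and softmax kernels above address a row-major (num_rows x num_cols) buffer with plain division and modulo: softmax_op_gpu.cu.cc computes row = tid / num_cols and col = tid % num_cols, and the row-reduction kernel packs several short rows into one 32-lane warp when a row is narrower than a warp (rows_per_warp = 32 / num_cols, lane_row = lane / num_cols). The host-side illustration below just prints that lane assignment; the warp size constant and the example row width are assumptions, and no CUDA is involved.

#include <stdio.h>

enum { WARP_SIZE = 32 };

int main(void)
{
  const int num_cols = 5;                          /* illustrative row width */
  const int rows_per_warp = WARP_SIZE / num_cols;  /* rows packed into one warp */

  for (int lane = 0; lane < WARP_SIZE; ++lane) {
    const int lane_row = lane / num_cols;          /* which packed row this lane serves */
    const int lane_col = lane % num_cols;          /* which element within that row */
    if (lane_row < rows_per_warp)
      printf("lane %2d -> row %d, col %d\n", lane, lane_row, lane_col);
    else
      printf("lane %2d -> idle\n", lane);          /* leftover lanes do no work */
  }
  return 0;
}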
/external/libhevc/decoder/

  ihevcd_fmt_conv.c
    ihevcd_fmt_conv_420sp_to_420sp():
      393  WORD32 num_rows, num_cols, src_strd, dst_strd;   (local)
      401  num_cols = wd;
      408  memcpy(pu1_dst, pu1_src, num_cols);
      418  num_cols = wd;
      425  memcpy(pu1_dst, pu1_src, num_cols);
    ihevcd_fmt_conv_420sp_to_420sp_swap_uv():
      492  WORD32 num_rows, num_cols, src_strd, dst_strd;   (local)
      500  num_cols = wd;
      507  memcpy(pu1_dst, pu1_src, num_cols);
      517  num_cols = wd;
      525  for(j = 0; j < num_cols; j += 2)
      [all …]
/external/libavc/decoder/

  ih264d_format_conv.c
    ih264d_fmt_conv_420sp_to_420sp():
      385  WORD32 num_rows, num_cols, src_strd, dst_strd;   (local)
      393  num_cols = wd;
      400  memcpy(pu1_dst, pu1_src, num_cols);
      410  num_cols = wd;
      417  memcpy(pu1_dst, pu1_src, num_cols);
    ih264d_fmt_conv_420sp_to_420sp_swap_uv():
      482  WORD32 num_rows, num_cols, src_strd, dst_strd;   (local)
      490  num_cols = wd;
      497  memcpy(pu1_dst, pu1_src, num_cols);
      507  num_cols = wd;
      515  for(j = 0; j < num_cols; j += 2)
      [all …]
/external/libhevc/decoder/x86/

  ihevcd_fmt_conv_ssse3_intr.c
    ihevcd_fmt_conv_420sp_to_420p_ssse3():
       63  WORD32 num_rows, num_cols, src_strd, dst_strd, cols, rows;   (local)
       76  num_cols = wd;
       82  memcpy(pu1_dst, pu1_src, num_cols);
      119  num_cols = wd >> 1;
      126  if(num_cols > 15)
      128  cols = num_cols >> 4;
      246  num_cols &= 0x0F;
      248  if(num_cols)
      256  for(j = 0; j < num_cols; j++)
/external/libavc/encoder/

  ih264e_fmt_conv.c
    ih264e_fmt_conv_420sp_to_420sp():
      371  WORD32 num_rows, num_cols, src_strd, dst_strd;   (local)
      379  num_cols = wd;
      386  memcpy(pu1_dst, pu1_src, num_cols);
      396  num_cols = wd;
      403  memcpy(pu1_dst, pu1_src, num_cols);
    ih264e_fmt_conv_420sp_to_420sp_swap_uv():
      423  WORD32 num_rows, num_cols, src_strd, dst_strd;   (local)
      431  num_cols = wd;
      438  memcpy(pu1_dst, pu1_src, num_cols);
      448  num_cols = wd;
      456  for (j = 0; j < num_cols; j += 2)
      [all …]
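
The ihevcd_, ih264d_ and ih264e_ 420sp conversion routines listed above reduce to the same inner structure: num_cols is set to the frame width wd, every row is copied with a single memcpy of num_cols bytes, and the source and destination pointers then advance by their own strides; the swap_uv variants replace the chroma memcpy with a loop that steps two bytes at a time and exchanges the interleaved U and V samples. A minimal standalone version of that pattern, with UWORD8/WORD32 as local typedefs and the swap direction chosen for illustration:

#include <string.h>

typedef unsigned char UWORD8;
typedef int WORD32;

/* Copy one 8-bit plane row by row, honouring independent source and
 * destination strides (the core of the 420sp-to-420sp routines). */
void copy_plane(UWORD8 *pu1_src, UWORD8 *pu1_dst,
                WORD32 wd, WORD32 ht,
                WORD32 src_strd, WORD32 dst_strd)
{
  WORD32 num_rows = ht, num_cols = wd, i;

  for (i = 0; i < num_rows; i++) {
    memcpy(pu1_dst, pu1_src, num_cols);
    pu1_src += src_strd;
    pu1_dst += dst_strd;
  }
}

/* Same walk for the interleaved chroma plane, but exchanging the two
 * samples of each U/V pair (the swap_uv variants). */
void copy_chroma_swap_uv(UWORD8 *pu1_src, UWORD8 *pu1_dst,
                         WORD32 wd, WORD32 ht,
                         WORD32 src_strd, WORD32 dst_strd)
{
  WORD32 num_rows = ht, num_cols = wd, i, j;

  for (i = 0; i < num_rows; i++) {
    for (j = 0; j < num_cols; j += 2) {
      pu1_dst[j]     = pu1_src[j + 1];   /* second sample of the pair first */
      pu1_dst[j + 1] = pu1_src[j];       /* first sample of the pair second */
    }
    pu1_src += src_strd;
    pu1_dst += dst_strd;
  }
}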
/external/tensorflow/tensorflow/compiler/xla/service/gpu/

  gemm_thunk.cc
    MatrixDescriptor:
       41  num_cols(matrix_num_cols),
       47  int64 num_cols;   (member)
    DoGemm():
       70  auto k = lhs_matrix.transpose ? lhs_matrix.num_rows : lhs_matrix.num_cols;
       76  output_matrix.num_cols, /*size of reduce dim=*/k, /*alpha=*/alpha,
       83  int64 lhs_stride = lhs_matrix.num_rows * lhs_matrix.num_cols;
       84  int64 rhs_stride = rhs_matrix.num_rows * rhs_matrix.num_cols;
       85  int64 output_stride = output_matrix.num_rows * output_matrix.num_cols;
       89  output_matrix.num_cols, /*size of reduce dim=*/k,
    DoGemmWithAlgorithm():
      135  auto k = lhs_matrix.transpose ? lhs_matrix.num_rows : lhs_matrix.num_cols;
      140  output_matrix.num_cols, /*size of reduce dim=*/k,
      [all …]
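
gemm_thunk.cc derives its GEMM parameters straight from the matrix descriptors: the reduce dimension k is the left operand's num_cols (or num_rows when that operand is marked transposed), and each operand's batch stride is simply num_rows * num_cols elements. A hedged restatement of that arithmetic using a stripped-down stand-in struct rather than the XLA type, with made-up dimensions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the descriptor in the fragments above. */
typedef struct {
  int64_t num_rows;
  int64_t num_cols;
  bool transpose;
} MatrixDescriptor;

int main(void)
{
  MatrixDescriptor lhs = {128, 64, false};   /* 128 x 64, used as-is     */
  MatrixDescriptor rhs = {64, 256, false};   /* 64 x 256                 */
  MatrixDescriptor out = {128, 256, false};  /* result is 128 x 256      */

  /* Size of the reduce (contracted) dimension. */
  int64_t k = lhs.transpose ? lhs.num_rows : lhs.num_cols;

  /* Per-matrix distance between consecutive batch elements, in elements. */
  int64_t lhs_stride = lhs.num_rows * lhs.num_cols;
  int64_t rhs_stride = rhs.num_rows * rhs.num_cols;
  int64_t output_stride = out.num_rows * out.num_cols;

  printf("k=%lld lhs_stride=%lld rhs_stride=%lld out_stride=%lld\n",
         (long long)k, (long long)lhs_stride, (long long)rhs_stride,
         (long long)output_stride);
  return 0;
}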
/external/eigen/unsupported/test/

  cxx11_tensor_complex_cuda.cu
    test_cuda_sum_reductions():
       80  const int num_cols = internal::random<int>(1024, 5*1024);   (local)
       82  Tensor<std::complex<float>, 2> in(num_rows, num_cols);
       94  TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
    test_cuda_product_reductions():
      117  const int num_cols = internal::random<int>(1024, 5*1024);   (local)
      119  Tensor<std::complex<float>, 2> in(num_rows, num_cols);
      131  TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols);
/external/libjpeg-turbo/simd/loongson/

  jdcolext-mmi.c
    jsimd_ycc_rgb_convert_mmi():
       97  int num_cols, col;   (local)
      108  for (num_cols = out_width; num_cols > 0; num_cols -= 8,
      253  if (num_cols >= 8) {
      259  col = num_cols * 3;
      359  if (num_cols >= 8) {
      366  col = num_cols;
/external/tensorflow/tensorflow/tools/graph_transforms/

  fold_old_batch_norms.cc
    GetScaleAndOffsetValues():
       78  const int64 num_cols = mean.shape().dim_size(0);   (local)
       79  TF_RETURN_IF_ERROR(ErrorIfNotVector(variance, "Variance", num_cols));
       80  TF_RETURN_IF_ERROR(ErrorIfNotVector(beta, "Beta", num_cols));
       81  TF_RETURN_IF_ERROR(ErrorIfNotVector(gamma, "gamma", num_cols));
       83  scale_values->resize(num_cols);
       84  offset_values->resize(num_cols);
       88  for (int i = 0; i < num_cols; ++i) {
       94  for (int i = 0; i < num_cols; ++i) {
       99  for (int i = 0; i < num_cols; ++i) {
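
fold_old_batch_norms.cc sizes everything from num_cols, the length of the mean vector (one entry per output channel), checks that variance, beta and gamma have the same length, and then fills per-channel scale and offset vectors. The loop bodies are not visible in the hits, so the sketch below uses the standard batch-norm folding formulas, scale = gamma / sqrt(variance + epsilon) and offset = beta - mean * scale; this is the textbook arithmetic, not a transcription of the TensorFlow source.

#include <math.h>
#include <stdio.h>

/* Standard per-channel batch-norm folding: y = x * scale + offset, with one
 * scale/offset entry per output channel (num_cols entries in total). */
void fold_batch_norm(const float *mean, const float *variance,
                     const float *beta, const float *gamma,
                     float epsilon, int num_cols,
                     float *scale_values, float *offset_values)
{
  for (int i = 0; i < num_cols; ++i) {
    scale_values[i] = gamma[i] / sqrtf(variance[i] + epsilon);
    offset_values[i] = beta[i] - mean[i] * scale_values[i];
  }
}

int main(void)
{
  const int num_cols = 2;                       /* illustrative channel count */
  const float mean[] = {0.5f, -1.0f}, variance[] = {4.0f, 1.0f};
  const float beta[] = {0.0f, 0.1f}, gamma[] = {1.0f, 2.0f};
  float scale[2], offset[2];

  fold_batch_norm(mean, variance, beta, gamma, 0.001f, num_cols, scale, offset);
  for (int i = 0; i < num_cols; ++i)
    printf("channel %d: scale=%f offset=%f\n", i, scale[i], offset[i]);
  return 0;
}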
/external/libmpeg2/common/

  ideint_utils.c
    ideint_pad_blk():
      260  WORD32 num_cols, num_rows;   (local)
      266  num_cols = blk_wd + 4;
      274  num_cols -= 2;
      289  num_cols -= 2;
      296  memcpy(pu1_dst, pu1_src_top, num_cols);
      299  memcpy(pu1_dst, pu1_src_bot, num_cols);