/external/executorch/extension/tensor/test/
D | tensor_ptr_test.cpp
      34  EXPECT_EQ(tensor->const_data_ptr<float>(), &scalar_data);  in TEST_F()
      35  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 3.14f);  in TEST_F()
      45  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 3.14f);  in TEST_F()
      54  EXPECT_EQ(tensor_float->const_data_ptr<float>()[0], 3.14f);  in TEST_F()
      62  EXPECT_EQ(tensor_int32->const_data_ptr<int32_t>()[0], 42);  in TEST_F()
      70  EXPECT_EQ(tensor_double->const_data_ptr<double>()[0], 2.718);  in TEST_F()
      78  EXPECT_EQ(tensor_int64->const_data_ptr<int64_t>()[0], 10000000000);  in TEST_F()
      90  EXPECT_EQ(tensor->const_data_ptr<float>(), data);  in TEST_F()
      91  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 2);  in TEST_F()
     141  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 1);  in TEST_F()
     [all …]
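The assertions above all check the same contract: a TensorPtr built over caller-owned memory exposes exactly that memory, read-only, through `const_data_ptr<T>()`. A minimal standalone sketch of the pattern, using a hypothetical `TensorView` class rather than the real `executorch::extension::TensorPtr` interface:

```cpp
#include <cassert>

// Hypothetical non-owning tensor view; the real ExecuTorch TensorPtr has a
// richer interface, but the aliasing contract checked above is the same.
class TensorView {
 public:
  explicit TensorView(const void* data) : data_(data) {}

  // Typed read-only accessor, mirroring Tensor::const_data_ptr<T>().
  template <typename T>
  const T* const_data_ptr() const {
    return static_cast<const T*>(data_);
  }

 private:
  const void* data_;
};

int main() {
  float scalar_data = 3.14f;
  TensorView tensor(&scalar_data);

  // The view aliases the caller's buffer: same address, same value.
  assert(tensor.const_data_ptr<float>() == &scalar_data);
  assert(tensor.const_data_ptr<float>()[0] == 3.14f);
  return 0;
}
```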
D | tensor_ptr_maker_test.cpp
      38  EXPECT_EQ(tensor->const_data_ptr<float>(), data);  in TEST_F()
      39  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 2);  in TEST_F()
      95  EXPECT_EQ(tensor->const_data_ptr<float>(), data);  in TEST_F()
      96  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 2);  in TEST_F()
      97  EXPECT_EQ(tensor->const_data_ptr<float>()[19], 0);  in TEST_F()
     111  EXPECT_EQ(tensor->const_data_ptr<float>(), data);  in TEST_F()
     112  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 3);  in TEST_F()
     132  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 2);  in TEST_F()
     214  EXPECT_EQ(tensor->const_data_ptr<float>()[0], 7);  in TEST_F()
     221  EXPECT_EQ(tensor2->const_data_ptr<int32_t>()[0], 3);  in TEST_F()
     [all …]

/external/pytorch/aten/src/ATen/native/cuda/
D | MultiMarginLoss.cu
     204  input.const_data_ptr<scalar_t>(),  in multi_margin_loss_cuda_out()
     205  target.const_data_ptr<int64_t>(),  in multi_margin_loss_cuda_out()
     206  weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,  in multi_margin_loss_cuda_out()
     215  input.const_data_ptr<scalar_t>(),  in multi_margin_loss_cuda_out()
     216  target.const_data_ptr<int64_t>(),  in multi_margin_loss_cuda_out()
     217  weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,  in multi_margin_loss_cuda_out()
     237  input.const_data_ptr<scalar_t>(),  in multi_margin_loss_cuda_out()
     238  target.const_data_ptr<int64_t>(),  in multi_margin_loss_cuda_out()
     239  weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr,  in multi_margin_loss_cuda_out()
     247  input.const_data_ptr<scalar_t>(),  in multi_margin_loss_cuda_out()
     [all …]
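A recurring idiom in these kernel launches is `weights.defined() ? weights.const_data_ptr<scalar_t>() : nullptr`: the optional class-weight tensor degrades to a null pointer, and the kernel substitutes a weight of 1. A host-side C++ sketch of that convention, using a simplified margin-loss formula (p = 1, margin = 1) that only approximates the real kernel:

```cpp
#include <cstddef>
#include <cstdio>

// Loss-kernel stand-in: `weights` may be nullptr, in which case every
// class is weighted 1.0 (hypothetical function, not the actual CUDA kernel).
void weighted_margin_loss(const float* input, const long* target,
                          const float* weights,  // may be nullptr
                          float* out, size_t batch, size_t classes) {
  for (size_t b = 0; b < batch; ++b) {
    const long t = target[b];
    const float w = weights ? weights[t] : 1.0f;  // the nullptr fallback
    float loss = 0.0f;
    for (size_t c = 0; c < classes; ++c) {
      if (static_cast<long>(c) == t) continue;
      const float margin =
          1.0f - input[b * classes + t] + input[b * classes + c];
      if (margin > 0.0f) loss += w * margin;
    }
    out[b] = loss / classes;
  }
}

int main() {
  const float input[] = {0.1f, 0.9f, 0.2f, 0.8f};  // 2 samples, 2 classes
  const long target[] = {1, 0};
  float out[2];
  weighted_margin_loss(input, target, /*weights=*/nullptr, out, 2, 2);
  std::printf("%f %f\n", out[0], out[1]);
}
```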
D | EmbeddingBackwardKernel.cu
     246  sorted_indices.const_data_ptr<index_t>(), thrust::make_counting_iterator(0),  in embedding_backward_cuda_kernel()
     262  segment_offsets.const_data_ptr<index_t>(),  in embedding_backward_cuda_kernel()
     274  partials_per_segment.const_data_ptr<index_t>(),  in embedding_backward_cuda_kernel()
     282  partials_per_segment.const_data_ptr<index_t>(),  in embedding_backward_cuda_kernel()
     283  partials_per_segment_offset.const_data_ptr<index_t>(),  in embedding_backward_cuda_kernel()
     295  partials_per_segment.const_data_ptr<index_t>(),  in embedding_backward_cuda_kernel()
     296  partials_per_segment_offset.const_data_ptr<index_t>(),  in embedding_backward_cuda_kernel()
     297  segment_offsets.const_data_ptr<index_t>(),  in embedding_backward_cuda_kernel()
     322  orig_indices.const_data_ptr<index_t>(),  in embedding_backward_cuda_kernel()
     323  grad.const_data_ptr<scalar_t>(),  in embedding_backward_cuda_kernel()
     [all …]
D | MultiLabelMarginCriterion.cu
     235  input_.const_data_ptr<scalar_t>(),  in multilabel_margin_loss_forward_out_cuda_template()
     236  target_.const_data_ptr<int64_t>(),  in multilabel_margin_loss_forward_out_cuda_template()
     260  input_.const_data_ptr<scalar_t>(),  in multilabel_margin_loss_forward_out_cuda_template()
     261  target_.const_data_ptr<int64_t>(),  in multilabel_margin_loss_forward_out_cuda_template()
     286  input_.const_data_ptr<scalar_t>(),  in multilabel_margin_loss_forward_out_cuda_template()
     287  target_.const_data_ptr<int64_t>(),  in multilabel_margin_loss_forward_out_cuda_template()
     345  grad_output_.const_data_ptr<scalar_t>(),  in multilabel_margin_loss_backward_cuda_out_template()
     346  input_.const_data_ptr<scalar_t>(),  in multilabel_margin_loss_backward_cuda_out_template()
     347  target_.const_data_ptr<int64_t>(),  in multilabel_margin_loss_backward_cuda_out_template()
     348  is_target_.const_data_ptr<scalar_t>(),  in multilabel_margin_loss_backward_cuda_out_template()
     [all …]
D | LossCTC.cu
     306  …log_probs.const_data_ptr<scalar_t>(), input_lengths_t.const_data_ptr<int64_t>(), log_probs.size(0),  in ctc_loss_gpu_template()
     307  …targets.const_data_ptr<target_t>(), target_lengths_t.const_data_ptr<int64_t>(), max_target_length,  in ctc_loss_gpu_template()
     311  tg_batch_offsets.const_data_ptr<int64_t>(), tg_target_stride,  in ctc_loss_gpu_template()
     660  …log_probs.const_data_ptr<scalar_t>(), input_lengths_t.const_data_ptr<int64_t>(), log_probs.size(0),  in ctc_loss_backward_gpu_template()
     661  …targets.const_data_ptr<target_t>(), target_lengths_t.const_data_ptr<int64_t>(), max_target_length,  in ctc_loss_backward_gpu_template()
     664  tg_batch_offsets.const_data_ptr<int64_t>(), tg_target_stride,  in ctc_loss_backward_gpu_template()
     712  grad_out.const_data_ptr<scalar_t>(), grad_out.stride(0),  in ctc_loss_backward_gpu_template()
     713  log_alpha.const_data_ptr<scalar_t>(), log_beta.const_data_ptr<scalar_t>(),  in ctc_loss_backward_gpu_template()
     714  log_probs.const_data_ptr<scalar_t>(), input_lengths_t.const_data_ptr<int64_t>(),  in ctc_loss_backward_gpu_template()
     715  targets.const_data_ptr<target_t>(), target_lengths_t.const_data_ptr<int64_t>(),  in ctc_loss_backward_gpu_template()
     [all …]
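The CTC forward and backward recursions seen here run entirely in log space (`log_probs`, `log_alpha`, `log_beta`), so combining probabilities requires a numerically stable log-sum-exp rather than a plain add. A sketch of that primitive; the actual LossCTC.cu kernels build the full alpha/beta recursion around it:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// log(exp(a) + exp(b)) computed stably; the identity element is -inf.
double log_add(double a, double b) {
  if (std::isinf(a) && a < 0) return b;  // adding probability 0
  if (std::isinf(b) && b < 0) return a;
  const double m = std::max(a, b);
  // m + log(1 + exp(-(|a - b|))) never overflows for finite a, b.
  return m + std::log1p(std::exp(-std::fabs(a - b)));
}

int main() {
  // log(0.25 + 0.25) should equal log(0.5).
  std::printf("%f vs %f\n",
              log_add(std::log(0.25), std::log(0.25)), std::log(0.5));
}
```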
D | Loss.cu
     319  target.const_data_ptr<index_t>(),  in nll_loss_forward_out_cuda_template()
     321  weight_.defined() ? weight_.const_data_ptr<scalar_t>()  in nll_loss_forward_out_cuda_template()
     363  input.const_data_ptr<scalar_t>(),  in nll_loss_forward_out_cuda_template()
     364  target.const_data_ptr<index_t>(),  in nll_loss_forward_out_cuda_template()
     365  weight_.defined() ? weight_.const_data_ptr<scalar_t>()  in nll_loss_forward_out_cuda_template()
     392  input.const_data_ptr<scalar_t>(),  in nll_loss_forward_out_cuda_template()
     393  target.const_data_ptr<index_t>(),  in nll_loss_forward_out_cuda_template()
     394  weight_.defined() ? weight_.const_data_ptr<scalar_t>()  in nll_loss_forward_out_cuda_template()
     522  target.const_data_ptr<index_t>(),  in nll_loss_backward_out_cuda_template()
     525  weight.defined() ? weight_.const_data_ptr<scalar_t>() : nullptr,  in nll_loss_backward_out_cuda_template()
     [all …]
D | CUDAScalar.cu
      45  …at::cuda::memcpy_and_sync((void *)value.const_data_ptr<scalar_t>(), self.const_data_ptr<scalar_t>(…  in _local_scalar_dense_cuda()
      46  r = Scalar(*value.const_data_ptr<scalar_t>());  in _local_scalar_dense_cuda()
      53  r = Scalar(*cpu_self.const_data_ptr<scalar_t>());  in _local_scalar_dense_cuda()
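`_local_scalar_dense_cuda` copies exactly one element from device memory into a host-side tensor with `memcpy_and_sync`, then dereferences `const_data_ptr<scalar_t>()` to build the returned `Scalar`. A CUDA-free sketch of the same shape, with plain `memcpy` standing in for the synchronizing device-to-host copy:

```cpp
#include <cstdio>
#include <cstring>

// Stand-in for pulling one element out of a (device) tensor: copy exactly
// one scalar_t into host storage, then read it back. memcpy plays the role
// of at::cuda::memcpy_and_sync here (sketch only; no CUDA involved).
template <typename scalar_t>
scalar_t local_scalar_dense(const scalar_t* device_like_ptr) {
  scalar_t host_value;
  std::memcpy(&host_value, device_like_ptr, sizeof(scalar_t));
  return host_value;  // the real code wraps this value in at::Scalar
}

int main() {
  const float self[] = {42.5f};
  std::printf("%f\n", local_scalar_dense(self));
}
```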

/external/executorch/runtime/core/exec_aten/testing_util/
D | tensor_util.cpp
     118  a.const_data_ptr<float>(),  in tensors_are_close()
     119  b.const_data_ptr<float>(),  in tensors_are_close()
     125  a.const_data_ptr<double>(),  in tensors_are_close()
     126  b.const_data_ptr<double>(),  in tensors_are_close()
     132  a.const_data_ptr<Half>(),  in tensors_are_close()
     133  b.const_data_ptr<Half>(),  in tensors_are_close()
     139  a.const_data_ptr<BFloat16>(),  in tensors_are_close()
     140  b.const_data_ptr<BFloat16>(),  in tensors_are_close()
     146  return memcmp(a.const_data_ptr(), b.const_data_ptr(), a.nbytes()) == 0;  in tensors_are_close()
     173  a.const_data_ptr<float>(),  in tensor_data_is_close()
     [all …]
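`tensors_are_close` compares typed element buffers with a tolerance for the floating dtypes and falls back to a raw `memcmp` for exact dtypes (line 146). A sketch of the float path, assuming the usual `|a - b| <= atol + rtol * |b|` criterion; the real helper also handles NaN and several more dtypes:

```cpp
#include <cmath>
#include <cstddef>
#include <cstdio>

// Elementwise closeness in the spirit of tensors_are_close():
// every element must satisfy |a - b| <= atol + rtol * |b|.
bool data_is_close(const float* a, const float* b, size_t numel,
                   double rtol, double atol) {
  for (size_t i = 0; i < numel; ++i) {
    const double diff = std::fabs(double(a[i]) - double(b[i]));
    if (diff > atol + rtol * std::fabs(double(b[i]))) return false;
  }
  return true;
}

int main() {
  const float a[] = {1.0f, 2.0f};
  const float b[] = {1.0f, 2.000001f};
  std::printf("%d\n", data_is_close(a, b, 2, 1e-5, 1e-8));  // prints 1
}
```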

/external/pytorch/aten/src/ATen/test/
D | apply_utils_test.cpp
      32  ASSERT(zero_dim.const_data_ptr<scalar_t>()[0] == std::exp(2));  in test()
      60  auto target = a1.const_data_ptr<scalar_t>()[i] * a1.const_data_ptr<scalar_t>()[i];  in test()
      61  ASSERT(a0.const_data_ptr<scalar_t>()[i] == target);  in test()
      62  ASSERT(a4.const_data_ptr<double>()[i] == target);  in test()
      76  auto target = a1.const_data_ptr<scalar_t>()[i] * a1.const_data_ptr<scalar_t>()[i];  in test()
      77  target = target + a2.const_data_ptr<scalar_t>()[i];  in test()
      78  ASSERT(a0.const_data_ptr<scalar_t>()[i] == target);  in test()
      79  ASSERT(a4.const_data_ptr<double>()[i] == target);  in test()
     102  auto target = a1.const_data_ptr<scalar_t>()[i] * a1.const_data_ptr<scalar_t>()[i];  in test()
     103  target = target + a2.const_data_ptr<scalar_t>()[i] * a3.const_data_ptr<scalar_t>()[i];  in test()
     [all …]

/external/executorch/backends/cadence/hifi/operators/
D | quantized_linear_out.cpp
      48  const uint8_t* __restrict__ in_data = in.const_data_ptr<uint8_t>();  in _quantized_linear_asym8u()
      49  const uint8_t* __restrict__ weight_data = weight.const_data_ptr<uint8_t>();  in _quantized_linear_asym8u()
      50  const int32_t* __restrict__ bias_data = bias.const_data_ptr<int32_t>();  in _quantized_linear_asym8u()
      66  -weight_zero_point.const_data_ptr<int32_t>()[0], // mat1_zero_bias  in _quantized_linear_asym8u()
      68  out_multiplier.const_data_ptr<int32_t>()[0], // out_multiplier  in _quantized_linear_asym8u()
      69  out_shift.const_data_ptr<int32_t>()[0], // out_shift  in _quantized_linear_asym8u()
      93  const int8_t* __restrict__ in_data = in.const_data_ptr<int8_t>();  in _quantized_linear_asym8s()
      94  const int8_t* __restrict__ weight_data = weight.const_data_ptr<int8_t>();  in _quantized_linear_asym8s()
      95  const int32_t* __restrict__ bias_data = bias.const_data_ptr<int32_t>();  in _quantized_linear_asym8s()
     111  -weight_zero_point.const_data_ptr<int32_t>()[0], // mat1_zero_bias  in _quantized_linear_asym8s()
     [all …]
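The scalar quantization parameters here travel as one-element int32 tensors, read with `const_data_ptr<int32_t>()[0]`. A sketch of a quantized dot product in that style; note it requantizes with a plain float scale, where the real op uses the fixed-point `out_multiplier`/`out_shift` pair:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Quantized dot product, loosely after the Cadence linear op: subtract the
// zero points, accumulate in int32, then requantize and clamp to int8.
int8_t quantized_dot(const int8_t* in, const int8_t* w, int32_t bias,
                     int32_t in_zp, int32_t w_zp, float out_scale,
                     int32_t out_zp, size_t n) {
  int32_t acc = bias;
  for (size_t i = 0; i < n; ++i)
    acc += (int32_t(in[i]) - in_zp) * (int32_t(w[i]) - w_zp);
  const int32_t q = int32_t(std::lround(acc * out_scale)) + out_zp;
  return int8_t(std::min(127, std::max(-128, q)));
}

int main() {
  const int8_t in[] = {10, 20}, w[] = {3, 4};
  std::printf("%d\n", quantized_dot(in, w, /*bias=*/0, /*in_zp=*/0,
                                    /*w_zp=*/0, /*out_scale=*/0.05f,
                                    /*out_zp=*/0, 2));  // prints 6
}
```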
D | dequantize_per_tensor.cpp
      35  const uint8_t* input_data = input.const_data_ptr<uint8_t>();  in dequantize_per_tensor_out()
      38  const int8_t* input_data = input.const_data_ptr<int8_t>();  in dequantize_per_tensor_out()
      42  const int16_t* input_data = input.const_data_ptr<int16_t>();  in dequantize_per_tensor_out()
      47  const uint16_t* input_data = input.const_data_ptr<uint16_t>();  in dequantize_per_tensor_out()
      50  const int32_t* input_data = input.const_data_ptr<int32_t>();  in dequantize_per_tensor_out()
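Each branch above selects a differently typed `const_data_ptr<T>()` for the same affine dequantization, out[i] = (in[i] - zero_point) * scale. In this sketch one template covers all the integral input types the op dispatches over:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Per-tensor dequantization: out[i] = (in[i] - zero_point) * scale.
// The real op switches on the tensor's dtype to pick the matching
// const_data_ptr<T>() instantiation; here the template does the same job.
template <typename T>
void dequantize(const T* in, float* out, size_t numel,
                float scale, int32_t zero_point) {
  for (size_t i = 0; i < numel; ++i)
    out[i] = (static_cast<int32_t>(in[i]) - zero_point) * scale;
}

int main() {
  const uint8_t in[] = {128, 255, 0};
  float out[3];
  dequantize(in, out, 3, /*scale=*/0.5f, /*zero_point=*/128);
  std::printf("%g %g %g\n", out[0], out[1], out[2]);  // 0 63.5 -64
}
```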

/external/pytorch/torch/csrc/lazy/core/
D | hash.h
     116  return DataHash(ctensor.const_data_ptr<bool>(), size);  in TensorHash()
     118  return DataHash(ctensor.const_data_ptr<uint8_t>(), size);  in TensorHash()
     120  return DataHash(ctensor.const_data_ptr<int8_t>(), size);  in TensorHash()
     122  return DataHash(ctensor.const_data_ptr<int16_t>(), size);  in TensorHash()
     124  return DataHash(ctensor.const_data_ptr<int32_t>(), size);  in TensorHash()
     126  return DataHash(ctensor.const_data_ptr<int64_t>(), size);  in TensorHash()
     128  return DataHash(ctensor.const_data_ptr<float>(), size);  in TensorHash()
     130  return DataHash(ctensor.const_data_ptr<double>(), size);  in TensorHash()
     132  return DataHash(ctensor.const_data_ptr<at::BFloat16>(), size);  in TensorHash()
     134  return DataHash(ctensor.const_data_ptr<at::Half>(), size);  in TensorHash()
     [all …]
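`TensorHash` switches on the tensor's dtype only to choose which `const_data_ptr<T>()` instantiation supplies the bytes; the hash itself is byte-oriented. A sketch using FNV-1a as a stand-in for the real `DataHash`:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Byte-level hash in the spirit of torch::lazy::DataHash: dtype-agnostic,
// fed by whichever typed const pointer the dispatch above selected.
// FNV-1a is used here purely as an illustrative stand-in.
uint64_t data_hash(const void* data, size_t nbytes) {
  const auto* p = static_cast<const uint8_t*>(data);
  uint64_t h = 14695981039346656037ull;  // FNV offset basis
  for (size_t i = 0; i < nbytes; ++i) {
    h ^= p[i];
    h *= 1099511628211ull;               // FNV prime
  }
  return h;
}

int main() {
  const float values[] = {1.0f, 2.0f};
  std::printf("%llu\n",
              (unsigned long long)data_hash(values, sizeof(values)));
}
```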

/external/executorch/kernels/optimized/cpu/
D | op_div.cpp
      76  CTYPE_SCALAR scalar_val = *scalar->const_data_ptr<CTYPE_SCALAR>();  in opt_div_out()
      84  tensor->const_data_ptr<CTYPE>(),  in opt_div_out()
      93  tensor->const_data_ptr<CTYPE>(),  in opt_div_out()
     118  a.const_data_ptr<CTYPE>(),  in opt_div_out()
     119  b.const_data_ptr<CTYPE>(),  in opt_div_out()
     151  lhs->const_data_ptr<CTYPE>(),  in opt_div_out()
     152  rhs->const_data_ptr<CTYPE>(),  in opt_div_out()
     159  lhs->const_data_ptr<CTYPE>(),  in opt_div_out()
     160  rhs->const_data_ptr<CTYPE>(),  in opt_div_out()
     230  a.const_data_ptr<CTYPE>(),  in opt_div_scalar_out()
     [all …]
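Line 76 shows the scalar fast path: when one operand is a one-element tensor, the op reads it once via `*scalar->const_data_ptr<CTYPE_SCALAR>()` and streams over the other operand in a single pass. A sketch of that shape for division; the real op also dispatches on dtype and handles the scalar-numerator case:

```cpp
#include <cstddef>
#include <cstdio>

// Scalar fast path as in opt_div_out(): read the one-element tensor once,
// then divide elementwise without re-fetching it per element.
void div_tensor_by_scalar(const float* tensor, const float* scalar,
                          float* out, size_t numel) {
  const float s = *scalar;  // *scalar->const_data_ptr<CTYPE>() in the op
  for (size_t i = 0; i < numel; ++i)
    out[i] = tensor[i] / s;
}

int main() {
  const float a[] = {2.0f, 4.0f, 6.0f}, s[] = {2.0f};
  float out[3];
  div_tensor_by_scalar(a, s, out, 3);
  std::printf("%g %g %g\n", out[0], out[1], out[2]);  // 1 2 3
}
```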
D | op_sub.cpp
     114  CTYPE_SCALAR scalar_val = *scalar->const_data_ptr<CTYPE_SCALAR>();  in opt_sub_out()
     124  tensor->const_data_ptr<CTYPE>(),  in opt_sub_out()
     132  tensor->const_data_ptr<CTYPE>(),  in opt_sub_out()
     161  a.const_data_ptr<CTYPE>(),  in opt_sub_out()
     162  b.const_data_ptr<CTYPE>(),  in opt_sub_out()
     198  lhs->const_data_ptr<CTYPE>(),  in opt_sub_out()
     199  rhs->const_data_ptr<CTYPE>(),  in opt_sub_out()
     206  lhs->const_data_ptr<CTYPE>(),  in opt_sub_out()
     207  rhs->const_data_ptr<CTYPE>(),  in opt_sub_out()
     288  a.const_data_ptr<CTYPE>(),  in opt_sub_scalar_out()
     [all …]

/external/executorch/backends/cadence/reference/operators/
D | quantized_linear_out.cpp
      31  const T* __restrict__ src_data = src.const_data_ptr<T>();  in _typed_quantized_linear()
      32  const T* __restrict__ weight_data = weight.const_data_ptr<T>();  in _typed_quantized_linear()
      33  const int32_t* __restrict__ bias_data = bias.const_data_ptr<int32_t>();  in _typed_quantized_linear()
      36  int32_t weight_zero_point = weight_zero_point_t.const_data_ptr<int32_t>()[0];  in _typed_quantized_linear()
      55  out_multiplier.const_data_ptr<int32_t>();  in _typed_quantized_linear()
      57  out_shift.const_data_ptr<int32_t>();  in _typed_quantized_linear()
D | quantized_conv_out.cpp
     195  input.const_data_ptr<uint8_t>(),  in quantized_conv_out()
     196  weight.const_data_ptr<uint8_t>(),  in quantized_conv_out()
     197  bias.const_data_ptr<int32_t>(),  in quantized_conv_out()
     217  weight_zero_point.const_data_ptr<int32_t>(),  in quantized_conv_out()
     218  bias_scale.const_data_ptr<float>(),  in quantized_conv_out()
     224  input.const_data_ptr<int8_t>(),  in quantized_conv_out()
     225  weight.const_data_ptr<int8_t>(),  in quantized_conv_out()
     226  bias.const_data_ptr<int32_t>(),  in quantized_conv_out()
     246  weight_zero_point.const_data_ptr<int32_t>(),  in quantized_conv_out()
     247  bias_scale.const_data_ptr<float>(),  in quantized_conv_out()
D | dequantize_per_tensor.cpp
      33  const uint8_t* input_data = input.const_data_ptr<uint8_t>();  in dequantize_per_tensor_out()
      37  const int8_t* input_data = input.const_data_ptr<int8_t>();  in dequantize_per_tensor_out()
      43  const uint16_t* input_data = input.const_data_ptr<uint16_t>();  in dequantize_per_tensor_out()
      47  const int16_t* input_data = input.const_data_ptr<int16_t>();  in dequantize_per_tensor_out()
      51  const int32_t* input_data = input.const_data_ptr<int32_t>();  in dequantize_per_tensor_out()

/external/pytorch/aten/src/ATen/native/miopen/
D | BatchNorm_miopen.cpp
     121  idesc.desc(), input->const_data_ptr(),  in miopen_batch_norm()
     127  const_cast<void*>(weight->const_data_ptr()),  in miopen_batch_norm()
     128  const_cast<void*>(bias->const_data_ptr()),  in miopen_batch_norm()
     140  idesc.desc(), input->const_data_ptr(),  in miopen_batch_norm()
     146  const_cast<void*>(weight->const_data_ptr()),  in miopen_batch_norm()
     147  const_cast<void*>(bias->const_data_ptr()),  in miopen_batch_norm()
     226  idesc.desc(), input->const_data_ptr(),  in miopen_batch_norm_backward()
     227  idesc.desc(), grad_output->const_data_ptr(),  in miopen_batch_norm_backward()
     229  wdesc.desc(), weight->const_data_ptr(),  in miopen_batch_norm_backward()
     233  save_mean->const_data_ptr(),  in miopen_batch_norm_backward()
     [all …]
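The `const_cast<void*>(weight->const_data_ptr())` calls exist because the MIOpen C entry points declare some read-only parameters as plain `void*`. The cast is legitimate only because the library does not write through those pointers. A sketch of the idiom against a hypothetical legacy C function:

```cpp
#include <cstdio>

// Hypothetical legacy C API that declares its read-only parameter as
// void*, the way the MIOpen/cuDNN batch-norm entry points do.
extern "C" void legacy_scale(void* weight, float* out, int n) {
  const float* w = static_cast<const float*>(weight);  // never written
  for (int i = 0; i < n; ++i) out[i] *= w[0];
}

int main() {
  const float weight = 2.0f;  // our data really is const
  float out[2] = {1.0f, 3.0f};
  // const_data_ptr() yields a const void*; the const_cast is safe only
  // because the callee is documented not to write through the pointer.
  const void* p = static_cast<const void*>(&weight);
  legacy_scale(const_cast<void*>(p), out, 2);
  std::printf("%g %g\n", out[0], out[1]);  // 2 6
}
```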

/external/executorch/extension/aten_util/test/
D | aten_bridge_test.cpp
      51  EXPECT_EQ(at_tensor.const_data_ptr(), etensor.const_data_ptr());  in TEST()
     103  EXPECT_EQ(sliced_tensor_contig.const_data_ptr(), etensor.const_data_ptr());  in TEST()
     104  EXPECT_NE(sliced_tensor.const_data_ptr(), etensor.const_data_ptr());  in TEST()
     148  EXPECT_EQ(aliased_at_tensor.const_data_ptr(), etensor_data.data());  in TEST()
     155  EXPECT_EQ(at_tensor.const_data_ptr(), et_tensor_ptr->const_data_ptr());  in TEST()

/external/pytorch/aten/src/ATen/native/cudnn/
D | BatchNorm.cpp
     220  input->const_data_ptr(),  in cudnn_batch_norm()
     226  weight->const_data_ptr(),  in cudnn_batch_norm()
     227  bias->const_data_ptr(),  in cudnn_batch_norm()
     250  input->const_data_ptr(),  in cudnn_batch_norm()
     254  weight->const_data_ptr(),  in cudnn_batch_norm()
     255  bias->const_data_ptr(),  in cudnn_batch_norm()
     256  running_mean->const_data_ptr(),  in cudnn_batch_norm()
     257  running_var->const_data_ptr(),  in cudnn_batch_norm()
     370  input->const_data_ptr(),  in cudnn_batch_norm_backward()
     374  grad_output->const_data_ptr(),  in cudnn_batch_norm_backward()
     [all …]

/external/executorch/runtime/core/portable_type/test/
D | tensor_test.cpp
      50  EXPECT_EQ(a.const_data_ptr(), data);  in TEST_F()
      52  EXPECT_EQ(a.const_data_ptr(), nullptr);  in TEST_F()
      65  EXPECT_EQ(a.const_data_ptr<int32_t>()[0], 0);  in TEST_F()
      66  EXPECT_EQ(a.const_data_ptr<int32_t>()[0 + a.strides()[0]], 1);  in TEST_F()
      79  EXPECT_EQ(a.const_data_ptr<int32_t>()[0], 0);  in TEST_F()

/external/pytorch/aten/src/ATen/native/
D | Blas.cpp
      94  …gemv<scalar_t>('n', mat.size(0), mat.size(1), alpha, mat.const_data_ptr<scalar_t>(), mat.stride(1),  in TORCH_IMPL_FUNC()
      95  …vec.const_data_ptr<scalar_t>(), vec.stride(0), beta, result.mutable_data_ptr<scalar_t>(), r_stride…  in TORCH_IMPL_FUNC()
      98  …gemv<scalar_t>('t', mat.size(1), mat.size(0), alpha, mat.const_data_ptr<scalar_t>(), mat.stride(0),  in TORCH_IMPL_FUNC()
      99  …vec.const_data_ptr<scalar_t>(), vec.stride(0), beta, result.mutable_data_ptr<scalar_t>(), r_stride…  in TORCH_IMPL_FUNC()
     103  …gemv<scalar_t>('t', mat.size(1), mat.size(0), alpha, cmat.const_data_ptr<scalar_t>(), cmat.stride(…  in TORCH_IMPL_FUNC()
     104  …vec.const_data_ptr<scalar_t>(), vec.stride(0), beta, result.mutable_data_ptr<scalar_t>(), r_stride…  in TORCH_IMPL_FUNC()
     188  …el(), const_cast<scalar_t*>(self.const_data_ptr<scalar_t>()), self.stride(0), const_cast<scalar_t*…  in dot()
     219  …l(), const_cast<scalar_t*>(self.const_data_ptr<scalar_t>()), self.stride(0), const_cast<scalar_t *…  in vdot()
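Lines 94-104 encode the standard BLAS trick: `gemv` assumes column-major storage, so a contiguous row-major matrix is passed with trans = 't' and swapped dimensions instead of being copied. A reference implementation of the convention (a sketch, not the tuned kernel):

```cpp
#include <cstdio>

// Reference gemv with BLAS-style column-major semantics:
//   trans='n': y = alpha * A * x + beta * y   (A is m x n, lda >= m)
//   trans='t': y = alpha * A^T * x + beta * y
void gemv(char trans, int m, int n, float alpha, const float* a, int lda,
          const float* x, float beta, float* y) {
  const int out_len = (trans == 'n') ? m : n;
  const int k_len = (trans == 'n') ? n : m;
  for (int i = 0; i < out_len; ++i) {
    float acc = 0.0f;
    for (int k = 0; k < k_len; ++k)
      acc += (trans == 'n' ? a[i + k * lda] : a[k + i * lda]) * x[k];
    y[i] = alpha * acc + beta * y[i];
  }
}

int main() {
  // Row-major 2x3 matrix: reinterpret as column-major 3x2 and transpose,
  // exactly as Blas.cpp does with gemv('t', size(1), size(0), ...).
  const float a[] = {1, 2, 3,
                     4, 5, 6};
  const float x[] = {1, 1, 1};
  float y[2] = {0, 0};
  gemv('t', 3, 2, 1.0f, a, /*lda=*/3, x, 0.0f, y);
  std::printf("%g %g\n", y[0], y[1]);  // 6 15
}
```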

/external/executorch/kernels/portable/cpu/
D | op_addmm.cpp
      70  in.const_data_ptr<CTYPE>(),  in addmm_out()
      71  mat1.const_data_ptr<CTYPE>(),  in addmm_out()
      72  mat2.const_data_ptr<CTYPE>(),  in addmm_out()
      85  mat1.const_data_ptr<CTYPE>(),  in addmm_out()
      86  mat2.const_data_ptr<CTYPE>(),  in addmm_out()

/external/pytorch/aten/src/ATen/native/quantized/cpu/
D | Normalization.cpp
      83  const float* weight_data = weight.template const_data_ptr<float>();  in q_batch_norm1d_impl()
      84  const float* bias_data = bias.template const_data_ptr<float>();  in q_batch_norm1d_impl()
      94  const float* mean_data = mean.template const_data_ptr<float>();  in q_batch_norm1d_impl()
      95  const float* var_data = var.template const_data_ptr<float>();  in q_batch_norm1d_impl()
     192  const float* weight_data = weight.template const_data_ptr<float>();  in q_batch_norm2d_impl()
     193  const float* bias_data = bias.template const_data_ptr<float>();  in q_batch_norm2d_impl()
     203  const float* mean_data = mean.template const_data_ptr<float>();  in q_batch_norm2d_impl()
     204  const float* var_data = var.template const_data_ptr<float>();  in q_batch_norm2d_impl()
     288  const float* weight_data = weight.template const_data_ptr<float>();  in q_batch_norm3d_impl()
     289  const float* bias_data = bias.template const_data_ptr<float>();  in q_batch_norm3d_impl()
     [all …]
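The quantized batch-norm paths read four per-channel float tensors (weight, bias, mean, var) via `const_data_ptr<float>()` and fold them into a single multiply-add before the hot loop. A sketch of that algebra, assuming the standard folding alpha = weight / sqrt(var + eps) and beta = bias - mean * alpha, not the quantized kernel itself:

```cpp
#include <cmath>
#include <cstdio>

// Per-channel fold: afterwards the inner loop is just y = alpha * x + beta.
void fold_bn_params(const float* weight, const float* bias,
                    const float* mean, const float* var,
                    float eps, float* alpha, float* beta, int channels) {
  for (int c = 0; c < channels; ++c) {
    alpha[c] = weight[c] / std::sqrt(var[c] + eps);
    beta[c] = bias[c] - mean[c] * alpha[c];
  }
}

int main() {
  const float w[] = {1.0f}, b[] = {0.5f}, m[] = {2.0f}, v[] = {4.0f};
  float alpha[1], beta[1];
  fold_bn_params(w, b, m, v, 1e-5f, alpha, beta, 1);
  std::printf("alpha=%g beta=%g\n", alpha[0], beta[0]);  // ~0.5, ~-0.5
}
```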