/external/pytorch/aten/src/ATen/

D | InferSize.h |
    22   NumelType numel,  in infer_size_impl()
    67   inline std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {  in infer_size()
    73   inline at::DimVector infer_size_dv(IntArrayRef shape, int64_t numel) {  in infer_size_dv()
    81   c10::SymInt numel) {  in infer_size_dv()
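
`infer_size` resolves an optional `-1` wildcard in a requested shape against a known element count; this is the contract behind `Tensor::view`/`reshape`. A minimal standalone sketch of that logic (an illustration, not the ATen implementation itself):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Resolve at most one -1 in `shape` so that the product of all dims
    // equals `numel`; mirrors the documented behavior of at::infer_size.
    std::vector<int64_t> infer_size_sketch(std::vector<int64_t> shape, int64_t numel) {
      int64_t known = 1;
      int64_t infer_dim = -1;
      for (int64_t i = 0; i < static_cast<int64_t>(shape.size()); ++i) {
        if (shape[i] == -1) {
          if (infer_dim != -1) throw std::invalid_argument("only one dim may be -1");
          infer_dim = i;
        } else {
          known *= shape[i];
        }
      }
      if (infer_dim >= 0) {
        if (known == 0 || numel % known != 0)
          throw std::invalid_argument("shape incompatible with numel");
        shape[infer_dim] = numel / known;  // e.g. {2, -1} with numel 6 -> {2, 3}
      } else if (known != numel) {
        throw std::invalid_argument("shape incompatible with numel");
      }
      return shape;
    }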
D | TensorGeometry.h |
    69   int64_t numel() const {  in numel() function
    127  c10::SymInt numel = 1;  in recompute() local

D | MatrixRef.h |
    74   size_t numel() const {  in numel() function
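
Both `TensorGeometry::numel()` and `MatrixRef::numel()` reduce to the same invariant: the element count is the product of all dimension sizes, with an empty size list (a 0-dim tensor) counting as 1. A minimal sketch of that computation:

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    // numel is the product of the sizes; accumulate over an empty list
    // returns the initial value 1, matching the 0-dim (scalar) convention.
    int64_t numel_of(const std::vector<int64_t>& sizes) {
      return std::accumulate(sizes.begin(), sizes.end(), int64_t{1},
                             std::multiplies<int64_t>());
    }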
/external/pytorch/c10/xpu/test/impl/

D | XPUTest.h |
    5    static inline void initHostData(int* hostData, int numel) {  in initHostData()
    11   static inline void clearHostData(int* hostData, int numel) {  in clearHostData()
    17   static inline void validateHostData(int* hostData, int numel) {  in validateHostData()
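
These test helpers fill, zero, and check a host buffer of `numel` ints. A plausible reconstruction, assuming the common index-as-value pattern (the actual bodies may differ):

    #include <cassert>

    // Write each slot's own index so validation can detect any element
    // the device failed to round-trip.
    static inline void initHostData(int* hostData, int numel) {
      for (int i = 0; i < numel; i++) hostData[i] = i;
    }

    static inline void clearHostData(int* hostData, int numel) {
      for (int i = 0; i < numel; i++) hostData[i] = 0;
    }

    static inline void validateHostData(int* hostData, int numel) {
      for (int i = 0; i < numel; i++) assert(hostData[i] == i);
    }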
/external/pytorch/aten/src/ATen/native/cuda/

D | KernelUtils.cuh |
    48   const index_t numel,  in fastSpecializedAtomicAdd()
    88   const index_t numel,  in fastSpecializedAtomicAdd()
    129  const index_t numel,  in fastSpecializedAtomicAdd()
    138  const index_t numel,  in fastAtomicAdd()
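
`numel` reaches these atomics for a boundary check: the specialized half-precision path widens each add to a two-element packed operation, and the last element of an odd-length tensor has no partner inside the allocation. A hedged host-side sketch of just that decision (names are illustrative; the real code uses CUDA `__half2` atomics):

    #include <cstddef>

    // An even index needs index + 1 to still be inside the tensor before
    // its pair can be touched; an odd index pairs with its predecessor,
    // which always exists.
    bool can_pack_pairwise_add(std::size_t index, std::size_t numel) {
      return (index % 2 == 1) || (index + 1 < numel);
    }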
D | ScatterGatherKernel.cu |
    24   …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con…  in operator ()()
    34   …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con…  in operator ()()
    43   …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con…  in operator ()()
    52   …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con…  in operator ()()
    62   …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con…  in operator ()()
    72   …constexpr C10_DEVICE void operator() (scalar_t* self_data_start, int64_t index, int64_t numel, con…  in operator ()()
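
Each of these six functors receives a base pointer, a precomputed element offset `index`, the `numel` bound for that access, and a source pointer, then applies one reduction (assign, add, multiply, ...). A hedged CPU-side sketch of the shape of one such functor:

    #include <cassert>
    #include <cstdint>

    // Scatter-add in the mold of the device functors: all indexing has
    // already been flattened to one offset, so the functor just checks
    // the bound and applies its reduction.
    struct ReduceAddSketch {
      template <typename scalar_t>
      void operator()(scalar_t* self_data_start, int64_t index, int64_t numel,
                      const scalar_t* src_data) const {
        assert(index >= 0 && index < numel);
        self_data_start[index] += *src_data;  // the CUDA version adds atomically
      }
    };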
D | SortStable.cu |
    143  const auto numel = nsort * nsegments;  in segmented_sort_pairs_by_full_sort() local
    191  const auto numel = nsort * nsegments;  in segmented_sort_pairs() local
    224  const auto numel = self.numel();  in launch_stable_sort_kernel() local
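
In the first two hits `numel` factors as `nsort * nsegments`: the buffer is treated as `nsegments` independent runs of length `nsort`, each sorted on its own. A host-side sketch of that decomposition, assuming contiguous segments:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Stable-sort each contiguous run of length nsort;
    // numel == nsegments * nsort covers the whole buffer.
    void segmented_sort_sketch(std::vector<float>& data, int64_t nsegments,
                               int64_t nsort) {
      for (int64_t seg = 0; seg < nsegments; ++seg) {
        auto begin = data.begin() + seg * nsort;
        std::stable_sort(begin, begin + nsort);
      }
    }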
D | EmbeddingBackwardKernel.cu |
    48   const int64_t *num_of_segments_ptr, int64_t numel) {  in krn_partials_per_segment()
    83   const index_t *offset2bag, const index_t *count, ptrdiff_t numel,  in compute_grad_weight_bags()
    128  ptrdiff_t numel,  in compute_grad_weight()
    225  const ptrdiff_t numel = sorted_indices.numel();  in embedding_backward_cuda_kernel() local
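
Here `numel` is the number of index occurrences (`sorted_indices.numel()`), and the kernels accumulate `grad_output` rows into the `grad_weight` rows those indices select. A serial CPU sketch of the accumulation the segmented kernels parallelize:

    #include <cstdint>

    // grad_weight[indices[i]] += grad_output[i] for every lookup i; the
    // CUDA path sorts the indices first so each weight row is owned by a
    // contiguous segment of the work.
    void embedding_backward_sketch(float* grad_weight, const float* grad_output,
                                   const int64_t* indices, int64_t numel,
                                   int64_t embedding_dim) {
      for (int64_t i = 0; i < numel; ++i) {
        float* dst = grad_weight + indices[i] * embedding_dim;
        const float* src = grad_output + i * embedding_dim;
        for (int64_t d = 0; d < embedding_dim; ++d) dst[d] += src[d];
      }
    }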
D | DistributionTemplates.h |
    50   const uint64_t numel = static_cast<uint64_t>(total_elements);  in calc_execution_policy() local
    121  int64_t numel = iter.numel();  in distribution_nullary_kernel() local
    180  int numel,  in distribution_binary_elementwise_kernel()
    250  int64_t numel = iter.numel();  in distribution_binary_kernel() local
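
`calc_execution_policy` turns the element count into a launch configuration: enough blocks that `block_size * unroll` elements per block cover `numel`, i.e. a ceiling division. A sketch of that arithmetic (the constants and occupancy caps in the real header are tuning details omitted here):

    #include <cstdint>

    // Smallest grid with grid * block_size * unroll >= numel.
    uint64_t blocks_for(uint64_t numel, uint64_t block_size, uint64_t unroll) {
      const uint64_t per_block = block_size * unroll;
      return (numel + per_block - 1) / per_block;
    }
    // e.g. blocks_for(1000000, 256, 4) == 977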
/external/executorch/backends/vulkan/runtime/api/containers/

D | StagingBuffer.h |
    36   const size_t numel)  in StagingBuffer()
    70   inline size_t numel() {  in numel() function
/external/executorch/kernels/optimized/cpu/

D | op_exp.cpp |
    35   const size_t numel,  in exp_data()
    57   const size_t numel,  in exp_data()

D | op_sigmoid.cpp |
    34   const size_t numel,  in sigmoid_data()
    56   const size_t numel,  in sigmoid_data()
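
Both kernels share one shape: a typed flat loop writing `f(in[i])` for `i < numel`, with the two overloads per file covering different input/output dtype pairs. A hedged sketch of the pattern (the optimized builds vectorize this loop):

    #include <cmath>
    #include <cstddef>

    // Elementwise pattern shared by exp_data and sigmoid_data: numel is
    // the only shape information the loop needs.
    template <typename CTYPE_IN, typename CTYPE_OUT>
    void exp_data_sketch(const CTYPE_IN* in, std::size_t numel, CTYPE_OUT* out) {
      for (std::size_t i = 0; i < numel; ++i) {
        out[i] = static_cast<CTYPE_OUT>(std::exp(static_cast<double>(in[i])));
      }
    }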
/external/executorch/runtime/core/exec_aten/testing_util/

D | tensor_util.cpp |
    41   size_t numel,  in data_is_close()
    242  std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {  in print_data()
    262  std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {  in print_data()
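
`data_is_close` walks `numel` paired values with the standard elementwise closeness test, `|a - b| <= atol + rtol * |b|`. A sketch assuming that conventional formula (NaN handling and dtype dispatch omitted):

    #include <cmath>
    #include <cstddef>

    bool data_is_close_sketch(const float* a, const float* b, std::size_t numel,
                              double rtol, double atol) {
      for (std::size_t i = 0; i < numel; ++i) {
        const double diff = std::fabs(double(a[i]) - double(b[i]));
        if (diff > atol + rtol * std::fabs(double(b[i]))) return false;
      }
      return true;
    }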
/external/pytorch/torch/_inductor/codegen/aoti_runtime/

D | implementation.cpp |
    39   int64_t numel;  in convert_handle_to_arrayref_tensor() local
    79   void assert_numel(const ArrayRefTensor<T>& tensor, uint64_t numel) {  in assert_numel()
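
`assert_numel` guards handle unpacking: the ArrayRef-backed tensor must carry exactly the element count the generated code expects. A sketch of the check, assuming only a `numel()` accessor and an exception on mismatch:

    #include <cstdint>
    #include <sstream>
    #include <stdexcept>

    // TensorLike is any type exposing numel(); illustrative, not the
    // actual ArrayRefTensor<T> interface.
    template <typename TensorLike>
    void assert_numel_sketch(const TensorLike& tensor, uint64_t numel) {
      if (static_cast<uint64_t>(tensor.numel()) != numel) {
        std::ostringstream msg;
        msg << "expected numel " << numel << ", got " << tensor.numel();
        throw std::runtime_error(msg.str());
      }
    }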
/external/executorch/backends/vulkan/runtime/graph/ops/

D | PrepackNode.cpp |
    54   size_t numel = utils::multiply_integers(packed->sizes());  in create_staging_buffer() local
    61   size_t numel = utils::multiply_integers(tref->sizes);  in create_staging_buffer() local
/external/pytorch/torch/csrc/distributed/c10d/

D | CUDASymmetricMemoryOps.cu |
    52   size_t numel,  in init_elementwise_launch_config()
    80   size_t numel,  in multimem_all_reduce_kernel()
    173  size_t numel,  in multimem_one_shot_all_reduce_kernel()
D | Utils.cpp |
    22   size_t numel = 0;  in getTensorsNumel() local
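
The `numel = 0` initializer suggests `getTensorsNumel` accumulates element counts across a tensor list. A sketch of that presumed accumulation:

    #include <cstddef>
    #include <vector>

    // Sum of numel() over a list; TensorLike is illustrative, not the
    // c10d signature.
    template <typename TensorLike>
    std::size_t getTensorsNumelSketch(const std::vector<TensorLike>& tensors) {
      std::size_t numel = 0;
      for (const auto& t : tensors) numel += t.numel();
      return numel;
    }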
D | SymmetricMemory.cpp |
    74   const size_t numel =  in empty_strided_p2p_persistent() local
    159  const size_t numel =  in empty_strided_p2p() local
/external/pytorch/aten/src/ATen/test/

D | quantized_test.cpp |
    107  int numel = 10;  in TEST() local
    125  int numel = 10;  in TEST() local
    218  auto numel = c10::multiply_integers(shape);  in TEST() local
    261  auto numel = c10::multiply_integers(shape);  in TEST() local
    295  const int numel = 132;  in TEST() local
/external/pytorch/aten/src/ATen/native/

D | Scalar.cpp |
    17   auto numel = self.sym_numel();  in item() local
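
`item()` is only defined for a single-element tensor, so it fetches the (symbolic) element count and rejects anything but one. A standalone illustration of that contract:

    #include <cstdint>
    #include <stdexcept>
    #include <string>

    double item_sketch(const double* data, int64_t numel) {
      if (numel != 1) {
        throw std::runtime_error("a Tensor with " + std::to_string(numel) +
                                 " elements cannot be converted to Scalar");
      }
      return data[0];
    }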
D | Unique.cpp |
    42   int64_t numel = input.numel();  in unique_cpu_bool_template() local
    165  int64_t numel = input.numel();  in unique_cpu_sorted_template() local
    273  int64_t numel = input.numel();  in unique_consecutive_cpu_template() local
    391  int64_t numel = input_flat.size(1);  in _unique_dim_cpu_template() local
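
The sorted CPU path boils down to: flatten to `numel` elements, sort, then collapse adjacent duplicates. A sketch with the standard library (inverse indices and counts, which the real templates also produce, are omitted):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    std::vector<int64_t> unique_sorted_sketch(std::vector<int64_t> input) {
      std::sort(input.begin(), input.end());
      input.erase(std::unique(input.begin(), input.end()), input.end());
      return input;
    }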
/external/executorch/backends/cadence/reference/operators/

D | quantize_per_tensor.cpp |
    32   size_t numel = out.numel();  in quantize_per_tensor_out() local
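
Per-tensor quantization maps each of the `numel` float values through one affine formula, `q = clamp(round(x / scale) + zero_point, qmin, qmax)`. A sketch for an int8 output:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    void quantize_per_tensor_sketch(const float* in, int8_t* out, std::size_t numel,
                                    double scale, int64_t zero_point,
                                    int64_t quant_min, int64_t quant_max) {
      for (std::size_t i = 0; i < numel; ++i) {
        const int64_t q =
            static_cast<int64_t>(std::nearbyint(in[i] / scale)) + zero_point;
        out[i] = static_cast<int8_t>(std::min(std::max(q, quant_min), quant_max));
      }
    }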
D | dequantize_per_tensor.cpp |
    30   size_t numel = out.numel();  in dequantize_per_tensor_out() local
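
Dequantization is the inverse affine map over the same flat range: `x = (q - zero_point) * scale`. A sketch:

    #include <cstddef>
    #include <cstdint>

    void dequantize_per_tensor_sketch(const int8_t* in, float* out,
                                      std::size_t numel, double scale,
                                      int64_t zero_point) {
      for (std::size_t i = 0; i < numel; ++i) {
        out[i] = static_cast<float>(
            (static_cast<int64_t>(in[i]) - zero_point) * scale);
      }
    }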
/external/executorch/backends/cadence/hifi/operators/

D | quantize_per_tensor.cpp |
    34   const size_t numel = out.numel();  in quantize_per_tensor_out() local
D | dequantize_per_tensor.cpp |
    33   const size_t numel = out.numel();  in dequantize_per_tensor_out() local