/external/pytorch/aten/src/ATen/native/cpu/

SortingKernel.cpp
    37    .declare_static_shape(values.sizes(), /*squash_dims=*/dim)  in _dim_apply()
    239   .declare_static_shape(sizes, /*squash_dims=*/dim)  in topk_kernel()

ScatterGatherKernel.cpp
    187   .declare_static_shape(index.sizes(), /*squash_dim=*/dim)  in operator ()()
    274   .declare_static_shape(index.sizes(), /*squash_dim=*/dim)  in operator ()()
    370   .declare_static_shape(index.sizes(), /*squash_dim=*/dim)  in operator ()()
    465   .declare_static_shape(index.sizes(), /*squash_dim=*/dim)  in operator ()()
    561   .declare_static_shape(index.sizes(), /*squash_dim=*/dim)  in operator ()()

TensorCompareKernel.cpp
    59    .declare_static_shape(self.sizes(), /*squash_dims=*/dim)  in compare_base_kernel_core()

ReduceOpsKernel.cpp
    53    .declare_static_shape(self.sizes(), /*squash_dim=*/dim)  in cpu_cum_base_kernel()

/external/pytorch/aten/src/ATen/native/cuda/

Activation.cpp
    50    .declare_static_shape(iter_shape)  in glu_backward_cuda_out()

CrossKernel.cu
    74    .declare_static_shape(result.sizes(), /*squash_dims=*/dim)  in cross_impl()

/external/pytorch/aten/src/ATen/native/

Embedding.cpp
    129   .declare_static_shape(grad.sizes(), /*squash_dims=*/0)  in embedding_dense_backward_cpu()

Normalization.cpp
    253   .declare_static_shape(input.sizes(), /*squash_dims=*/1)  in batch_norm_cpu_update_stats_template()
    370   .declare_static_shape(input.sizes(), /*squash_dims=*/1)  in batch_norm_backward_cpu_template()
    381   .declare_static_shape(input.sizes(), /*squash_dims=*/1));  in batch_norm_backward_cpu_template()
    390   .declare_static_shape(input.sizes(), /*squash_dims=*/1));  in batch_norm_backward_cpu_template()

Sorting.cpp
    449   .declare_static_shape(sizes, /*squash_dims=*/dim)  in kthvalue_out_impl_cpu()
    546   .declare_static_shape(sizes, /*squash_dims=*/dim)  in median_with_indices_impl()

LossCTC.cpp
    286   .declare_static_shape(tensor.sizes(), squash_dims)  in ctc_loss_backward_cpu_template()

SpectralOps.cpp
    1222  .declare_static_shape(input_sizes, dim)  in _fft_fill_with_conjugate_symmetry_()

BatchLinearAlgebra.cpp
    2088  .declare_static_shape(pivots.sizes(), /*squash_dims=*/pivots.dim() - 1)  in TORCH_IMPL_FUNC()

/external/pytorch/aten/src/ATen/native/mps/operations/

CrossKernel.mm
    91    .declare_static_shape(out.sizes(), /*squash_dims=*/dim)

/external/pytorch/aten/src/ATen/

TensorIterator.h
    954   TensorIteratorConfig& declare_static_shape(IntArrayRef shape);
    955   TensorIteratorConfig& declare_static_shape(

TensorIterator.cpp
    169   TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef shape) {  in declare_static_shape() function in at::TensorIteratorConfig
    178   TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef shape, IntArrayRef squ…  in declare_static_shape() function in at::TensorIteratorConfig
    179   declare_static_shape(shape);  in declare_static_shape()

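Every call site above goes through the builder declared in TensorIterator.h: the config is told its final shape up front via declare_static_shape(), and the optional squash_dims argument collapses the dimension the kernel works along, so for_each() hands the loop one slice of that dimension at a time. The sketch below is a minimal, hedged reconstruction of that recurring pattern, modeled loosely on the _dim_apply() call site in SortingKernel.cpp; the function name apply_along_dim_sketch and the empty loop body are invented for illustration, not taken from any of the files listed.

// Minimal sketch of the recurring call-site pattern (apply_along_dim_sketch
// is a made-up name; the loop body is intentionally left empty).
#include <ATen/ATen.h>
#include <ATen/TensorIterator.h>

static void apply_along_dim_sketch(at::Tensor& values, int64_t dim) {
  // Declare the full shape up front and squash `dim`: the iterator then
  // treats that dimension as size 1, so iteration (and parallelism) covers
  // only the remaining dimensions.
  auto iter = at::TensorIteratorConfig()
      .check_all_same_dtype(false)
      .resize_outputs(false)
      .declare_static_shape(values.sizes(), /*squash_dims=*/dim)
      .add_output(values)
      .build();

  iter.for_each([](char** data, const int64_t* strides, int64_t n) {
    // data[0] points at the first of `n` slices in this chunk; advancing by
    // strides[0] moves to the next slice. The squashed dimension itself is
    // traversed inside this body using the tensor's own stride for `dim`.
    (void)data;
    (void)strides;
    (void)n;
  });
}

Squashing the dimension keeps TensorIterator's parallelization over the remaining dimensions while each kernel walks the squashed dimension itself inside the loop body, which is why the same .declare_static_shape(sizes, /*squash_dims=*/dim) line recurs across the sorting, scatter/gather, cumulative-reduce, and batch-norm call sites listed here.
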
/external/pytorch/aten/src/ATen/native/sparse/

SparseTensorMath.cpp
    1847  .declare_static_shape(grad_values_expand.sizes(), /*squash_dims=*/0)  in _sparse_sum_backward_cpu()

/external/pytorch/aten/src/ATen/native/cuda/linalg/

BatchLinearAlgebra.cpp
    2444  .declare_static_shape(pivots_->sizes(), /*squash_dim=*/pivots_->dim() - 1)  in lu_solve_kernel()

/external/pytorch/aten/src/ATen/native/quantized/cpu/kernels/

QuantizedOpKernels.cpp
    2377  .declare_static_shape(sizes, /*squash_dims=*/dim)