Lines Matching full:dim
In macro TORCH_CHECK_DIM_SIZE:
   10  #define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE) \
   12      T.dim() == DIM && T.size(DIM_SIZE) == SIZE, \
   14      DIM, \
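Taken together, these fragments describe a helper macro that checks a tensor's rank and its size along one dimension inside a single TORCH_CHECK. Below is a minimal sketch of how such a macro can be assembled; only the condition from line 12 and the bare DIM argument from line 14 come from the matches, while the error-message text is an assumption added for illustration.

    // Sketch only: the condition mirrors source line 12; the message
    // arguments are illustrative assumptions, not the file's actual wording.
    #define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE)    \
      TORCH_CHECK(                                          \
          T.dim() == DIM && T.size(DIM_SIZE) == SIZE,       \
          "Need " #T " of dimension ",                      \
          DIM,                                              \
          " with " #T ".size[", DIM_SIZE, "] == ", SIZE,    \
          ", but got a tensor of shape ", T.sizes())

The call matched at line 181 shows the intended usage pattern: TORCH_CHECK_DIM_SIZE(input, input.dim(), (is_batch ? 1 : 0), weight.size(1)), i.e. tensor, expected rank, dimension index, expected size.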
In get_output_size() (two overloads, starting at lines 37 and 57):
   37  template <int64_t dim>
   45    for (const auto index : c10::irange(dim)) {
   48        input.size(index + input.dim() - dim) + 2 * pad_size[index] -
   57  template <int64_t dim>
   65    auto output_size = get_output_size<dim>(
   68    if (input.dim() == dim + 2) {
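The matches in get_output_size() point at the spatial output-size arithmetic of a dilated convolution: line 48 adds 2 * pad to the input extent, and the continuation (not among the matches) subtracts the dilated kernel span and divides by the stride, which is the standard formula and an assumption here. The second overload reuses that result (line 65) and treats input.dim() == dim + 2 as the batched case (line 68). A standalone sketch of the per-dimension formula, with illustrative names and plain truncating integer division, which agrees with the usual formula for the non-negative sizes involved:

    #include <cstdint>
    #include <vector>

    // Sketch of the per-dimension output-size formula suggested by source
    // lines 45-48; names and the exact rounding behavior are assumptions.
    std::vector<int64_t> dilated_conv_output_size(
        const std::vector<int64_t>& input_size,     // spatial extents only
        const std::vector<int64_t>& kernel_size,
        const std::vector<int64_t>& stride_size,
        const std::vector<int64_t>& pad_size,
        const std::vector<int64_t>& dilation_size) {
      std::vector<int64_t> sizes;
      for (std::size_t i = 0; i < input_size.size(); ++i) {
        // Effective kernel extent once dilation spreads the taps apart.
        const int64_t span = dilation_size[i] * (kernel_size[i] - 1) + 1;
        sizes.push_back(
            (input_size[i] + 2 * pad_size[i] - span) / stride_size[i] + 1);
      }
      return sizes;
    }

For example, a 32-wide dimension with kernel 3, padding 0, stride 1 and dilation 2 has span 2 * (3 - 1) + 1 = 5 and an output extent of (32 - 5) / 1 + 1 = 28.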
In slow_conv_dilated_shape_check() (starting at line 77):
   77  template <int64_t dim>
  103      kernel_size.size() == dim,
  105      dim,
  109      stride_size.size() == dim,
  111      dim,
  115      dilation_size.size() == dim,
  117      dim,
  121      pad_size.size() == dim,
  123      dim,
  142    bool is_batch = input.dim() == dim + 2;
  144    int64_t ndim = n + dim;
  146    // input dim has to be dim + 1 if not batched
  148        input.dim() == dim + 1,
  150        input.dim(),
  155    auto output_size = get_output_size<dim>(
  167        weight.dim() == dim + 2,
  169        dim + 2,
  171        weight.dim(),
  172        "D tensor dim=",
  173        dim);
  181    TORCH_CHECK_DIM_SIZE(input, input.dim(), (is_batch ? 1 : 0), weight.size(1));
  186        bias.dim() == 1,
  188        bias.dim(),
  196        grad_output.dim() == ndim,
  200        grad_output.dim(),
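These matches outline the consistency checks performed before the convolution runs: every size parameter (kernel, stride, dilation, padding) must have exactly dim entries (lines 103-123); the input counts as batched when input.dim() == dim + 2 and must otherwise be (dim + 1)-D (lines 142-148); the weight must be (dim + 2)-D (line 167); the input's channel dimension must equal weight.size(1) (line 181); bias must be 1-D (line 186); and grad_output, when present, must have ndim = n + dim dimensions (lines 144 and 196). A condensed sketch of those checks, with a hypothetical TensorLike stand-in and plain asserts in place of the file's TORCH_CHECK calls:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Minimal stand-in for at::Tensor, just enough for the sketch (assumption).
    struct TensorLike {
      std::vector<int64_t> sizes_;
      int64_t dim() const { return static_cast<int64_t>(sizes_.size()); }
      int64_t size(int64_t d) const { return sizes_[static_cast<std::size_t>(d)]; }
    };

    // Condensed, hypothetical version of the checks the matched lines perform;
    // the real function reports failures via TORCH_CHECK rather than assert.
    template <int64_t dim>
    void shape_check_sketch(
        const TensorLike& input,
        const TensorLike& weight,
        const TensorLike& bias,
        const std::vector<int64_t>& kernel_size,
        const std::vector<int64_t>& stride_size,
        const std::vector<int64_t>& pad_size,
        const std::vector<int64_t>& dilation_size) {
      // One entry per spatial dimension for every size parameter (lines 103-123).
      assert(static_cast<int64_t>(kernel_size.size()) == dim);
      assert(static_cast<int64_t>(stride_size.size()) == dim);
      assert(static_cast<int64_t>(dilation_size.size()) == dim);
      assert(static_cast<int64_t>(pad_size.size()) == dim);

      // Batched input has dim + 2 dimensions, unbatched has dim + 1 (lines 142-148).
      const bool is_batch = input.dim() == dim + 2;
      assert(is_batch || input.dim() == dim + 1);

      // Weight is always (dim + 2)-dimensional, and its size(1) must match the
      // input's channel dimension (lines 167 and 181).
      assert(weight.dim() == dim + 2);
      assert(input.size(is_batch ? 1 : 0) == weight.size(1));

      // Bias, when present, is a 1-D tensor (line 186).
      assert(bias.dim() == 1);
    }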