Lines Matching full:dim

120     dim: int,
126 dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
127 last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)
139 output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)
147 dim: int,
158 dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
159 dim_size = n if n is not None else input.shape[dim]
167 ret = prims.fft_r2c(input, dim=dims, onesided=onesided)
176 dim: int,
185 dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
186 dim_size = n if n is not None else input.shape[dim]
194 ret = prims.fft_c2c(input, dim=dims, forward=forward)
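
The three helpers above are thin wrappers over the prims-level transforms: c2r (complex-to-real), r2c (real-to-complex) and c2c (complex-to-complex). A quick check of the c2r default at line 127, using the public torch.fft API rather than the private helpers (the tensor values are illustrative, not from the file):

    import torch

    # last_dim_size defaults to 2 * (input.shape[dim] - 1), so a length-9
    # one-sided spectrum inverts to a length-16 real signal unless n is given.
    half = torch.fft.rfft(torch.randn(16))            # one-sided spectrum, shape (9,)
    assert torch.fft.irfft(half).shape == (16,)       # 2 * (9 - 1)
    assert torch.fft.irfft(half, n=15).shape == (15,)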
203 dim: int = -1,
207 return _fft_c2c("fft", input, n, dim, norm, forward=True)
209 return _fft_r2c("fft", input, n, dim, norm, forward=True, onesided=False)
217 dim: int = -1,
221 return _fft_c2c("ifft", input, n, dim, norm, forward=False)
223 return _fft_r2c("ifft", input, n, dim, norm, forward=False, onesided=False)
231 dim: int = -1,
234 return _fft_r2c("rfft", input, n, dim, norm, forward=True, onesided=True)
242 dim: int = -1,
245 return _fft_c2r("irfft", input, n, dim, norm, forward=False)
253 dim: int = -1,
256 return _fft_c2r("hfft", input, n, dim, norm, forward=True)
264 dim: int = -1,
267 return _fft_r2c("ihfft", input, n, dim, norm, forward=False, onesided=True)
276 input: TensorLikeType, shape: Optional[ShapeType], dim: Optional[DimsType]
278 """Convert the shape and dim arguments into a canonical form where neither are optional"""
282 if dim is not None:
283 if not isinstance(dim, Sequence):
284 dim = (dim,)
285 ret_dims = utils.canonicalize_dims(input_dim, dim, wrap_scalar=False)
296 # Has shape, might have dim
298 dim is None or len(dim) == len(shape),
299 lambda: "When given, dim and shape arguments must have the same length",
310 if dim is None:
317 elif dim is None:
318 # No shape, no dim
322 # No shape, has dim
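
Only some branches of _canonicalize_fft_shape_and_dim_args are matched above. The sketch below is a hypothetical, simplified mirror of those branches, assuming the usual PyTorch convention that -1 in s means "keep the input size"; the real helper canonicalizes dims through utils.canonicalize_dims and reports errors through check-style guards like the one at lines 298-299.

    from typing import Optional, Sequence, Tuple, Union

    def canonicalize_shape_and_dim_sketch(
        input_shape: Tuple[int, ...],
        shape: Optional[Union[int, Sequence[int]]],
        dim: Optional[Union[int, Sequence[int]]],
    ) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
        """Hypothetical mirror of the branches visible at lines 276-322."""
        ndim = len(input_shape)
        if dim is not None:
            if not isinstance(dim, Sequence):
                dim = (dim,)                           # a scalar dim becomes a 1-tuple
            dim = tuple(d % ndim for d in dim)         # canonicalize negative indices
        if shape is not None:                          # has shape, might have dim
            if not isinstance(shape, Sequence):
                shape = (shape,)
            if dim is not None and len(dim) != len(shape):
                raise ValueError(
                    "When given, dim and shape arguments must have the same length"
                )
            if dim is None:                            # shape given alone: use the last dims
                dim = tuple(range(ndim - len(shape), ndim))
            shape = tuple(s if s != -1 else input_shape[d] for s, d in zip(shape, dim))
        elif dim is None:                              # no shape, no dim
            dim = tuple(range(ndim))
            shape = tuple(input_shape)
        else:                                          # no shape, has dim
            shape = tuple(input_shape[d] for d in dim)
        return shape, dim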
343 dim: Tuple[int, ...],
353 x = _resize_fft_input(input, dim, shape)
354 output = prims.fft_c2c(x, dim=dim, forward=forward)
363 dim: Optional[DimsType] = None,
366 (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
368 return _fftn_c2c("fftn", x, shape, dim, norm, forward=True)
376 dim: Optional[DimsType] = None,
379 (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
381 return _fftn_c2c("ifftn", x, shape, dim, norm, forward=False)
389 dim: Optional[DimsType] = None,
396 shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
398 input = _resize_fft_input(input, dim, shape)
399 out = prims.fft_r2c(input, dim=dim, onesided=True)
408 dim: Optional[DimsType] = None,
415 shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
418 input = _resize_fft_input(input, dim, shape)
420 tmp = prims.fft_r2c(input, dim=dim[-1:], onesided=True)
422 if len(dim) == 1:
427 tmp = prims.fft_c2c(tmp, dim=dim[:-1], forward=False)
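
ihfftn above is built from a one-sided r2c transform over the last requested dimension (line 420) followed by an inverse c2c transform over the remaining ones (line 427). A quick shape check through the public API (illustrative values):

    import torch

    x = torch.randn(4, 6)
    out = torch.fft.ihfftn(x)
    # Only the last transformed dimension is halved by the one-sided step.
    assert out.shape == (4, 6 // 2 + 1) and out.is_complex()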
433 dim: Tuple[int, ...]
441 dim: Optional[DimsType],
443 """Canonicalize shape and dim arguments for n-dimensional c2r transforms,
444 as well as calculating the last_dim_size which is shape[dim[-1]] for the output"""
445 (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
449 last_dim_size = 2 * (input.shape[dim[-1]] - 1)
461 shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size
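
For the n-dimensional c2r transforms, when s is not given the last transformed dimension of the real output defaults to 2 * (input.shape[dim[-1]] - 1) (line 449). A worked instance via the public API:

    import torch

    spec = torch.randn(4, 5, dtype=torch.complex64)
    # 2 * (5 - 1) = 8 along the last dim; other dims keep their size.
    assert torch.fft.irfftn(spec).shape == (4, 8)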
470 dim: Optional[DimsType] = None,
473 shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
474 "irfftn", input, s, dim
477 input = _resize_fft_input(input, dim, shape)
478 out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size)
479 return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False)
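
The normalization at line 479 uses the product of the output sizes over the transformed dims, which is consistent with the default "backward" normalization making rfftn/irfftn a round trip. A quick check, with a loose tolerance since the values are float32:

    import torch

    x = torch.randn(4, 8)
    roundtrip = torch.fft.irfftn(torch.fft.rfftn(x), s=x.shape)
    assert torch.allclose(roundtrip, x, atol=1e-5)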
487 dim: Optional[DimsType] = None,
490 shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
491 "hfftn", input, s, dim
494 input = _resize_fft_input(input, dim, shape)
496 tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input
499 out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size)
508 dim: Optional[DimsType] = (-2, -1),
511 return torch.fft.fftn(input, s=s, dim=dim, norm=norm)
519 dim: Optional[DimsType] = (-2, -1),
522 return torch.fft.ifftn(input, s=s, dim=dim, norm=norm)
530 dim: Optional[DimsType] = (-2, -1),
533 return torch.fft.rfftn(input, s=s, dim=dim, norm=norm)
541 dim: Optional[DimsType] = (-2, -1),
544 return torch.fft.irfftn(input, s=s, dim=dim, norm=norm)
552 dim: Optional[DimsType] = (-2, -1),
555 return torch.fft.hfftn(input, s=s, dim=dim, norm=norm)
563 dim: Optional[DimsType] = (-2, -1),
566 return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm)
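
The 2-D entry points above simply forward to the n-dimensional versions with dim=(-2, -1) as the default, so the following two calls should agree (quick check, not taken from the file):

    import torch

    x = torch.randn(3, 4, 5, dtype=torch.complex64)
    assert torch.allclose(torch.fft.fft2(x), torch.fft.fftn(x, dim=(-2, -1)))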
569 def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> List[int]:
571 if dim is None:
573 elif not isinstance(dim, Sequence):
574 return [dim]
576 return list(dim)
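
_default_alldims normalizes the dim argument for the shift helpers below: None means every dimension, a bare int becomes a one-element list, and any other sequence is copied into a list. A stand-alone mirror of that behavior (the real helper takes the tensor itself and reads its rank; the signature here is an assumption):

    from typing import List, Optional, Sequence, Union

    def default_alldims(dim: Optional[Union[int, Sequence[int]]], ndim: int) -> List[int]:
        if dim is None:
            return list(range(ndim))        # all dimensions
        if not isinstance(dim, Sequence):
            return [dim]                    # single int
        return list(dim)                    # any sequence

    assert default_alldims(None, 3) == [0, 1, 2]
    assert default_alldims(-1, 3) == [-1]
    assert default_alldims((0, 2), 3) == [0, 2]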
580 def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
581 dims = _default_alldims(dim, input)
587 def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
588 dims = _default_alldims(dim, input)
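
fftshift and ifftshift run over every dimension by default (via _default_alldims), and their documented behavior is to roll each selected dimension by half its size. A small usage check:

    import torch

    x = torch.arange(6)
    assert torch.equal(torch.fft.fftshift(x), torch.roll(x, 3))      # shift by n // 2
    assert torch.equal(torch.fft.ifftshift(torch.fft.fftshift(x)), x)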