Lines Matching +full:multigpu +full:- +full:test

58     H(x)[i] = conj(x[-i])
61 mid = (x.size(dim) - 1) // 2
68 idx_neg[dim] = slice(-mid, None)
81 # Decompose into Hermitian (FFT of real) and anti-Hermitian (FFT of imaginary)
82 n_fft = x.size(-2)
85 hconj = _hermitian_conj(x, dim=-2)
87 x_antihermitian = (x - hconj) / 2
89 istft_imag = torch.istft(-1j * x_antihermitian[slc], *args, **kwargs, onesided=True)
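
These fragments come from a helper that inverts the stft of a complex signal by
splitting the spectrogram into a Hermitian part (the transform of the real
component) and an anti-Hermitian part (the transform of the imaginary
component), each of which a onesided istft can invert. A minimal sketch of the
underlying identity on a plain fft, with hconj as a hypothetical stand-in for
the file's _hermitian_conj helper:

    import torch

    def hconj(X):
        # H(X)[i] = conj(X[-i]), indices taken modulo len(X)
        return torch.roll(X.flip(0), 1).conj()

    x = torch.randn(6, dtype=torch.cdouble)
    X = torch.fft.fft(x)

    X_herm = (X + hconj(X)) / 2  # Hermitian part: the fft of x.real
    X_anti = (X - hconj(X)) / 2  # anti-Hermitian part: the fft of 1j * x.imag

    torch.testing.assert_close(X_herm, torch.fft.fft(x.real))
    torch.testing.assert_close(X_anti, torch.fft.fft(1j * x.imag))
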
98 .. math:: X(m, \omega) = \sum_n x[n]w[n - m] e^{-jn\omega}
102 X = torch.empty((n_fft, (x.numel() - n_fft + hop_length) // hop_length),
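
The math line defines the short-time Fourier transform that the file's
reference implementation evaluates directly. A self-contained sketch of such a
reference, assuming a rectangular window and no centering, checked against
torch.stft:

    import torch

    def stft_reference(x, hop_length, n_fft):
        # Directly evaluate X(m, w) = sum_n x[n] w[n - m] e^{-jnw}
        # for a rectangular window and no centering (slow reference).
        n_frames = (x.numel() - n_fft + hop_length) // hop_length
        X = torch.empty((n_fft, n_frames), dtype=torch.complex64)
        for m in range(n_frames):
            start = m * hop_length
            X[:, m] = torch.fft.fft(x[start:start + n_fft])
        return X

    x = torch.randn(64)
    expected = torch.stft(x, n_fft=8, hop_length=4, window=torch.ones(8),
                          center=False, onesided=False, return_complex=True)
    torch.testing.assert_close(stft_reference(x, hop_length=4, n_fft=8), expected)
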
149 (-1, 0),
153 # Test transforming middle dimensions of multi-dim tensor
157 (1, 2, -2,),
175 torch.half : tol(1e-2, 1e-2),
176 torch.chalf : tol(1e-2, 1e-2),
181 # Test that round trip through ifft(fft(x)) is the identity
190 (-1, 0),
203 (-1, 0),
209 # Real-only functions
212 # generate true half-complex input
238 match = r"Invalid number of data points \([-\d]*\) specified"
246 match = r"Invalid number of data points \([-\d]*\) specified"
260 with self.assertRaisesRegex(RuntimeError, "rfftn expects a real-valued input tensor"):
276 t = torch.randint(-2, 2, (64,), device=device, dtype=dtype)
301 # NOTE: With hfft and default args where output_size n=2*(input_size - 1),
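
To illustrate the note: with default arguments, hfft maps a half-spectrum of k
bins to 2 * (k - 1) real samples, so odd output lengths need n passed
explicitly. A quick check:

    import torch

    t = torch.randn(5, dtype=torch.cdouble)
    out = torch.fft.hfft(t)            # default n = 2 * (input_size - 1)
    assert out.size(0) == 8
    assert out.dtype == torch.float64
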
352 # nd-fft tests
365 *product(range(2, 5), (None,), (None, (0,), (0, -1))),
388 torch.half : tol(1e-2, 1e-2),
389 torch.chalf : tol(1e-2, 1e-2),
400 *product(range(2, 5), (None, (0,), (0, -1))),
409 # Real-only functions
412 # generate true half-complex input
452 op(a, dim=(2, -1))
491 torch.half : tol(1e-2, 1e-2),
499 *product(range(2, 5), (None, (0,), (0, -1))),
516 lastdim = actual_dims[-1]
530 torch.half : tol(1e-2, 1e-2),
538 *product(range(2, 5), (None, (0,), (0, -1))),
555 # Slice off the half-symmetric component
556 lastdim = -1 if dim is None else dim[-1]
566 # 2d-fft tests
568 # NOTE: 2d transforms are only thin wrappers over n-dim transforms,
599 …def fn(t: torch.Tensor, s: Optional[List[int]], dim: List[int] = (-2, -1), norm: Optional[str] = N…
631 # Real-only functions
649 expect = fnd(x, dim=(-2, -1), **kwargs)
667 func(a, dim=(2, -1))
676 with self.assertRaisesRegex(RuntimeError, "rfftn expects a real-valued input"):
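
As the NOTE above says, the 2-D transforms are only thin wrappers over the
n-dim transforms, which is why these tests compare each 2-D call against the
n-dim one with dim=(-2, -1). The equivalence in isolation:

    import torch

    x = torch.randn(4, 6, dtype=torch.cdouble)
    torch.testing.assert_close(torch.fft.fft2(x),
                               torch.fft.fftn(x, dim=(-2, -1)))
    torch.testing.assert_close(torch.fft.rfft2(x.real),
                               torch.fft.rfftn(x.real, dim=(-2, -1)))
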
726 *product(((11,), (12,)), (None, 0, -1)),
727 *product(((4, 5), (6, 6)), (None, 0, (-1,))),
751 sorted_fft_freqs = torch.arange(-(n // 2), n - (n // 2),
755 # Test fftshift sorts the fftfreq output
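
A standalone version of this check, constructing the sorted frequencies the
same way the test does:

    import torch

    n = 7
    shifted = torch.fft.fftshift(torch.fft.fftfreq(n))
    sorted_freqs = torch.arange(-(n // 2), n - (n // 2)) / n
    torch.testing.assert_close(shifted, sorted_freqs)
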
769 dim = tuple(range(-signal_ndim, 0))
773 self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='fft and ifft')
776 self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='ifft and fft')
781 signal_sizes = x.size()[-signal_ndim:]
782 dim = tuple(range(-signal_ndim, 0))
786 self.assertEqual(x, rec, atol=1e-8, rtol=0, msg='rfft and irfft')
790 self.assertEqual(x_complex, rec, atol=1e-8, rtol=0, msg='fft and ifft (from real)')
807 # non-contiguous case
843 … with plan_cache_max_size(devices[0], max(1, torch.backends.cuda.cufft_plan_cache.size - 10)):
855 with self.assertRaisesRegex(RuntimeError, r"must be non-negative"):
856 torch.backends.cuda.cufft_plan_cache.max_size = -1
858 with self.assertRaisesRegex(RuntimeError, r"read-only property"):
859 torch.backends.cuda.cufft_plan_cache.size = -1
864 # Multigpu tests
866 # Test that different GPUs have different caches
869 self.assertEqual(torch.fft.rfftn(x0, dim=(-2, -1)), torch.fft.rfftn(x1, dim=(-2, -1)))
877 # Test that un-indexed `torch.backends.cuda.cufft_plan_cache` uses current device
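
A sketch of the per-device cuFFT plan-cache behaviour these multigpu lines
exercise, assuming at least two CUDA devices are available:

    import torch

    if torch.cuda.device_count() >= 2:
        x0 = torch.randn(8, 8, device='cuda:0')
        x1 = x0.to('cuda:1')
        # The same transform on different GPUs gives the same result...
        torch.testing.assert_close(torch.fft.rfftn(x0, dim=(-2, -1)).cpu(),
                                   torch.fft.rfftn(x1, dim=(-2, -1)).cpu())
        # ...but each device keeps its own plan cache, and the un-indexed
        # accessor follows the current device.
        with torch.cuda.device(1):
            assert (torch.backends.cuda.cufft_plan_cache.max_size
                    == torch.backends.cuda.cufft_plan_cache[1].max_size)
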
903 # Regression test for https://github.com/pytorch/pytorch/issues/109448
913 self.assertTrue((x.grad - dx).abs().max() == 0)
914 self.assertFalse((x.grad - x).abs().max() == 0)
932 x = x.view(1, -1)
935 # however, we use the pre-0.9 default ('reflect')
943 result.append(torch.from_numpy(np.stack([ri.real, ri.imag], -1)))
962 …self.assertEqual(result, ref_result, atol=7e-6, rtol=0, msg='stft comparison against librosa', exa…
986 _test((10,), -1, 1, expected_error=RuntimeError)
1071 length=x.size(-1), **common_kwargs)
1077 length=x.size(-1), **common_kwargs)
1105 self.assertEqual(x_stft.size(-2), n_fft) # Not onesided
1109 center=True, normalized=normalized, length=x.size(-1),
1115 atol=1e-6, rtol=0)
1192 n_fft = x.size(-2)
1220 # stft is currently warning that it requires return_complex while an upgrader is written
1246 # FFT functions should not modify their input (gh-34551)
1250 spectrum = torch.fft.fftn(signal, dim=(-2, -1))
1254 _ = torch.fft.ifftn(spectrum, dim=(-2, -1))
1257 half_spectrum = torch.fft.rfftn(signal, dim=(-2, -1))
1261 _ = torch.fft.irfftn(half_spectrum_copy, s=(2, 2), dim=(-2, -1))
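
A standalone version of the gh-34551 regression check, verifying the
transforms leave their arguments untouched:

    import torch

    signal = torch.randn(2, 2, dtype=torch.cdouble)
    signal_copy = signal.clone()
    spectrum = torch.fft.fftn(signal, dim=(-2, -1))
    assert torch.equal(signal, signal_copy)

    spectrum_copy = spectrum.clone()
    _ = torch.fft.ifftn(spectrum, dim=(-2, -1))
    assert torch.equal(spectrum, spectrum_copy)
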
1267 # Regression test for gh-58724 and gh-63152
1283 """stft -> istft should recover the original signale"""
1296 """stft -> istft should recover the original signale"""
1311 atol=7e-6, rtol=0, exact_dtype=True)
1380 inversed = torch.istft(stft, length=original.size(-1), **istft_kwargs)
1381 n_frames = stft.size(-1)
1383 … len_expected = stft_kwargs["n_fft"] // 2 + stft_kwargs["hop_length"] * (n_frames - 1)
1385 len_expected = stft_kwargs["n_fft"] + stft_kwargs["hop_length"] * (n_frames - 1)
1390 # test that the padding points of the inverted signal are all zeros
1394 atol=7e-6, rtol=0, exact_dtype=True)
1397 atol=7e-6, rtol=0, exact_dtype=True)
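
The branches above encode the expected istft output length in terms of n_fft,
hop_length, and n_frames. A sketch of the uncentered case, where the output
spans n_fft + hop_length * (n_frames - 1) samples, using a rectangular window
so the overlap-add envelope never vanishes:

    import torch

    n_fft, hop = 16, 4
    x = torch.randn(100)
    window = torch.ones(n_fft)
    spec = torch.stft(x, n_fft, hop_length=hop, window=window,
                      center=False, return_complex=True)
    n_frames = spec.size(-1)
    y = torch.istft(spec, n_fft, hop_length=hop, window=window, center=False)
    assert y.size(-1) == n_fft + hop * (n_frames - 1)  # == 100 here
    torch.testing.assert_close(y, x)
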
1462 stft[n].imag = torch.tensor(-stft_largest_val, dtype=dtype)
1464 if 0 <= L - n < stft.size(0):
1466 stft[L - n].imag = torch.tensor(stft_largest_val, dtype=dtype)
1472 original = original[..., :inverse.size(-1)]
1473 self.assertEqual(inverse, original, atol=1e-3, rtol=0)
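
The fragment above builds a spectrum whose bins n and L - n are complex
conjugates, keeping the full spectrum Hermitian so the inverse transform stays
real. The same constraint, demonstrated on a plain ifft:

    import torch

    L = 8
    spec = torch.zeros(L, dtype=torch.cdouble)
    n = 2
    spec[n] = 1 + 2j
    spec[L - n] = (1 + 2j).conjugate()  # mirror bin must be the conjugate
    x = torch.fft.ifft(spec)
    torch.testing.assert_close(x.imag, torch.zeros(L, dtype=torch.float64))
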
1500 self.assertEqual(istft, estimate, atol=1e-5, rtol=0)
1566 self.assertEqual(i_original.repeat(1, 1), i_single, atol=1e-6, rtol=0, exact_dtype=True)
1567 self.assertEqual(i_original.repeat(4, 1), i_multi, atol=1e-6, rtol=0, exact_dtype=True)
1570 @skipIf(not TEST_MKL, "Test requires MKL")
1572 # Test that the (i)stft window must be on the same device as the input
1595 def __init__(self) -> None:
1623 def test(self, device):
1632 setattr(TestFFTDocExamples, 'test_' + doc_test.name, skipCPUIfNoFFT(test))