• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1import time
2
3import numpy as np
4
5import torch
6
7
8"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""
9
# Benchmark cases: each input shape is paired (by index) with a repeat spec.
# torch's repeat() requires len(repeat) >= tensor.dim(); all specs below are
# length 4, which covers every shape listed (max rank 4).
input_shapes = (
    (4, 4, 1),
    (16, 1, 32),
    (64, 64, 1, 1),
    (8, 256, 128),
    (1, 64, 128, 32),
    (512, 512),
)

repeats = (
    (1, 1, 1, 64),
    (1, 4, 1, 2),
    (1, 2, 2, 15),
    (1, 1, 3, 2),
    (128, 1, 8, 1),
    (1, 1, 2, 16),
)

NUM_WARMUP_ITERS = 5
NUM_BENCHMARK_ITERS = 10
# Bytes per element; torch.randn produces float32 (4 bytes).
DTYPE_TO_BYTES = {"float": 4}


def generate_data_for_repeat():
    """Create the benchmark inputs and the total bytes moved per pass.

    Returns:
        A pair ``(tensors, nbytes)``: the list of random float32 input
        tensors (one per entry in ``input_shapes``), and the total number
        of bytes read + written when every tensor is repeated once with
        its matching entry in ``repeats``.
    """
    tensors = [torch.randn(*shape) for shape in input_shapes]
    # Traffic per pair = input elements (read) + output elements (written);
    # the output has numel() * prod(repeat) elements.
    element_count = sum(
        t.numel() + t.numel() * np.prod(reps)
        for t, reps in zip(tensors, repeats)
    )
    return tensors, element_count * DTYPE_TO_BYTES["float"]


input_tensors, total_bytes = generate_data_for_repeat()
BYTES_TO_MB = 1.0 / 1000.0 / 1000.0
44
45
def pt_repeat(input_tensor, repeat):
    """Tile ``input_tensor`` along each dimension per the ``repeat`` counts."""
    repeated = input_tensor.repeat(repeat)
    return repeated
48
49
def pt_repeat_n_times(niters):
    """Run the repeat op over every (tensor, repeat) pair, ``niters`` times.

    Results are discarded; this exists purely to generate timed work.
    """
    for _ in range(niters):
        for tensor, reps in zip(input_tensors, repeats):
            pt_repeat(tensor, reps)
54
55
if __name__ == "__main__":
    # Warm-up runs: exclude one-time costs (allocator growth, code caching)
    # from the measured region.
    pt_repeat_n_times(NUM_WARMUP_ITERS)
    # time.perf_counter() is the right timer for benchmarking: it is
    # monotonic and higher-resolution than time.time(), which is wall-clock
    # and can jump under NTP/system clock adjustments.
    start = time.perf_counter()
    pt_repeat_n_times(NUM_BENCHMARK_ITERS)
    total_time_s = time.perf_counter() - start
    total_time_per_iter_s = total_time_s / NUM_BENCHMARK_ITERS
    # MB/s moved per iteration: total_bytes is the read+write traffic of one
    # full pass over all (tensor, repeat) pairs.
    achieved_bandwidth = (total_bytes * BYTES_TO_MB) / total_time_per_iter_s
    print(f"Time:{total_time_per_iter_s} Achieved Bandwidth:{achieved_bandwidth} MB/s")
65