import operator_benchmark as op_bench

import torch


"""Microbenchmarks for ClipRanges operator."""
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")

# Configs for C2 ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
    LENGTH=range(1, 100),
    M=[1],
    N=[2],
    MAX_LENGTH=range(1, 100),
    device=["cpu", "cuda"],
    dtype=[torch.int32],
    tags=["long"],
)


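# Short configs: a handful of fixed (LENGTH, M, N, MAX_LENGTH) shapes, crossed with
# cpu and cuda devices, for quick runs.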
clip_ranges_short_configs = op_bench.config_list(
    attrs=[
        [6, 1, 2, 1, torch.int32],
        [7, 1, 2, 2, torch.int32],
        [8, 1, 2, 3, torch.int32],
        [9, 1, 2, 4, torch.int32],
        [10, 1, 2, 5, torch.int32],
    ],
    attr_names=["LENGTH", "M", "N", "MAX_LENGTH", "dtype"],
    cross_product_configs={
        "device": ["cpu", "cuda"],
    },
    tags=["short"],
)


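# Benchmark harness: init() builds a random (LENGTH, M, N) tensor cast to dtype along with
# the max_length scalar; forward() invokes the fb::clip_ranges op on those inputs.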
class ClipRangesBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, LENGTH, M, N, MAX_LENGTH, device, dtype):
        self.inputs = {
            "input": torch.rand(LENGTH, M, N, device=device).type(dtype),
            "max_length": MAX_LENGTH,
        }
        self.set_module_name("clip_ranges")

    def forward(self, input, max_length: int):
        return torch.ops.fb.clip_ranges(input, max_length)


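# Generate one PyTorch benchmark case per entry in the combined long and short config lists.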
op_bench.generate_pt_test(
    clip_ranges_long_configs + clip_ranges_short_configs, ClipRangesBenchmark
)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()