#!/usr/bin/env python3
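"""Reproduce the various pegen benchmarks.

Measures wall-clock time and peak memory for parsing, AST building, or
compiling with either the new PEG parser or the old parser, against a single
very large file (data/xxl.py) or the whole standard library.
"""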

import argparse
import ast
import sys
import os
from time import time

import _peg_parser

try:
    import memory_profiler
except ModuleNotFoundError:
    print("Please run `make venv` to create a virtual environment and install"
          " all the dependencies before running this script.")
    sys.exit(1)

# Make the "scripts" package importable from the current working directory.
sys.path.insert(0, os.getcwd())
from scripts.test_parse_directory import parse_directory

argparser = argparse.ArgumentParser(
    prog="benchmark", description="Reproduce the various pegen benchmarks"
)
argparser.add_argument(
    "--parser",
    action="store",
    choices=["new", "old"],
    default="new",
    help="Which parser to benchmark (default is the new parser)",
)
argparser.add_argument(
    "--target",
    action="store",
    choices=["xxl", "stdlib"],
    default="xxl",
    help="Which target to use for the benchmark (default is xxl.py)",
)

subcommands = argparser.add_subparsers(title="Benchmarks", dest="subcommand")
command_compile = subcommands.add_parser(
    "compile", help="Benchmark parsing and compiling to bytecode"
)
command_parse = subcommands.add_parser(
    "parse", help="Benchmark parsing and generating an ast.AST"
)
command_notree = subcommands.add_parser(
    "notree", help="Benchmark parsing without building an ast.AST"
)


def benchmark(func):
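    """Decorator that prints the average wall-clock time over 3 runs of *func*
    and the peak memory (via memory_profiler) of one additional run."""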
    def wrapper(*args):
        times = list()
        for _ in range(3):
            start = time()
            result = func(*args)
            end = time()
            times.append(end - start)
        memory = memory_profiler.memory_usage((func, args))
        print(f"{func.__name__}")
        print(f"\tTime: {sum(times)/3:.3f} seconds, averaged over 3 runs")
        print(f"\tMemory: {max(memory)} MiB peak usage")
        return result

    return wrapper


@benchmark
def time_compile(source, parser):
    if parser == "old":
        return _peg_parser.compile_string(
            source,
            oldparser=True,
        )
    else:
        return _peg_parser.compile_string(source)


@benchmark
def time_parse(source, parser):
    if parser == "old":
        return _peg_parser.parse_string(source, oldparser=True)
    else:
        return _peg_parser.parse_string(source)


@benchmark
def time_notree(source, parser):
    if parser == "old":
        return _peg_parser.parse_string(source, oldparser=True, ast=False)
    else:
        return _peg_parser.parse_string(source, ast=False)


def run_benchmark_xxl(subcommand, parser, source):
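    """Benchmark the requested operation against a single large source string
    (read from data/xxl.py by main())."""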
    if subcommand == "compile":
        time_compile(source, parser)
    elif subcommand == "parse":
        time_parse(source, parser)
    elif subcommand == "notree":
        time_notree(source, parser)


def run_benchmark_stdlib(subcommand, parser):
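    """Benchmark the requested operation by parsing the whole standard library
    (../../Lib) three times with parse_directory."""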
    modes = {"compile": 2, "parse": 1, "notree": 0}
    for _ in range(3):
        parse_directory(
            "../../Lib",
            verbose=False,
            excluded_files=[
                "*/bad*",
                "*/lib2to3/tests/data/*",
            ],
            tree_arg=0,
            short=True,
            mode=modes[subcommand],
            oldparser=(parser == "old"),
        )


def main():
    args = argparser.parse_args()
    subcommand = args.subcommand
    parser = args.parser
    target = args.target

    if subcommand is None:
        argparser.error("A benchmark to run is required")

    if target == "xxl":
        with open(os.path.join("data", "xxl.py"), "r") as f:
            source = f.read()
            run_benchmark_xxl(subcommand, parser, source)
    elif target == "stdlib":
        run_benchmark_stdlib(subcommand, parser)


if __name__ == "__main__":
    main()