# Copyright 2019 The TensorFlow Authors. All Rights Reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15r"""Benchmarks for low-level graph building primitives.
16
17To run CPU benchmarks:
18  bazel run -c opt graph_building_benchmarks -- --benchmarks=.
19
20To run GPU benchmarks:
21  bazel run --config=cuda -c opt --copt="-mavx" graph_building_benchmarks -- \
22    --benchmarks=.
23
To run a subset of benchmarks, use the --benchmarks flag:
--benchmarks: the list of benchmarks to run. The specified value is interpreted
as a regular expression, and any benchmark whose name contains a partial match
to that expression is executed.
e.g. --benchmarks=".*MatMul.*" will run all MatMul-related benchmarks.

"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.platform import test
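
# The benchmarks below call gen_math_ops (the auto-generated raw op wrappers)
# directly rather than the higher-level math_ops layer, presumably so that the
# timings isolate graph construction from extra Python dispatch overhead.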


def run_benchmark(func, num_iters):
  """Returns the total wall time, in seconds, of num_iters calls to func()."""
  start = time.time()
  for _ in range(num_iters):
    func()
  end = time.time()
  return end - start
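
# Usage sketch (mirrors the benchmarks below): run_benchmark(
#     lambda: gen_math_ops.add(x, y), 1000) returns the total wall time, in
# seconds, spent adding 1000 Add nodes to the default graph.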


class SingleOpBenchmarks(test.Benchmark):
  """Benchmark for graph building time of ops."""

  def _run_and_report(self, func, num_iters):
    total_time = run_benchmark(func, num_iters)
    # Report the mean wall time per iteration in microseconds, plus the
    # equivalent rate of ops built per second.
    mean_us = total_time * 1e6 / num_iters
    self.report_benchmark(
        iters=num_iters,
        wall_time=mean_us,
        extras={
            "examples_per_sec": float("{0:.3f}".format(num_iters / total_time)),
        })

  def benchmarkAddScalars(self):
    with context.execution_mode(context.GRAPH_MODE):
      x = array_ops.placeholder(shape=[], dtype=dtypes.float32, name="x")
      y = array_ops.placeholder(shape=[], dtype=dtypes.float32, name="y")

      def bench():
        # Only builds the Add node; the graph is never executed.
        return gen_math_ops.add(x, y)

      self._run_and_report(bench, 1000)

  def benchmarkAddBatchedMatrices(self):
    with context.execution_mode(context.GRAPH_MODE):
      x = array_ops.placeholder(
          shape=[32, 784, 1000], dtype=dtypes.float32, name="x")
      y = array_ops.placeholder(
          shape=[32, 784, 1000], dtype=dtypes.float32, name="y")

      def bench():
        return gen_math_ops.add(x, y)

      self._run_and_report(bench, 1000)

  def benchmarkMatMul(self):
    with context.execution_mode(context.GRAPH_MODE):
      x = array_ops.placeholder(
          shape=[784, 1000], dtype=dtypes.float32, name="x")
      y = array_ops.placeholder(
          shape=[1000, 1000], dtype=dtypes.float32, name="y")

      def bench():
        return gen_math_ops.mat_mul(x, y)

      self._run_and_report(bench, 1000)
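
  # A hypothetical additional benchmark would follow the same pattern: build
  # the placeholder inputs once outside the timed closure, then time only the
  # op-construction call. Sketch (illustrative, not part of the original
  # suite; gen_math_ops.mul is the generated wrapper for the Mul op):
  #
  #   def benchmarkMulScalars(self):
  #     with context.execution_mode(context.GRAPH_MODE):
  #       x = array_ops.placeholder(shape=[], dtype=dtypes.float32, name="x")
  #       y = array_ops.placeholder(shape=[], dtype=dtypes.float32, name="y")
  #
  #       def bench():
  #         return gen_math_ops.mul(x, y)
  #
  #       self._run_and_report(bench, 1000)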


if __name__ == "__main__":
  test.main()