# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for accumulate_n() in math_ops."""

import random
import time


from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test


class AccumulateNBenchmark(test.Benchmark):
  """Benchmarks add_n() against accumulate_n()-style summation variants.

  Each AccumulateN* variant sums its inputs by initializing a temporary
  variable and applying one assign_add per input; the variants differ only
  in how that variable is initialized and shaped. Input generators control
  whether the inputs are replicated, independent, or chained by data
  dependencies, so the benchmark exercises different scheduling patterns.
  """

  def _AccumulateNTemplate(self, inputs, init, shape, validate_shape):
    """Sums `inputs` by accumulating into a temporary variable.

    Args:
      inputs: List of tensors (all of the same dtype) to add together.
      init: Tensor assigned to the temporary variable before accumulation.
      shape: Shape used when creating the temporary variable.
      validate_shape: Whether the initializing assign validates `init`'s
        shape against the variable's declared shape.

    Returns:
      A tensor holding the sum of `inputs`.
    """
    var = gen_state_ops.temporary_variable(
        shape=shape, dtype=inputs[0].dtype.base_dtype)
    ref = state_ops.assign(var, init, validate_shape=validate_shape)
    # use_locking=True: the per-input adds may execute concurrently and
    # must not race on the shared ref.
    update_ops = [
        state_ops.assign_add(
            ref, tensor, use_locking=True).op for tensor in inputs
    ]
    # Only destroy (and read out) the variable after every add has run.
    with ops.control_dependencies(update_ops):
      return gen_state_ops.destroy_temporary_variable(ref, var_name=var.op.name)

  def _AccumulateNInitializedWithFirst(self, inputs):
    """Variant: variable initialized to zeros_like(inputs[0]), shape checked."""
    return self._AccumulateNTemplate(
        inputs,
        init=array_ops.zeros_like(inputs[0]),
        shape=inputs[0].get_shape(),
        validate_shape=True)

  def _AccumulateNInitializedWithMerge(self, inputs):
    """Variant: init derived from merge(inputs); shape left unvalidated."""
    return self._AccumulateNTemplate(
        inputs,
        init=array_ops.zeros_like(gen_control_flow_ops.merge(inputs)[0]),
        shape=tensor_shape.TensorShape([0]),
        validate_shape=False)

  def _AccumulateNInitializedWithShape(self, inputs):
    """Variant: variable initialized with an explicit zeros() of known shape."""
    return self._AccumulateNTemplate(
        inputs,
        init=array_ops.zeros(
            shape=inputs[0].get_shape(), dtype=inputs[0].dtype.base_dtype),
        shape=inputs[0].get_shape(),
        validate_shape=True)

  def _GenerateUnorderedInputs(self, size, n):
    """Returns `n` independent random vectors of length `size`, shuffled."""
    inputs = [random_ops.random_uniform(shape=[size]) for _ in range(n)]
    random.shuffle(inputs)
    return inputs

  def _GenerateReplicatedInputs(self, size, n):
    """Returns one random vector of length `size`, repeated `n` times."""
    return n * self._GenerateUnorderedInputs(size, 1)

  def _GenerateOrderedInputs(self, size, n):
    """Returns `n` inputs chained so each depends on the previous one.

    A capacity-1 FIFO queue forces a serial data dependency: input i+1 can
    only be computed after input i has been enqueued and dequeued, so the
    inputs become available strictly in order.
    """
    inputs = self._GenerateUnorderedInputs(size, 1)
    queue = data_flow_ops.FIFOQueue(
        capacity=1, dtypes=[inputs[0].dtype], shapes=[inputs[0].get_shape()])
    for _ in range(n - 1):
      op = queue.enqueue(inputs[-1])
      with ops.control_dependencies([op]):
        inputs.append(math_ops.tanh(1.0 + queue.dequeue()))
    return inputs

  def _GenerateReversedInputs(self, size, n):
    """Like _GenerateOrderedInputs, but the last input becomes available first."""
    inputs = self._GenerateOrderedInputs(size, n)
    inputs.reverse()
    return inputs

  def _SetupAndRunBenchmark(self, graph, inputs, repeats, format_args):
    """Builds all four summation variants in `graph` and times each one.

    NOTE: reads self._template, which benchmarkAccumulateN must set before
    calling this helper.
    """
    with graph.as_default():
      add_n = math_ops.add_n(inputs)
      acc_n_first = self._AccumulateNInitializedWithFirst(inputs)
      acc_n_merge = self._AccumulateNInitializedWithMerge(inputs)
      acc_n_shape = self._AccumulateNInitializedWithShape(inputs)

    test_ops = (("AddN", add_n.op),
                ("AccNFirst", acc_n_first.op),
                ("AccNMerge", acc_n_merge.op),
                ("AccNShape", acc_n_shape.op))

    with session.Session(graph=graph):
      for tag, op in test_ops:
        for _ in range(100):
          op.run()  # Run for warm up.
        start = time.time()
        for _ in range(repeats):
          op.run()
        duration = time.time() - start
        args = format_args + (tag, duration)
        print(self._template.format(*args))

  def _RunBenchmark(self, tag, input_fn, sizes, ninputs, repeats):
    """Runs the benchmark over every (size, ninput) combination.

    A fresh graph is built per combination so timings do not include ops
    from earlier configurations.
    """
    for size in sizes:
      for ninput in ninputs:
        graph = ops.Graph()
        with graph.as_default():
          inputs = input_fn(size, ninput)

        format_args = (tag, size, ninput, repeats)
        self._SetupAndRunBenchmark(graph, inputs, repeats, format_args)

  def benchmarkAccumulateN(self):
    """Benchmark entry point: prints a timing table for every variant."""
    self._template = "{:<15}" * 6  # Six left-aligned, 15-char-wide columns.
    args = {
        "sizes": (128, 128**2),
        "ninputs": (1, 10, 100, 300),
        "repeats": 100
    }
    benchmarks = (("Replicated", self._GenerateReplicatedInputs),
                  ("Unordered", self._GenerateUnorderedInputs),
                  ("Ordered", self._GenerateOrderedInputs),
                  ("Reversed", self._GenerateReversedInputs))

    print(self._template.format("", "Size", "#Inputs", "#Repeat", "Method",
                                "Duration"))
    print("-" * 90)
    for benchmark in benchmarks:
      self._RunBenchmark(*benchmark, **args)
if __name__ == "__main__":
  # Let the TensorFlow test/benchmark runner discover and execute the
  # Benchmark subclass above.
  test.main()