# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Operations for automatic batching and unbatching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_batch_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_batch_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.tf_export import tf_export


@tf_export("nondifferentiable_batch_function")
def batch_function(num_batch_threads,
                   max_batch_size,
                   batch_timeout_micros,
                   allowed_batch_sizes=None,
                   max_enqueued_batches=10,
                   autograph=True):
  """Batches the computation done by the decorated function.

  So, for example, in the following code

  ```python
  @batch_function(1, 2, 3)
  def layer(a):
    return tf.matmul(a, a)

  b = layer(w)
  ```

  if more than one session.run call is simultaneously trying to compute `b`,
  the values of `w` will be gathered, non-deterministically concatenated
  along the first axis, and only one thread will run the computation. See the
  documentation of the `Batch` op for more details.

  Assumes that all arguments of the decorated function are Tensors which will
  be batched along their first dimension.

  SparseTensor is not supported. The return value of the decorated function
  must be a Tensor or a list/tuple of Tensors.

  Args:
    num_batch_threads: Number of scheduling threads for processing batches
      of work. Determines the number of batches processed in parallel.
    max_batch_size: Batch sizes will never be bigger than this.
    batch_timeout_micros: Maximum number of microseconds to wait before
      outputting an incomplete batch.
    allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
      does nothing. Otherwise, supplies a list of batch sizes, causing the op
      to pad batches up to one of those sizes. The entries must increase
      monotonically, and the final entry must equal max_batch_size.
    max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
    autograph: Whether to use autograph to compile Python and eager-style code
      for efficient graph-mode execution.

  Returns:
    The decorated function will return the unbatched computation output
    Tensors.
  """

  def decorator(fn):  # pylint: disable=missing-docstring

    def decorated(*args):  # pylint: disable=missing-docstring

      # Wrap the user function so it can be traced into a graph function.
      @function.defun(autograph=autograph)
      def computation(*computation_args):
        return fn(*computation_args)

      # Trace a concrete function whose input signature matches the actual
      # arguments, so the batching op can invoke it on the combined batch.
      computation = computation.get_concrete_function(
          *[tensor_spec.TensorSpec(dtype=x.dtype, shape=x.shape, name=str(i))
            for i, x in enumerate(args)])

      with ops.name_scope("batch") as name:
        for a in args:
          if not isinstance(a, ops.Tensor):
            raise ValueError("All arguments to functions decorated with "
                             "`batch_function` are supposed to be Tensors; "
                             "found %s" % repr(a))
        # Emit the BatchFunction op, which gathers concurrent calls into one
        # batch, runs `computation` once, and splits the result per caller.
        return gen_batch_ops.batch_function(
            num_batch_threads=num_batch_threads,
            max_batch_size=max_batch_size,
            batch_timeout_micros=batch_timeout_micros,
            allowed_batch_sizes=allowed_batch_sizes,
            max_enqueued_batches=max_enqueued_batches,
            shared_name=name,
            f=computation,
            in_tensors=list(args),
            captured_tensors=computation.captured_inputs,
            Tout=[o.dtype for o in computation.outputs])

    return decorated

  return decorator
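

# A minimal usage sketch (an illustrative addition, not part of the module's
# public surface; the names `computation`, `inp`, and `result` are
# hypothetical). It builds a graph-mode program in which concurrent
# `session.run` calls feeding `inp` would be gathered into one batch,
# `computation` would run once on the combined batch, and each caller would
# receive its own slice of the output. Guarded so it never runs on import.
if __name__ == "__main__":
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops

  with ops.Graph().as_default():

    @batch_function(num_batch_threads=1, max_batch_size=10,
                    batch_timeout_micros=100000)  # flush after at most 100ms
    def computation(in_t):
      return in_t + 1

    # Each call feeds a "batch" of size 1; the BatchFunction op concatenates
    # concurrent calls along the first axis before running `computation`.
    inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
    result = computation(inp)
    with session_lib.Session() as sess:
      # This single call waits out the timeout and gets back its slice, [3].
      print(sess.run(result, feed_dict={inp: [2]}))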