# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Helper functions for creating partitioned variables.

This is a convenient abstraction to partition a large variable across
multiple smaller variables that can be assigned to different devices.

The full variable can be reconstructed by concatenating the smaller variables.
Using partitioned variables instead of a single variable is mostly a
performance choice. It however also has an impact on:

1. Random initialization, as the random number generator is called once per
   slice
2. Updates, as they happen in parallel across slices

A key design goal is to allow a different graph to repartition a variable
with the same name but different slicings, including possibly no partitions.

TODO(touts): If an initializer provides a seed, the seed must be changed
deterministically for each slice, maybe by adding one to it, otherwise each
slice will use the same values. Maybe this can be done by passing the
slice offsets to the initializer functions.

Typical usage:

```python
# Create a list of partitioned variables with:
vs = create_partitioned_variables(
    <shape>, <slicing>, <initializer>, name=<optional-name>)

# Pass the list as inputs to embedding_lookup for sharded, parallel lookup:
y = embedding_lookup(vs, ids, partition_strategy="div")

# Or fetch the variables in parallel to speed up large matmuls:
z = matmul(x, concat(slice_dim, vs))
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export

__all__ = [
    "create_partitioned_variables",
    "variable_axis_size_partitioner",
    "min_max_variable_partitioner",
    "fixed_size_partitioner",
]


@tf_export(v1=["variable_axis_size_partitioner"])
def variable_axis_size_partitioner(
    max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None):
  """Get a partitioner for VariableScope to keep shards below `max_shard_bytes`.

  This partitioner will shard a Variable along one axis, attempting to keep
  the maximum shard size below `max_shard_bytes`. In practice, this is not
  always possible when sharding along only one axis. When this happens,
  the axis is sharded as much as possible (i.e., every slice along that axis
  becomes a separate shard).

  If the partitioner hits the `max_shards` limit, then each shard may end up
  larger than `max_shard_bytes`. By default `max_shards` equals `None` and no
  limit on the number of shards is enforced.
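
  For example, a minimal usage sketch (illustrative only; the scope and
  variable names below are placeholders, and TF1-style `tf.compat.v1` graph
  code is assumed):

  ```python
  import tensorflow as tf

  partitioner = tf.compat.v1.variable_axis_size_partitioner(
      max_shard_bytes=(64 << 20) - 1)
  with tf.compat.v1.variable_scope("embeddings", partitioner=partitioner):
    embeddings = tf.compat.v1.get_variable(
        "weights", shape=[100000, 256], dtype=tf.float32)
  ```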

  One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost
  `64MB`, to keep below the protobuf byte limit.

  Args:
    max_shard_bytes: The maximum size any given shard is allowed to be.
    axis: The axis to partition along. Default: outermost axis.
    bytes_per_string_element: If the `Variable` is of type string, this
      provides an estimate of how large each scalar in the `Variable` is.
    max_shards: An `int`; the maximum number of shards to create, taking
      precedence over `max_shard_bytes`.

  Returns:
    A partition function usable as the `partitioner` argument to
    `variable_scope` and `get_variable`.

  Raises:
    ValueError: If any of the byte counts are non-positive.
  """
  if max_shard_bytes < 1 or bytes_per_string_element < 1:
    raise ValueError(
        "Both max_shard_bytes and bytes_per_string_element must be positive.")
  if max_shards and max_shards < 1:
    raise ValueError(
        "max_shards must be positive.")

  def _partitioner(shape, dtype):
    """Partitioner that keeps shards below `max_shard_bytes` when possible.

    Args:
      shape: A `TensorShape`.
      dtype: A `DType`.

    Returns:
      A list giving the number of partitions for each axis of `shape`.

    Raises:
      ValueError: If shape is not a fully defined `TensorShape` or dtype is not
        a `DType`.
    """
    if not isinstance(shape, tensor_shape.TensorShape):
      raise ValueError("shape is not a TensorShape: %s" % shape)
    if not shape.is_fully_defined():
      raise ValueError("shape is not fully defined: %s" % shape)
    if not isinstance(dtype, dtypes.DType):
      raise ValueError("dtype is not a DType: %s" % dtype)

    if dtype.base_dtype == dtypes.string:
      element_size = bytes_per_string_element
    else:
      element_size = dtype.size

    partitions = [1] * shape.ndims
    bytes_per_slice = 1.0 * (
        shape.num_elements() / shape.dims[axis].value) * element_size
    # How many slices can we fit on one shard of size at most max_shard_bytes?
    # At least one slice is required.
    slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))
    # How many shards do we need for axis given that each shard fits
    # slices_per_shard slices from a total of shape[axis] slices?
    axis_shards = int(math.ceil(
        1.0 * shape.dims[axis].value / slices_per_shard))
    if max_shards:
      axis_shards = min(max_shards, axis_shards)

    partitions[axis] = axis_shards

    return partitions

  return _partitioner


@tf_export(v1=["min_max_variable_partitioner"])
def min_max_variable_partitioner(max_partitions=1, axis=0,
                                 min_slice_size=256 << 10,
                                 bytes_per_string_element=16):
  """Partitioner to allocate minimum size per slice.

  Returns a partitioner that partitions the variable of given shape and dtype
  such that each partition has at least `min_slice_size` bytes of the
  variable. The maximum number of such partitions (upper bound) is given by
  `max_partitions`.

  Args:
    max_partitions: Upper bound on the number of partitions. Defaults to 1.
    axis: Axis along which to partition the variable. Defaults to 0.
    min_slice_size: Minimum size (in bytes) of the variable slice per
      partition. Defaults to 256KB.
    bytes_per_string_element: If the `Variable` is of type string, this
      provides an estimate of how large each scalar in the `Variable` is.

  Returns:
    A partition function usable as the `partitioner` argument to
    `variable_scope` and `get_variable`.
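
  For example, an illustrative sketch (the scope name, variable name, and
  sizes below are arbitrary, not part of the API):

  ```python
  import tensorflow as tf

  partitioner = tf.compat.v1.min_max_variable_partitioner(
      max_partitions=8, min_slice_size=256 << 10)
  with tf.compat.v1.variable_scope("softmax", partitioner=partitioner):
    weights = tf.compat.v1.get_variable("weights", shape=[1024, 1024])
  # A float32 [1024, 1024] variable is 4MB, and 4MB / 256KB = 16 slices,
  # capped at max_partitions, so `weights` gets 8 partitions along axis 0.
  ```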

  """
  def _partitioner(shape, dtype):
    """Returns a partitions list for a variable of given shape and dtype.

    Ex: Consider partitioning a variable of type float32 with
      shape=[1024, 1024].
      If `max_partitions` >= 16, this function would return
        [(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].
      If `max_partitions` < 16, this function would return
        [`max_partitions`, 1].

    Args:
      shape: Shape of the variable.
      dtype: Type of the variable.

    Returns:
      List of partitions for each axis (currently only one axis can be
      partitioned).

    Raises:
      ValueError: If axis to partition along does not exist for the variable.
    """
    if axis >= len(shape):
      raise ValueError("Can not partition variable along axis %d when shape is "
                       "only %s" % (axis, shape))
    if dtype.base_dtype == dtypes.string:
      bytes_per_element = bytes_per_string_element
    else:
      bytes_per_element = dtype.size
    total_size_bytes = shape.num_elements() * bytes_per_element
    partitions = total_size_bytes / min_slice_size
    partitions_list = [1] * len(shape)
    # We can not partition the variable beyond what its shape or
    # `max_partitions` allows.
    partitions_list[axis] = max(1, min(shape.dims[axis].value,
                                       max_partitions,
                                       int(math.ceil(partitions))))
    return partitions_list
  return _partitioner


@tf_export(v1=["fixed_size_partitioner"])
def fixed_size_partitioner(num_shards, axis=0):
  """Partitioner to specify a fixed number of shards along given axis.

  Args:
    num_shards: `int`, number of shards to partition variable.
    axis: `int`, axis to partition on.

  Returns:
    A partition function usable as the `partitioner` argument to
    `variable_scope` and `get_variable`.
  """
  def _partitioner(shape, **unused_args):
    partitions_list = [1] * len(shape)
    partitions_list[axis] = min(num_shards, shape.dims[axis].value)
    return partitions_list
  return _partitioner


@tf_export(v1=["create_partitioned_variables"])
@deprecation.deprecated(
    date=None,
    instructions="Use `tf.get_variable` with a partitioner set.")
def create_partitioned_variables(
    shape, slicing, initializer, dtype=dtypes.float32,
    trainable=True, collections=None, name=None, reuse=None):
  """Create a list of partitioned variables according to the given `slicing`.

  Currently only one dimension of the full variable can be sliced, and the
  full variable can be reconstructed by the concatenation of the returned
  list along that dimension.

  Args:
    shape: List of integers. The shape of the full variable.
    slicing: List of integers. How to partition the variable.
      Must be of the same length as `shape`. Each value
      indicates how many slices to create in the corresponding
      dimension. Presently only one of the values can be more than 1;
      that is, the variable can only be sliced along one dimension.

      For convenience, the requested number of partitions does not have to
      divide the corresponding dimension evenly. If it does not, the
      shapes of the partitions are incremented by 1 starting from partition
      0 until all slack is absorbed. The adjustment rules may change in the
      future, but as you can save/restore these variables with different
      slicing specifications this should not be a problem.
    initializer: A `Tensor` of shape `shape` or a variable initializer
      function. If a function, it will be called once for each slice,
      passing the shape and data type of the slice as parameters. The
      function must return a tensor with the same shape as the slice.
    dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
    trainable: If True, also add all the variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES`.
    collections: List of graph collections keys to add the variables to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    name: Optional name for the full variable. Defaults to
      `"PartitionedVariable"` and gets uniquified automatically.
    reuse: Boolean or `None`; if `True` and `name` is set, reuse previously
      created variables; if `False`, create new variables; if `None`, inherit
      the parent scope's reuse setting.

  Returns:
    A list of Variables corresponding to the slicing.

  Raises:
    ValueError: If any of the arguments is malformed.
  """
  if len(shape) != len(slicing):
    raise ValueError("The 'shape' and 'slicing' of a partitioned Variable "
                     "must have the same length: shape: %s, slicing: %s" %
                     (shape, slicing))
  if len(shape) < 1:
    raise ValueError("A partitioned Variable must have rank at least 1: "
                     "shape: %s" % shape)

  # Legacy: we are provided the slicing directly, so just pass it to
  # the partitioner.
  partitioner = lambda **unused_kwargs: slicing

  with variable_scope.variable_scope(
      name, "PartitionedVariable", reuse=reuse):
    # pylint: disable=protected-access
    partitioned_var = variable_scope._get_partitioned_variable(
        name=None,
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        trainable=trainable,
        partitioner=partitioner,
        collections=collections)
    return list(partitioned_var)
  # pylint: enable=protected-access
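

# Example (illustrative sketch only, not executed as part of this module): the
# partitioners above are typically passed to `tf.compat.v1.get_variable`
# through a variable scope. The scope and variable names here are placeholders.
#
#   import tensorflow as tf
#
#   partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=4)
#   with tf.compat.v1.variable_scope("embedding", partitioner=partitioner):
#     table = tf.compat.v1.get_variable("table", shape=[1000, 64])
#   # `table` is a PartitionedVariable backed by 4 variables of shape [250, 64].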