# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy combinations for combinations.combine()."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy as mirrored_lib
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.framework import test_util as framework_test_util
from tensorflow.python.platform import flags
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import server_lib
from tensorflow.python.util.tf_export import tf_export

_TF_INTERNAL_API_PREFIX = "__internal__.distribute.combinations."

_did_connect_to_cluster = False
_topology = None
CollectiveAllReduceExtended = (
    collective_all_reduce_strategy.CollectiveAllReduceExtended)


def _version_chooser(tf1_cls, tf2_cls):
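  """Returns a creator that picks tf2_cls or tf1_cls based on tf2.enabled()."""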

  def creator(*args, **kwargs):
    if tf2.enabled():
      return tf2_cls(*args, **kwargs)
    return tf1_cls(*args, **kwargs)

  return creator


MirroredStrategy = _version_chooser(mirrored_lib.MirroredStrategyV1,
                                    mirrored_lib.MirroredStrategy)
CentralStorageStrategy = _version_chooser(
    central_storage_strategy.CentralStorageStrategyV1,
    central_storage_strategy.CentralStorageStrategy)
OneDeviceStrategy = _version_chooser(one_device_lib.OneDeviceStrategyV1,
                                     one_device_lib.OneDeviceStrategy)
# Only V2 CollectiveAllReduceStrategy combinations are supported.
CollectiveAllReduceStrategy = (
    collective_all_reduce_strategy.CollectiveAllReduceStrategy)


# pylint: disable=missing-docstring
def _get_tpu_strategy_creator(steps_per_run,
                              use_single_core=False,
                              enable_packed_variable=False,
                              **kwargs):

  def _create_tpu_strategy():
    FLAGS = flags.FLAGS  # pylint: disable=invalid-name
    global _did_connect_to_cluster
    global _topology

    try:
      # Attempt to locally discover the TPU. This will fail for Cloud TPU, in
      # which case we fall back to the values passed as flags.
      resolver = tpu_cluster_resolver.TPUClusterResolver()
      did_automatically_resolve = True
    except ValueError:
      did_automatically_resolve = False

      # These flags will be defined by tpu_test_wrapper.py.
      resolver = tpu_cluster_resolver.TPUClusterResolver(
          tpu=hasattr(FLAGS, "tpu") and FLAGS.tpu or "",
          zone=hasattr(FLAGS, "zone") and FLAGS.zone or None,
          project=hasattr(FLAGS, "project") and FLAGS.project or None,
      )

    # Only connect once per process, rather than per test method.
    if not _did_connect_to_cluster:
      if getattr(FLAGS, "tpu", "") or did_automatically_resolve:
        remote.connect_to_cluster(resolver)
        _did_connect_to_cluster = True
      _topology = tpu_strategy_util.initialize_tpu_system(resolver)

    device_assignment = None
    if use_single_core:
      device_assignment = device_assignment_lib.DeviceAssignment(
          _topology,
          core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)

    # Steps per run is only supported in TF 1.x
    if tf2.enabled():
      strategy = tpu_lib.TPUStrategy(resolver, device_assignment, **kwargs)
    else:
      strategy = tpu_lib.TPUStrategyV1(resolver, steps_per_run,
                                       device_assignment, **kwargs)
    strategy._enable_packed_variable_in_eager_mode = enable_packed_variable  # pylint: disable=protected-access
    return strategy

  return _create_tpu_strategy


def _mirrored_strategy_with_collective_key_base(devices):
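  # Bump the collective key base so that strategies created one after another
  # in the same process do not reuse collective instance keys.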
  mirrored_lib.MirroredStrategyV1._collective_key_base += 100000
  mirrored_lib.MirroredStrategy._collective_key_base += 100000
  return MirroredStrategy(devices)


def _mirrored_strategy_with_no_merge_call(devices):
  mirrored_lib.MirroredStrategyV1._collective_key_base += 100000
  mirrored_lib.MirroredStrategy._collective_key_base += 100000
  out = MirroredStrategy(devices)
  # Stub out merge call usage.
  out.extended._use_merge_call = lambda: False  # pylint: disable=protected-access
  return out


def _get_multi_worker_mirrored_creator(required_gpus, use_merge_call=True):
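  """Returns a creator for CollectiveAllReduceStrategy built from TF_CONFIG."""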

  def _create_multi_worker_mirrored():
    tf_config = cluster_resolver.TFConfigClusterResolver()
    master = tf_config.master()
    if tf_config.rpc_layer:
      # Strip off the rpc_layer prefix, e.g. "grpc://".
      master = master[len("%s://" % tf_config.rpc_layer):]
    resolver = cluster_resolver.SimpleClusterResolver(
        cluster_spec=tf_config.cluster_spec(),
        task_type=tf_config.task_type,
        task_id=tf_config.task_id,
        master=master,
        environment=tf_config.environment,
        num_accelerators={"GPU": required_gpus},
        rpc_layer=tf_config.rpc_layer or "grpc",
    )
    # Disable health check. We don't have a reliable way to shut down the
    # strategy (and thus the health check) at the end of a test. Turning on
    # health check causes some flakiness since we re-create part of the server
    # when creating a strategy, and our tests are capable of handling failures.
    CollectiveAllReduceExtended._enable_check_health = False  # pylint: disable=protected-access
    # Always create the strategy in eager mode so that it starts the server and
    # configures the eager context. The eager context can no longer be
    # configured after initialization.
    with context.eager_mode():
      strategy = CollectiveAllReduceStrategy(cluster_resolver=resolver)

    if not use_merge_call:
      strategy.extended._use_merge_call = lambda: False  # pylint: disable=protected-access
    # TODO(b/152320929): Wait for the cluster before proceeding, otherwise
    # collectives may hang if any worker launches collectives before the chief
    # creates the strategy.
    try:
      multi_process_runner.get_barrier().wait()
    except ValueError:
      # If the creator is called in the main process,
      # multi_process_runner.get_barrier() raises ValueError, which is safe to
      # ignore.
      pass
    return strategy

  return _create_multi_worker_mirrored

_ps_cluster = None
MAX_NUM_WORKER = 3
MAX_NUM_PS = 2


def get_cluster_def(num_workers, num_ps):
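  """Returns a cluster def with `num_workers` workers and `num_ps` ps servers.

  The in-process cluster is created only once, with the maximum number of
  servers, and each call returns a slice of its worker and ps address lists.
  """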
  if num_workers > MAX_NUM_WORKER or num_ps > MAX_NUM_PS:
    raise ValueError("Requesting more servers than the maximum, adjust "
                     "MAX_NUM_PS and MAX_NUM_WORKER")
  global _ps_cluster
  if _ps_cluster is None:
    _ps_cluster = multi_worker_test_base.create_in_process_cluster(
        num_workers=MAX_NUM_WORKER, num_ps=MAX_NUM_PS)
  return {
      "worker": _ps_cluster["worker"][:num_workers],
      "ps": _ps_cluster["ps"][:num_ps],
  }


def _get_ps_strategy_creator(
    num_workers, num_ps, required_gpus=0,
    variable_partitioner=sharded_variable.FixedShardsPartitioner(2)):
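  """Returns a creator for ParameterServerStrategyV2.

  In worker and ps subprocesses the creator starts a grpc server and blocks on
  `server.join()`; the chief (and XLA tests) get the strategy object directly.
  """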

  def _create_ps_strategy(resolver, variable_partitioner):
    return parameter_server_strategy_v2.ParameterServerStrategyV2(
        resolver, variable_partitioner=variable_partitioner)

  def _create_parameter_server():
    if framework_test_util.is_xla_enabled():
      # To address test failures caused by XLA with MultiProcessRunner,
      # continue to use an in-process cluster for XLA tests.
      cluster_def = multi_worker_test_base.create_in_process_cluster(
          num_workers=num_workers, num_ps=num_ps, rpc_layer="grpc")
      resolver = cluster_resolver.SimpleClusterResolver(
          server_lib.ClusterSpec(cluster_def),
          num_accelerators={"GPU": required_gpus},
          rpc_layer="grpc")
      return _create_ps_strategy(resolver, variable_partitioner)
    else:
      tf_config = cluster_resolver.TFConfigClusterResolver()
      cluster_def = tf_config.cluster_spec().as_dict()
      if not cluster_def:
        # When a MultiProcessRunner cluster is used, the cluster is not yet
        # created when the decorator is evaluated: the main thread invokes this
        # method before combinations.py sets up the MultiProcessRunner with its
        # worker and ps processes. Once setup is done, each subprocess invokes
        # this method again to get the actual strategy object. Returning None
        # for the main-thread invocation is fine, since that thread only
        # creates the MultiProcessRunner and runs the decorated tests inside
        # subprocesses.
        return None
      # MultiProcessRunner is already setup and this method is invoked from a
      # subprocess running the actual test.
      resolver = cluster_resolver.SimpleClusterResolver(
          server_lib.ClusterSpec(cluster_def),
          num_accelerators={"GPU": required_gpus},
          task_type=tf_config.task_type,
          task_id=tf_config.task_id,
          environment=tf_config.environment,
          rpc_layer=tf_config.rpc_layer or "grpc")
      if tf_config.task_type in ("worker", "ps"):
        worker_config = config_pb2.ConfigProto()
        worker_config.inter_op_parallelism_threads = 4  # max num_workers + 1
        server = server_lib.Server(
            cluster_def,
            job_name=tf_config.task_type,
            task_index=tf_config.task_id,
            protocol="grpc",
            config=worker_config)

        # Blocking the process that starts a server from exiting.
        server.join()

      return _create_ps_strategy(resolver, variable_partitioner)

  return _create_parameter_server


def _deferred_pool_runner(has_chief, num_workers, initializer=None):
  """Returns a callable that returns the pool runner.

  It creates the pool runner only upon first invocation. This avoids creating
  it when this file is imported.

  Args:
    has_chief: whether there should be a chief.
    num_workers: the number of workers excluding the chief.
    initializer: initializer of each process.

  Returns:
    A callable that returns the runner.
  """

  container = []

  def get_or_create():
    if not container:
      cluster_spec = multi_worker_test_base.create_cluster_spec(
          has_chief=has_chief,
          num_workers=num_workers,
          num_ps=0,
          has_eval=False)
      runner = multi_process_runner.MultiProcessPoolRunner(
          cluster_spec, initializer=initializer)
      container.append(runner)
    return container[0]

  return get_or_create


# We need to create the strategy in the initializer to start the server before
# any test runs.
_two_worker_pool = _deferred_pool_runner(
    has_chief=True,
    num_workers=1,
    initializer=_get_multi_worker_mirrored_creator(required_gpus=0))
_four_worker_pool = _deferred_pool_runner(
    has_chief=True,
    num_workers=3,
    initializer=_get_multi_worker_mirrored_creator(required_gpus=0))

# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU", lambda: OneDeviceStrategy("/cpu:0"), required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU", lambda: OneDeviceStrategy("/gpu:0"), required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1CPU",
    lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1GPU",
    lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/gpu:0"),
    required_gpus=1)
tpu_strategy = combinations.NamedDistribution(
    "TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
tpu_strategy_packed_var = combinations.NamedDistribution(
    "TPUPackedVar",
    _get_tpu_strategy_creator(steps_per_run=2, enable_packed_variable=True),
    required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
    "TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
    "TPUOneCore",
    _get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
    required_tpu=True)
tpu_strategy_one_step_one_core = combinations.NamedDistribution(
    "TPUOneStepOneCore",
    _get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),
    required_tpu=True)
cloud_tpu_strategy = combinations.NamedDistribution(
    "CloudTPU",
    _get_tpu_strategy_creator(steps_per_run=2),
    required_tpu=True,
    use_cloud_tpu=True)
mirrored_strategy_with_one_cpu = combinations.NamedDistribution(
    "Mirrored1CPU",
    lambda: _mirrored_strategy_with_collective_key_base(["/cpu:0"]))
mirrored_strategy_with_one_gpu = combinations.NamedDistribution(
    "Mirrored1GPU",
    lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0"]),
    required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
    "MirroredCPUAndGPU",
    lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
mirrored_strategy_with_two_cpus = combinations.NamedDistribution(
    "Mirrored2CPUs",
    lambda: _mirrored_strategy_with_collective_key_base(["/cpu:0", "/cpu:1"]),
    required_gpus=0)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
    "Mirrored2GPUs",
    lambda: _mirrored_strategy_with_collective_key_base(["/gpu:0", "/gpu:1"]),
    required_gpus=2)
mirrored_strategy_with_two_gpus_no_merge_call = combinations.NamedDistribution(
    "Mirrored2GPUsNoMergeCall",
    lambda: _mirrored_strategy_with_no_merge_call(["/gpu:0", "/gpu:1"]),
    required_physical_gpus=2)
# Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods.
# Deprecated, use mirrored_strategy_with_two_cpus instead.
mirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution(
    "Mirrored2CPU",
    lambda: _mirrored_strategy_with_collective_key_base(["/cpu:1", "/cpu:2"]))
mirrored_strategy_with_cpu_1_and_2.__doc__ = (
    """Mirrored strategy with 2 virtual CPUs.

    Should set up logical devices before use
    """)
central_storage_strategy_with_two_gpus = combinations.NamedDistribution(
    "CentralStorage2GPUs",
    lambda: CentralStorageStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)
central_storage_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
    "CentralStorageCPUAndGPU",
    lambda: CentralStorageStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
# chief + 1 worker, with CPU.
multi_worker_mirrored_2x1_cpu = combinations.NamedDistribution(
    "MultiWorkerMirrored2x1CPU",
    _get_multi_worker_mirrored_creator(required_gpus=0),
    has_chief=True,
    num_workers=1,
    pool_runner_fn=_two_worker_pool,
    no_xla=True,
)
# chief + 1 worker, with 1 GPU each.
multi_worker_mirrored_2x1_gpu = combinations.NamedDistribution(
    "MultiWorkerMirrored2x1GPU",
    _get_multi_worker_mirrored_creator(required_gpus=1),
    has_chief=True,
    num_workers=1,
    required_gpus=1,
    pool_runner_fn=_two_worker_pool,
    no_xla=True,
)
# chief + 1 worker, with 2 GPUs each.
multi_worker_mirrored_2x2_gpu = combinations.NamedDistribution(
    "MultiWorkerMirrored2x2GPU",
    _get_multi_worker_mirrored_creator(required_gpus=2),
    has_chief=True,
    num_workers=1,
    required_gpus=2,
    pool_runner_fn=_two_worker_pool,
    no_xla=True,
)
multi_worker_mirrored_2x2_gpu_no_merge_call = combinations.NamedDistribution(
    "MultiWorkerMirrored2x2GPUNoMergeCall",
    _get_multi_worker_mirrored_creator(
        required_gpus=2, use_merge_call=False),
    has_chief=True,
    num_workers=1,
    required_physical_gpus=2,
    pool_runner_fn=_two_worker_pool,
    no_xla=True,
)
# chief + 3 workers, with CPU.
multi_worker_mirrored_4x1_cpu = combinations.NamedDistribution(
    "MultiWorkerMirrored4x1CPU",
    _get_multi_worker_mirrored_creator(required_gpus=0),
    has_chief=True,
    num_workers=3,
    pool_runner_fn=_four_worker_pool,
    no_xla=True,
)


def parameter_server_strategy_fn(
    name, num_workers, num_ps, required_gpus=0,
    variable_partitioner=sharded_variable.FixedShardsPartitioner(2)):
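  """Returns a NamedDistribution that creates ParameterServerStrategyV2."""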
  return combinations.NamedDistribution(
      name,
      _get_ps_strategy_creator(
          num_workers=num_workers, num_ps=num_ps, required_gpus=required_gpus,
          variable_partitioner=variable_partitioner),
      required_gpus=required_gpus,
      num_workers=num_workers,
      has_chief=True,
      num_ps=num_ps)


parameter_server_strategy_3worker_2ps_cpu = parameter_server_strategy_fn(
    "ParameterServer3Worker2PSCPU", num_workers=3, num_ps=2)
parameter_server_strategy_1worker_2ps_cpu = parameter_server_strategy_fn(
    "ParameterServer1Worker2PSCPU", num_workers=1, num_ps=2)
parameter_server_strategy_3worker_2ps_1gpu = parameter_server_strategy_fn(
    "ParameterServer3Worker2PS1GPU", num_workers=3, num_ps=2, required_gpus=1)
parameter_server_strategy_1worker_2ps_1gpu = parameter_server_strategy_fn(
    "ParameterServer1Worker2PS1GPU", num_workers=1, num_ps=2, required_gpus=1)


graph_and_eager_modes = ["graph", "eager"]


# TODO(crccw): remove after tf-nightly picks up the new API.
def set_virtual_cpus_to_at_least(num_virtual_cpus):
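  """Sets up at least `num_virtual_cpus` logical CPU devices (legacy alias)."""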
  test_util.set_logical_devices_to_at_least("CPU", num_virtual_cpus)


strategies_minus_tpu = [
    default_strategy,
    one_device_strategy,
    one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    central_storage_strategy_with_gpu_and_cpu,
]

strategies_minus_default_and_tpu = [
    one_device_strategy,
    one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
]

tpu_strategies = [
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step,
    tpu_strategy_packed_var,
    cloud_tpu_strategy,
]

all_strategies_minus_default = strategies_minus_default_and_tpu + tpu_strategies

all_strategies = strategies_minus_tpu + tpu_strategies

two_replica_strategies = [
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    multi_worker_mirrored_2x1_cpu,
    multi_worker_mirrored_2x1_gpu,
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step,
    central_storage_strategy_with_gpu_and_cpu,
]

four_replica_strategies = [
    multi_worker_mirrored_2x2_gpu,
    multi_worker_mirrored_4x1_cpu,
]

# TODO(b/159831907): replace with two_replica_strategies after the tests using
# it work with MWMS.
multidevice_strategies = [
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step
]

multiworker_strategies = [
    multi_worker_mirrored_2x1_cpu, multi_worker_mirrored_2x1_gpu,
    multi_worker_mirrored_2x2_gpu
]


def strategy_minus_tpu_combinations():
  return combinations.combine(
      distribution=strategies_minus_tpu, mode=["graph", "eager"])


def tpu_strategy_combinations():
  return combinations.combine(distribution=tpu_strategies, mode=["graph"])


def all_strategy_combinations():
  return strategy_minus_tpu_combinations() + tpu_strategy_combinations()


def all_strategy_minus_default_and_tpu_combinations():
  return combinations.combine(
      distribution=[
          one_device_strategy, one_device_strategy_gpu,
          mirrored_strategy_with_gpu_and_cpu, mirrored_strategy_with_two_gpus
      ],
      mode=["graph", "eager"])


def all_strategy_combinations_minus_default():
  return (all_strategy_minus_default_and_tpu_combinations() +
          tpu_strategy_combinations())


tf_export(
    _TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_gpu_and_cpu",
    v1=[]).export_constant(__name__,
                           "central_storage_strategy_with_gpu_and_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_two_gpus",
    v1=[]).export_constant(__name__, "central_storage_strategy_with_two_gpus")
tf_export(
    _TF_INTERNAL_API_PREFIX + "cloud_tpu_strategy",
    v1=[]).export_constant(__name__, "cloud_tpu_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "default_strategy",
    v1=[]).export_constant(__name__, "default_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_cpu_1_and_2",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_cpu_1_and_2")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_gpu_and_cpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_gpu_and_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_cpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_one_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_gpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_one_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_two_gpus")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus_no_merge_call",
    v1=[]).export_constant(__name__,
                           "mirrored_strategy_with_two_gpus_no_merge_call")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_cpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_gpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x2_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu_no_merge_call",
    v1=[]).export_constant(__name__,
                           "multi_worker_mirrored_2x2_gpu_no_merge_call")
tf_export(
    _TF_INTERNAL_API_PREFIX + "one_device_strategy",
    v1=[]).export_constant(__name__, "one_device_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "one_device_strategy_gpu",
    v1=[]).export_constant(__name__, "one_device_strategy_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy",
    v1=[]).export_constant(__name__, "tpu_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "parameter_server_strategy_3worker_2ps_cpu",
    v1=[]).export_constant(__name__,
                           "parameter_server_strategy_3worker_2ps_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "parameter_server_strategy_1worker_2ps_cpu",
    v1=[]).export_constant(__name__,
                           "parameter_server_strategy_1worker_2ps_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "parameter_server_strategy_3worker_2ps_1gpu",
    v1=[]).export_constant(__name__,
                           "parameter_server_strategy_3worker_2ps_1gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "parameter_server_strategy_1worker_2ps_1gpu",
    v1=[]).export_constant(__name__,
                           "parameter_server_strategy_1worker_2ps_1gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy_one_core",
    v1=[]).export_constant(__name__, "tpu_strategy_one_core")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy_packed_var",
    v1=[]).export_constant(__name__, "tpu_strategy_packed_var")