# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metadata and associated tooling."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu
from tensorflow.python.util.tf_export import tf_export

_PINGING_MASTER_TIMEOUT_IN_MS = 5 * 60 * 1000  # 5 min
_RETRY_TIMES = 12 * 24  # 1 day
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS = 300 * 1000  # 5 min

_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
_LOCAL_MASTERS = ('', 'local')


@tf_export('tpu.experimental.TPUSystemMetadata')
class TPUSystemMetadata(
    collections.namedtuple('TPUSystemMetadata', [
        'num_cores',
        'num_hosts',
        'num_of_cores_per_host',
        'topology',
        'devices',
    ])):
  """Describes some metadata about the TPU system.

  Attributes:
    num_cores: integer. Total number of TPU cores in the TPU system.
    num_hosts: integer. Total number of hosts (TPU workers) in the TPU
      system.
    num_of_cores_per_host: integer. Number of TPU cores per host (TPU
      worker).
    topology: an instance of `tf.tpu.experimental.Topology`, which describes
      the physical topology of the TPU system.
    devices: a tuple of strings, which describes all the TPU devices in the
      system.
  """

  def __new__(cls, num_cores, num_hosts, num_of_cores_per_host, topology,
              devices):
    return super(TPUSystemMetadata,
                 cls).__new__(cls, num_cores, num_hosts, num_of_cores_per_host,
                              topology, devices)
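

# Illustrative sketch, not part of this module's API: for a hypothetical
# single-host system with eight TPU cores, the metadata would look roughly
# like the following. The device strings are assumptions based on standard
# TensorFlow device naming; the invariant in the last line follows from the
# per-host consistency check in `_query_tpu_system_metadata` below.
#
#   metadata = TPUSystemMetadata(
#       num_cores=8,
#       num_hosts=1,
#       num_of_cores_per_host=8,
#       topology=None,
#       devices=tuple('/job:tpu_worker/replica:0/task:0/device:TPU:%d' % i
#                     for i in range(8)))
#   assert metadata.num_cores == (metadata.num_hosts *
#                                 metadata.num_of_cores_per_host)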


def _query_tpu_system_metadata(master_address, cluster_def=None,
                               query_topology=False):
  """Automatically detects the TPU system metadata in the system."""
  tpu_core_count = 0
  devices = []
  device_dict = collections.defaultdict(list)

  if context.executing_eagerly():
    logical_devices = config.list_logical_devices()

    # We want the output type to match in both eager and session mode.
    devices = [session_lib._DeviceAttributes(device_util.canonicalize(d.name),  # pylint: disable=protected-access
                                             d.device_type, 0, 0)
               for d in logical_devices]
  else:
    # TODO(b/120564445): Replace with standard library for retries.
    retry_count = 1
    while True:
      logging.info('Querying TensorFlow master (%s) for TPU system metadata.',
                   master_address)
      try:
        with ops.Graph().as_default():
          with session_lib.Session(
              master_address,
              config=get_session_config_with_timeout(
                  _PINGING_MASTER_TIMEOUT_IN_MS,
                  cluster_def)) as sess:
            devices = sess.list_devices()
            break
      except errors.DeadlineExceededError:
        msg = ('Failed to connect to the TensorFlow master. The TPU worker '
               'may not be ready (still scheduling) or the TensorFlow master '
               'address is incorrect: got (%s).' % master_address)

        # TODO(xiejw): For local or grpc master we might not need retry
        # logic here.
        if retry_count <= _RETRY_TIMES:
          logging.warning('%s', msg)
          logging.warning('Retrying (%d/%d).', retry_count, _RETRY_TIMES)
          retry_count += 1
        else:
          raise ValueError(msg)

  for device in devices:
    spec = tf_device.DeviceSpec.from_string(device.name)
    if spec.device_type == 'TPU':
      device_dict[spec.task].append(spec.device_index)
      tpu_core_count += 1

  num_of_cores_per_host = 0
  if tpu_core_count:
    num_cores_per_host_set = set(
        len(core_ids) for core_ids in device_dict.values())
    if len(num_cores_per_host_set) != 1:
      raise RuntimeError(
          'The number of TPU cores on each host is not the same. This should '
          'not happen. devices: {}'.format(devices))
    num_of_cores_per_host = num_cores_per_host_set.pop()

  topology = None
  if query_topology:
    if not tpu_core_count:
      raise RuntimeError(
          'Cannot find any TPU cores in the system (master address {}). '
          'This usually means the master address is incorrect or the '
          'TPU worker has some problems. Available devices: {}'.format(
              master_address, devices))

    topology = _obtain_topology(master_address, cluster_def)

  # We sort the metadata devices so that downstream users get a sorted list
  # for creating mirrored variables correctly.
  def _sort_key(device):
    spec = tf_device.DeviceSpec.from_string(device.name)
    return (spec.job, spec.replica, spec.task, spec.device_type,
            spec.device_index)
  devices = tuple(sorted(devices, key=_sort_key))

  metadata = TPUSystemMetadata(
      num_cores=tpu_core_count,
      num_hosts=len(device_dict),
      num_of_cores_per_host=num_of_cores_per_host,
      topology=topology,
      devices=devices)

  if tpu_core_count:
    logging.info('Found TPU system:')
    logging.info('*** Num TPU Cores: %d', metadata.num_cores)
    logging.info('*** Num TPU Workers: %d', metadata.num_hosts)
    logging.info('*** Num TPU Cores Per Worker: %d',
                 metadata.num_of_cores_per_host)
    for device in metadata.devices:
      logging.info('*** Available Device: %s', device)
  else:
    logging.info('Failed to find TPU: %s', metadata)
  return metadata
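

# Note on the retry budget in `_query_tpu_system_metadata` above: each
# attempt waits up to _PINGING_MASTER_TIMEOUT_IN_MS (5 min) for the master,
# and up to _RETRY_TIMES (12 * 24 = 288) retries are allowed, so the loop
# can block for roughly 288 * 5 min = 24 hours before raising ValueError.
#
# Hedged usage sketch (the function is module-private, and the master
# address below is a made-up example):
#
#   metadata = _query_tpu_system_metadata(
#       'grpc://10.0.0.1:8470', cluster_def=None, query_topology=False)
#   logging.info('cores=%d hosts=%d', metadata.num_cores, metadata.num_hosts)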


def _obtain_topology(master_address, cluster_def):
  """Obtains TPU fabric topology."""
  try:
    logging.info('Initializing TPU system (master: %s) to fetch topology '
                 'for model parallelism. This might take a while.',
                 master_address)
    with ops.Graph().as_default():
      session_config = get_session_config_with_timeout(
          _INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS, cluster_def)
      with session_lib.Session(
          master_address, config=session_config) as sess:
        topology = sess.run(tpu.initialize_system())
        return topology
  except errors.DeadlineExceededError:
    raise ValueError(
        'Failed to initialize TPU system with master (%s). '
        'Please double check the TPU system is functional.' % (
            master_address))


def get_session_config_with_timeout(timeout_in_ms, cluster_def):
  """Returns a session config proto with the given timeout and cluster."""
  config_proto = config_pb2.ConfigProto(
      operation_timeout_in_ms=timeout_in_ms, cluster_def=cluster_def)
  return config_proto


def master_job(master, cluster_def):
  """Returns the canonical job name to use to place TPU computations on.

  Args:
    master: A `string` representing the TensorFlow master to use.
    cluster_def: A ClusterDef object describing the TPU cluster.

  Returns:
    A string containing the job name, or None if no job should be specified.

  Raises:
    ValueError: If the user needs to specify a `tpu_job_name` because the
      job name cannot be inferred automatically, or if the user-specified
      job names are inappropriate.
  """
  if master in _LOCAL_MASTERS:
    return None

  if (not cluster_def or not cluster_def.job):
    return _DEFAULT_JOB_NAME
  job_names = set(job.name for job in cluster_def.job)
  if _DEFAULT_JOB_NAME in job_names:
    # b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
    raise ValueError('Currently, tpu_worker is not an allowed job name.')
  if len(job_names) == 1:
    return cluster_def.job[0].name
  if len(job_names) == 2:
    if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
      job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
      return job_names.pop()
  # TODO(b/67716447): Include more sophisticated heuristics.
  raise ValueError('Could not infer TPU job name.')
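

# Hedged example of how `master_job` infers the job name; the address and
# job name below are made up for illustration:
#
#   from tensorflow.core.protobuf import cluster_pb2
#
#   cluster_def = cluster_pb2.ClusterDef()
#   cluster_def.job.add().name = 'worker'
#
#   master_job('local', cluster_def)                 # -> None (local master)
#   master_job('grpc://10.0.0.1:8470', None)         # -> 'tpu_worker' (default)
#   master_job('grpc://10.0.0.1:8470', cluster_def)  # -> 'worker'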