# TensorFlow Bazel configuration file.
# This file tries to group and simplify build options for TensorFlow
#
# ----CONFIG OPTIONS----
# Android options:
#    android:
#    android_arm:
#    android_arm64:
#    android_x86:
#    android_x86_64:
#
# iOS options:
#     ios:
#     ios_armv7:
#     ios_arm64:
#     ios_i386:
#     ios_x86_64:
#     ios_fat:
#
# Macosx options
#     darwin_arm64:
#
# Compiler options:
#     cuda_clang:             Use clang when building CUDA code.
#     avx_linux:              Build with avx instruction set on linux.
#     avx2_linux:             Build with avx2 instruction set on linux.
#     native_arch_linux:      Build with instruction sets available to the host machine on linux
#     avx_win:                Build with avx instruction set on windows
#     avx2_win:               Build with avx2 instruction set on windows
#
# Other build options:
#     short_logs:       Only log errors during build, skip warnings.
#     verbose_logs:     Show all compiler warnings during build.
#     monolithic:       Build all TF C++ code into a single shared object.
#     dynamic_kernels:  Try to link all kernels dynamically (experimental).
#     libc++:           Link against libc++ instead of stdlibc++
#     asan:             Build with the clang address sanitizer
#     msan:             Build with the clang memory sanitizer
#     ubsan:            Build with the clang undefined behavior sanitizer
#     dbg:              Build with debug info
#
#
# TF version options;
#     v1: Build TF V1 (without contrib)
#     v2: Build TF v2
#
# Feature and Third party library support options:
#     xla:          Build TF with XLA
#     tpu:          Build TF with TPU support
#     cuda:         Build with full cuda support.
#     rocm:         Build with AMD GPU support (rocm).
#     mkl:          Enable full mkl support.
#     tensorrt:     Enable Tensorrt support.
#     numa:         Enable numa using hwloc.
#     noaws:        Disable AWS S3 storage support
#     nogcp:        Disable GCS support.
#     nohdfs:       Disable hadoop hdfs support.
#     nonccl:       Disable nccl support.
#
#
# Remote build execution options (only configured to work with TF team projects for now.)
#     rbe:       General RBE options shared by all flavors.
#     rbe_linux: General RBE options used on all linux builds.
#     rbe_win:   General RBE options used on all windows builds.
#
#     rbe_cpu_linux:                  RBE options to build with only CPU support.
#     rbe_linux_cuda_nvcc_py*:        RBE options to build with GPU support using nvcc.
#
#     rbe_linux_py3:        Linux Python 3 RBE config
#
#     rbe_win_py37: Windows Python 3.7 RBE config
#     rbe_win_py38: Windows Python 3.8 RBE config
#     rbe_win_py39: Windows Python 3.9 RBE config
#     rbe_win_py310: Windows Python 3.10 RBE config
#
#     tensorflow_testing_rbe_linux: RBE options to use RBE with tensorflow-testing project on linux
#     tensorflow_testing_rbe_win:   RBE options to use RBE with tensorflow-testing project on windows
#
#     rbe_lite_linux: RBE options to build TF Lite.
#
# Embedded Linux options (experimental and only tested with TFLite build yet)
#     elinux:          General Embedded Linux options shared by all flavors.
#     elinux_aarch64:  Embedded Linux options for aarch64 (ARM64) CPU support.
#     elinux_armhf:    Embedded Linux options for armhf (ARMv7) CPU support.
#
# Release build options (for all operating systems)
#     release_base:                    Common options for all builds on all operating systems.
#     release_gpu_base:                Common options for GPU builds on Linux and Windows.
#     release_cpu_linux:               Toolchain and CUDA options for Linux CPU builds.
#     release_cpu_macos:               Toolchain and CUDA options for MacOS CPU builds.
#     release_gpu_linux:               Toolchain and CUDA options for Linux GPU builds.
#     release_cpu_windows:             Toolchain and CUDA options for Windows CPU builds.
#     release_gpu_windows:             Toolchain and CUDA options for Windows GPU builds.

# Default build options. These are applied first and unconditionally.

# For projects which use TensorFlow as part of a Bazel build process, putting
# nothing in a bazelrc will default to a monolithic build. The following line
# opts in to modular op registration support by default.
build --define framework_shared_object=true

build --define=use_fast_cpp_protos=true
build --define=allow_oversize_protos=true

build --spawn_strategy=standalone
build -c opt

# Make Bazel print out all options from rc files.
build --announce_rc

# TODO(mihaimaruseac): Document this option or remove if no longer needed
build --define=grpc_no_ares=true

# See https://github.com/bazelbuild/bazel/issues/7362 for information on what
# --incompatible_remove_legacy_whole_archive flag does.
# This flag is set to true in Bazel 1.0 and newer versions. We tried to migrate
# Tensorflow to the default, however test coverage wasn't enough to catch the
# errors.
# There is ongoing work on Bazel team's side to provide support for transitive
# shared libraries. As part of migrating to transitive shared libraries, we
# hope to provide a better mechanism for control over symbol exporting, and
# then tackle this issue again.
#
# TODO: Remove this line once TF doesn't depend on Bazel wrapping all library
# archives in -whole_archive -no_whole_archive.
build --noincompatible_remove_legacy_whole_archive

# TODO(mihaimaruseac): Document this option or remove if no longer needed
build --enable_platform_specific_config

# Enable XLA support by default.
build --define=with_xla_support=true

# TODO(mihaimaruseac): Document this option or remove if no longer needed
build --config=short_logs

# TODO(mihaimaruseac): Document this option or remove if no longer needed
build --config=v2

# Disable AWS/HDFS support by default
build --define=no_aws_support=true
build --define=no_hdfs_support=true

# TF now has `cc_shared_library` targets, so it needs the experimental flag
# TODO(rostam): Remove when `cc_shared_library` is enabled by default
build --experimental_cc_shared_library

# cc_shared_library ensures no library is linked statically more than once.
build --experimental_link_static_libraries_once=false

# Default options should come above this line.

# Allow builds using libc++ as a linker library
# This is mostly for OSSFuzz, so we also pass in the flags from environment to clean build file
build:libc++ --action_env=CC
build:libc++ --action_env=CXX
build:libc++ --action_env=CXXFLAGS=-stdlib=libc++
build:libc++ --action_env=PATH
build:libc++ --define force_libcpp=enabled
build:libc++ --linkopt -fuse-ld=lld

# Android configs. Bazel needs to have --cpu and --fat_apk_cpu both set to the
# target CPU to build transient dependencies correctly. See
# https://docs.bazel.build/versions/master/user-manual.html#flag--fat_apk_cpu
build:android --crosstool_top=//external:android/crosstool
build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:android_arm --config=android
build:android_arm --cpu=armeabi-v7a
build:android_arm --fat_apk_cpu=armeabi-v7a
build:android_arm64 --config=android
build:android_arm64 --cpu=arm64-v8a
build:android_arm64 --fat_apk_cpu=arm64-v8a
build:android_x86 --config=android
build:android_x86 --cpu=x86
build:android_x86 --fat_apk_cpu=x86
build:android_x86_64 --config=android
build:android_x86_64 --cpu=x86_64
build:android_x86_64 --fat_apk_cpu=x86_64

# Sets the default Apple platform to macOS.
build:macos --apple_platform_type=macos

# gRPC on MacOS requires this #define
build:macos --copt=-DGRPC_BAZEL_BUILD

# Settings for MacOS on ARM CPUs.
build:macos_arm64 --cpu=darwin_arm64
build:macos_arm64 --macos_minimum_os=11.0

# iOS configs for each architecture and the fat binary builds.
build:ios --apple_platform_type=ios
build:ios --apple_bitcode=embedded --copt=-fembed-bitcode
build:ios --copt=-Wno-c++11-narrowing
build:ios_armv7 --config=ios
build:ios_armv7 --cpu=ios_armv7
build:ios_arm64 --config=ios
build:ios_arm64 --cpu=ios_arm64
build:ios_sim_arm64 --config=ios
build:ios_sim_arm64 --cpu=ios_sim_arm64
build:ios_i386 --config=ios
build:ios_i386 --cpu=ios_i386
build:ios_x86_64 --config=ios
build:ios_x86_64 --cpu=ios_x86_64
build:ios_fat --config=ios
build:ios_fat --ios_multi_cpus=armv7,arm64,i386,x86_64

# Config to use a mostly-static build and disable modular op registration
# support (this will revert to loading TensorFlow with RTLD_GLOBAL in Python).
# By default, TensorFlow will build with a dependence on
# //tensorflow:libtensorflow_framework.so.
build:monolithic --define framework_shared_object=false
build:monolithic --experimental_link_static_libraries_once=false  # b/229868128

# Please note that MKL on MacOS or windows is still not supported.
# If you would like to use a local MKL instead of downloading, please set the
# environment variable "TF_MKL_ROOT" every time before build.
build:mkl --define=build_with_mkl=true --define=enable_mkl=true
build:mkl --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl --define=build_with_openmp=true
build:mkl -c opt

# config to build OneDNN backend with a user specified threadpool.
build:mkl_threadpool --define=build_with_mkl=true --define=enable_mkl=true
build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl_threadpool --define=build_with_mkl_opensource=true
build:mkl_threadpool -c opt

# Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL).
build:mkl_aarch64 --define=build_with_mkl_aarch64=true
build:mkl_aarch64 --define=build_with_openmp=true
build:mkl_aarch64 --define=build_with_acl=true
build:mkl_aarch64 -c opt

# Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL).
# with Eigen threadpool support
build:mkl_aarch64_threadpool --define=build_with_mkl_aarch64=true
build:mkl_aarch64_threadpool -c opt

# This config refers to building CUDA op kernels with nvcc.
build:cuda --repo_env TF_NEED_CUDA=1
build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
build:cuda --@local_config_cuda//:enable_cuda

# This config refers to building CUDA op kernels with clang.
build:cuda_clang --config=cuda
build:cuda_clang --repo_env TF_CUDA_CLANG=1
build:cuda_clang --@local_config_cuda//:cuda_compiler=clang

# Debug config
build:dbg -c dbg
# Only include debug info for files under tensorflow/, excluding kernels, to
# reduce the size of the debug info in the binary. This is because if the debug
# sections in the ELF binary are too large, errors can occur. See
# https://github.com/tensorflow/tensorflow/issues/48919.
# Users can still include debug info for a specific kernel, e.g. with:
#     --config=dbg --per_file_copt=+tensorflow/core/kernels/identity_op.*@-g
build:dbg --per_file_copt=+.*,-tensorflow.*@-g0
build:dbg --per_file_copt=+tensorflow/core/kernels.*@-g0
# for now, disable arm_neon. see: https://github.com/tensorflow/tensorflow/issues/33360
build:dbg --cxxopt -DTF_LITE_DISABLE_X86_NEON
# AWS SDK must be compiled in release mode. see: https://github.com/tensorflow/tensorflow/issues/37498
build:dbg --copt -DDEBUG_BUILD

# Config to build TPU backend
build:tpu --define=with_tpu_support=true

build:tensorrt --repo_env TF_NEED_TENSORRT=1

build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain
build:rocm --define=using_rocm_hipcc=true
build:rocm --define=tensorflow_mkldnn_contraction_kernel=0
build:rocm --repo_env TF_NEED_ROCM=1
build:rocm --experimental_link_static_libraries_once=false  # b/230048163

# Options extracted from configure script
build:numa --define=with_numa_support=true

# Options to disable default on features
build:noaws --define=no_aws_support=true
build:nogcp --define=no_gcp_support=true
build:nohdfs --define=no_hdfs_support=true
build:nonccl --define=no_nccl_support=true

build:stackdriver_support --define=stackdriver_support=true

# Modular TF build options
build:dynamic_kernels --define=dynamic_loaded_kernels=true
build:dynamic_kernels --copt=-DAUTOLOAD_DYNAMIC_KERNELS

# Don't trigger --config=<host platform> when cross-compiling.
build:android --noenable_platform_specific_config
build:ios --noenable_platform_specific_config

# Suppress C++ compiler warnings, otherwise build logs become 10s of MBs.
build:android --copt=-w
build:ios --copt=-w
build:linux --copt=-w
build:linux --host_copt=-w
build:macos --copt=-w
build:windows --copt=/W0
build:windows --host_copt=/W0


# On Windows, `__cplusplus` is wrongly defined without this switch
# See https://devblogs.microsoft.com/cppblog/msvc-now-correctly-reports-__cplusplus/
build:windows --copt=/Zc:__cplusplus
build:windows --host_copt=/Zc:__cplusplus

# Tensorflow uses M_* math constants that only get defined by MSVC headers if
# _USE_MATH_DEFINES is defined.
build:windows --copt=/D_USE_MATH_DEFINES
build:windows --host_copt=/D_USE_MATH_DEFINES

# Windows has a relatively short command line limit, which TF has begun to hit.
# See https://docs.bazel.build/versions/main/windows.html
build:windows --features=compiler_param_file

# Speed Windows compile times. Available in VS 16.4 (we are on 16.11). See
# https://groups.google.com/a/tensorflow.org/d/topic/build/SsW98Eo7l3o/discussion
build:windows --copt=/d2ReducedOptimizeHugeFunctions
build:windows --host_copt=/d2ReducedOptimizeHugeFunctions

# Default paths for TF_SYSTEM_LIBS
build:linux --define=PREFIX=/usr
build:linux --define=LIBDIR=$(PREFIX)/lib
build:linux --define=INCLUDEDIR=$(PREFIX)/include
build:linux --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
build:macos --define=PREFIX=/usr
build:macos --define=LIBDIR=$(PREFIX)/lib
build:macos --define=INCLUDEDIR=$(PREFIX)/include
build:macos --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
# TF_SYSTEM_LIBS do not work on windows.

# By default, build TF in C++ 17 mode.
build:android --cxxopt=-std=c++17
build:android --host_cxxopt=-std=c++17
build:ios --cxxopt=-std=c++17
build:ios --host_cxxopt=-std=c++17
build:linux --cxxopt=-std=c++17
build:linux --host_cxxopt=-std=c++17
build:macos --cxxopt=-std=c++17
build:macos --host_cxxopt=-std=c++17
build:windows --cxxopt=/std:c++17
build:windows --host_cxxopt=/std:c++17

# On windows, we still link everything into a single DLL.
build:windows --config=monolithic

# On linux, we dynamically link small amount of kernels
build:linux --config=dynamic_kernels

# Make sure to include as little of windows.h as possible
build:windows --copt=-DWIN32_LEAN_AND_MEAN
build:windows --host_copt=-DWIN32_LEAN_AND_MEAN
build:windows --copt=-DNOGDI
build:windows --host_copt=-DNOGDI

# MSVC (Windows): Standards-conformant preprocessor mode
# See https://docs.microsoft.com/en-us/cpp/preprocessor/preprocessor-experimental-overview
build:windows --copt=/experimental:preprocessor
build:windows --host_copt=/experimental:preprocessor

# Misc build options we need for windows.
build:windows --linkopt=/DEBUG
build:windows --host_linkopt=/DEBUG
build:windows --linkopt=/OPT:REF
build:windows --host_linkopt=/OPT:REF
build:windows --linkopt=/OPT:ICF
build:windows --host_linkopt=/OPT:ICF

# Verbose failure logs when something goes wrong
build:windows --verbose_failures

# Work around potential issues with large command lines on windows.
# See: https://github.com/bazelbuild/bazel/issues/5163
# NOTE(review): duplicates the compiler_param_file line set earlier for
# :windows; harmless (bazel tolerates repeats) but could be consolidated.
build:windows --features=compiler_param_file

# On windows, we never cross compile
build:windows --distinct_host_configuration=false
# On linux, don't cross compile by default
build:linux --distinct_host_configuration=false

# Do not risk cache corruption. See:
# https://github.com/bazelbuild/bazel/issues/3360
build:linux --experimental_guard_against_concurrent_changes

# Configure short or long logs
build:short_logs --output_filter=DONT_MATCH_ANYTHING
build:verbose_logs --output_filter=

# Instruction set optimizations
# TODO(gunan): Create a feature in toolchains for avx/avx2 to
#   avoid having to define linux/win separately.
build:avx_linux --copt=-mavx
build:avx_linux --host_copt=-mavx
build:avx2_linux --copt=-mavx2
build:native_arch_linux --copt=-march=native
# NOTE(review): MSVC documents this switch as /arch:AVX (colon, not equals);
# "/arch=AVX" looks suspect -- verify before relying on these configs.
build:avx_win --copt=/arch=AVX
build:avx2_win --copt=/arch=AVX2

# Options to build TensorFlow 1.x or 2.x.
build:v1 --define=tf_api_version=1 --action_env=TF2_BEHAVIOR=0
build:v2 --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1

# Disable XLA on mobile.
build:xla     --define=with_xla_support=true # TODO: remove, it's on by default.
build:android --define=with_xla_support=false
build:ios     --define=with_xla_support=false

# BEGIN TF REMOTE BUILD EXECUTION OPTIONS
# Options when using remote execution
# WARNING: THESE OPTIONS WONT WORK IF YOU DO NOT HAVE PROPER AUTHENTICATION AND PERMISSIONS

# Flag to enable remote config
common --experimental_repo_remote_exec

build:rbe --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
build:rbe --google_default_credentials
build:rbe --bes_backend=buildeventservice.googleapis.com
build:rbe --bes_results_url="https://source.cloud.google.com/results/invocations"
build:rbe --bes_timeout=600s
build:rbe --define=EXECUTOR=remote
build:rbe --distinct_host_configuration=false
build:rbe --flaky_test_attempts=3
build:rbe --jobs=800
build:rbe --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:rbe --remote_timeout=3600
build:rbe --spawn_strategy=remote,worker,standalone,local
test:rbe --test_env=USER=anon
# Attempt to minimize the amount of data transfer between bazel and the remote
# workers:
build:rbe --remote_download_toplevel

build:rbe_linux_base --config=rbe
build:rbe_linux_base --action_env=PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin"

build:rbe_linux --config=rbe_linux_base
# Non-rbe settings we should include because we do not run configure
build:rbe_linux --config=avx_linux
# TODO(gunan): Check why we need this specified in rbe, but not in other builds.
build:rbe_linux --linkopt=-lrt
build:rbe_linux --host_linkopt=-lrt
build:rbe_linux --linkopt=-lm
build:rbe_linux --host_linkopt=-lm

# Use the GPU toolchain until the CPU one is ready.
# https://github.com/bazelbuild/bazel/issues/13623
build:rbe_cpu_linux_base --host_crosstool_top="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_cpu_linux_base --crosstool_top="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_cpu_linux_base --extra_toolchains="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_cpu_linux_base --extra_execution_platforms="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_cpu_linux_base --host_platform="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_cpu_linux_base --platforms="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"

build:rbe_cpu_linux --config=rbe_linux
build:rbe_cpu_linux --config=rbe_cpu_linux_base

build:rbe_lite_linux --config=rbe_linux_base
build:rbe_lite_linux --config=rbe_cpu_linux_base
build:rbe_lite_linux --config=rbe_linux_py3_base
build:rbe_lite_linux --noexperimental_check_desugar_deps

build:rbe_linux_cuda_base --config=rbe_linux
build:rbe_linux_cuda_base --config=cuda
build:rbe_linux_cuda_base --config=tensorrt
build:rbe_linux_cuda_base --action_env=TF_CUDA_VERSION=11
build:rbe_linux_cuda_base --action_env=TF_CUDNN_VERSION=8
build:rbe_linux_cuda_base --repo_env=REMOTE_GPU_TESTING=1
# TensorRT 7 for CUDA 11.1 is compatible with CUDA 11.2, but requires
# libnvrtc.so.11.1. See https://github.com/NVIDIA/TensorRT/issues/1064.
# TODO(b/187962120): Remove when upgrading to TensorRT 8.
test:rbe_linux_cuda_base --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64"

build:rbe_linux_cuda11.2_nvcc_base --config=rbe_linux_cuda_base
build:rbe_linux_cuda11.2_nvcc_base --host_crosstool_top="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda11.2_nvcc_base --crosstool_top="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda11.2_nvcc_base --extra_toolchains="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_linux_cuda11.2_nvcc_base --extra_execution_platforms="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --host_platform="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --platforms="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl"
build:rbe_linux_cuda11.2_nvcc_py3.7 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7"
build:rbe_linux_cuda11.2_nvcc_py3.8 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8"
build:rbe_linux_cuda11.2_nvcc_py3.9 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"
build:rbe_linux_cuda11.2_nvcc_py3.10 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.10"

# Map default to CUDA 11.2.
build:rbe_linux_cuda_nvcc_py37 --config=rbe_linux_cuda11.2_nvcc_py3.7
build:rbe_linux_cuda_nvcc_py38 --config=rbe_linux_cuda11.2_nvcc_py3.8
build:rbe_linux_cuda_nvcc_py39 --config=rbe_linux_cuda11.2_nvcc_py3.9
build:rbe_linux_cuda_nvcc_py310 --config=rbe_linux_cuda11.2_nvcc_py3.10

# Deprecated configs that people might still use.
build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda_nvcc_py39
build:rbe_gpu_linux       --config=rbe_linux_cuda_nvcc

build:rbe_linux_cuda_clang_base --config=rbe_linux_cuda_base
build:rbe_linux_cuda_clang_base --repo_env TF_CUDA_CLANG=1
build:rbe_linux_cuda_clang_base --@local_config_cuda//:cuda_compiler=clang
build:rbe_linux_cuda_clang_base --crosstool_top="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda_clang_base --extra_toolchains="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_linux_cuda_clang_base --extra_execution_platforms="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --host_platform="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --platforms="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda"
build:rbe_linux_cuda_clang_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt"
build:rbe_linux_cuda_clang_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl"
build:rbe_linux_cuda_clang_py37 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7"
build:rbe_linux_cuda_clang_py38 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8"
build:rbe_linux_cuda_clang_py39 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"
build:rbe_linux_cuda_clang_py310 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-clang_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.10"

# ROCm
build:rbe_linux_rocm_base --config=rocm
build:rbe_linux_rocm_base --config=rbe_linux
build:rbe_linux_rocm_base --crosstool_top="@ubuntu20.04-gcc9_manylinux2014-rocm_config_rocm//crosstool:toolchain"
build:rbe_linux_rocm_base --extra_toolchains="@ubuntu20.04-gcc9_manylinux2014-rocm_config_rocm//crosstool:toolchain-linux-x86_64"
build:rbe_linux_rocm_base --extra_execution_platforms="@ubuntu20.04-gcc9_manylinux2014-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --host_platform="@ubuntu20.04-gcc9_manylinux2014-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --platforms="@ubuntu20.04-gcc9_manylinux2014-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --action_env=TF_ROCM_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-rocm_config_rocm"
build:rbe_linux_rocm_py3.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-rocm_config_python3.7"
build:rbe_linux_rocm_py3.8 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-rocm_config_python3.8"
build:rbe_linux_rocm_py3.9 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-rocm_config_python3.9"
build:rbe_linux_rocm_py3.10 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-rocm_config_python3.10"

# Linux CPU

build:rbe_linux_py3 --config=rbe_linux
build:rbe_linux_py3 --config=rbe_linux_py3_base
build:rbe_linux_py3_base --python_path="/usr/local/bin/python3.9"
build:rbe_linux_py3_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"

build:rbe_win --config=rbe
build:rbe_win --crosstool_top="//tensorflow/tools/toolchains/win/tf_win_06152022:toolchain"
build:rbe_win --extra_toolchains="//tensorflow/tools/toolchains/win/tf_win_06152022:cc-toolchain-x64_windows"
build:rbe_win --extra_execution_platforms="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --host_platform="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --platforms="//tensorflow/tools/toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe
build:rbe_win --experimental_strict_action_env=true

# TODO(gunan): Remove once we use MSVC 2019 with latest patches.
build:rbe_win --define=override_eigen_strong_inline=true

# Don't build the python zip archive in the RBE build.
build:rbe_win --remote_download_minimal
build:rbe_win --enable_runfiles
build:rbe_win --nobuild_python_zip

build:rbe_win_py37 --config=rbe
build:rbe_win_py37 --repo_env=TF_PYTHON_CONFIG_REPO="@windows_py37_config_python"
build:rbe_win_py37 --python_path=C:\\Python37\\python.exe

build:rbe_win_py38 --config=rbe
build:rbe_win_py38 --repo_env=PYTHON_BIN_PATH=C:\\Python38\\python.exe
build:rbe_win_py38 --repo_env=PYTHON_LIB_PATH=C:\\Python38\\lib\\site-packages
build:rbe_win_py38 --repo_env=TF_PYTHON_CONFIG_REPO=//tensorflow/tools/toolchains/win_1803/py38
build:rbe_win_py38 --python_path=C:\\Python38\\python.exe

build:rbe_win_py39 --config=rbe
build:rbe_win_py39 --repo_env=PYTHON_BIN_PATH=C:\\Python39\\python.exe
build:rbe_win_py39 --repo_env=PYTHON_LIB_PATH=C:\\Python39\\lib\\site-packages
build:rbe_win_py39 --repo_env=TF_PYTHON_CONFIG_REPO=//tensorflow/tools/toolchains/win_1803/py39
build:rbe_win_py39 --python_path=C:\\Python39\\python.exe

build:rbe_win_py310 --config=rbe
build:rbe_win_py310 --repo_env=PYTHON_BIN_PATH=C:\\Python310\\python.exe
build:rbe_win_py310 --repo_env=PYTHON_LIB_PATH=C:\\Python310\\lib\\site-packages
build:rbe_win_py310 --repo_env=TF_PYTHON_CONFIG_REPO=//tensorflow/tools/toolchains/win_1803/py310
build:rbe_win_py310 --python_path=C:\\Python310\\python.exe

# These you may need to change for your own GCP project.
build:tensorflow_testing_rbe --project_id=tensorflow-testing
common:tensorflow_testing_rbe_linux --remote_instance_name=projects/tensorflow-testing/instances/default_instance
build:tensorflow_testing_rbe_linux --config=tensorflow_testing_rbe
# Build GPU binaries for the RBE test machines (Tesla T4s).
build:tensorflow_testing_rbe_linux --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=sm_75

common:tensorflow_testing_rbe_win --remote_instance_name=projects/tensorflow-testing/instances/windows
build:tensorflow_testing_rbe_win --config=tensorflow_testing_rbe

# TFLite build configs for generic embedded Linux
build:elinux --crosstool_top=@local_config_embedded_arm//:toolchain
build:elinux --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:elinux_aarch64 --config=elinux
build:elinux_aarch64 --cpu=aarch64
build:elinux_aarch64 --distinct_host_configuration=true
build:elinux_armhf --config=elinux
build:elinux_armhf --cpu=armhf
build:elinux_armhf --distinct_host_configuration=true
build:elinux_armhf --copt -mfp16-format=ieee
# END TF REMOTE BUILD EXECUTION OPTIONS

# Config-specific options should come above this line.

599# Load rc file written by ./configure.
600try-import %workspace%/.tf_configure.bazelrc
601
602# Load rc file with user-specific options.
603try-import %workspace%/.bazelrc.user
604
605# Here are bazelrc configs for release builds
606build:release_base --config=v2
607build:release_base --distinct_host_configuration=false
608test:release_base --flaky_test_attempts=3
609test:release_base --test_size_filters=small,medium
610
611build:release_cpu_linux --config=release_base
612build:release_cpu_linux --config=avx_linux
613build:release_cpu_linux --crosstool_top="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
614test:release_cpu_linux --test_env=LD_LIBRARY_PATH
615
616build:release_cpu_macos --config=release_base
# NOTE(review): the Linux-named avx_linux config is reused here — presumably it
# only adds an -mavx copt that clang on macOS also accepts; confirm intended.
617build:release_cpu_macos --config=avx_linux
618
619build:release_gpu_base --config=cuda
620build:release_gpu_base --action_env=TF_CUDA_VERSION="11"
621build:release_gpu_base --action_env=TF_CUDNN_VERSION="8"
622build:release_gpu_base --repo_env=TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80"
623
624build:release_gpu_linux --config=release_cpu_linux
625build:release_gpu_linux --config=release_gpu_base
626build:release_gpu_linux --config=tensorrt
627build:release_gpu_linux --action_env=CUDA_TOOLKIT_PATH="/usr/local/cuda-11.2"
628build:release_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.2/lib64:/usr/local/tensorrt/lib"
629build:release_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/dt9/usr/bin/gcc"
630build:release_gpu_linux --crosstool_top=@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain
631
632build:release_cpu_windows --config=release_base
633build:release_cpu_windows --config=avx_win
634build:release_cpu_windows --define=no_tensorflow_py_deps=true
635
636build:release_gpu_windows --config=release_cpu_windows
637build:release_gpu_windows --config=release_gpu_base
638
639# Address sanitizer
640# CC=clang bazel build --config asan
641build:asan --strip=never
642build:asan --copt -fsanitize=address
643build:asan --copt -DADDRESS_SANITIZER
644build:asan --copt -g
645build:asan --copt -O3
646build:asan --copt -fno-omit-frame-pointer
647build:asan --linkopt -fsanitize=address
648
649# Memory sanitizer
650# CC=clang bazel build --config msan
651build:msan --strip=never
652build:msan --copt -fsanitize=memory
653build:msan --copt -DMEMORY_SANITIZER
654build:msan --copt -g
655build:msan --copt -O3
656build:msan --copt -fno-omit-frame-pointer
657build:msan --linkopt -fsanitize=memory
658
659# Undefined Behavior Sanitizer
660# CC=clang bazel build --config ubsan
661build:ubsan --strip=never
662build:ubsan --copt -fsanitize=undefined
663build:ubsan --copt -DUNDEFINED_BEHAVIOR_SANITIZER
664build:ubsan --copt -g
665build:ubsan --copt -O3
666build:ubsan --copt -fno-omit-frame-pointer
667build:ubsan --linkopt -fsanitize=undefined
668build:ubsan --linkopt -lubsan
669
670# TFRT integration is disabled by default: its packages are deleted from the
670# build. Note that --config=tfrt below currently deletes the same packages
670# while the JitRt backend migrates to XLA (see TODO).
671build      --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/ir,tensorflow/compiler/mlir/tfrt/tests/analysis,tensorflow/compiler/mlir/tfrt/tests/jit,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_tfrt,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_jitrt,tensorflow/compiler/mlir/tfrt/tests/tf_to_corert,tensorflow/compiler/mlir/tfrt/tests/tf_to_tfrt_data,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/common,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/graph_executor,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils
672# TODO(b/240450920): We are in the process of migrating JitRt backend to XLA
673# and while we are doing this we can't keep it buildable/testable in OSS.
674build:tfrt --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/ir,tensorflow/compiler/mlir/tfrt/tests/analysis,tensorflow/compiler/mlir/tfrt/tests/jit,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_tfrt,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_jitrt,tensorflow/compiler/mlir/tfrt/tests/tf_to_corert,tensorflow/compiler/mlir/tfrt/tests/tf_to_tfrt_data,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/common,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/graph_executor,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils
675