# TensorFlow Bazel configuration file.
# This file tries to group and simplify build options for TensorFlow.
# (An example invocation combining several of these configs follows the
# option list below.)
#
# ----CONFIG OPTIONS----
# Android options:
#    android:
#    android_arm:
#    android_arm64:
#    android_x86:
#    android_x86_64:
#
# iOS options:
#     ios:
#     ios_armv7:
#     ios_arm64:
#     ios_i386:
#     ios_x86_64:
#     ios_fat:
#
# macOS options:
#     darwin_arm64:
#
# Compiler options:
#     cuda_clang:             Use clang when building CUDA code.
#     c++17:                  Build with C++17 options (links with libc++)
#     c++1z:                  Build with C++17 options (links with libc++)
#     c++17_gcc:              Build with C++17 options (links with libstdc++)
#     c++1z_gcc:              Build with C++17 options (links with libstdc++)
#     avx_linux:              Build with avx instruction set on linux.
#     avx2_linux:             Build with avx2 instruction set on linux.
#     native_arch_linux:      Build with instruction sets available to the host machine on linux.
#     avx_win:                Build with avx instruction set on windows.
#     avx2_win:               Build with avx2 instruction set on windows.
#
# Other build options:
#     short_logs:       Only log errors during build, skip warnings.
#     verbose_logs:     Show all compiler warnings during build.
#     monolithic:       Build all TF C++ code into a single shared object.
#     dynamic_kernels:  Try to link all kernels dynamically (experimental).
#     libc++:           Link against libc++ instead of libstdc++.
#     asan:             Build with the clang address sanitizer.
#     msan:             Build with the clang memory sanitizer.
#     ubsan:            Build with the clang undefined behavior sanitizer.
#     dbg:              Build with debug info.
#
#
# TF version options:
#     v1: Build TF V1 (without contrib).
#     v2: Build TF v2.
#
# Feature and third-party library support options:
#     xla:          Build TF with XLA.
#     tpu:          Build TF with TPU support.
#     cuda:         Build with full cuda support.
#     rocm:         Build with AMD GPU support (rocm).
#     mkl:          Enable full mkl support.
#     tensorrt:     Enable TensorRT support.
#     numa:         Enable numa using hwloc.
#     noaws:        Disable AWS S3 storage support.
#     nogcp:        Disable GCS support.
#     nohdfs:       Disable hadoop hdfs support.
#     nonccl:       Disable nccl support.
#
#
# Remote build execution options (only configured to work with TF team projects for now).
#     rbe:       General RBE options shared by all flavors.
#     rbe_linux: General RBE options used on all linux builds.
#     rbe_win:   General RBE options used on all windows builds.
#
#     rbe_cpu_linux:           RBE options to build with only CPU support.
#     rbe_linux_cuda_nvcc_py*: RBE options to build with GPU support using nvcc.
#
#     rbe_linux_py2: Linux Python 2 RBE config.
#     rbe_linux_py3: Linux Python 3 RBE config.
#
#     rbe_win_py37: Windows Python 3.7 RBE config.
#     rbe_win_py38: Windows Python 3.8 RBE config.
#
#     tensorflow_testing_rbe_linux: RBE options to use RBE with the tensorflow-testing project on linux.
#     tensorflow_testing_rbe_win:   RBE options to use RBE with the tensorflow-testing project on windows.
#
# Embedded Linux options (experimental; so far only tested with the TFLite build):
#     elinux:          General Embedded Linux options shared by all flavors.
#     elinux_aarch64:  Embedded Linux options for aarch64 (ARM64) CPU support.
#     elinux_armhf:    Embedded Linux options for armhf (ARMv7) CPU support.
#
# Release build options (for all operating systems):
#     release_base:        Common options for all builds on all operating systems.
#     release_gpu_base:    Common options for GPU builds on Linux and Windows.
#     release_cpu_linux:   Toolchain and CUDA options for Linux CPU builds.
#     release_cpu_macos:   Toolchain and CUDA options for macOS CPU builds.
#     release_gpu_linux:   Toolchain and CUDA options for Linux GPU builds.
#     release_cpu_windows: Toolchain and CUDA options for Windows CPU builds.
#     release_gpu_windows: Toolchain and CUDA options for Windows GPU builds.

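# A minimal illustrative invocation combining several of the configs listed
# above (the target is an example only; substitute whatever you actually
# build):
#   bazel build --config=avx_linux --config=cuda \
#     //tensorflow/tools/pip_package:build_pip_package
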
# Default build options. These are applied first and unconditionally.

# For projects which use TensorFlow as part of a Bazel build process, putting
# nothing in a bazelrc will default to a monolithic build. The following line
# opts in to modular op registration support by default.
build --define framework_shared_object=true

# Workaround for https://github.com/bazelbuild/bazel/issues/8772 with Bazel >= 0.29.1.
build --java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain
build --host_java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain

build --define=use_fast_cpp_protos=true
build --define=allow_oversize_protos=true

build --spawn_strategy=standalone
build -c opt

# Make Bazel print out all options from rc files.
build --announce_rc

build --define=grpc_no_ares=true

# See https://github.com/bazelbuild/bazel/issues/7362 for information on what
# the --incompatible_remove_legacy_whole_archive flag does.
# This flag is set to true in Bazel 1.0 and newer versions. We tried to migrate
# TensorFlow to the default; however, test coverage wasn't enough to catch the
# errors.
# There is ongoing work on the Bazel team's side to provide support for transitive
# shared libraries. As part of migrating to transitive shared libraries, we
# hope to provide a better mechanism for control over symbol exporting, and
# then tackle this issue again.
#
# TODO: Remove this line once TF doesn't depend on Bazel wrapping all library
# archives in -whole_archive -no_whole_archive.
build --noincompatible_remove_legacy_whole_archive

build --enable_platform_specific_config

# Enable XLA support by default.
build --define=with_xla_support=true

build --config=short_logs

build --config=v2

# Disable AWS/HDFS support by default.
build --define=no_aws_support=true
build --define=no_hdfs_support=true

# Default options should come above this line.

# Allow builds using libc++ as a linker library.
# This is mostly for OSSFuzz, so we also pass the flags in from the environment
# to keep the build file clean.
build:libc++ --action_env=CC
build:libc++ --action_env=CXX
build:libc++ --action_env=CXXFLAGS=-stdlib=libc++
build:libc++ --action_env=PATH
build:libc++ --define force_libcpp=enabled
build:libc++ --linkopt -fuse-ld=lld
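
# Illustrative use of the libc++ config (the CC/CXX values are assumptions;
# the config above only forwards whatever compiler is already set in the
# environment):
#   CC=clang CXX=clang++ bazel build --config=libc++ //tensorflow/...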

# Android configs. Bazel needs to have --cpu and --fat_apk_cpu both set to the
# target CPU to build transitive dependencies correctly. See
# https://docs.bazel.build/versions/master/user-manual.html#flag--fat_apk_cpu
build:android --crosstool_top=//external:android/crosstool
build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:android_arm --config=android
build:android_arm --cpu=armeabi-v7a
build:android_arm --fat_apk_cpu=armeabi-v7a
build:android_arm64 --config=android
build:android_arm64 --cpu=arm64-v8a
build:android_arm64 --fat_apk_cpu=arm64-v8a
build:android_x86 --config=android
build:android_x86 --cpu=x86
build:android_x86 --fat_apk_cpu=x86
build:android_x86_64 --config=android
build:android_x86_64 --cpu=x86_64
build:android_x86_64 --fat_apk_cpu=x86_64
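
# Illustrative TFLite cross-compile for 64-bit ARM Android (example target
# only; assumes the Android NDK/SDK have been configured, e.g. via
# ./configure):
#   bazel build --config=android_arm64 //tensorflow/lite:libtensorflowlite.so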

# Sets the default Apple platform to macOS.
build:macos --apple_platform_type=macos

# gRPC on macOS requires this #define.
build:macos --copt=-DGRPC_BAZEL_BUILD

# Settings for macOS on ARM CPUs.
build:macos_arm64 --cpu=darwin_arm64

# iOS configs for each architecture and the fat binary builds.
build:ios --apple_platform_type=ios
build:ios --apple_bitcode=embedded --copt=-fembed-bitcode
build:ios --copt=-Wno-c++11-narrowing
build:ios_armv7 --config=ios
build:ios_armv7 --cpu=ios_armv7
build:ios_arm64 --config=ios
build:ios_arm64 --cpu=ios_arm64
build:ios_i386 --config=ios
build:ios_i386 --cpu=ios_i386
build:ios_x86_64 --config=ios
build:ios_x86_64 --cpu=ios_x86_64
build:ios_fat --config=ios
build:ios_fat --ios_multi_cpus=armv7,arm64,i386,x86_64
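
# Illustrative fat-binary build of the TFLite C framework for iOS (the target
# is an assumption; requires a macOS host with Xcode configured):
#   bazel build --config=ios_fat //tensorflow/lite/ios:TensorFlowLiteC_framework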

# Config to use a mostly-static build and disable modular op registration
# support (this will revert to loading TensorFlow with RTLD_GLOBAL in Python).
# By default, TensorFlow will build with a dependence on
# //tensorflow:libtensorflow_framework.so.
build:monolithic --define framework_shared_object=false
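
# Illustrative monolithic build of the C++ shared library (example target
# only):
#   bazel build --config=monolithic //tensorflow:libtensorflow_cc.so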

# Please note that MKL on macOS or Windows is still not supported.
# If you would like to use a local MKL instead of downloading, please set the
# environment variable "TF_MKL_ROOT" each time before building.
build:mkl --define=build_with_mkl=true --define=enable_mkl=true
build:mkl --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl --define=build_with_openmp=true
build:mkl -c opt
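
# Illustrative MKL build using a local MKL install (the TF_MKL_ROOT path is a
# placeholder):
#   TF_MKL_ROOT=/opt/intel/mkl bazel build --config=mkl \
#     //tensorflow/tools/pip_package:build_pip_package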

# Config to build the oneDNN backend with a user-specified threadpool.
build:mkl_threadpool --define=build_with_mkl=true --define=enable_mkl=true
build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl_threadpool --define=build_with_mkl_opensource=true
build:mkl_threadpool -c opt

# Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL).
# This build is for the inference regime only.
build:mkl_aarch64 --define=build_with_mkl_aarch64=true --define=enable_mkl=true
build:mkl_aarch64 --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl_aarch64 --define=build_with_mkl_opensource=true
build:mkl_aarch64 --define=build_with_openmp=true
build:mkl_aarch64 -c opt

# This config refers to building CUDA op kernels with nvcc.
build:cuda --repo_env TF_NEED_CUDA=1
build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
build:cuda --@local_config_cuda//:enable_cuda
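
# Illustrative CUDA build (assumes CUDA and cuDNN are installed and
# ./configure has been run; example target only):
#   bazel build --config=cuda //tensorflow/tools/pip_package:build_pip_package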

# This config refers to building CUDA op kernels with clang.
build:cuda_clang --config=cuda
build:cuda_clang --repo_env TF_CUDA_CLANG=1
build:cuda_clang --@local_config_cuda//:cuda_compiler=clang

# Debug config
build:dbg -c dbg
# Only include debug info for files under tensorflow/, excluding kernels, to
# reduce the size of the debug info in the binary. This is because if the debug
# sections in the ELF binary are too large, errors can occur. See
# https://github.com/tensorflow/tensorflow/issues/48919.
# Users can still include debug info for a specific kernel, e.g. with:
#     --config=dbg --per_file_copt=+tensorflow/core/kernels/identity_op.*@-g
build:dbg --per_file_copt=+.*,-tensorflow.*@-g0
build:dbg --per_file_copt=+tensorflow/core/kernels.*@-g0
# For now, disable arm_neon. See: https://github.com/tensorflow/tensorflow/issues/33360
build:dbg --cxxopt -DTF_LITE_DISABLE_X86_NEON
# The AWS SDK must be compiled in release mode. See: https://github.com/tensorflow/tensorflow/issues/37498
build:dbg --copt -DDEBUG_BUILD

# Config to build the TPU backend.
build:tpu --define=with_tpu_support=true

build:tensorrt --repo_env TF_NEED_TENSORRT=1

build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain
build:rocm --define=using_rocm_hipcc=true
build:rocm --repo_env TF_NEED_ROCM=1

# Options extracted from the configure script.
build:numa --define=with_numa_support=true

# Options to disable default-on features.
build:noaws --define=no_aws_support=true
build:nogcp --define=no_gcp_support=true
build:nohdfs --define=no_hdfs_support=true
build:nonccl --define=no_nccl_support=true

build:stackdriver_support --define=stackdriver_support=true

# Modular TF build options.
build:dynamic_kernels --define=dynamic_loaded_kernels=true
build:dynamic_kernels --copt=-DAUTOLOAD_DYNAMIC_KERNELS

# Build TF with C++17 features.
build:c++17 --cxxopt=-std=c++1z
build:c++17 --cxxopt=-stdlib=libc++
build:c++1z --config=c++17
build:c++17_gcc --cxxopt=-std=c++1z
build:c++1z_gcc --config=c++17_gcc

# Don't trigger --config=<host platform> when cross-compiling.
build:android --noenable_platform_specific_config
build:ios --noenable_platform_specific_config

# Suppress C++ compiler warnings, otherwise build logs become tens of MBs.
build:android --copt=-w
build:ios --copt=-w
build:linux --copt=-w
build:linux --host_copt=-w
build:macos --copt=-w
build:windows --copt=/W0

# TensorFlow uses M_* math constants that only get defined by MSVC headers if
# _USE_MATH_DEFINES is defined.
build:windows --copt=/D_USE_MATH_DEFINES
build:windows --host_copt=/D_USE_MATH_DEFINES

# Default paths for TF_SYSTEM_LIBS
build:linux --define=PREFIX=/usr
build:linux --define=LIBDIR=$(PREFIX)/lib
build:linux --define=INCLUDEDIR=$(PREFIX)/include
build:linux --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
build:macos --define=PREFIX=/usr
build:macos --define=LIBDIR=$(PREFIX)/lib
build:macos --define=INCLUDEDIR=$(PREFIX)/include
build:macos --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
# TF_SYSTEM_LIBS does not work on Windows.
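
# Illustrative use of system libraries instead of the bundled ones (whether a
# given library is supported depends on the third_party/systemlibs support;
# zlib here is just an example):
#   TF_SYSTEM_LIBS=zlib bazel build //tensorflow/tools/pip_package:build_pip_package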

# By default, build TF in C++14 mode.
build:android --cxxopt=-std=c++14
build:android --host_cxxopt=-std=c++14
build:ios --cxxopt=-std=c++14
build:ios --host_cxxopt=-std=c++14
build:linux --cxxopt=-std=c++14
build:linux --host_cxxopt=-std=c++14
build:macos --cxxopt=-std=c++14
build:macos --host_cxxopt=-std=c++14
build:windows --cxxopt=/std:c++14
build:windows --host_cxxopt=/std:c++14

# On Windows, we still link everything into a single DLL.
build:windows --config=monolithic

# On Linux, we dynamically link a small number of kernels.
build:linux --config=dynamic_kernels

# Make sure to include as little of windows.h as possible.
build:windows --copt=-DWIN32_LEAN_AND_MEAN
build:windows --host_copt=-DWIN32_LEAN_AND_MEAN
build:windows --copt=-DNOGDI
build:windows --host_copt=-DNOGDI

# MSVC (Windows): standards-conformant preprocessor mode.
# See https://docs.microsoft.com/en-us/cpp/preprocessor/preprocessor-experimental-overview
build:windows --copt=/experimental:preprocessor
build:windows --host_copt=/experimental:preprocessor

# Misc build options we need for Windows.
build:windows --linkopt=/DEBUG
build:windows --host_linkopt=/DEBUG
build:windows --linkopt=/OPT:REF
build:windows --host_linkopt=/OPT:REF
build:windows --linkopt=/OPT:ICF
build:windows --host_linkopt=/OPT:ICF

# Verbose failure logs when something goes wrong.
build:windows --verbose_failures

# On Windows, we never cross-compile.
build:windows --distinct_host_configuration=false
# On Linux, don't cross-compile by default.
build:linux --distinct_host_configuration=false

# Do not risk cache corruption. See:
# https://github.com/bazelbuild/bazel/issues/3360
build:linux --experimental_guard_against_concurrent_changes

# Configure short or long logs.
build:short_logs --output_filter=DONT_MATCH_ANYTHING
build:verbose_logs --output_filter=

# Instruction set optimizations
# TODO(gunan): Create a feature in toolchains for avx/avx2 to
#   avoid having to define linux/win separately.
build:avx_linux --copt=-mavx
build:avx_linux --host_copt=-mavx
build:avx2_linux --copt=-mavx2
build:native_arch_linux --copt=-march=native
build:avx_win --copt=/arch=AVX
build:avx2_win --copt=/arch=AVX2
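
# Illustrative CPU-optimized Linux build with AVX2 (example target only; the
# resulting binaries will not run on CPUs without AVX2):
#   bazel build --config=avx2_linux //tensorflow/tools/pip_package:build_pip_package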

# Options to build TensorFlow 1.x or 2.x.
build:v1 --define=tf_api_version=1 --action_env=TF2_BEHAVIOR=0
build:v2 --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1
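
# For example, to build with the TF1 API surface instead of the default v2
# (example target only):
#   bazel build --config=v1 //tensorflow/tools/pip_package:build_pip_package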

# Disable XLA on mobile.
build:xla     --define=with_xla_support=true # TODO: remove, it's on by default.
build:android --define=with_xla_support=false
build:ios     --define=with_xla_support=false

# BEGIN TF REMOTE BUILD EXECUTION OPTIONS
# Options when using remote execution.
# WARNING: THESE OPTIONS WON'T WORK IF YOU DO NOT HAVE PROPER AUTHENTICATION AND PERMISSIONS.

# Flag to enable remote config.
common --experimental_repo_remote_exec

build:rbe --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
build:rbe --google_default_credentials
build:rbe --bes_backend=buildeventservice.googleapis.com
build:rbe --bes_results_url="https://source.cloud.google.com/results/invocations"
build:rbe --bes_timeout=600s
build:rbe --define=EXECUTOR=remote
build:rbe --distinct_host_configuration=false
build:rbe --flaky_test_attempts=3
build:rbe --jobs=200
build:rbe --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:rbe --remote_timeout=3600
build:rbe --spawn_strategy=remote,worker,standalone,local
test:rbe --test_env=USER=anon
# Attempt to minimize the amount of data transfer between bazel and the remote
# workers:
build:rbe --remote_download_toplevel

build:rbe_linux --config=rbe
build:rbe_linux --action_env=PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin"
build:rbe_linux --host_javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8
build:rbe_linux --javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8
build:rbe_linux --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:rbe_linux --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8

# Non-RBE settings we should include because we do not run the configure script.
build:rbe_linux --config=avx_linux
# TODO(gunan): Check why we need this specified in rbe, but not in other builds.
build:rbe_linux --linkopt=-lrt
build:rbe_linux --host_linkopt=-lrt
build:rbe_linux --linkopt=-lm
build:rbe_linux --host_linkopt=-lm

# Use the GPU toolchain until the CPU one is ready.
# https://github.com/bazelbuild/bazel/issues/13623
build:rbe_cpu_linux --config=rbe_linux
build:rbe_cpu_linux --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_cpu_linux --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_cpu_linux --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_cpu_linux --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_cpu_linux --host_platform="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_cpu_linux --platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"

build:rbe_linux_cuda_base --config=rbe_linux
build:rbe_linux_cuda_base --config=cuda
build:rbe_linux_cuda_base --config=tensorrt
build:rbe_linux_cuda_base --action_env=TF_CUDA_VERSION=11
build:rbe_linux_cuda_base --action_env=TF_CUDNN_VERSION=8
build:rbe_linux_cuda_base --repo_env=REMOTE_GPU_TESTING=1
# TensorRT 7 for CUDA 11.1 is compatible with CUDA 11.2, but requires
# libnvrtc.so.11.1. See https://github.com/NVIDIA/TensorRT/issues/1064.
# TODO(b/187962120): Remove when upgrading to TensorRT 8.
test:rbe_linux_cuda_base --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.1/lib64"

build:rbe_linux_cuda11.2_nvcc_base --config=rbe_linux_cuda_base
build:rbe_linux_cuda11.2_nvcc_base --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda11.2_nvcc_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda11.2_nvcc_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_linux_cuda11.2_nvcc_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt"
build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl"
build:rbe_linux_cuda11.2_nvcc_py3.6 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.6"
build:rbe_linux_cuda11.2_nvcc_py3.7 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7"
build:rbe_linux_cuda11.2_nvcc_py3.8 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8"
build:rbe_linux_cuda11.2_nvcc_py3.9 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"

# Map default to CUDA 11.2.
build:rbe_linux_cuda_nvcc_py36 --config=rbe_linux_cuda11.2_nvcc_py3.6
build:rbe_linux_cuda_nvcc_py37 --config=rbe_linux_cuda11.2_nvcc_py3.7
build:rbe_linux_cuda_nvcc_py38 --config=rbe_linux_cuda11.2_nvcc_py3.8
build:rbe_linux_cuda_nvcc_py39 --config=rbe_linux_cuda11.2_nvcc_py3.9

# Deprecated configs that people might still use.
build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda_nvcc_py36
build:rbe_gpu_linux       --config=rbe_linux_cuda_nvcc

build:rbe_linux_cuda_clang_base --config=rbe_linux_cuda_base
build:rbe_linux_cuda_clang_base --repo_env TF_CUDA_CLANG=1
build:rbe_linux_cuda_clang_base --@local_config_cuda//:cuda_compiler=clang
build:rbe_linux_cuda_clang_base --crosstool_top="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
build:rbe_linux_cuda_clang_base --extra_toolchains="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
build:rbe_linux_cuda_clang_base --extra_execution_platforms="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --host_platform="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --platforms="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
build:rbe_linux_cuda_clang_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda"
build:rbe_linux_cuda_clang_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt"
build:rbe_linux_cuda_clang_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl"
build:rbe_linux_cuda_clang_py27 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python2.7"
build:rbe_linux_cuda_clang_py35 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.5"
build:rbe_linux_cuda_clang_py36 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.6"
build:rbe_linux_cuda_clang_py37 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7"
build:rbe_linux_cuda_clang_py38 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8"

# ROCm
build:rbe_linux_rocm_base --config=rocm
build:rbe_linux_rocm_base --config=rbe_linux
build:rbe_linux_rocm_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain"
build:rbe_linux_rocm_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain-linux-x86_64"
build:rbe_linux_rocm_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
build:rbe_linux_rocm_base --action_env=TF_ROCM_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm"
build:rbe_linux_rocm_py2.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python2.7"
build:rbe_linux_rocm_py3.5 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.5"
build:rbe_linux_rocm_py3.6 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.6"
build:rbe_linux_rocm_py3.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.7"
build:rbe_linux_rocm_py3.8 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.8"

# Linux CPU

build:rbe_linux_py3 --config=rbe_linux
build:rbe_linux_py3 --python_path="/usr/local/bin/python3.9"
build:rbe_linux_py3 --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"

build:rbe_win --config=rbe
build:rbe_win --crosstool_top="@tf_toolchains//toolchains/win/tf_win_06242021:toolchain"
build:rbe_win --extra_toolchains="@tf_toolchains//toolchains/win/tf_win_06242021:cc-toolchain-x64_windows"
build:rbe_win --host_javabase="@tf_toolchains//toolchains/win:windows_jdk8"
build:rbe_win --javabase="@tf_toolchains//toolchains/win:windows_jdk8"
build:rbe_win --extra_execution_platforms="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --host_platform="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --platforms="@tf_toolchains//toolchains/win:rbe_windows_ltsc2019"
build:rbe_win --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe
build:rbe_win --experimental_strict_action_env=true

# TODO(gunan): Remove once we use MSVC 2019 with latest patches.
build:rbe_win --define=override_eigen_strong_inline=true
build:rbe_win --jobs=100

# Don't build the python zip archive in the RBE build.
build:rbe_win --remote_download_minimal
build:rbe_win --enable_runfiles
build:rbe_win --nobuild_python_zip

build:rbe_win_py37 --config=rbe
build:rbe_win_py37 --repo_env=TF_PYTHON_CONFIG_REPO="@windows_py37_config_python"
build:rbe_win_py37 --python_path=C:\\Python37\\python.exe

build:rbe_win_py38 --config=rbe
build:rbe_win_py38 --repo_env=PYTHON_BIN_PATH=C:\\Python38\\python.exe
build:rbe_win_py38 --repo_env=PYTHON_LIB_PATH=C:\\Python38\\lib\\site-packages
build:rbe_win_py38 --repo_env=TF_PYTHON_CONFIG_REPO=@tf_toolchains//toolchains/win_1803/py38
build:rbe_win_py38 --python_path=C:\\Python38\\python.exe

# You may need to change these for your own GCP project.
build:tensorflow_testing_rbe --project_id=tensorflow-testing
common:tensorflow_testing_rbe_linux --remote_instance_name=projects/tensorflow-testing/instances/default_instance
build:tensorflow_testing_rbe_linux --config=tensorflow_testing_rbe

common:tensorflow_testing_rbe_win --remote_instance_name=projects/tensorflow-testing/instances/windows
build:tensorflow_testing_rbe_win --config=tensorflow_testing_rbe

# TFLite build configs for generic embedded Linux.
build:elinux --crosstool_top=@local_config_embedded_arm//:toolchain
build:elinux --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
build:elinux_aarch64 --config=elinux
build:elinux_aarch64 --cpu=aarch64
build:elinux_aarch64 --distinct_host_configuration=true
build:elinux_armhf --config=elinux
build:elinux_armhf --cpu=armhf
build:elinux_armhf --distinct_host_configuration=true
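
# Illustrative TFLite cross-compile for an aarch64 embedded Linux target
# (example target only; requires the embedded toolchain repositories to be
# available):
#   bazel build --config=elinux_aarch64 //tensorflow/lite/tools/benchmark:benchmark_model
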
# END TF REMOTE BUILD EXECUTION OPTIONS

# Config-specific options should come above this line.

# Load rc file written by ./configure.
try-import %workspace%/.tf_configure.bazelrc

# Load rc file with user-specific options.
try-import %workspace%/.bazelrc.user
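
# A .bazelrc.user file is optional and not checked in; a hypothetical example
# of its contents:
#   build --disk_cache=/tmp/bazel_disk_cache
#   build --jobs=16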

# Here are the bazelrc configs for release builds.
build:release_base --config=v2
build:release_base --distinct_host_configuration=false
test:release_base --flaky_test_attempts=3
test:release_base --test_size_filters=small,medium

build:release_cpu_linux --config=release_base
build:release_cpu_linux --config=avx_linux
build:release_cpu_linux --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
test:release_cpu_linux --test_env=LD_LIBRARY_PATH

build:release_cpu_macos --config=release_base
build:release_cpu_macos --config=avx_linux

build:release_gpu_base --config=cuda
build:release_gpu_base --action_env=TF_CUDA_VERSION="11"
build:release_gpu_base --action_env=TF_CUDNN_VERSION="8"
build:release_gpu_base --repo_env=TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80"

build:release_gpu_linux --config=release_cpu_linux
build:release_gpu_linux --config=release_gpu_base
build:release_gpu_linux --config=tensorrt
build:release_gpu_linux --action_env=CUDA_TOOLKIT_PATH="/usr/local/cuda-11.2"
build:release_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/tensorrt/lib"
build:release_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/dt7/usr/bin/gcc"
build:release_gpu_linux --crosstool_top=@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain

build:release_cpu_windows --config=release_base
build:release_cpu_windows --config=avx_win
build:release_cpu_windows --define=no_tensorflow_py_deps=true
# First available in VS 16.4. Speeds up Windows compile times considerably. See
# https://groups.google.com/a/tensorflow.org/d/topic/build/SsW98Eo7l3o/discussion
build:release_cpu_windows --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions

build:release_gpu_windows --config=release_cpu_windows
build:release_gpu_windows --config=release_gpu_base
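
# Illustrative release-style build (these configs are primarily intended for
# TF's own CI; example target only):
#   bazel build --config=release_cpu_linux //tensorflow/tools/pip_package:build_pip_package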

# Address sanitizer
# CC=clang bazel build --config asan
build:asan --strip=never
build:asan --copt -fsanitize=address
build:asan --copt -DADDRESS_SANITIZER
build:asan --copt -g
build:asan --copt -O3
build:asan --copt -fno-omit-frame-pointer
build:asan --linkopt -fsanitize=address

# Memory sanitizer
# CC=clang bazel build --config msan
build:msan --strip=never
build:msan --copt -fsanitize=memory
build:msan --copt -DMEMORY_SANITIZER
build:msan --copt -g
build:msan --copt -O3
build:msan --copt -fno-omit-frame-pointer
build:msan --linkopt -fsanitize=memory

# Undefined Behavior Sanitizer
# CC=clang bazel build --config ubsan
build:ubsan --strip=never
build:ubsan --copt -fsanitize=undefined
build:ubsan --copt -DUNDEFINED_BEHAVIOR_SANITIZER
build:ubsan --copt -g
build:ubsan --copt -O3
build:ubsan --copt -fno-omit-frame-pointer
build:ubsan --linkopt -fsanitize=undefined
build:ubsan --linkopt -lubsan

# Exclude TFRT integration for anything but Linux.
build:android --config=no_tfrt
build:macos   --config=no_tfrt
build:windows --config=no_tfrt
build:rocm    --config=no_tfrt
build:no_tfrt --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/common,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/fallback,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils

# Experimental configuration for testing XLA GPU lowering to TFRT BEF thunks.
# bazel test --config=experimental_enable_bef_thunk \
#   //tensorflow/compiler/xla/service/gpu/tests:mlir_gemm_test
build:experimental_enable_bef_thunk --config=cuda
build:experimental_enable_bef_thunk --//tensorflow/compiler/xla/service/gpu:enable_bef_thunk
build:experimental_enable_bef_thunk --@tf_runtime//:enable_gpu
build:experimental_enable_bef_thunk --@rules_cuda//cuda:enable_cuda
build:experimental_enable_bef_thunk --nocheck_visibility
build:experimental_enable_bef_thunk --incompatible_strict_action_env
build:experimental_enable_bef_thunk --config=monolithic