@@ -129,6 +129,9 @@ build --config=v2
129129# Disable AWS/HDFS support by default
130130build --define=no_aws_support=true
131131build --define=no_hdfs_support=true
132+ # Precompiling results in some action conflicts. Disable it for now until
133+ # the problematic targets are fixed.
134+ build --@rules_python//python/config_settings:precompile=force_disabled
132135
133136# TF now has `cc_shared_library` targets, so it needs the experimental flag
134137# TODO(rostam): Remove when `cc_shared_library` is enabled by default
@@ -164,15 +167,19 @@ build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
164167build:android_arm --config=android
165168build:android_arm --cpu=armeabi-v7a
166169build:android_arm --fat_apk_cpu=armeabi-v7a
170+ build:android_arm --platforms=@org_tensorflow//tensorflow/tools/toolchains/android:armeabi-v7a
167171build:android_arm64 --config=android
168172build:android_arm64 --cpu=arm64-v8a
169173build:android_arm64 --fat_apk_cpu=arm64-v8a
174+ build:android_arm64 --platforms=@org_tensorflow//tensorflow/tools/toolchains/android:arm64-v8a
170175build:android_x86 --config=android
171176build:android_x86 --cpu=x86
172177build:android_x86 --fat_apk_cpu=x86
178+ build:android_x86 --platforms=@org_tensorflow//tensorflow/tools/toolchains/android:x86
173179build:android_x86_64 --config=android
174180build:android_x86_64 --cpu=x86_64
175181build:android_x86_64 --fat_apk_cpu=x86_64
182+ build:android_x86_64 --platforms=@org_tensorflow//tensorflow/tools/toolchains/android:x86_64
176183
177184# Build everything statically for Android since all static libs are later
178185# bundled together into a single .so for deployment.
@@ -205,6 +212,7 @@ build:apple-toolchain --host_crosstool_top=@local_config_apple_cc//:toolchain
205212# Settings for MacOS on ARM CPUs.
206213build:macos_arm64 --cpu=darwin_arm64
207214build:macos_arm64 --macos_minimum_os=11.0
215+ build:macos_arm64 --platforms=@build_bazel_apple_support//configs/platforms:darwin_arm64
208216
209217# iOS configs for each architecture and the fat binary builds.
210218build:ios --apple_platform_type=ios
@@ -213,14 +221,19 @@ build:ios --copt=-Wno-c++11-narrowing
213221build:ios --config=apple-toolchain
214222build:ios_armv7 --config=ios
215223build:ios_armv7 --cpu=ios_armv7
224+ build:ios_armv7 --platforms=@org_tensorflow//tensorflow/tools/toolchains/ios:ios_armv7
216225build:ios_arm64 --config=ios
217226build:ios_arm64 --cpu=ios_arm64
227+ build:ios_arm64 --platforms=@build_bazel_apple_support//configs/platforms:ios_arm64
218228build:ios_arm64e --config=ios
219229build:ios_arm64e --cpu=ios_arm64e
230+ build:ios_arm64e --platforms=@build_bazel_apple_support//configs/platforms:ios_arm64e
220231build:ios_sim_arm64 --config=ios
221232build:ios_sim_arm64 --cpu=ios_sim_arm64
233+ build:ios_sim_arm64 --platforms=@build_bazel_apple_support//configs/platforms:ios_sim_arm64
222234build:ios_x86_64 --config=ios
223235build:ios_x86_64 --cpu=ios_x86_64
236+ build:ios_x86_64 --platforms=@build_bazel_apple_support//configs/platforms:ios_x86_64
224237build:ios_fat --config=ios
225238build:ios_fat --ios_multi_cpus=armv7,arm64,i386,x86_64
226239
@@ -246,24 +259,24 @@ build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0
246259build:mkl_threadpool --define=build_with_mkl_opensource=true
247260build:mkl_threadpool -c opt
248261
249- # Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL).
250- build:mkl_aarch64 --define=build_with_mkl_aarch64=true
251- build:mkl_aarch64 --define=build_with_openmp=true
252- build:mkl_aarch64 --define=build_with_acl=true
253- build:mkl_aarch64 -c opt
254-
255262# Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL).
256263# with Eigen threadpool support
257264build:mkl_aarch64_threadpool --define=build_with_mkl_aarch64=true
265+ build:mkl_aarch64_threadpool --define=build_with_acl=true
258266build:mkl_aarch64_threadpool -c opt
259267
268+ # This is an alias for the mkl_aarch64_threadpool build.
269+ build:mkl_aarch64 --config=mkl_aarch64_threadpool
270+
271+ # Default CUDA and CUDNN versions.
272+ build:cuda_version --repo_env=HERMETIC_CUDA_VERSION="12.5.1"
273+ build:cuda_version --repo_env=HERMETIC_CUDNN_VERSION="9.3.0"
274+
260275# CUDA: This config refers to building CUDA op kernels with nvcc.
261276build:cuda --repo_env TF_NEED_CUDA=1
262277build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
263278build:cuda --@local_config_cuda//:enable_cuda
264- # Default CUDA and CUDNN versions.
265- build:cuda --repo_env=HERMETIC_CUDA_VERSION="12.5.1"
266- build:cuda --repo_env=HERMETIC_CUDNN_VERSION="9.3.0"
279+ build:cuda --config=cuda_version
267280# This flag is needed to include CUDA libraries.
268281build:cuda --@local_config_cuda//cuda:include_cuda_libs=true
269282
@@ -293,8 +306,7 @@ build:cuda_clang --linkopt="-lm"
293306
294307# Set up compilation CUDA version and paths and use the CUDA Clang toolchain.
295308build:cuda_clang_official --config=cuda_clang
296- build:cuda_clang_official --repo_env=HERMETIC_CUDA_VERSION="12.5.1"
297- build:cuda_clang_official --repo_env=HERMETIC_CUDNN_VERSION="9.3.0"
309+ build:cuda_clang_official --config=cuda_version
298310build:cuda_clang_official --action_env=CLANG_CUDA_COMPILER_PATH="/usr/lib/llvm-18/bin/clang"
299311build:cuda_clang_official --crosstool_top="@local_config_cuda//crosstool:toolchain"
300312
@@ -458,12 +470,8 @@ build:windows --dynamic_mode=off
458470
459471# Default paths for TF_SYSTEM_LIBS
460472build:linux --define=PREFIX=/usr
461- build:linux --define=LIBDIR=$(PREFIX)/lib
462- build:linux --define=INCLUDEDIR=$(PREFIX)/include
463473build:linux --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
464474build:macos --define=PREFIX=/usr
465- build:macos --define=LIBDIR=$(PREFIX)/lib
466- build:macos --define=INCLUDEDIR=$(PREFIX)/include
467475build:macos --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
468476# TF_SYSTEM_LIBS do not work on windows.
469477
@@ -624,6 +632,12 @@ build:rbe_linux_cpu --python_path="/usr/bin/python3"
624632# These you may need to change for your own GCP project.
625633common:rbe_linux_cpu --remote_instance_name=projects/tensorflow-testing/instances/default_instance
626634
635+ # Download CUDA/CUDNN redistributions to preserve the repositories cache between
636+ # CPU and GPU builds.
637+ # TODO(ybaturina): Uncomment when RBE is ready to support this.
638+ # build:rbe_linux_cpu --repo_env USE_CUDA_REDISTRIBUTIONS=1
639+ # build:rbe_linux_cpu --config=cuda_version
640+
627641# TODO(kanglan): Remove it after toolchain update is complete.
628642build:rbe_linux_cpu_old --config=rbe_linux
629643build:rbe_linux_cpu_old --host_crosstool_top="@ubuntu20.04-gcc9_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
@@ -637,6 +651,7 @@ common:rbe_linux_cpu_old --remote_instance_name=projects/tensorflow-testing/inst
637651
638652build:rbe_linux_cuda --config=cuda_clang_official
639653build:rbe_linux_cuda --config=rbe_linux_cpu
654+ build:rbe_linux_cuda --repo_env=USE_CUDA_TAR_ARCHIVE_FILES=1
640655# For Remote build execution -- GPU configuration
641656build:rbe_linux_cuda --repo_env=REMOTE_GPU_TESTING=1
642657
@@ -677,8 +692,10 @@ build:elinux --crosstool_top=@local_config_embedded_arm//:toolchain
677692build:elinux --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
678693build:elinux_aarch64 --config=elinux
679694build:elinux_aarch64 --cpu=aarch64
695+ build:elinux_aarch64 --platforms=@org_tensorflow//tensorflow/tools/toolchains/linux:linux_aarch64
680696build:elinux_armhf --config=elinux
681697build:elinux_armhf --cpu=armhf
698+ build:elinux_armhf --platforms=@org_tensorflow//tensorflow/tools/toolchains/linux:linux_armhf
682699build:elinux_armhf --copt -mfp16-format=ieee
683700
684701# Config-specific options should come above this line.