cmake_minimum_required(VERSION 3.26)

project(vllm_flash_attn LANGUAGES CXX)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_EXTENSIONS OFF)

# CUDA by default; can be overridden with -DVLLM_TARGET_DEVICE=... (used by setup.py).
set(VLLM_TARGET_DEVICE "cuda" CACHE STRING "Target device backend for vLLM")
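
# Example (illustrative): when driving CMake directly rather than via setup.py,
# the device backend can be selected on the command line:
#   cmake -S . -B build -DVLLM_TARGET_DEVICE=cuda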

message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
message(STATUS "Target device: ${VLLM_TARGET_DEVICE}")

include(${CMAKE_CURRENT_LIST_DIR}/cmake/utils.cmake)

# Suppress potential warnings about unused manually-specified variables.
set(ignoreMe "${VLLM_PYTHON_PATH}")

# Supported python versions. These should be kept in sync with setup.py.
set(PYTHON_SUPPORTED_VERSIONS "3.8" "3.9" "3.10" "3.11" "3.12")

# Supported NVIDIA architectures.
set(CUDA_SUPPORTED_ARCHS "8.0;8.6;8.9;9.0")

# Supported AMD GPU architectures.
set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100")
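
# Note (assumption): the CUDA entries above use torch's TORCH_CUDA_ARCH_LIST
# notation ("8.0" == compute capability 8.0, i.e. sm_80); they are presumably
# translated into CMake's CUDA_ARCHITECTURES form ("80-real"/"80-virtual") by
# override_gpu_arches in cmake/utils.cmake further down.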

#
# Supported/expected torch versions for CUDA/ROCm.
#
# Currently, having an incorrect pytorch version results in a warning
# rather than an error.
#
# Note: these should be kept in sync with the torch version in setup.py.
# Likely should also be in sync with the vLLM version.
#
set(TORCH_SUPPORTED_VERSION_CUDA "2.4.0")

find_python_constrained_versions(${PYTHON_SUPPORTED_VERSIONS})

if (VLLM_PARENT_BUILD)
    # vLLM extracts the supported architectures from the global CMAKE_CUDA_FLAGS, which are set by torch.
    # Because CMAKE_CUDA_FLAGS has been modified by the parent, we cannot reuse that logic here.
    # Hence, we just use the parent's VLLM_GPU_ARCHES and VLLM_GPU_FLAGS.
    message(STATUS "Building vllm-flash-attn inside vLLM. Skipping flag detection and relying on parent build.")
    macro(check_found NAME VAR)
        if (NOT ${VAR})
            message(FATAL_ERROR "${NAME} must have been found by parent.")
        endif ()
    endmacro()

    check_found("Torch" TORCH_FOUND)

    set(VLLM_FA_GPU_FLAGS ${VLLM_GPU_FLAGS})
    set(VLLM_FA_GPU_ARCHES ${VLLM_GPU_ARCHES})

    # Allow direct override of GPU architectures.
    # These have to be in CMake syntax (75-real, 89-virtual, etc.).
    if (DEFINED ENV{VLLM_FA_CMAKE_GPU_ARCHES})
        message(STATUS "Overriding GPU architectures to $ENV{VLLM_FA_CMAKE_GPU_ARCHES}")
        set(VLLM_FA_GPU_ARCHES $ENV{VLLM_FA_CMAKE_GPU_ARCHES})

        # Generally, we want to build with a subset of the parent arches.
        foreach (VLLM_FA_GPU_ARCH IN LISTS VLLM_FA_GPU_ARCHES)
            if (NOT VLLM_FA_GPU_ARCH IN_LIST VLLM_GPU_ARCHES)
                message(WARNING "Using GPU architecture ${VLLM_FA_GPU_ARCH}, "
                        "which is not included in the parent list.")
            endif ()
        endforeach ()
    endif ()
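
    # Example (illustrative): restrict the build to two real architectures by
    # exporting the variable before invoking the build, e.g.
    #   VLLM_FA_CMAKE_GPU_ARCHES="80-real;90-real" pip install -e .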

else ()
    message(STATUS "Standalone vllm-flash-attn build.")

    #
    # Update cmake's `CMAKE_PREFIX_PATH` with torch location.
    #
    append_cmake_prefix_path("torch" "torch.utils.cmake_prefix_path")
    message(DEBUG "CMAKE_PREFIX_PATH: ${CMAKE_PREFIX_PATH}")
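
    # append_cmake_prefix_path (from cmake/utils.cmake) presumably runs the
    # Python interpreter, evaluates `torch.utils.cmake_prefix_path`, and
    # appends the result to CMAKE_PREFIX_PATH so that find_package(Torch)
    # below can locate TorchConfig.cmake.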

    #
    # Import torch cmake configuration.
    # Torch also imports CUDA (and partially HIP) languages with some customizations,
    # so there is no need to do this explicitly with check_language/enable_language,
    # etc.
    #
    find_package(Torch REQUIRED)

    #
    # Set up the GPU language, then check the torch version and warn if it is
    # not the expected one.
    #
    if (NOT HIP_FOUND AND CUDA_FOUND)
        set(VLLM_GPU_LANG "CUDA")

        # Check that CUDA is at least 11.6.
        if (CUDA_VERSION VERSION_LESS 11.6)
            message(FATAL_ERROR "CUDA version 11.6 or greater is required.")
        endif ()

        if (NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_CUDA})
            message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_CUDA} "
                    "expected for CUDA build, saw ${Torch_VERSION} instead.")
        endif ()
    elseif (HIP_FOUND)
        message(FATAL_ERROR "ROCm build is not currently supported for vllm-flash-attn.")
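
        # NOTE: message(FATAL_ERROR) aborts configuration, so the rest of this
        # branch is unreachable; it is presumably retained as a template for
        # future ROCm support (TORCH_SUPPORTED_VERSION_ROCM below is never
        # defined in this file).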

        set(VLLM_GPU_LANG "HIP")

        # Importing torch recognizes and sets up some HIP/ROCm configuration but does
        # not let cmake recognize .hip files. In order to get cmake to understand the
        # .hip extension automatically, HIP must be enabled explicitly.
        enable_language(HIP)

        # ROCm 5.X and 6.X
        if (ROCM_VERSION_DEV_MAJOR GREATER_EQUAL 5 AND
                NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_ROCM})
            message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_ROCM} "
                    "expected for ROCm build, saw ${Torch_VERSION} instead.")
        endif ()
    else ()
        message(FATAL_ERROR "Can't find CUDA or HIP installation.")
    endif ()

    #
    # Override the GPU architectures detected by cmake/torch and filter them by
    # the supported versions for the current language.
    # The final set of arches is stored in `VLLM_FA_GPU_ARCHES`.
    #
    override_gpu_arches(VLLM_FA_GPU_ARCHES
            ${VLLM_GPU_LANG}
            "${${VLLM_GPU_LANG}_SUPPORTED_ARCHS}")

    #
    # Query torch for additional GPU compilation flags for the given
    # `VLLM_GPU_LANG`.
    # The final set of flags is stored in `VLLM_FA_GPU_FLAGS`.
    #
    get_torch_gpu_compiler_flags(VLLM_FA_GPU_FLAGS ${VLLM_GPU_LANG})

    #
    # Set nvcc parallelism.
    #
    if (NVCC_THREADS AND VLLM_GPU_LANG STREQUAL "CUDA")
        list(APPEND VLLM_FA_GPU_FLAGS "--threads=${NVCC_THREADS}")
    endif ()
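
    # Example (illustrative): pass -DNVCC_THREADS=4 on the CMake command line;
    # nvcc's --threads flag compiles for multiple target architectures in
    # parallel within a single invocation.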
endif ()

# Other flags.
list(APPEND VLLM_FA_GPU_FLAGS --expt-relaxed-constexpr --expt-extended-lambda --use_fast_math)
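# --use_fast_math trades numerical accuracy for speed (approximate
# transcendentals, denormal flushing); the --expt-* flags enable extended
# lambda and relaxed constexpr support in device code, which cutlass-based
# kernels generally need.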

# Replace instead of appending; nvcc doesn't like duplicate -O flags.
string(REPLACE "-O2" "-O3" CMAKE_CUDA_FLAGS_RELWITHDEBINFO "${CMAKE_CUDA_FLAGS_RELWITHDEBINFO}")
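# (CMAKE_CUDA_FLAGS_RELWITHDEBINFO typically defaults to "-O2 -g -DNDEBUG", so
# the substitution raises the optimization level without adding a second -O flag.)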

#
# _C extension
#

file(GLOB FLASH_ATTN_GEN_SRCS "csrc/flash_attn/src/flash_fwd_*.cu")
message(DEBUG "FLASH_ATTN_GEN_SRCS: ${FLASH_ATTN_GEN_SRCS}")
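# Note: file(GLOB) is evaluated at configure time, so adding or removing
# flash_fwd_* kernel files requires re-running CMake before they are picked up.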

define_gpu_extension_target(
    vllm_flash_attn_c
    DESTINATION vllm_flash_attn
    LANGUAGE ${VLLM_GPU_LANG}
    SOURCES csrc/flash_attn/flash_api.cpp ${FLASH_ATTN_GEN_SRCS}
    COMPILE_FLAGS ${VLLM_FA_GPU_FLAGS}
    ARCHITECTURES ${VLLM_FA_GPU_ARCHES}
    USE_SABI 3
    WITH_SOABI
)
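
# define_gpu_extension_target (from cmake/utils.cmake) presumably wraps the
# boilerplate for building a Python extension module: creating the target from
# SOURCES, applying COMPILE_FLAGS and ARCHITECTURES, targeting the Python
# stable ABI (USE_SABI 3), appending the platform SOABI suffix to the module
# filename (WITH_SOABI), and installing it into the `vllm_flash_attn` package
# directory (DESTINATION).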

target_include_directories(vllm_flash_attn_c PRIVATE
    csrc/flash_attn
    csrc/flash_attn/src
    csrc/cutlass/include)

# Custom definitions.
target_compile_definitions(vllm_flash_attn_c PRIVATE
    # FLASHATTENTION_DISABLE_BACKWARD
    FLASHATTENTION_DISABLE_DROPOUT
    # FLASHATTENTION_DISABLE_ALIBI
    # FLASHATTENTION_DISABLE_SOFTCAP
    FLASHATTENTION_DISABLE_UNEVEN_K
    # FLASHATTENTION_DISABLE_LOCAL
)
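
# A guess at the rationale: each FLASHATTENTION_DISABLE_* macro prunes a family
# of kernel template instantiations. Dropout is unused at inference time and
# uneven K is unnecessary for the head sizes built here, so disabling both cuts
# compile time and binary size, while the commented-out options stay enabled
# because vLLM exercises them (ALiBi, softcap, local/sliding-window attention).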

# Check for the old generator header.
find_file(OLD_GENERATOR_FILE "ATen/CUDAGeneratorImpl.h" ${TORCH_INCLUDE_DIRS} NO_DEFAULT_PATH)
if (OLD_GENERATOR_FILE)
    target_compile_definitions(vllm_flash_attn_c PRIVATE -DOLD_GENERATOR_PATH)
endif ()
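# Older torch releases shipped this header at ATen/CUDAGeneratorImpl.h before
# it moved to ATen/cuda/CUDAGeneratorImpl.h; the OLD_GENERATOR_PATH define is
# presumably checked in flash_api.cpp to select the matching include.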