diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index a97636edc..30cf78080 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -48,7 +48,7 @@ body:
attributes:
label: CUDA Versions
description:
- placeholder: e.g. CUDA 11.0, CUDA 12.2
+ placeholder: e.g. CUDA 11.2, CUDA 12.2
validations:
required: false
- type: input
diff --git a/.github/workflows/CMake.yml b/.github/workflows/CMake.yml
index 9975a424e..75e27cc0b 100644
--- a/.github/workflows/CMake.yml
+++ b/.github/workflows/CMake.yml
@@ -26,7 +26,7 @@ jobs:
# Multiplicative build matrix
matrix:
cudacxx:
- - cuda: "11.0"
+ - cuda: "11.2"
cuda_arch: "35"
hostcxx: gcc-8
os: ubuntu-20.04
diff --git a/.github/workflows/Draft-Release.yml b/.github/workflows/Draft-Release.yml
index 984d609c5..4570d15db 100644
--- a/.github/workflows/Draft-Release.yml
+++ b/.github/workflows/Draft-Release.yml
@@ -49,7 +49,7 @@ jobs:
cuda_arch: "35-real;90-real;90-virtual"
hostcxx: gcc-9
os: ubuntu-20.04
- - cuda: "11.0"
+ - cuda: "11.2"
cuda_arch: "35-real;80-real;80-virtual"
hostcxx: gcc-8
os: ubuntu-20.04
@@ -202,7 +202,7 @@ jobs:
cuda_arch: "35-real;90-real;90-virtual"
hostcxx: "Visual Studio 16 2019"
os: windows-2019
- - cuda: "11.0.3"
+ - cuda: "11.2.2"
cuda_arch: "35-real;80-real;80-virtual"
hostcxx: "Visual Studio 16 2019"
os: windows-2019
diff --git a/.github/workflows/Ubuntu.yml b/.github/workflows/Ubuntu.yml
index 646e33a6c..df9a569e7 100644
--- a/.github/workflows/Ubuntu.yml
+++ b/.github/workflows/Ubuntu.yml
@@ -37,7 +37,7 @@ jobs:
cuda_arch: "35"
hostcxx: gcc-11
os: ubuntu-22.04
- - cuda: "11.0"
+ - cuda: "11.2"
cuda_arch: "35"
hostcxx: gcc-8
os: ubuntu-20.04
@@ -56,7 +56,7 @@ jobs:
exclude:
# Exclude VIS=ON for oldest cuda.
- cudacxx:
- cuda: "11.0"
+ cuda: "11.2"
VISUALISATION: "ON"
# Exclude beltsoff builds for old cuda's
- cudacxx:
@@ -64,7 +64,7 @@ jobs:
config:
name: "Beltsoff"
- cudacxx:
- cuda: "11.0"
+ cuda: "11.2"
config:
name: "Beltsoff"
# Exclude beltsoff vis builds to keep the matrix lighter.
diff --git a/.github/workflows/Windows-Tests.yml b/.github/workflows/Windows-Tests.yml
index 4762c62e7..284976693 100644
--- a/.github/workflows/Windows-Tests.yml
+++ b/.github/workflows/Windows-Tests.yml
@@ -31,7 +31,7 @@ jobs:
cuda_arch: "35"
hostcxx: "Visual Studio 17 2022"
os: windows-2022
- - cuda: "11.0.3"
+ - cuda: "11.2.2"
cuda_arch: "35"
hostcxx: "Visual Studio 16 2019"
os: windows-2019
diff --git a/.github/workflows/Windows.yml b/.github/workflows/Windows.yml
index 4511c3538..8b5b7e270 100644
--- a/.github/workflows/Windows.yml
+++ b/.github/workflows/Windows.yml
@@ -37,7 +37,7 @@ jobs:
cuda_arch: "35"
hostcxx: "Visual Studio 17 2022"
os: windows-2022
- - cuda: "11.0.3"
+ - cuda: "11.2.2"
cuda_arch: "35"
hostcxx: "Visual Studio 16 2019"
os: windows-2019
@@ -56,7 +56,7 @@ jobs:
exclude:
# Exclude VIS=ON for oldest cuda.
- cudacxx:
- cuda: "11.0.3"
+ cuda: "11.2.2"
VISUALISATION: "ON"
# Exclude beltsoff builds for old cuda's
- cudacxx:
@@ -64,7 +64,7 @@ jobs:
config:
name: "Beltsoff"
- cudacxx:
- cuda: "11.0.3"
+ cuda: "11.2.2"
config:
name: "Beltsoff"
# Exclude beltsoff vis builds to keep the matrix lighter.
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f634809aa..d004369bf 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,10 +49,15 @@ if(CMAKE_CUDA_COMPILER)
flamegpu_set_cuda_architectures()
endif()
-# Set the minimum supported version of CUDA for FLAME GPU, currently 11.0
-set(MINIMUM_SUPPORTED_CUDA_VERSION 11.0)
-# Set the minimum, usable, but deprecated CUDA version. Currently there are no deprecated versions
+# Set the minimum supported version of CUDA for FLAME GPU, currently 11.2
+set(MINIMUM_SUPPORTED_CUDA_VERSION 11.2)
+# Set the minimum, potentially usable, but unsupported CUDA version.
+# Currently 11.0 on Linux and 11.1 on Windows (due to CCCL support).
+# CUDA 11.1 is not supported, to simplify Python releases.
set(MINIMUM_CUDA_VERSION 11.0)
+if(WIN32)
+ set(MINIMUM_CUDA_VERSION 11.1)
+endif()
# If the CUDA compiler is too old, trigger a docs only build.
if(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_CUDA_VERSION})
@@ -60,9 +65,9 @@ if(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_CUDA_VERSION})
message(STATUS "Documentation-only build: CUDA ${MINIMUM_SUPPORTED_CUDA_VERSION} or greater is required for compilation.")
endif()
-# If the CUDA compiler is atleast the minimum deprecated version, but less than the minimum actually supported version, issue a dev warning.
+# If the CUDA compiler is at least the minimum (unsupported) version, but less than the minimum actually supported version, issue a warning.
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL ${MINIMUM_CUDA_VERSION} AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_SUPPORTED_CUDA_VERSION})
- message(DEPRECATION "Support for CUDA verisons <= ${MINIMUM_SUPPORTED_CUDA_VERSION} is deprecated and will be removed in a future release.")
+ message(WARNING "CUDA versions >= ${MINIMUM_CUDA_VERSION} && < ${MINIMUM_SUPPORTED_CUDA_VERSION} are unsupported but may work on some platforms.")
endif()
# If CUDA is not available, or the minimum version is too low only build the docs.
@@ -78,7 +83,7 @@ endif()
# include for dependent modules
include(CMakeDependentOption)
-# Option to enable building all examples, defaults to ON if FLAMEPGU is the top level cmake, else OFF
+# Option to enable building all examples, defaults to ON if FLAMEGPU is the top level cmake, else OFF
cmake_dependent_option(FLAMEGPU_BUILD_ALL_EXAMPLES "Enable building all FLAMEGPU examples" ON "FLAMEGPU_PROJECT_IS_TOP_LEVEL" OFF)
# Options to enable building individual examples, if FLAMEGPU_BUILD_ALL_EXAMPLES is off.
diff --git a/README.md b/README.md
index 10bd8cec3..7310d4d6e 100644
--- a/README.md
+++ b/README.md
@@ -63,7 +63,7 @@ Building FLAME GPU has the following requirements. There are also optional depen
+ [CMake](https://cmake.org/download/) `>= 3.18`
+ `>= 3.20` if building python bindings using a multi-config generator (Visual Studio, Eclipse or Ninja Multi-Config)
-+ [CUDA](https://developer.nvidia.com/cuda-downloads) `>= 11.0` and a [Compute Capability](https://developer.nvidia.com/cuda-gpus) `>= 3.5` NVIDIA GPU.
++ [CUDA](https://developer.nvidia.com/cuda-downloads) `>= 11.2` and a [Compute Capability](https://developer.nvidia.com/cuda-gpus) `>= 3.5` NVIDIA GPU.
+ C++17 capable C++ compiler (host), compatible with the installed CUDA version
+ [Microsoft Visual Studio 2019 or 2022](https://visualstudio.microsoft.com/) (Windows)
+ *Note:* Visual Studio must be installed before the CUDA toolkit is installed. See the [CUDA installation guide for Windows](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html) for more information.
@@ -247,7 +247,7 @@ Several environmental variables are used or required by FLAME GPU 2.
| Environment Variable | Description |
|--------------------------------------|-------------|
-| `CUDA_PATH` | Required when using RunTime Compilation (RTC), pointing to the root of the CUDA Toolkit where NVRTC resides. i.e. `/usr/local/cuda-11.0/` or `C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.0`. Alternatively `CUDA_HOME` may be used if `CUDA_PATH` was not set. |
+| `CUDA_PATH` | Required when using RunTime Compilation (RTC), pointing to the root of the CUDA Toolkit where NVRTC resides. i.e. `/usr/local/cuda-11.2/` or `C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2`. Alternatively `CUDA_HOME` may be used if `CUDA_PATH` was not set. |
| `FLAMEGPU_INC_DIR` | When RTC compilation is required, if the location of the `include` directory cannot be found it must be specified using the `FLAMEGPU_INC_DIR` environment variable. |
| `FLAMEGPU_TMP_DIR` | FLAME GPU may cache some files to a temporary directory on the system, using the temporary directory returned by [`std::filesystem::temp_directory_path`](https://en.cppreference.com/w/cpp/filesystem/temp_directory_path). The location can optionally be overridden using the `FLAMEGPU_TMP_DIR` environment variable. |
| `FLAMEGPU_RTC_INCLUDE_DIRS` | A list of include directories that should be provided to the RTC compiler, these should be separated using `;` (Windows) or `:` (Linux). If this variable is not found, the working directory will be used as a default. |
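The `CUDA_PATH` / `CUDA_HOME` lookup described in the first row of this table can be sketched as below. This is a minimal illustration assuming a C++17 host compiler; `resolveNVRTCIncludeDir` is a hypothetical helper name used for illustration, not FLAME GPU's actual implementation.

```cpp
#include <cstdio>
#include <cstdlib>
#include <filesystem>
#include <initializer_list>
#include <optional>

// Resolve the NVRTC include directory the way the table above describes:
// prefer CUDA_PATH, then fall back to CUDA_HOME. Hypothetical helper,
// for illustration only.
std::optional<std::filesystem::path> resolveNVRTCIncludeDir() {
    for (const char* var : {"CUDA_PATH", "CUDA_HOME"}) {
        if (const char* root = std::getenv(var)) {
            const std::filesystem::path include = std::filesystem::path(root) / "include";
            if (std::filesystem::exists(include / "nvrtc.h")) {
                return include;  // e.g. /usr/local/cuda-11.2/include
            }
        }
    }
    return std::nullopt;  // Caller would report that the RTC headers could not be located.
}

int main() {
    if (const auto inc = resolveNVRTCIncludeDir()) {
        std::printf("NVRTC headers found at: %s\n", inc->string().c_str());
    } else {
        std::printf("Set CUDA_PATH (or CUDA_HOME) to the root of the CUDA Toolkit.\n");
    }
    return 0;
}
```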
@@ -367,7 +367,4 @@ For a full list of known issues pleases see the [Issue Tracker](https://github.c
+ Warnings and a loss of performance due to hash collisions in device code ([#356](https://github.com/FLAMEGPU/FLAMEGPU2/issues/356))
+ Multiple known areas where performance can be improved (e.g. [#449](https://github.com/FLAMEGPU/FLAMEGPU2/issues/449), [#402](https://github.com/FLAMEGPU/FLAMEGPU2/issues/402))
-+ Windows/MSVC builds using CUDA 11.0 may encounter errors when performing incremental builds if the static library has been recompiled. If this presents itself, re-save any `.cu` file in your executable producing project and re-trigger the build.
-+ Debug builds under linux with CUDA 11.0 may encounter cuda errors during `validateIDCollisions`. Consider using an alternate CUDA version if this is required ([#569](https://github.com/FLAMEGPU/FLAMEGPU2/issues/569)).
-+ CUDA 11.0 with GCC 9 may encounter a segmentation fault during compilation of the test suite. Consider using GCC 8 with CUDA 11.0.
+ CUDA 12.2+ suffers from poor RTC compilation times, to be fixed in a future release. ([#1118](https://github.com/FLAMEGPU/FLAMEGPU2/issues/1118)).
diff --git a/cmake/CUDAArchitectures.cmake b/cmake/CUDAArchitectures.cmake
index 27963a332..84eea2418 100644
--- a/cmake/CUDAArchitectures.cmake
+++ b/cmake/CUDAArchitectures.cmake
@@ -215,7 +215,7 @@ function(flamegpu_set_cuda_architectures)
endif()
message(AUTHOR_WARNING
" ${CMAKE_CURRENT_FUNCTION} failed to parse NVCC --help output for default architecture generation\n"
- " Using ${default_archs} based on CUDA 11.0 to 11.8."
+ " Using ${default_archs} based on CUDA 11.2 to 11.8."
)
endif()
# We actually want real for each arch, then virtual for the final, but only for library-provided values, to only embed one arch worth of ptx.
diff --git a/cmake/common.cmake b/cmake/common.cmake
index f1ed88a17..408ac0a0d 100644
--- a/cmake/common.cmake
+++ b/cmake/common.cmake
@@ -134,22 +134,26 @@ if(FLAMEGPU_ENABLE_NVTX)
endif()
endif(FLAMEGPU_ENABLE_NVTX)
-# Set the minimum supported cuda version, if not already set. Currently duplicated due to docs only build logic.
-# CUDA 11.0 is current minimum cuda version, and the minimum supported
+# Set the minimum usable (unsupported) and minimum supported CUDA versions, if not already set.
+# Currently duplicated due to docs only build logic.
+# CUDA 11.0 (Linux) / 11.1 (Windows) is the current minimum (unsupported but usable) CUDA version
if(NOT DEFINED MINIMUM_CUDA_VERSION)
set(MINIMUM_CUDA_VERSION 11.0)
+ if(WIN32)
+ set(MINIMUM_CUDA_VERSION 11.1)
+ endif()
# Require a minimum cuda version
if(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_CUDA_VERSION})
message(FATAL_ERROR "CUDA version must be at least ${MINIMUM_CUDA_VERSION}")
endif()
endif()
-# CUDA 11.0 is the current minimum supported version.
+# CUDA 11.2 is the current minimum supported version.
if(NOT DEFINED MINIMUM_SUPPORTED_CUDA_VERSION)
- set(MINIMUM_SUPPORTED_CUDA_VERSION 11.0)
+ set(MINIMUM_SUPPORTED_CUDA_VERSION 11.2)
# Warn on deprecated cuda version.
# If the CUDA compiler is atleast the minimum deprecated version, but less than the minimum actually supported version, issue a dev warning.
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL ${MINIMUM_CUDA_VERSION} AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_SUPPORTED_CUDA_VERSION})
- message(DEPRECATION "Support for CUDA verisons <= ${MINIMUM_SUPPORTED_CUDA_VERSION} is deprecated and will be removed in a future release.")
+ message(WARNING "CUDA versions >= ${MINIMUM_CUDA_VERSION} && < ${MINIMUM_SUPPORTED_CUDA_VERSION} are unsupported but may work on some platforms.")
endif()
endif()
diff --git a/src/flamegpu/detail/compute_capability.cu b/src/flamegpu/detail/compute_capability.cu
index cfd8f3e12..2f38c69c1 100644
--- a/src/flamegpu/detail/compute_capability.cu
+++ b/src/flamegpu/detail/compute_capability.cu
@@ -71,8 +71,8 @@ std::vector<int> compute_capability::getNVRTCSupportedComputeCapabilties() {
}
// If any of the above functions failed, we have no idea what arch's are supported, so assume none are?
return {};
-// Older CUDA's do not support this, but this is simple to hard-code for CUDA 11.0/11.1 (and our deprected CUDA 10.x).
-// CUDA 11.1 suports 35 to 86
+// Older CUDA versions do not support this, but it is simple to hard-code for CUDA 11.0/11.1 (and our CUDA 10.x).
+// CUDA 11.1 supports 35 to 86
#elif (__CUDACC_VER_MAJOR__ == 11) && __CUDACC_VER_MINOR__ == 1
return {35, 37, 50, 52, 53, 60, 61, 62, 70, 72, 75, 80, 86};
// CUDA 11.0 supports 35 to 80
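The hard-coded lists above are only needed because NVRTC did not expose a runtime query for its supported architectures before CUDA 11.2. For the now-minimum supported toolkits, the list can be queried directly via `nvrtcGetNumSupportedArchs` and `nvrtcGetSupportedArchs`. A minimal standalone sketch of that query follows; the wrapper name and structure are illustrative, not the library's exact code.

```cpp
#include <nvrtc.h>

#include <cstdio>
#include <vector>

// Query the compute capabilities that the linked NVRTC can target.
// Requires NVRTC from CUDA 11.2 or newer; older toolkits must fall back
// to a hard-coded list as in the diff above.
std::vector<int> querySupportedArchs() {
    int count = 0;
    if (nvrtcGetNumSupportedArchs(&count) != NVRTC_SUCCESS || count <= 0) {
        return {};  // Unknown: caller falls back to a hard-coded list.
    }
    std::vector<int> archs(count);
    if (nvrtcGetSupportedArchs(archs.data()) != NVRTC_SUCCESS) {
        return {};
    }
    return archs;  // e.g. {35, 37, 50, ..., 86} for CUDA 11.2
}

int main() {
    for (const int arch : querySupportedArchs()) {
        std::printf("sm_%d\n", arch);
    }
    return 0;
}
```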