diff --git a/.github/workflows/CMake.yml b/.github/workflows/CMake.yml index 4e2e6ef79..50e181355 100644 --- a/.github/workflows/CMake.yml +++ b/.github/workflows/CMake.yml @@ -52,16 +52,16 @@ jobs: INDIVIDUAL_EXAMPLE: "game_of_life" # Define constants BUILD_DIR: "build" - BUILD_TESTS: "ON" + FLAMEGPU_BUILD_TESTS: "ON" # Conditional based on matrix via awkward almost ternary - BUILD_SWIG_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} + FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} # Port matrix options to environment, for more portability. CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} PYTHON: ${{ matrix.python}} VISUALISATION: ${{ matrix.VISUALISATION }} CMAKE: ${{ matrix.cmake }} @@ -94,14 +94,14 @@ jobs: echo "CUDAHOSTCXX=/usr/bin/g++-${gcc_version}" >> $GITHUB_ENV - name: Select Python - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON }} # @todo - is some/all of this still required when using select Python? 
- name: Install python dependencies - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} run: | sudo apt-get install python3-venv python3 -m pip install --upgrade wheel build setuptools @@ -147,13 +147,13 @@ jobs: -DCMAKE_BUILD_TYPE="${{ env.CONFIG }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" -DPYTHON3_EXACT_VERSION="${{ env.PYTHON }}" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + -DFLAMEGPU_ENABLE_NVTX="ON" - name: Configure Individual example if: ${{ env.INDIVIDUAL_EXAMPLE != '' }} @@ -163,6 +163,6 @@ jobs: -DCMAKE_BUILD_TYPE="${{ env.CONFIG }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_ENABLE_NVTX="ON" diff --git a/.github/workflows/Docs.yml b/.github/workflows/Docs.yml index c0917c186..64aa3b3e2 100644 --- a/.github/workflows/Docs.yml +++ b/.github/workflows/Docs.yml @@ -46,8 +46,8 @@ jobs: cmake . 
-B "${{ env.BUILD_DIR }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DBUILD_API_DOCUMENTATION="ON" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DFLAMEGPU_BUILD_API_DOCUMENTATION="ON" - name: Docs working-directory: ${{ env.BUILD_DIR }} diff --git a/.github/workflows/Draft-Release.yml b/.github/workflows/Draft-Release.yml index 26e95182a..248292e04 100644 --- a/.github/workflows/Draft-Release.yml +++ b/.github/workflows/Draft-Release.yml @@ -38,11 +38,11 @@ jobs: # CUDA_ARCH values are reduced compared to wheels due to CI memory issues while compiling the test suite. cudacxx: - cuda: "11.8" - cuda_arch: "35 60 80" + cuda_arch: "35;60;90" hostcxx: gcc-9 os: ubuntu-20.04 - cuda: "11.0" - cuda_arch: "35 60 80" + cuda_arch: "35;60;80" hostcxx: gcc-8 os: ubuntu-20.04 python: @@ -61,16 +61,16 @@ jobs: env: # Define constants BUILD_DIR: "build" - BUILD_TESTS: "ON" + FLAMEGPU_BUILD_TESTS: "ON" # Conditional based on matrix via awkward almost ternary - BUILD_SWIG_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} + FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} # Port matrix options to environment, for more portability. CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} PYTHON: ${{ matrix.python}} VISUALISATION: ${{ matrix.VISUALISATION }} @@ -93,14 +93,14 @@ jobs: echo "CUDAHOSTCXX=/usr/bin/g++-${gcc_version}" >> $GITHUB_ENV - name: Select Python - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON }} # @todo - is some/all of this still required when using select Python? 
- name: Install python dependencies - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} run: | sudo apt-get install python3-venv python3 -m pip install --upgrade wheel build setuptools @@ -146,25 +146,25 @@ jobs: -DCMAKE_BUILD_TYPE="${{ env.CONFIG }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" -DPYTHON3_EXACT_VERSION="${{ env.PYTHON }}" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + -DFLAMEGPU_ENABLE_NVTX="ON" - name: Build static library working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --target flamegpu --verbose -j `nproc` - name: Build python wheel - if: ${{ env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --target pyflamegpu --verbose -j `nproc` - name: Build tests - if: ${{ env.BUILD_TESTS == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_TESTS == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --target tests --verbose -j `nproc` @@ -184,11 +184,11 @@ jobs: # CUDA_ARCH values are reduced compared to wheels due to CI memory issues while compiling the test suite. 
cudacxx: - cuda: "11.8.0" - cuda_arch: "35 60 80" + cuda_arch: "35;60;90" hostcxx: "Visual Studio 16 2019" os: windows-2019 - cuda: "11.0.3" - cuda_arch: "35 60 80" + cuda_arch: "35;60;80" hostcxx: "Visual Studio 16 2019" os: windows-2019 python: @@ -207,16 +207,16 @@ jobs: env: # Define constants BUILD_DIR: "build" - BUILD_TESTS: "ON" + FLAMEGPU_BUILD_TESTS: "ON" # Conditional based on matrix via awkward almost ternary - BUILD_SWIG_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} + FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} # Port matrix options to environment, for more portability. CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} PYTHON: ${{ matrix.python}} VISUALISATION: ${{ matrix.VISUALISATION }} @@ -232,7 +232,7 @@ jobs: run: .github\scripts\install_cuda_windows.ps1 - name: Select Python - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON }} @@ -244,26 +244,26 @@ jobs: -G "${{ env.HOSTCXX }}" -A x64 -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" -DPython3_ROOT_DIR="$(dirname $(which python))" -DPython3_EXECUTABLE="$(which python)" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + 
-DFLAMEGPU_ENABLE_NVTX="ON" - name: Build static library working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --config ${{ env.CONFIG }} --target flamegpu --verbose -j `nproc` - name: Build python wheel - if: ${{ env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --config ${{ env.CONFIG }} --target pyflamegpu --verbose -j `nproc` - name: Build tests - if: ${{ env.BUILD_TESTS == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_TESTS == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --config ${{ env.CONFIG }} --target tests --verbose -j `nproc` @@ -284,11 +284,11 @@ jobs: matrix: cudacxx: - cuda: "11.2" - cuda_arch: "35 52 60 70 80" + cuda_arch: "35;52;60;70;80" hostcxx: devtoolset-9 os: ubuntu-20.04 - cuda: "11.0" - cuda_arch: "35 52 60 70 80" + cuda_arch: "35;52;60;70;80" hostcxx: devtoolset-8 os: ubuntu-20.04 python: @@ -319,16 +319,16 @@ jobs: ARTIFACT_NAME: wheel-manylinux2014-${{ matrix.cudacxx.cuda }}-${{matrix.python}}-${{ matrix.VISUALISATION }}-${{ matrix.config.name }}-${{ matrix.cudacxx.os }} # Define constants BUILD_DIR: "build" - BUILD_TESTS: "OFF" + FLAMEGPU_BUILD_TESTS: "OFF" # Conditional based on matrix via awkward almost ternary - BUILD_SWIG_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} + FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} # Port matrix options to environment, for more portability. 
CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} PYTHON: ${{ matrix.python}} VISUALISATION: ${{ matrix.VISUALISATION }} @@ -391,18 +391,18 @@ jobs: -DCMAKE_BUILD_TYPE="${{ env.CONFIG }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" -DPYTHON3_EXACT_VERSION="${{ env.PYTHON }}" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + -DFLAMEGPU_ENABLE_NVTX="ON" -DGLEW_USE_STATIC_LIBS="${{ env.USE_STATIC_GLEW }}" -DOpenGL_GL_PREFERENCE:STRING=LEGACY - name: Build python wheel - if: ${{ env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . 
--target pyflamegpu --verbose -j `nproc` @@ -421,7 +421,7 @@ jobs: # Upload wheel artifacts to the job on GHA, with a short retention # Use a unique name per job matrix run, to avoid a risk of corruption according to the docs (although it should work with unique filenames) - name: Upload Wheel Artifacts - if: ${{ env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/upload-artifact@v3 with: name: ${{ env.ARTIFACT_NAME }} @@ -440,11 +440,11 @@ jobs: matrix: cudacxx: - cuda: "11.2.2" - cuda_arch: "35 52 60 70 80" + cuda_arch: "35;52;60;70;80" hostcxx: "Visual Studio 16 2019" os: windows-2019 - cuda: "11.0.3" - cuda_arch: "35 52 60 70 80" + cuda_arch: "35;52;60;70;80" hostcxx: "Visual Studio 16 2019" os: windows-2019 python: @@ -471,16 +471,16 @@ jobs: ARTIFACT_NAME: wheel-windows-${{ matrix.cudacxx.cuda }}-${{matrix.python}}-${{ matrix.VISUALISATION }}-${{ matrix.config.name }}-${{ matrix.cudacxx.os }} # Define constants BUILD_DIR: "build" - BUILD_TESTS: "OFF" + FLAMEGPU_BUILD_TESTS: "OFF" # Conditional based on matrix via awkward almost ternary - BUILD_SWIG_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} + FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} # Port matrix options to environment, for more portability. 
CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} PYTHON: ${{ matrix.python}} VISUALISATION: ${{ matrix.VISUALISATION }} @@ -496,7 +496,7 @@ jobs: run: .github\scripts\install_cuda_windows.ps1 - name: Select Python - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON }} @@ -508,24 +508,24 @@ jobs: -G "${{ env.HOSTCXX }}" -A x64 -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" -DPython3_ROOT_DIR="$(dirname $(which python))" -DPython3_EXECUTABLE="$(which python)" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + -DFLAMEGPU_ENABLE_NVTX="ON" - name: Build python wheel - if: ${{ env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . 
--config ${{ env.CONFIG }} --target pyflamegpu --verbose -j `nproc` # Upload wheel artifacts to the job on GHA, with a short retention # Use a unique name per job matrix run, to avoid a risk of corruption according to the docs (although it should work with unique filenames) - name: Upload Wheel Artifacts - if: ${{env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/upload-artifact@v3 with: name: ${{ env.ARTIFACT_NAME }} diff --git a/.github/workflows/Lint.yml b/.github/workflows/Lint.yml index fc7253bd1..9cead5e67 100644 --- a/.github/workflows/Lint.yml +++ b/.github/workflows/Lint.yml @@ -35,7 +35,7 @@ jobs: env: # Define constants BUILD_DIR: "build" - BUILD_TESTS: "ON" + FLAMEGPU_BUILD_TESTS: "ON" # Port matrix options to environment, for more portability. CUDA: ${{ matrix.cudacxx.cuda }} OS: ${{ matrix.cudacxx.os }} @@ -62,8 +62,8 @@ jobs: cmake . -B "${{ env.BUILD_DIR }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" - name: Lint working-directory: ${{ env.BUILD_DIR }} diff --git a/.github/workflows/Manylinux2014.yml b/.github/workflows/Manylinux2014.yml index e293beccc..3ce2aa30a 100644 --- a/.github/workflows/Manylinux2014.yml +++ b/.github/workflows/Manylinux2014.yml @@ -61,16 +61,16 @@ jobs: ARTIFACT_NAME: wheel-manylinux2014-${{ matrix.cudacxx.cuda }}-${{matrix.python}}-${{ matrix.VISUALISATION }}-${{ matrix.config.name }}-${{ matrix.cudacxx.os }} # Define constants BUILD_DIR: "build" - BUILD_TESTS: "OFF" + FLAMEGPU_BUILD_TESTS: "OFF" # Conditional based on matrix via awkward almost ternary - BUILD_SWIG_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} + FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} # Port matrix options to environment, for more portability. 
CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} PYTHON: ${{ matrix.python}} VISUALISATION: ${{ matrix.VISUALISATION }} @@ -135,18 +135,18 @@ jobs: -DCMAKE_BUILD_TYPE="${{ env.CONFIG }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" -DPYTHON3_EXACT_VERSION="${{ env.PYTHON }}" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + -DFLAMEGPU_ENABLE_NVTX="ON" -DGLEW_USE_STATIC_LIBS="${{ env.USE_STATIC_GLEW }}" -DOpenGL_GL_PREFERENCE:STRING=LEGACY - name: Build python wheel - if: ${{ env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . 
--target pyflamegpu --verbose -j `nproc` @@ -165,7 +165,7 @@ jobs: # Upload wheel artifacts to the job on GHA, with a short retention # Use a unique name per job matrix run, to avoid a risk of corruption according to the docs (although it should work with unique filenames) - name: Upload Wheel Artifacts - if: ${{env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/upload-artifact@v3 with: name: ${{ env.ARTIFACT_NAME }} diff --git a/.github/workflows/Ubuntu.yml b/.github/workflows/Ubuntu.yml index 586cf17ec..2b44b23fd 100644 --- a/.github/workflows/Ubuntu.yml +++ b/.github/workflows/Ubuntu.yml @@ -62,16 +62,16 @@ jobs: INDIVIDUAL_EXAMPLE: "game_of_life" # Define constants BUILD_DIR: "build" - BUILD_TESTS: "ON" + FLAMEGPU_BUILD_TESTS: "ON" # Conditional based on matrix via awkward almost ternary - BUILD_SWIG_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} + FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} # Port matrix options to environment, for more portability. CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} PYTHON: ${{ matrix.python}} VISUALISATION: ${{ matrix.VISUALISATION }} @@ -94,14 +94,14 @@ jobs: echo "CUDAHOSTCXX=/usr/bin/g++-${gcc_version}" >> $GITHUB_ENV - name: Select Python - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON }} # @todo - is some/all of this still required when using select Python? 
- name: Install python dependencies - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} run: | sudo apt-get install python3-venv python3 -m pip install --upgrade wheel build setuptools @@ -154,25 +154,25 @@ jobs: -DCMAKE_BUILD_TYPE="${{ env.CONFIG }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" -DPYTHON3_EXACT_VERSION="${{ env.PYTHON }}" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + -DFLAMEGPU_ENABLE_NVTX="ON" - name: Build static library working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --target flamegpu --verbose -j `nproc` - name: Build python wheel - if: ${{ env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --target pyflamegpu --verbose -j `nproc` - name: Build tests - if: ${{ env.BUILD_TESTS == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_TESTS == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . 
--target tests --verbose -j `nproc` @@ -188,9 +188,9 @@ jobs: -DCMAKE_BUILD_TYPE="${{ env.CONFIG }}" -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_ENABLE_NVTX="ON" - name: Build Individual example if: ${{ env.INDIVIDUAL_EXAMPLE != '' }} diff --git a/.github/workflows/Windows-Tests.yml b/.github/workflows/Windows-Tests.yml index 0d8c8c7a9..f42d5f86e 100644 --- a/.github/workflows/Windows-Tests.yml +++ b/.github/workflows/Windows-Tests.yml @@ -45,15 +45,15 @@ jobs: env: # Define constants BUILD_DIR: "build" - BUILD_TESTS: "ON" - BUILD_SWIG_PYTHON: "OFF" + FLAMEGPU_BUILD_TESTS: "ON" + FLAMEGPU_BUILD_PYTHON: "OFF" # Port matrix options to environment, for more portability. CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} VISUALISATION: ${{ matrix.VISUALISATION }} steps: @@ -77,19 +77,19 @@ jobs: -G "${{ env.HOSTCXX }}" -A x64 -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + -DFLAMEGPU_ENABLE_NVTX="ON" - name: Build static library working-directory: ${{ env.BUILD_DIR }} run: cmake --build . 
--config ${{ env.CONFIG }} --target flamegpu --verbose -j `nproc` - name: Build tests - if: ${{ env.BUILD_TESTS == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_TESTS == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --config ${{ env.CONFIG }} --target tests --verbose -j `nproc` diff --git a/.github/workflows/Windows.yml b/.github/workflows/Windows.yml index 0cda6c143..02baceb78 100644 --- a/.github/workflows/Windows.yml +++ b/.github/workflows/Windows.yml @@ -65,16 +65,16 @@ jobs: # Define constants BUILD_DIR: "build" # Tests are off for regular builds, but if initiated by a workflow dispatch then they are enabled. Horribly json terriarry to achieve - BUILD_TESTS: ${{ fromJSON('{true:"ON",false:"OFF"}')[github.event_name == 'workflow_dispatch' && matrix.VISUALISATION == 'OFF'] }} + FLAMEGPU_BUILD_TESTS: ${{ fromJSON('{true:"ON",false:"OFF"}')[github.event_name == 'workflow_dispatch' && matrix.VISUALISATION == 'OFF'] }} # Conditional based on matrix via awkward almost ternary - BUILD_SWIG_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} + FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }} # Port matrix options to environment, for more portability. 
CUDA: ${{ matrix.cudacxx.cuda }} CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }} HOSTCXX: ${{ matrix.cudacxx.hostcxx }} OS: ${{ matrix.cudacxx.os }} CONFIG: ${{ matrix.config.config }} - SEATBELTS: ${{ matrix.config.SEATBELTS }} + FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }} PYTHON: ${{ matrix.python}} VISUALISATION: ${{ matrix.VISUALISATION }} @@ -91,13 +91,13 @@ jobs: run: .github\scripts\install_cuda_windows.ps1 - name: Select Python - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/setup-python@v4 with: python-version: ${{ env.PYTHON }} - name: Install python dependencies - if: ${{ env.PYTHON != '' && env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.PYTHON != '' && env.FLAMEGPU_BUILD_PYTHON == 'ON' }} run: | python3 -m pip install --upgrade wheel build setuptools @@ -111,28 +111,28 @@ jobs: -G "${{ env.HOSTCXX }}" -A x64 -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DBUILD_TESTS="${{ env.BUILD_TESTS }}" - -DBUILD_SWIG_PYTHON="${{ env.BUILD_SWIG_PYTHON }}" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}" + -DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}" -DPython3_ROOT_DIR="$(dirname $(which python))" -DPython3_EXECUTABLE="$(which python)" - -DVISUALISATION="${{ env.VISUALISATION }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}" + -DFLAMEGPU_ENABLE_NVTX="ON" - name: Build static library working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --config ${{ env.CONFIG }} --target flamegpu --verbose -j `nproc` - name: Build python wheel - if: ${{ env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . 
--config ${{ env.CONFIG }} --target pyflamegpu --verbose -j `nproc` # Upload wheel artifacts to the job on GHA, with a short retention # Use a unique name per job matrix run, to avoid a risk of corruption according to the docs (although it should work with unique filenames) - name: Upload Wheel Artifacts - if: ${{env.BUILD_SWIG_PYTHON == 'ON' }} + if: ${{env.FLAMEGPU_BUILD_PYTHON == 'ON' }} uses: actions/upload-artifact@v3 with: name: ${{ env.ARTIFACT_NAME }} @@ -141,7 +141,7 @@ jobs: retention-days: 5 - name: Build tests - if: ${{ env.BUILD_TESTS == 'ON' }} + if: ${{ env.FLAMEGPU_BUILD_TESTS == 'ON' }} working-directory: ${{ env.BUILD_DIR }} run: cmake --build . --config ${{ env.CONFIG }} --target tests --verbose -j `nproc` @@ -157,9 +157,9 @@ jobs: -G "${{ env.HOSTCXX }}" -A x64 -Werror=dev -DCMAKE_WARN_DEPRECATED="OFF" - -DWARNINGS_AS_ERRORS="ON" - -DCUDA_ARCH="${{ env.CUDA_ARCH }}" - -DUSE_NVTX="ON" + -DFLAMEGPU_WARNINGS_AS_ERRORS="ON" + -DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}" + -DFLAMEGPU_ENABLE_NVTX="ON" - name: Build Individual example if: ${{ env.INDIVIDUAL_EXAMPLE != '' }} diff --git a/CMakeLists.txt b/CMakeLists.txt index aef5b831e..8dc836b80 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,8 +1,20 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) +# Include and call some CMake to record initial state of CMAKE_CUDA_ARCHITECTURES for later use +include(${CMAKE_CURRENT_LIST_DIR}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures() + +# Declare the project with no languages project(FLAMEGPU LANGUAGES NONE) +# Detect if this is the top-level project or if it has been included by add_subdirectory. 
PROJECT_IS_TOP_LEVEL requires CMake 3.21 +if ("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_LIST_DIR}") + set(FLAMEGPU_PROJECT_IS_TOP_LEVEL ON) +else() + set(FLAMEGPU_PROJECT_IS_TOP_LEVEL OFF) +endif() + # Find the root directory get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR} REALPATH) @@ -19,9 +31,9 @@ set(DOCUMENTATION_ONLY_BUILD OFF) # Check compiler functionailty, as there are known issues in some cases, but version checks are not always sufficient. include(./cmake/CheckCompilerFunctionality.cmake) # If this returned a negative result, set the docs only build. -if(NOT CheckCompilerFunctionality_RESULT) +if(NOT FLAMEGPU_CheckCompilerFunctionality_RESULT) set(DOCUMENTATION_ONLY_BUILD ON) - message(STATUS "Documentation-only build: due to Compiler compatability version. See prior warnings.") + message(STATUS "Documentation-only build: due to compiler compatibility version. See prior warnings.") endif() # If the checks passed, enable CXX and CUDA languages @@ -33,6 +45,8 @@ endif() check_language(CUDA) if(CMAKE_CUDA_COMPILER) enable_language(CUDA) + # Set CMAKE_CUDA_ARCHITECTURES correctly, now CUDA is enabled. 
+ flamegpu_set_cuda_architectures() endif() # Set the minimum supported version of CUDA for FLAME GPU, currently 11.0 @@ -55,8 +69,8 @@ endif() if(DOCUMENTATION_ONLY_BUILD) # Not able to build code, so just make docs include(./cmake/dependencies/doxygen.cmake) - if(${BUILD_API_DOCUMENTATION}) - create_doxygen_target("${FLAMEGPU_ROOT}" "${CMAKE_CURRENT_BINARY_DIR}" "") + if(${FLAMEGPU_BUILD_API_DOCUMENTATION}) + flamegpu_create_doxygen_target("${FLAMEGPU_ROOT}" "${CMAKE_CURRENT_BINARY_DIR}" "") endif() return() endif() @@ -64,54 +78,33 @@ endif() # include for dependent modules include(CMakeDependentOption) -# Option to enable/disable building the static library -option(BUILD_FLAMEGPU "Enable building FLAMEGPU library" ON) - -# Option to enable/disable building the static library -option(VISUALISATION "Enable visualisation support" OFF) - -if(NOT NO_EXAMPLES) - # Option to enable building all examples - option(BUILD_ALL_EXAMPLES "Enable building examples" ON) - - # Options to enable building individual examples, if BUILD_ALL_EXAMPLES is off. 
- option(BUILD_EXAMPLE_BOIDS_BRUTEFORCE "Enable building examples/boids_bruteforce" OFF) - option(BUILD_EXAMPLE_BOIDS_SPATIAL3D "Enable building examples/boids_spatial3D" OFF) - option(BUILD_EXAMPLE_BOIDS_RTC_BRUTEFORCE "Enable building examples/boids_rtc_bruteforce" OFF) - option(BUILD_EXAMPLE_BOIDS_RTC_SPATIAL3D "Enable building examples/boids_rtc_spatial3D" OFF) - option(BUILD_EXAMPLE_CIRCLES_BRUTEFORCE "Enable building examples/circles_bruteforcespatial3D" OFF) - option(BUILD_EXAMPLE_CIRCLES_SPATIAL3D "Enable building examples/circles_spatial3D" OFF) - option(BUILD_EXAMPLE_GAME_OF_LIFE "Enable building examples/game_of_life" OFF) - option(BUILD_EXAMPLE_HOST_FUNCTIONS "Enable building examples/host_functions" OFF) - option(BUILD_EXAMPLE_ENSEMBLE "Enable building examples/ensemble" OFF) - option(BUILD_EXAMPLE_SUGARSCAPE "Enable building examples/sugarscape" OFF) - option(BUILD_EXAMPLE_DIFFUSION "Enable building examples/diffusion" OFF) -endif() - -option(BUILD_SWIG_PYTHON "Enable python bindings via SWIG" OFF) -# By default, build into a venv, otherwise it is set to ON anyway. -cmake_dependent_option(BUILD_SWIG_PYTHON_VENV "Enable the use of a venv for swig/python installation" ON "BUILD_SWIG_PYTHON" ON) -# Add option to embed the cuda version in the python local version, for pseudo downstream releases / to differentiate wheel files. -cmake_dependent_option(BUILD_SWIG_PYTHON_LOCALVERSION "Embed CUDA version for the build in the local information" ON "BUILD_SWIG_PYTHON" ON) - +# Option to enable building all examples, defaults to ON if FLAMEGPU is the top level cmake, else OFF +cmake_dependent_option(FLAMEGPU_BUILD_ALL_EXAMPLES "Enable building all FLAMEGPU examples" ON "FLAMEGPU_PROJECT_IS_TOP_LEVEL" OFF) + +# Options to enable building individual examples, if FLAMEGPU_BUILD_ALL_EXAMPLES is off. 
+# Dependent options hide these from the CMake GUI if FLAMEGPU_BUILD_ALL_EXAMPLES is on, or if it is not the top level project +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_BOIDS_BRUTEFORCE "Enable building examples/boids_bruteforce" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_BOIDS_SPATIAL3D "Enable building examples/boids_spatial3D" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_BOIDS_RTC_BRUTEFORCE "Enable building examples/boids_rtc_bruteforce" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_BOIDS_RTC_SPATIAL3D "Enable building examples/boids_rtc_spatial3D" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_CIRCLES_BRUTEFORCE "Enable building examples/circles_bruteforce" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_CIRCLES_SPATIAL3D "Enable building examples/circles_spatial3D" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_GAME_OF_LIFE "Enable building examples/game_of_life" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_HOST_FUNCTIONS "Enable building examples/host_functions" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_ENSEMBLE "Enable building examples/ensemble" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_SUGARSCAPE "Enable building examples/sugarscape" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) +cmake_dependent_option(FLAMEGPU_BUILD_EXAMPLE_DIFFUSION "Enable 
building examples/diffusion" OFF "FLAMEGPU_PROJECT_IS_TOP_LEVEL; NOT FLAMEGPU_BUILD_ALL_EXAMPLES" OFF) + +option(FLAMEGPU_BUILD_PYTHON "Enable python bindings via SWIG" OFF) # Option to enable/disable tests. -option(BUILD_TESTS "Enable building tests" OFF) - -# Option to enable the development tests target, test_dev. This is independant from build_tests -option(BUILD_TESTS_DEV "Enable building test_dev" OFF) - -# Option to enable GTEST_DISCOVER if tests or tests_dev are enabled. Defaults to off due to runtime increase -cmake_dependent_option(USE_GTEST_DISCOVER "Enable GTEST_DISCOVER for more detailed ctest output without -VV. This dramitically increases test suite runtime to CUDA context initialisation." OFF "BUILD_TESTS OR BUILD_TESTS_DEV" OFF) +option(FLAMEGPU_BUILD_TESTS "Enable building tests" OFF) -# Option to enable/disable NVTX markers for improved profiling -option(USE_NVTX "Build with NVTX markers enabled" OFF) - -# Option to enable/disable logging of dynamic RTC files to disk -option(EXPORT_RTC_SOURCES "Export RTC source files to disk at runtime" OFF) +# Option to enable the development tests target, test_dev. This is independent from FLAMEGPU_BUILD_TESTS +option(FLAMEGPU_BUILD_TESTS_DEV "Enable building test_dev" OFF) # If a mutli-config generator is beign used, and swig / python bindings are enabled, then CMake must be >= 3.20 not >= 3.18 due to cmake limitations. -if(FLAMEGPU_BUILD_PYTHON AND "${CMAKE_VERSION}" VERSION_LESS "3.20") get_property(isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if(${isMultiConfig}) # Multiconfig generators (visual studio, eclipse, ninja multi config) do not support genex in byproducts/outptus, but these are required. 
@@ -120,91 +113,56 @@ get_property(isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) unset(isMultiConfig) endif() -# Option to change curand engine used for CUDA random generation -set(CURAND_ENGINE "PHILOX" CACHE STRING "The curand engine to use. Suitable options: \"XORWOW\", \"PHILOX\", \"MRG\"") -mark_as_advanced(CURAND_ENGINE) - -# If CUDA >= 11.2, add an option to enable using NVCC_THREASD -if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2) - option(USE_NVCC_THREADS "Enable parallel compilation of multiple NVCC targets. See NVCC_THREADS for more control." ON) - # The number of threads to use defaults to 2, telling the compiler to use up to 2 threads when multiple arch's are specified. - # Setting this value to 0 would use as many threads as possible. - # In some cases, this may increase total runtime due to excessive thread creation, and lowering the number of threads, or lowering the value of `-j` passed to cmake may be beneficial. - if(NOT DEFINED NVCC_THREADS) - set(NVCC_THREADS "2") - endif() - SET(NVCC_THREADS "${NVCC_THREADS}" CACHE STRING "Number of concurrent threads for building multiple target architectures. 0 indicates use as many as required." FORCE) -endif() - -# Control target CUDA_ARCH to compile for -SET(CUDA_ARCH "${CUDA_ARCH}" CACHE STRING "List of CUDA Architectures to target. E.g. 61;70" FORCE) - -# Define a function to add a lint target. -find_file(CPPLINT NAMES cpplint cpplint.exe) -if(CPPLINT) - # Add a custom target to lint all child projects. Dependencies are specified in child projects. 
- add_custom_target(all_lint) - # Don't trigger this target on ALL_BUILD or Visual Studio 'Rebuild Solution' - set_target_properties(all_lint PROPERTIES EXCLUDE_FROM_ALL TRUE) - # set_target_properties(all_lint PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD TRUE) -endif() +include(${FLAMEGPU_ROOT}/cmake/cpplint.cmake) +flamegpu_create_all_lint_target() # Add the library building subdirectory -if(BUILD_FLAMEGPU) - add_subdirectory(src "${PROJECT_BINARY_DIR}/FLAMEGPU") - # Set as startup project - set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT flamegpu) -endif() +add_subdirectory(src "${PROJECT_BINARY_DIR}/FLAMEGPU") +# Set as startup project +set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT flamegpu) # Add each example -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_BOIDS_BRUTEFORCE) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_BOIDS_BRUTEFORCE) add_subdirectory(examples/boids_bruteforce) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_BOIDS_SPATIAL3D) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_BOIDS_SPATIAL3D) add_subdirectory(examples/boids_spatial3D) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_BOIDS_RTC_BRUTEFORCE) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_BOIDS_RTC_BRUTEFORCE) add_subdirectory(examples/boids_rtc_bruteforce) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_BOIDS_RTC_SPATIAL3D) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_BOIDS_RTC_SPATIAL3D) add_subdirectory(examples/boids_rtc_spatial3D) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_CIRCLES_BRUTEFORCE) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_CIRCLES_BRUTEFORCE) add_subdirectory(examples/circles_bruteforce) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_CIRCLES_SPATIAL3D) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_CIRCLES_SPATIAL3D) add_subdirectory(examples/circles_spatial3D) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_GAME_OF_LIFE) 
+if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_GAME_OF_LIFE) add_subdirectory(examples/game_of_life) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_HOST_FUNCTIONS) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_HOST_FUNCTIONS) add_subdirectory(examples/host_functions) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_ENSEMBLE) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_ENSEMBLE) add_subdirectory(examples/ensemble) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_SUGARSCAPE) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_SUGARSCAPE) add_subdirectory(examples/sugarscape) endif() -if(BUILD_ALL_EXAMPLES OR BUILD_EXAMPLE_DIFFUSION) +if(FLAMEGPU_BUILD_ALL_EXAMPLES OR FLAMEGPU_BUILD_EXAMPLE_DIFFUSION) add_subdirectory(examples/diffusion) endif() # Add the tests directory (if required) -if(BUILD_TESTS OR BUILD_TESTS_DEV) +if(FLAMEGPU_BUILD_TESTS OR FLAMEGPU_BUILD_TESTS_DEV) # Enable Ctest enable_testing() # Add the tests subdirectory add_subdirectory(tests) endif() -if(BUILD_SWIG_PYTHON) +if(FLAMEGPU_BUILD_PYTHON) add_subdirectory(swig) endif() - -# Put all_lint within Lint filter -CMAKE_SET_TARGET_FOLDER(all_lint "Lint") - - -# message("Generated with CMAKE_BUILD_TYPE types: ${CMAKE_BUILD_TYPE}") -# message("Generated with config types: ${CMAKE_CONFIGURATION_TYPES}") diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 74fd2e2d6..e7a0e8217 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,7 +31,7 @@ Please adhere to the [coding conventions](#Coding-Conventions) used throughout t Before merging pull requests are required to pass all continuous integration: -* The full codebase should build with the `WARNINGS_AS_ERRORS` option enabled under the provided CMake configuration, on both Windows and Linux. +* The full codebase should build with the `FLAMEGPU_WARNINGS_AS_ERRORS` option enabled under the provided CMake configuration, on both Windows and Linux. * The full test suite should pass. 
* `cpplint` should report no issues, using `CPPLINT.cfg` found in the root of the project. diff --git a/README.md b/README.md index 20dfe50ee..1770dc764 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,7 @@ Building via CMake is a three step process, with slight differences depending on 1. Create a build directory for an out-of tree build 2. Configure CMake into the build directory + Using the CMake GUI or CLI tools - + Specifying build options such as the CUDA Compute Capabilities to target, the inclusion of Visualisation or Python components, or performance impacting features such as `SEATBELTS`. See [CMake Configuration Options](#CMake-Configuration-Options) for details of the available configuration options + + Specifying build options such as the CUDA Compute Capabilities to target, the inclusion of Visualisation or Python components, or performance impacting features such as `FLAMEGPU_SEATBELTS`. See [CMake Configuration Options](#CMake-Configuration-Options) for details of the available configuration options + CMake will automatically find and select compilers, libraries and python interpreters based on current environmental variables and default locations. See [Mastering CMake](https://cmake.org/cmake/help/book/mastering-cmake/chapter/Getting%20Started.html#specifying-the-compiler-to-cmake) for more information. + Python dependencies must be installed in the selected python environment. If needed you can instruct CMake to use a specific python implementation using the `Python_ROOT_DIR` and `Python_Executable` CMake options at configure time. 3. Build compilation targets using the configured build system @@ -110,7 +110,7 @@ For example, to configure CMake for `Release` builds, for consumer Pascal GPUs ( mkdir -p build && cd build # Configure CMake from the command line passing configure-time options. -cmake .. -DCMAKE_BUILD_TYPE=Release -DCUDA_ARCH=61 -DBUILD_SWIG_PYTHON=ON +cmake .. 
-DCMAKE_BUILD_TYPE=Release -DCMAKE_CUDA_ARCHITECTURES=61 -DFLAMEGPU_BUILD_PYTHON=ON # Build the required targets. In this case all targets cmake --build . --target flamegpu boids_bruteforce -j 8 @@ -133,7 +133,7 @@ mkdir build cd build REM Configure CMake from the command line, specifying the -A and -G options. Alternatively use the GUI -cmake .. -A x64 -G "Visual Studio 16 2019" -DCUDA_ARCH=61 -DBUILD_SWIG_PYTHON=ON +cmake .. -A x64 -G "Visual Studio 16 2019" -DCMAKE_CUDA_ARCHITECTURES=61 -DFLAMEGPU_BUILD_PYTHON=ON REM You can then open Visual Studio manually from the .sln file, or via: cmake --open . @@ -151,42 +151,44 @@ I.e. to configure and build `game_of_life` example in release mode from the comm cd examples/game_of_life mkdir -p build cd build -cmake .. -DCMAKE_BUILD_TYPE=Release -DCUDA_ARCH=61 +cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_CUDA_ARCHITECTURES=61 cmake --build . --target all ``` #### CMake Configuration Options -| Option | Value | Description | -| --------------------------------- | ----------------- | ---------------------------------------------------------------------------------------------------------- | -| `CMAKE_BUILD_TYPE` | `Release`/`Debug`/`MinSizeRel`/`RelWithDebInfo` | Select the build configuration for single-target generators such as `make` | -| `SEATBELTS` | `ON`/`OFF` | Enable / Disable additional runtime checks which harm performance but increase usability. Default `ON` | -| `CUDA_ARCH` | `"52 60 70 80"` | Select [CUDA Compute Capabilities](https://developer.nvidia.com/cuda-gpus) to build/optimise for, as a space or `;` separated list. Defaults to `""` | -| `BUILD_SWIG_PYTHON` | `ON`/`OFF` | Enable Python target `pyflamegpu` via Swig. Default `OFF`. Python packages `setuptools`, `build` & `wheel` required | -| `BUILD_SWIG_PYTHON_VENV` | `ON`/`OFF` | Use a python `venv` when building the python Swig target. Default `ON`. Python package `venv` required | -| `BUILD_TESTS` | `ON`/`OFF` | Build the C++/CUDA test suite. 
Default `OFF`. | -| `BUILD_TESTS_DEV` | `ON`/`OFF` | Build the reduced-scope development test suite. Default `OFF` | -| `USE_GTEST_DISCOVER` | `ON`/`OFF` | Run individual CUDA C++ tests as independent `ctest` tests. This dramatically increases test suite runtime. Default `OFF`. | -| `VISUALISATION` | `ON`/`OFF` | Enable Visualisation. Default `OFF`. | -| `VISUALISATION_ROOT` | `path/to/vis` | Provide a path to a local copy of the visualisation repository. | -| `USE_NVTX` | `ON`/`OFF` | Enable NVTX markers for improved profiling. Default `OFF` | -| `WARNINGS_AS_ERRORS` | `ON`/`OFF` | Promote compiler/tool warnings to errors are build time. Default `OFF` | -| `EXPORT_RTC_SOURCES` | `ON`/`OFF` | At runtime, export dynamic RTC files to disk. Useful for debugging RTC models. Default `OFF` | -| `RTC_DISK_CACHE` | `ON`/`OFF` | Enable/Disable caching of RTC functions to disk. Default `ON`. | -| `VERBOSE_PTXAS` | `ON`/`OFF` | Enable verbose PTXAS output during compilation. Default `OFF`. | -| `CURAND_ENGINE` | `XORWOW`/`PHILOX`/`MRG` | Select the CUDA random engine. Default `XORWOW` | -| `USE_GLM` | `ON`/`OFF` | Experimental feature for GLM type support in RTC models. Default `OFF`. | -| `FLAMEGPU_SHARE_USAGE_STATISTICS` | `ON`/`OFF` | Share usage statistics ([telemetry](https://docs.flamegpu.com/guide/telemetry)) to support evidencing usage/impact of the software. Default `ON`. | -| `FLAMEGPU_TELEMETRY_SUPPRESS_NOTICE` | `ON`/`OFF` | Suppress notice encouraging telemetry to be enabled, which is emitted once per binary execution if telemetry is disabled. Defaults to `OFF`, or the value of a system environment variable of the same name. | -| `FLAMEGPU_TELEMETRY_TEST_MODE` | `ON`/`OFF` | Submit telemetry values to the test mode of TelemetryDeck. Intended for use during development of FLAMEGPU rather than use. 
Defaults to `OFF`, or the value of a system environment variable of the same name.| +| Option | Value | Description | +| -------------------------------------| --------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `CMAKE_BUILD_TYPE` | `Release` / `Debug` / `MinSizeRel` / `RelWithDebInfo` | Select the build configuration for single-target generators such as `make` | +| `CMAKE_CUDA_ARCHITECTURES` | e.g `60`, `"60;70"` | [CUDA Compute Capabilities][cuda-CC] to build/optimise for, as a `;` separated list. See [CMAKE_CUDA_ARCHITECTURES][cmake-CCA]. Defaults to `all-major` or equivalent. Alternatively use the `CUDAARCHS` environment variable. | +| `FLAMEGPU_SEATBELTS` | `ON`/`OFF` | Enable / Disable additional runtime checks which harm performance but increase usability. Default `ON` | +| `FLAMEGPU_BUILD_PYTHON` | `ON`/`OFF` | Enable Python target `pyflamegpu` via Swig. Default `OFF`. Python packages `setuptools`, `build` & `wheel` required | +| `FLAMEGPU_BUILD_PYTHON_VENV` | `ON`/`OFF` | Use a python `venv` when building the python Swig target. Default `ON`. Python package `venv` required | +| `FLAMEGPU_BUILD_TESTS` | `ON`/`OFF` | Build the C++/CUDA test suite. Default `OFF`. | +| `FLAMEGPU_BUILD_TESTS_DEV` | `ON`/`OFF` | Build the reduced-scope development test suite. Default `OFF` | +| `FLAMEGPU_ENABLE_GTEST_DISCOVER` | `ON`/`OFF` | Run individual CUDA C++ tests as independent `ctest` tests. This dramatically increases test suite runtime. Default `OFF`. | +| `FLAMEGPU_VISUALISATION` | `ON`/`OFF` | Enable Visualisation. Default `OFF`. | +| `FLAMEGPU_VISUALISATION_ROOT` | `path/to/vis` | Provide a path to a local copy of the visualisation repository. | +| `FLAMEGPU_ENABLE_NVTX` | `ON`/`OFF` | Enable NVTX markers for improved profiling. Default `OFF` | +| `FLAMEGPU_WARNINGS_AS_ERRORS` | `ON`/`OFF` | Promote compiler/tool warnings to errors are build time. 
Default `OFF` | +| `FLAMEGPU_RTC_EXPORT_SOURCES` | `ON`/`OFF` | At runtime, export dynamic RTC files to disk. Useful for debugging RTC models. Default `OFF` | +| `FLAMEGPU_RTC_DISK_CACHE` | `ON`/`OFF` | Enable/Disable caching of RTC functions to disk. Default `ON`. | +| `FLAMEGPU_VERBOSE_PTXAS` | `ON`/`OFF` | Enable verbose PTXAS output during compilation. Default `OFF`. | +| `FLAMEGPU_CURAND_ENGINE` | `XORWOW` / `PHILOX` / `MRG` | Select the CUDA random engine. Default `XORWOW` | +| `FLAMEGPU_USE_GLM` | `ON`/`OFF` | Experimental feature for GLM type support in RTC models. Default `OFF`. | +| `FLAMEGPU_SHARE_USAGE_STATISTICS` | `ON`/`OFF` | Share usage statistics ([telemetry](https://docs.flamegpu.com/guide/telemetry)) to support evidencing usage/impact of the software. Default `ON`. | +| `FLAMEGPU_TELEMETRY_SUPPRESS_NOTICE` | `ON`/`OFF` | Suppress notice encouraging telemetry to be enabled, which is emitted once per binary execution if telemetry is disabled. Defaults to `OFF`, or the value of a system environment variable of the same name. | +| `FLAMEGPU_TELEMETRY_TEST_MODE` | `ON`/`OFF` | Submit telemetry values to the test mode of TelemetryDeck. Intended for use during development of FLAMEGPU rather than use. Defaults to `OFF`, or the value of a system environment variable of the same name.| +[cuda-CC]: https://developer.nvidia.com/cuda-gpus +[cmake-CCA]: https://cmake.org/cmake/help/latest/prop_tgt/CUDA_ARCHITECTURES.html + For a list of available CMake configuration options, run the following from the `build` directory: ```bash @@ -203,8 +205,8 @@ cmake -LH .. 
| `flamegpu` | Build FLAME GPU static library | | `pyflamegpu` | Build the python bindings for FLAME GPU | | `docs` | The FLAME GPU API documentation (if available) | -| `tests` | Build the CUDA C++ test suite, if enabled by `BUILD_TESTS=ON` | -| `tests_dev` | Build the CUDA C++ test suite, if enabled by `BUILD_TESTS_DEV=ON` | +| `tests` | Build the CUDA C++ test suite, if enabled by `FLAMEGPU_BUILD_TESTS=ON` | +| `tests_dev` | Build the CUDA C++ test suite, if enabled by `FLAMEGPU_BUILD_TESTS_DEV=ON` | | `` | Each individual model has it's own target. I.e. `boids_bruteforce` corresponds to `examples/boids_bruteforce` | | `lint_` | Lint the `` target. I.e. `lint_flamegpu` will lint the `flamegpu` target | @@ -237,15 +239,15 @@ Note, it may be necessary to change the configuration as the properties dialog m Several environmental variables are used or required by FLAME GPU 2. -+ `CUDA_PATH` - Required when using RunTime Compilation (RTC), pointing to the root of the CUDA Toolkit where NVRTC resides. - + i.e. `/usr/local/cuda-11.0/` or `C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.0`. - + Alternatively `CUDA_HOME` may be used if `CUDA_PATH` was not set. -+ `FLAMEGPU_INC_DIR` - When RTC compilation is required, if the location of the `include` directory cannot be found it must be specified using the `FLAMEGPU_INC_DIR` environment variable. -+ `FLAMEGPU_TMP_DIR` - FLAME GPU may cache some files to a temporary directory on the system, using the temporary directory returned by [`std::filesystem::temp_directory_path`](https://en.cppreference.com/w/cpp/filesystem/temp_directory_path). The location can optionally be overridden using the `FLAMEGPU_TMP_DIR` environment variable. -+ `FLAMEGPU_RTC_INCLUDE_DIRS` - A list of include directories that should be provided to the RTC compiler, these should be separated using `;` (Windows) or `:` (Linux). If this variable is not found, the working directory will be used as a default. 
-+ `FLAMEGPU_SHARE_USAGE_STATISTICS` - Enable / Disable sending of telemetry data, when set to `ON` or `OFF` respectively. -+ `FLAMEGPU_TELEMETRY_SUPPRESS_NOTICE` - Enable / Disable a once per execution notice encouraging the use of telemetry, if telemetry is disabled, when set to `ON` or `OFF` respectively. -+ `FLAMEGPU_TELEMETRY_TEST_MODE` - Enable / Disable sending telemetry data to a test endpoint, for FLAMEGPU develepoment to separate user statistics from developer statistics. Set to `ON` or `OFF`. +| Environment Variable | Description | +|--------------------------------------|-------------| +| `CUDA_PATH` | Required when using RunTime Compilation (RTC), pointing to the root of the CUDA Toolkit where NVRTC resides.
i.e. `/usr/local/cuda-11.0/` or `C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.0`.
Alternatively `CUDA_HOME` may be used if `CUDA_PATH` was not set. | +| `FLAMEGPU_INC_DIR` | When RTC compilation is required, if the location of the `include` directory cannot be found it must be specified using the `FLAMEGPU_INC_DIR` environment variable. | +| `FLAMEGPU_TMP_DIR` | FLAME GPU may cache some files to a temporary directory on the system, using the temporary directory returned by [`std::filesystem::temp_directory_path`](https://en.cppreference.com/w/cpp/filesystem/temp_directory_path). The location can optionally be overridden using the `FLAMEGPU_TMP_DIR` environment variable. | +| `FLAMEGPU_RTC_INCLUDE_DIRS` | A list of include directories that should be provided to the RTC compiler, these should be separated using `;` (Windows) or `:` (Linux). If this variable is not found, the working directory will be used as a default. | +| `FLAMEGPU_SHARE_USAGE_STATISTICS` | Enable / Disable sending of telemetry data, when set to `ON` or `OFF` respectively. | +| `FLAMEGPU_TELEMETRY_SUPPRESS_NOTICE` | Enable / Disable a once per execution notice encouraging the use of telemetry, if telemetry is disabled, when set to `ON` or `OFF` respectively. | +| `FLAMEGPU_TELEMETRY_TEST_MODE` | Enable / Disable sending telemetry data to a test endpoint, for FLAMEGPU develepoment to separate user statistics from developer statistics. Set to `ON` or `OFF`. | ## Running the Test Suite(s) @@ -257,10 +259,10 @@ The test suite for the CUDA/C++ library can be executed using CTest, or by manua The test suite can be executed using [CTest](https://cmake.org/cmake/help/latest/manual/ctest.1.html) by running `ctest`, or `ctest -VV` for verbose output of sub-tests, from the the build directory. -More verbose CTest output for the GoogleTest based CUDA C++ test suite(s) can be enabled by configuring CMake with `USE_GTEST_DISCOVER` set to `ON`. 
+More verbose CTest output for the GoogleTest based CUDA C++ test suite(s) can be enabled by configuring CMake with `FLAMEGPU_ENABLE_GTEST_DISCOVER` set to `ON`. This however will dramatically increase test suite execution time. -1. Configure CMake to build the desired tests suites as desired, using `BUILD_TESTS=ON`, `BUILD_TESTS_DEV=ON` and optionally `USE_GTEST_DISCOVER=ON`. +1. Configure CMake to build the desired tests suites as desired, using `FLAMEGPU_BUILD_TESTS=ON`, `FLAMEGPU_BUILD_TESTS_DEV=ON` and optionally `FLAMEGPU_ENABLE_GTEST_DISCOVER=ON`. 2. Build the `tests`, `tests_dev` targets as required 3. Run the test suites via ctest, using `-vv` for more-verbose output. Multiple tests can be ran concurrently using `-j `. Use `-R ` to only run matching tests. @@ -270,7 +272,7 @@ This however will dramatically increase test suite execution time. To run the CUDA/C++ test suite(s) manually, which allows use of `--gtest_filter`: -1. Configure CMake with `BUILD_TESTS=ON` +1. Configure CMake with `FLAMEGPU_BUILD_TESTS=ON` 2. Build the `tests` target 3. Run the test suite executable for the selected configuration i.e. @@ -282,7 +284,7 @@ To run the CUDA/C++ test suite(s) manually, which allows use of `--gtest_filter` To run the python test suite: -1. Configure CMake with `BUILD_SWIG_PYTHON=ON` +1. Configure CMake with `FLAMEGPU_BUILD_PYTHON=ON` 2. Build the `pyflamegpu` target 3. Activate the generated python `venv` for the selected configuration, which has `pyflamegpu` and `pytest` installed diff --git a/cmake/CUDAArchitectures.cmake b/cmake/CUDAArchitectures.cmake new file mode 100644 index 000000000..7b80005ae --- /dev/null +++ b/cmake/CUDAArchitectures.cmake @@ -0,0 +1,299 @@ +#[[[ +# Handle CMAKE_CUDA_ARCHITECTURES gracefully, allowing library CMakeLists.txt to provide a sane default if not user-specified +# +# CMAKE_CUDA_ARCHITECTURES is a CMake >= 3.18 feature which controls code generation options for CUDA device code. 
+# The initial value can be set using the CUDAARCHS environment variable, or the CMake Cache variable CMAKE_CUDA_ARCHITECTURES.
+# This allows users to provide their chosen value.
+# When the CUDA language is enabled, by a project() or enable_language() command, if the cache variable is empty then CMake will set the cache variable to the default used by the compiler. I.e. 52 for CUDA 11.x.
+# However, it is then impossible to detect if the user provided this value, or if CMake did, preventing a library from setting a default, without executing CMake prior to the first project command, which is unusual.
+#
+#]]
+include_guard(GLOBAL)
+
+#[[[
+# Initialise the CMAKE_CUDA_ARCHITECTURES from the environment, CACHE or a sane programmatic default
+#
+# Call this method prior to the first (or all) project commands, to store the initial state of CMAKE_CUDA_ARCHITECTURES/CUDAARCHS for later post processing.
+# Optionally specify a project to inject a call to flamegpu_set_cuda_architectures into, to post-process the stored value or set a library-provided default.
+#
+# :keyword PROJECT: Optional project name to inject CMAKE_CUDA_ARCHITECTURES setting into. Otherwise call flamegpu_set_cuda_architectures manually after the project command or enable_language(CUDA).
+# :type PROJECT: string
+# :keyword NO_VALIDATE_ARCHITECTURES: Do not validate the passed arguments against nvcc --help output
+# :type NO_VALIDATE_ARCHITECTURES: boolean
+#]]
+function(flamegpu_init_cuda_architectures)
+ # Handle argument parsing
+ cmake_parse_arguments(CICA
+ "NO_VALIDATE_ARCHITECTURES"
+ "PROJECT"
+ ""
+ ${ARGN}
+ )
+ # Detect if there are user provided architectures or not, from the cache or environment
+ set(flamegpu_ARCH_FROM_ENV_OR_CACHE FALSE)
+ if(DEFINED CMAKE_CUDA_ARCHITECTURES OR DEFINED ENV{CUDAARCHS})
+ set(flamegpu_ARCH_FROM_ENV_OR_CACHE TRUE)
+ endif()
+ # promote the stored value to parent(file) scope for later use. This might need to become internal cache, but hopefully not. 
+ set(flamegpu_ARCH_FROM_ENV_OR_CACHE ${flamegpu_ARCH_FROM_ENV_OR_CACHE} PARENT_SCOPE)
+ # If the user does not want architecture validation to occur, set a parent scoped variable to be checked later.
+ if(CICA_NO_VALIDATE_ARCHITECTURES)
+ # If a project was also specified, append to a list and promote to the parent scope
+ if(CICA_PROJECT)
+ list(APPEND flamegpu_NO_VALIDATE_ARCHITECTURES_PROJECTS ${CICA_PROJECT})
+ set(flamegpu_NO_VALIDATE_ARCHITECTURES_PROJECTS ${flamegpu_NO_VALIDATE_ARCHITECTURES_PROJECTS} PARENT_SCOPE)
+ else()
+ # Otherwise just set a parent scoped variable
+ set(flamegpu_NO_VALIDATE_ARCHITECTURES ${CICA_NO_VALIDATE_ARCHITECTURES} PARENT_SCOPE)
+ endif()
+ endif()
+ # If a project name was provided, inject code into the PROJECT command. Users must call flamegpu_set_cuda_architectures otherwise
+ if(CICA_PROJECT)
+ set(CMAKE_PROJECT_${CICA_PROJECT}_INCLUDE "${CMAKE_CURRENT_FUNCTION_LIST_DIR}/CUDAArchitecturesProjectInclude.cmake" PARENT_SCOPE)
+ endif()
+endfunction()
+
+#[[[
+# Set the CMAKE_CUDA_ARCHITECTURES value to the environment/cache provided value, or generate the CUDA version appropriate default.
+#
+# Set the CMAKE_CUDA_ARCHITECTURES cache variable to a user provided or a library-appropriate default.
+# If the CUDAARCHS environment variable, or CMAKE_CUDA_ARCHITECTURES cache variable did not specify a value before the CUDA language was enabled,
+# build an appropriate default option, based on the CMake and NVCC version.
+# Effectively all-major (-real for all major architectures, and PTX for the most recent.)
+#
+# If the user provided a value, it will be validated against nvcc --help unless NO_VALIDATE_ARCHITECTURES is set, or was set in a previous call to flamegpu_init_cuda_architectures without a PROJECT.
+#
+# CUDA must be enabled as a language prior to this method being called. 
+# +# :keyword NO_VALIDATE_ARCHITECTURES: Do not validate the passed arguments against nvcc --help output +# :type NO_VALIDATE_ARCHITECTURES: boolean +#]] +function(flamegpu_set_cuda_architectures) + # Handle argument parsing + cmake_parse_arguments(CSCA + "NO_VALIDATE_ARCHITECTURES" + "" + "" + ${ARGN} + ) + # This function requires that the CUDA language is enabled on the current project. + if(NOT CMAKE_CUDA_COMPILER_LOADED) + # If in the inkected project code, give a different error message + if(DEFINED flamegpu_IN_PROJECT_INCLUDE AND flamegpu_IN_PROJECT_INCLUDE) + message(FATAL_ERROR + " ${CMAKE_CURRENT_FUNCTION} requires the CUDA lanaguage to be enabled\n" + " Please either:\n" + " * use project( LANGUAGES CUDA)\n" + " * call flamegpu_init_cuda_architectures() without the PROJECT argument, and explcitly call ${CMAKE_CURRENT_FUNCTION}() after enable_language(CUDA).") + else() + # not in project injection, so only suggest enabled + message(FATAL_ERROR + " ${CMAKE_CURRENT_FUNCTION} requires the CUDA language to be enabled.\n" + " Please call enable_language(CUDA) prior to ${CMAKE_CURRENT_FUNCTION}()") + endif() + + endif() + # Query NVCC for the acceptable SM values, this is used in multiple places + if(NOT DEFINED SUPPORTED_CUDA_ARCHITECTURES_NVCC) + execute_process(COMMAND ${CMAKE_CUDA_COMPILER} "--help" OUTPUT_VARIABLE NVCC_HELP_STR ERROR_VARIABLE NVCC_HELP_STR) + # Match all comptue_XX or sm_XXs + string(REGEX MATCHALL "'(sm|compute)_[0-9]+'" SUPPORTED_CUDA_ARCHITECTURES_NVCC "${NVCC_HELP_STR}" ) + # Strip just the numeric component + string(REGEX REPLACE "'(sm|compute)_([0-9]+)'" "\\2" SUPPORTED_CUDA_ARCHITECTURES_NVCC "${SUPPORTED_CUDA_ARCHITECTURES_NVCC}" ) + # Remove dupes and sort to build the correct list of supported CUDA_ARCH. + list(REMOVE_DUPLICATES SUPPORTED_CUDA_ARCHITECTURES_NVCC) + list(REMOVE_ITEM SUPPORTED_CUDA_ARCHITECTURES_NVCC "") + list(SORT SUPPORTED_CUDA_ARCHITECTURES_NVCC) + # Store the supported arch's once and only once. 
This could be a cache var given the cuda compiler should not be able to change without clearing th cache? + set(SUPPORTED_CUDA_ARCHITECTURES_NVCC ${SUPPORTED_CUDA_ARCHITECTURES_NVCC} PARENT_SCOPE) + endif() + list(LENGTH SUPPORTED_CUDA_ARCHITECTURES_NVCC SUPPORTED_CUDA_ARCHITECTURES_NVCC_COUNT) + # If we already have a cuda architectures value, validate it as CMake doesn't on its own. Unless the caller asked us not to. + if(flamegpu_ARCH_FROM_ENV_OR_CACHE AND NOT CMAKE_CUDA_ARCHITECTURES STREQUAL "" + AND NOT CSCA_NO_VALIDATE_ARCHITECTURES + AND NOT flamegpu_NO_VALIDATE_ARCHITECTURES + AND (DEFINED PROJECT_NAME AND NOT ${PROJECT_NAME} IN_LIST flamegpu_NO_VALIDATE_ARCHITECTURES_PROJECTS)) + # Get the number or architectures specified + list(LENGTH CMAKE_CUDA_ARCHITECTURES arch_count) + # Prep a bool to track if a single special value is being used or not + set(using_keyword_arch FALSE) + # native requires CMake >= 3.24, and must be the only option. + if("native" IN_LIST CMAKE_CUDA_ARCHITECTURES) + # Error if CMake is too old + if(CMAKE_VERSION VERSION_LESS 3.24) + message(FATAL_ERROR + " CMAKE_CUDA_ARCHITECTURES value `native` requires CMake >= 3.24.\n" + " CMAKE_CUDA_ARCHITECTURES=\"${CMAKE_CUDA_ARCHITECTURES}\"") + endif() + # Error if there are multiple architectures specified. + if(arch_count GREATER 1) + message(FATAL_ERROR + " CMAKE_CUDA_ARCHITECTURES value `native` must be the only value specified.\n" + " CMAKE_CUDA_ARCHITECTURES=\"${CMAKE_CUDA_ARCHITECTURES}\"") + endif() + set(using_keyword_arch TRUE) + endif() + # all requires 3.23, and must be the sole value. + if("all" IN_LIST CMAKE_CUDA_ARCHITECTURES) + # Error if CMake is too old + if(CMAKE_VERSION VERSION_LESS 3.23) + message(FATAL_ERROR + " CMAKE_CUDA_ARCHITECTURES value `all` requires CMake >= 3.23.\n" + " CMAKE_CUDA_ARCHITECTURES=\"${CMAKE_CUDA_ARCHITECTURES}\"") + endif() + # Error if there are multiple architectures specified. 
+ if(arch_count GREATER 1) + message(FATAL_ERROR + " CMAKE_CUDA_ARCHITECTURES value `all` must be the only value specified.\n" + " CMAKE_CUDA_ARCHITECTURES=\"${CMAKE_CUDA_ARCHITECTURES}\"") + endif() + set(using_keyword_arch TRUE) + endif() + # all-major requires 3.23, and must be the sole value. + if("all-major" IN_LIST CMAKE_CUDA_ARCHITECTURES) + # Error if CMake is too old + if(CMAKE_VERSION VERSION_LESS 3.23) + message(FATAL_ERROR + " CMAKE_CUDA_ARCHITECTURES value `all-major` requires CMake >= 3.23.\n" + " CMAKE_CUDA_ARCHITECTURES=\"${CMAKE_CUDA_ARCHITECTURES}\"") + endif() + # Error if there are multiple architectures specified. + if(arch_count GREATER 1) + message(FATAL_ERROR + " CMAKE_CUDA_ARCHITECTURES value `all-major` must be the only value specified.\n" + " CMAKE_CUDA_ARCHITECTURES=\"${CMAKE_CUDA_ARCHITECTURES}\"") + endif() + set(using_keyword_arch TRUE) + endif() + # Cmake 3.18+ expects a list of 1 or more , -real or -virtual. + # CMake isn't aware of the exact SMS supported by the CUDA version afiak, but we have already queired nvcc for this (once and only once) + # If nvcc parsing worked and a single keyword option is not being used, attempt the validation: + if(SUPPORTED_CUDA_ARCHITECTURES_NVCC_COUNT GREATER 0 AND NOT using_keyword_arch) + # Transform a copy of the list of supported architectures, to hopefully just contain numbers + set(archs ${CMAKE_CUDA_ARCHITECTURES}) + list(TRANSFORM archs REPLACE "(\-real|\-virtual)" "") + # If any of the specified architectures are not in the nvcc reported list, error. 
+ foreach(ARCH IN LISTS archs) + if(NOT ARCH IN_LIST SUPPORTED_CUDA_ARCHITECTURES_NVCC) + message(FATAL_ERROR + " CMAKE_CUDA_ARCHITECTURES value `${ARCH}` is not supported by nvcc ${CMAKE_CUDA_COMPILER_VERSION}.\n" + " Supported architectures based on nvcc --help: \n" + " ${SUPPORTED_CUDA_ARCHITECTURES_NVCC}\n") + endif() + endforeach() + unset(archs) + endif() + else() + # Otherwise, set a mulit-arch default for good compatibility and performacne + # If we're using CMake >= 3.23, we can just use all-major, though we then have to find the minimum a different way? + if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.23") + set(CMAKE_CUDA_ARCHITECTURES "all-major") + else() + # For CMake < 3.23, we have to make our own all-major equivalent. + # If we have nvcc help outut, we can generate this from all the elements that end with a 0 (and the first element if it does not.) + if(SUPPORTED_CUDA_ARCHITECTURES_NVCC_COUNT GREATER 0) + # If the lowest support arch is not major, add it to the default + list(GET SUPPORTED_CUDA_ARCHITECTURES_NVCC 0 lowest_supported) + if(NOT lowest_supported MATCHES "0$") + list(APPEND default_archs ${lowest_supported}) + endif() + unset(lowest_supported) + # For each architecture, if it is major add it to the default list + foreach(ARCH IN LISTS SUPPORTED_CUDA_ARCHITECTURES_NVCC) + if(ARCH MATCHES "0$") + list(APPEND default_archs ${ARCH}) + endif() + endforeach() + else() + # If nvcc help output parsing failed, just use an informed guess option from CUDA 11.8 + set(default_archs "35;50;60;70;80") + if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.8) + list(APPEND default_archs "90") + endif() + message(AUTHOR_WARNING + " ${CMAKE_CURRENT_FUNCTION} failed to parse NVCC --help output for default architecture generation\n" + " Using ${default_archs} based on CUDA 11.0 to 11.8." + ) + endif() + # We actually want real for each arch, then virtual for the final, but only for library-provided values, to only embed one arch worth of ptx. 
+ # So grab the last element of the list
+ list(GET default_archs -1 final)
+ # append -real to each element, to not embed ptx for that arch too
+ list(TRANSFORM default_archs APPEND "-real")
+ # add the -virtual version of the final element
+ list(APPEND default_archs "${final}-virtual")
+ # Set the value
+ set(CMAKE_CUDA_ARCHITECTURES ${default_archs})
+ #unset local vars
+ unset(default_archs)
+ endif()
+ endif()
+ # Promote the value to the parent's scope, where it is needed on the first invocation (might be fine with cache, but just in case)
+ set(CMAKE_CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}" PARENT_SCOPE)
+ # Promote the value to the cache for reconfigure persistence, as the enable_language sets it on the cache
+ set(CMAKE_CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}" CACHE STRING "CUDA architectures" FORCE)
+endfunction()
+
+#[[[
+# Get the minimum CUDA Architecture from the current CMAKE_CUDA_ARCHITECTURES value if possible
+#
+# Gets the minimum CUDA architecture from the current value of CMAKE_CUDA_ARCHITECTURES if possible, storing the result in the pass-by-reference return value
+# Supports CMAKE_CUDA_ARCHITECTURES values including integers, -real post-fixed integers, -virtual post-fixed integers, all-major and all.
+# Does not support native, instead returning 0.
+# all or all-major are supported by querying nvcc --help to detect the minimum built for.
+#
+# CUDA must be enabled as a language prior to this method being called, and CMAKE_CUDA_ARCHITECTURES must be defined and non-empty
+#
+# :param minimum_architecture: the minimum architecture set in CMAKE_CUDA_ARCHITECTURES
+# :type minimum_architecture: integer
+#]]
+function(flamegpu_get_minimum_cuda_architecture minimum_architecture)
+ if(DEFINED CMAKE_CUDA_ARCHITECTURES)
+ # Cannot deal with native gracefully
+ if("native" IN_LIST CMAKE_CUDA_ARCHITECTURES)
+ # If it's native, we would need to execute some CUDA code to detect this.
+ set(flamegpu_minimum_cuda_architecture 0)
+ # if all/all-major is specified, detect via nvcc --help. It must be the only option (CMake doesn't validate this and generates bad gencodes otherwise)
+ elseif("all-major" IN_LIST CMAKE_CUDA_ARCHITECTURES OR "all" IN_LIST CMAKE_CUDA_ARCHITECTURES)
+ # Query NVCC for the acceptable SM values.
+ if(NOT DEFINED SUPPORTED_CUDA_ARCHITECTURES_NVCC)
+ execute_process(COMMAND ${CMAKE_CUDA_COMPILER} "--help" OUTPUT_VARIABLE NVCC_HELP_STR ERROR_VARIABLE NVCC_HELP_STR)
+ # Match all compute_XX or sm_XXs
+ string(REGEX MATCHALL "'(sm|compute)_[0-9]+'" SUPPORTED_CUDA_ARCHITECTURES_NVCC "${NVCC_HELP_STR}" )
+ # Strip just the numeric component
+ string(REGEX REPLACE "'(sm|compute)_([0-9]+)'" "\\2" SUPPORTED_CUDA_ARCHITECTURES_NVCC "${SUPPORTED_CUDA_ARCHITECTURES_NVCC}" )
+ # Remove dupes and sort to build the correct list of supported CUDA_ARCH.
+ list(REMOVE_DUPLICATES SUPPORTED_CUDA_ARCHITECTURES_NVCC)
+ list(REMOVE_ITEM SUPPORTED_CUDA_ARCHITECTURES_NVCC "")
+ list(SORT SUPPORTED_CUDA_ARCHITECTURES_NVCC)
+ # Store the supported arch's once and only once. This could be a cache var given the cuda compiler should not be able to change without clearing the cache?
+ set(SUPPORTED_CUDA_ARCHITECTURES_NVCC ${SUPPORTED_CUDA_ARCHITECTURES_NVCC} PARENT_SCOPE)
+ endif()
+ list(LENGTH SUPPORTED_CUDA_ARCHITECTURES_NVCC SUPPORTED_CUDA_ARCHITECTURES_NVCC_COUNT)
+ if(SUPPORTED_CUDA_ARCHITECTURES_NVCC_COUNT GREATER 0)
+ # For both all and all-major, the lowest arch should be the lowest supported. This is true for CUDA <= 11.8 at least.
+ list(GET SUPPORTED_CUDA_ARCHITECTURES_NVCC 0 lowest)
+ set(flamegpu_minimum_cuda_architecture ${lowest})
+ else()
+ # If nvcc didn't give anything useful, set to 0
+ set(flamegpu_minimum_cuda_architecture 0)
+ endif()
+ else()
+ # Otherwise it should just be a list of one or more <sm>/<sm>-real/<sm>-virtual values
+ # Copy the list
+ set(archs ${CMAKE_CUDA_ARCHITECTURES})
+ # Replace occurrences of -real and -virtual
+ list(TRANSFORM archs REPLACE "(\-real|\-virtual)" "")
+ # Sort the list numerically (natural comparison)
+ list(SORT archs COMPARE NATURAL ORDER ASCENDING)
+ # Get the first element
+ list(GET archs 0 lowest)
+ # Set the value for later returning
+ set(flamegpu_minimum_cuda_architecture ${lowest})
+ endif()
+ # Set the return value as required, effectively pass by reference.
+ set(${minimum_architecture} ${flamegpu_minimum_cuda_architecture} PARENT_SCOPE)
+ else()
+ message(FATAL_ERROR "${CMAKE_CURRENT_FUNCTION}: CMAKE_CUDA_ARCHITECTURES is not set or is empty")
+ endif()
+endfunction()
diff --git a/cmake/CUDAArchitecturesProjectInclude.cmake b/cmake/CUDAArchitecturesProjectInclude.cmake
new file mode 100644
index 000000000..ee28ef2d4
--- /dev/null
+++ b/cmake/CUDAArchitecturesProjectInclude.cmake
@@ -0,0 +1,9 @@
+# CMake file to be injected into a project via CMAKE_PROJECT_INCLUDE
+# If CUDA is enabled, call a CMake function which gracefully sets a library-useful default
+
+# Set a locally scoped cmake variable, to alter the error message within.
+set(flamegpu_IN_PROJECT_INCLUDE ON) +# Call the appropraite command to set CMAKE_CUDA_ARCHITECTURES to the user-provided value, the exising value, or a sane libray-provided defualt +flamegpu_set_cuda_architectures() +# Unset the variable used to alter behaviour in set_cuda_architectures +unset(flamegpu_IN_PROJECT_INCLUDE) \ No newline at end of file diff --git a/cmake/CheckBinaryDirPathForSpaces.cmake b/cmake/CheckBinaryDirPathForSpaces.cmake index a924a5528..26c8ca39a 100644 --- a/cmake/CheckBinaryDirPathForSpaces.cmake +++ b/cmake/CheckBinaryDirPathForSpaces.cmake @@ -3,7 +3,7 @@ include_guard(GLOBAL) # Define a cmake function which emits a warning if the build directory path contains a space, in some cases # With Visual Stuido 2022 and CUDA 11.7, this resulted in compilation errors. A relevant bug report has been logged with NVIDIA, so should be fixed in a future CUDA release. # https://github.com/FLAMEGPU/FLAMEGPU2/issues/867 -function(CheckBinaryDirPathForSpaces) +function(flamegpu_check_binary_dir_for_spaces) # If using Visual Studio 17 2022 (the known verison which errors with this, with current CUDA version(s)) if (CMAKE_GENERATOR MATCHES "Visual Studio 17 2022") # Resolve paths to get the full abs path of the binary dir @@ -25,5 +25,5 @@ function(CheckBinaryDirPathForSpaces) endfunction() # Call the function imediately, so the file only needs to be included. -CheckBinaryDirPathForSpaces() +flamegpu_check_binary_dir_for_spaces() diff --git a/cmake/CheckCompilerFunctionality.cmake b/cmake/CheckCompilerFunctionality.cmake index 8fc207b60..bdf9b881c 100644 --- a/cmake/CheckCompilerFunctionality.cmake +++ b/cmake/CheckCompilerFunctionality.cmake @@ -1,7 +1,10 @@ +include_guard(GLOBAL) + # Define a cmake function which checks that CUDA and the host compiler are functional. -function(CheckCompilerFunctionality) +# Failure only results in CMake warnings not Errors, so that documentation only builds will function. 
+function(flamegpu_check_compiler_functionality) # If the result variable is already defined, this has already been called once, so don't check agian. - if(DEFINED CheckCompilerFunctionality_RESULT) + if(DEFINED FLAMEGPU_CheckCompilerFunctionality_RESULT) return() endif() @@ -11,17 +14,50 @@ function(CheckCompilerFunctionality) check_language(CXX) if(NOT CMAKE_CXX_COMPILER) message(WARNING "CXX Language Support Not Found") - set(CheckCompilerFunctionality_RESULT "NO" PARENT_SCOPE) + set(FLAMEGPU_CheckCompilerFunctionality_RESULT "NO" PARENT_SCOPE) return() endif() enable_language(CXX) check_language(CUDA) if(NOT CMAKE_CUDA_COMPILER) message(WARNING "CUDA Language Support Not Found") - set(CheckCompilerFunctionality_RESULT "NO" PARENT_SCOPE) + set(FLAMEGPU_CheckCompilerFunctionality_RESULT "NO" PARENT_SCOPE) return() endif() enable_language(CUDA) + + # We need c++17 std::filesytem, but not all compilers which claim to implement c++17 provide filesystem (GCC 7) + if(NOT DEFINED CUDA_STD_FILESYSTEM) + # Disable CMAKE_CUDA_ARCHTIECTURES if not already controlled. This is scoped to the function so safe to control. 
+ if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES OR "${CMAKE_CUDA_ARCHITECTURES}" STREQUAL "") + set(CMAKE_CUDA_ARCHITECTURES "OFF") + endif() + try_compile( + CUDA_STD_FILESYSTEM + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${CMAKE_CURRENT_LIST_DIR}/CheckCompilerFunctionality/CheckStdFilesystem.cu" + CXX_STANDARD 17 + CUDA_STANDARD 17 + CXX_STANDARD_REQUIRED "ON" + ) + endif() + # If an error occured while building the snippet, report a warning + if(NOT CUDA_STD_FILESYSTEM) + # If the GCC versions is known to be bad, give an appropriate error + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.1) + message(WARNING + "${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION} does not provide even in --std=c++17 mode.\n" + " Please use GCC >= 8.1.\n" + " \n") + else() + # If the gcc version is not a known problem, emit a generic error. + message(WARNING + " error with ${CMAKE_CUDA_COMPILER_ID} ${CMAKE_CUDA_COMPILER_VERSION} and ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}.") + endif() + # Set the result variable to a false-like value + set(FLAMEGPU_CheckCompilerFunctionality_RESULT "NO" PARENT_SCOPE) + return() + endif() # Original releases of GCC 10.3.0 and 11.1.0 included a bug preventing the use of in . # This was patched in subsequent versions, and backported in the release branches, but the broken version is still distributed in some cases (i.e. Ubuntu 20.04, but not 21.04). @@ -29,8 +65,6 @@ function(CheckCompilerFunctionality) if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # Try to compile the test case file for inclusion of chrono. if(NOT DEFINED GCC_CUDA_STDCHRONO) - # CUDA must be available. - enable_language(CUDA) # Disable CMAKE_CUDA_ARCHTIECTURES if not already controlled. This is scoped to the function so safe to control. 
if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES OR "${CMAKE_CUDA_ARCHITECTURES}" STREQUAL "") set(CMAKE_CUDA_ARCHITECTURES "OFF") @@ -61,7 +95,7 @@ function(CheckCompilerFunctionality) " not usable with ${CMAKE_CUDA_COMPILER_ID} ${CMAKE_CUDA_COMPILER_VERSION} and ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}.") endif() # Set the result variable to a false-like value - set(CheckCompilerFunctionality_RESULT "NO" PARENT_SCOPE) + set(FLAMEGPU_CheckCompilerFunctionality_RESULT "NO" PARENT_SCOPE) return() endif() endif() @@ -73,8 +107,6 @@ function(CheckCompilerFunctionality) if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # Try to compile the test case file for inclusion of chrono. if(NOT DEFINED GCC_CUDA_VECTOR_TUPLE_PUSHBACK) - # CUDA must be available. - enable_language(CUDA) # Disable CMAKE_CUDA_ARCHTIECTURES if not already controlled. This is scoped to the function so safe to control. if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES OR "${CMAKE_CUDA_ARCHITECTURES}" STREQUAL "") set(CMAKE_CUDA_ARCHITECTURES "OFF") @@ -107,9 +139,9 @@ function(CheckCompilerFunctionality) endif() # If we made it this far, set the result variable to be truthy - set(CheckCompilerFunctionality_RESULT "YES" PARENT_SCOPE) + set(FLAMEGPU_CheckCompilerFunctionality_RESULT "YES" PARENT_SCOPE) endfunction() # Call the function imediately, so the file only needs to be included. -CheckCompilerFunctionality() +flamegpu_check_compiler_functionality() diff --git a/cmake/CheckCompilerFunctionality/CheckStdFilesystem.cu b/cmake/CheckCompilerFunctionality/CheckStdFilesystem.cu new file mode 100644 index 000000000..11f119904 --- /dev/null +++ b/cmake/CheckCompilerFunctionality/CheckStdFilesystem.cu @@ -0,0 +1,4 @@ +// We require std::filesytem, but just requireing std=c++17 does not enforce this for all compilers, so check it works. (I.e. GCC < 8 is a problem.) +// CMake doesn't appear to have knowledge of this feature. 
+#include +int main() { return 0; } \ No newline at end of file diff --git a/cmake/OutOfSourceOnly.cmake b/cmake/OutOfSourceOnly.cmake index 2041e4513..5f287318c 100644 --- a/cmake/OutOfSourceOnly.cmake +++ b/cmake/OutOfSourceOnly.cmake @@ -1,5 +1,7 @@ +include_guard(GLOBAL) + # Define a cmake function which emits a fatal error if the source directory and binary directory are the same. -function(EnforceOutOfSourceBuilds) +function(flamegpu_enforce_out_of_source_builds) # Resolve paths before comparioson to ensure comparions are accurate get_filename_component(source_dir "${CMAKE_SOURCE_DIR}" REALPATH) get_filename_component(binary_dir "${CMAKE_BINARY_DIR}" REALPATH) @@ -15,5 +17,5 @@ function(EnforceOutOfSourceBuilds) endfunction() # Call the function imediately, so the file only needs to be included. -EnforceOutOfSourceBuilds() +flamegpu_enforce_out_of_source_builds() diff --git a/cmake/SetTargetFolder.cmake b/cmake/SetTargetFolder.cmake new file mode 100644 index 000000000..0be0dd571 --- /dev/null +++ b/cmake/SetTargetFolder.cmake @@ -0,0 +1,17 @@ +include_guard(GLOBAL) + +#----------------------------------------------------------------------- +# a macro that only sets the FOLDER target property if it's +# "appropriate" +# Borrowed from cmake's own CMakeLists.txt +#----------------------------------------------------------------------- +macro(flamegpu_set_target_folder tgt folder) + if(CMAKE_USE_FOLDERS) + set_property(GLOBAL PROPERTY USE_FOLDERS ON) + if(TARGET ${tgt}) # AND MSVC # AND MSVC stops all lint from being set with folder + set_property(TARGET "${tgt}" PROPERTY FOLDER "${folder}") + endif() + else() + set_property(GLOBAL PROPERTY USE_FOLDERS OFF) + endif() +endmacro() \ No newline at end of file diff --git a/cmake/common.cmake b/cmake/common.cmake index 6d87f6492..41837453b 100644 --- a/cmake/common.cmake +++ b/cmake/common.cmake @@ -14,11 +14,17 @@ include(${FLAMEGPU_ROOT}/cmake/OutOfSourceOnly.cmake) # Ensure there are no spaces in the build 
directory path include(${FLAMEGPU_ROOT}/cmake/CheckBinaryDirPathForSpaces.cmake) -# include CUDA_ARCH processing code. -# Uses -DCUDA_ARCH values (and modifies if appropriate). -# Adds -gencode argumetns to cuda compiler options -# Adds -DMIN_COMPUTE_CAPABILITY=VALUE compiler defintions for C, CXX and CUDA -include(${CMAKE_CURRENT_LIST_DIR}/cuda_arch.cmake) +# Ensure that cmake functions for handling CMAKE_CUDA_ARCHITECTURES are available +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +# Emit a message once and only once per configure of the chosen architectures? +if(DEFINED CMAKE_CUDA_ARCHITECTURES AND NOT flamegpu_printed_cmake_cuda_architectures) + message(STATUS "CUDA Architectures: ${CMAKE_CUDA_ARCHITECTURES}") + get_directory_property(hasParent PARENT_DIRECTORY) + if(hasParent) + set(flamegpu_printed_cmake_cuda_architectures TRUE PARENT_SCOPE) + endif() + unset(hasParent) +endif() # Ensure that other dependencies are downloaded and available. # As flamegpu is a static library, linking only only occurs at consumption not generation, so dependent targets must also know of PRIVATE shared library dependencies such as tinyxml2 and rapidjson, as well any intentionally public dependencies (for include dirs) @@ -26,7 +32,7 @@ include(${CMAKE_CURRENT_LIST_DIR}/dependencies/Thrust.cmake) include(${CMAKE_CURRENT_LIST_DIR}/dependencies/Jitify.cmake) include(${CMAKE_CURRENT_LIST_DIR}/dependencies/Tinyxml2.cmake) include(${CMAKE_CURRENT_LIST_DIR}/dependencies/rapidjson.cmake) -if(USE_GLM) +if(FLAMEGPU_ENABLE_GLM) include(${CMAKE_CURRENT_LIST_DIR}/dependencies/glm.cmake) endif() @@ -34,18 +40,43 @@ endif() # Don't create installation scripts (and hide CMAKE_INSTALL_PREFIX from cmake-gui) set(CMAKE_SKIP_INSTALL_RULES TRUE) set(CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}" CACHE INTERNAL "" FORCE) + +# Option to enable/disable NVTX markers for improved profiling +option(FLAMEGPU_ENABLE_NVTX "Build with NVTX markers enabled" OFF) + # Option to enable verbose PTXAS 
output -option(VERBOSE_PTXAS "Enable verbose PTXAS output" OFF) -mark_as_advanced(VERBOSE_PTXAS) +option(FLAMEGPU_VERBOSE_PTXAS "Enable verbose PTXAS output" OFF) +mark_as_advanced(FLAMEGPU_VERBOSE_PTXAS) + # Option to promote compilation warnings to error, useful for strict CI -option(WARNINGS_AS_ERRORS "Promote compilation warnings to errors" OFF) +option(FLAMEGPU_WARNINGS_AS_ERRORS "Promote compilation warnings to errors" OFF) + +# Option to change curand engine used for CUDA random generation +set(FLAMEGPU_CURAND_ENGINE "PHILOX" CACHE STRING "The curand engine to use. Suitable options: \"PHILOX\", \"XORWOW\", \"MRG\"") +set_property(CACHE FLAMEGPU_CURAND_ENGINE PROPERTY STRINGS PHILOX XORWOW MRG) +mark_as_advanced(FLAMEGPU_CURAND_ENGINE) + +# If CUDA >= 11.2, add an option to control the use of NVCC_THREADS +set(DEFAULT_FLAMEGPU_NVCC_THREADS 2) +if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2) + # The number of threads to use defaults to 2, telling the compiler to use up to 2 threads when multiple arch's are specified. + # Setting this value to 0 would use as many threads as possible. + # In some cases, this may increase total runtime due to excessive thread creation, and lowering the number of threads, or lowering the value of `-j` passed to cmake may be beneficial. + if(NOT DEFINED FLAMEGPU_NVCC_THREADS) + SET(FLAMEGPU_NVCC_THREADS "${DEFAULT_FLAMEGPU_NVCC_THREADS}" CACHE STRING "Number of concurrent threads for building multiple target architectures. 0 indicates use as many as required." FORCE) + endif() + mark_as_advanced(FLAMEGPU_NVCC_THREADS) +endif() + # Option to group CMake generated projects into folders in supported IDEs option(CMAKE_USE_FOLDERS "Enable folder grouping of projects in IDEs." ON) mark_as_advanced(CMAKE_USE_FOLDERS) # Include files which define target specific functions. 
include(${CMAKE_CURRENT_LIST_DIR}/warnings.cmake) -include(${CMAKE_CURRENT_LIST_DIR}/cxxstd.cmake) + +# Ensure that flamegpu_set_target_folder is available +include(${CMAKE_CURRENT_LIST_DIR}/SetTargetFolder.cmake) # Set a default build type if not passed get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) @@ -88,7 +119,7 @@ endif() # @todo - why do we not have to link against curand? Is that only required for the host API? Use CUDA::curand if required. # If NVTX is enabled, find the library and update variables accordingly. -if(USE_NVTX) +if(FLAMEGPU_ENABLE_NVTX) # Find the nvtx library using custom cmake module, providing imported targets # Do not use CUDA::nvToolsExt as this always uses NVTX1 not 3. # See https://gitlab.kitware.com/cmake/cmake/-/issues/21377 @@ -96,12 +127,12 @@ if(USE_NVTX) # If the targets were not found, emit a warning if(NOT TARGET NVTX::nvtx) # If not found, emit a warning and continue without NVTX - message(WARNING "NVTX could not be found. Proceeding with USE_NVTX=OFF") + message(WARNING "NVTX could not be found. Proceeding with FLAMEGPU_ENABLE_NVTX=OFF") if(NOT CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) - SET(USE_NVTX "OFF" PARENT_SCOPE) + SET(FLAMEGPU_ENABLE_NVTX "OFF" PARENT_SCOPE) endif() endif() -endif(USE_NVTX) +endif(FLAMEGPU_ENABLE_NVTX) # Set the minimum supported cuda version, if not already set. Currently duplicated due to docs only build logic. # CUDA 11.0 is current minimum cuda version, and the minimum supported @@ -122,87 +153,14 @@ if(NOT DEFINED MINIMUM_SUPPORTED_CUDA_VERSION) endif() endif() -# Define a function to add a lint target. -find_file(CPPLINT NAMES cpplint cpplint.exe) -if(CPPLINT) - # Create the all_lint meta target if it does not exist - if(NOT TARGET all_lint) - add_custom_target(all_lint) - set_target_properties(all_lint PROPERTIES EXCLUDE_FROM_ALL TRUE) - endif() - # Define a cmake function for adding a new lint target. 
- function(new_linter_target NAME SRC) - cmake_parse_arguments( - NEW_LINTER_TARGET - "" - "" - "EXCLUDE_FILTERS" - ${ARGN}) - # Don't lint external files - list(FILTER SRC EXCLUDE REGEX "^${FLAMEGPU_ROOT}/externals/.*") - # Don't lint user provided list of regular expressions. - foreach(EXCLUDE_FILTER ${NEW_LINTER_TARGET_EXCLUDE_FILTERS}) - list(FILTER SRC EXCLUDE REGEX "${EXCLUDE_FILTER}") - endforeach() - - # Only lint accepted file type extensions h++, hxx, cuh, cu, c, c++, cxx, cc, hpp, h, cpp, hh - list(FILTER SRC INCLUDE REGEX ".*\\.(h\\+\\+|hxx|cuh|cu|c|c\\+\\+|cxx|cc|hpp|h|cpp|hh)$") - - # Build a list of arguments to pass to CPPLINT - LIST(APPEND CPPLINT_ARGS "") - - # Specify output format for msvc highlighting - if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - LIST(APPEND CPPLINT_ARGS "--output" "vs7") - endif() - # Set the --repository argument if included as a sub project. - if(NOT CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) - # Use find the repository root via git, to pass to cpplint. 
- execute_process(COMMAND git rev-parse --show-toplevel - WORKING_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}" - RESULT_VARIABLE git_repo_found - OUTPUT_VARIABLE abs_repo_root - OUTPUT_STRIP_TRAILING_WHITESPACE) - if(git_repo_found EQUAL 0) - LIST(APPEND CPPLINT_ARGS "--repository=${abs_repo_root}") - endif() - endif() - # Add the lint_ target - add_custom_target( - "lint_${PROJECT_NAME}" - COMMAND ${CPPLINT} ${CPPLINT_ARGS} - ${SRC} - ) - - # Don't trigger this target on ALL_BUILD or Visual Studio 'Rebuild Solution' - set_target_properties("lint_${NAME}" PROPERTIES EXCLUDE_FROM_ALL TRUE) - # Add the custom target as a dependency of the global lint target - if(TARGET all_lint) - add_dependencies(all_lint lint_${NAME}) - endif() - # Put within Lint filter - if (CMAKE_USE_FOLDERS) - set_property(GLOBAL PROPERTY USE_FOLDERS ON) - set_property(TARGET "lint_${PROJECT_NAME}" PROPERTY FOLDER "Lint") - endif () - endfunction() -else() - # Don't create this message multiple times - if(NOT COMMAND add_flamegpu_executable) - message( - " cpplint: NOT FOUND!\n" - " Lint projects will not be generated.\n" - " Please install cpplint as described on https://pypi.python.org/pypi/cpplint.\n" - " In most cases command 'pip install --user cpplint' should be sufficient.") - function(new_linter_target NAME SRC) - endfunction() - endif() -endif() +# Invlude the cpplint camake, which provides a function to create a lint target. +include(${CMAKE_CURRENT_LIST_DIR}/cpplint.cmake) + # Define a function which can be used to set common compiler options for a target # We do not want to force these options on end users (although they should be used ideally), hence not just public properties on the library target # Function to suppress compiler warnings for a given target -function(CommonCompilerSettings) +function(flamegpu_common_compiler_settings) # Parse the expected arguments, prefixing variables. 
cmake_parse_arguments( CCS @@ -214,16 +172,11 @@ function(CommonCompilerSettings) # Ensure that a target has been passed, and that it is a valid target. if(NOT CCS_TARGET) - message( FATAL_ERROR "function(CommonCompilerSettings): 'TARGET' argument required") + message( FATAL_ERROR "flamegpu_common_compiler_settings: 'TARGET' argument required") elseif(NOT TARGET ${CCS_TARGET} ) - message( FATAL_ERROR "function(CommonCompilerSettings): TARGET '${CCS_TARGET}' is not a valid target") + message( FATAL_ERROR "flamegpu_common_compiler_settings: TARGET '${CCS_TARGET}' is not a valid target") endif() - # Add device debugging symbols to device builds of CUDA objects - target_compile_options(${CCS_TARGET} PRIVATE "$<$,$>:-G>") - # Ensure DEBUG and _DEBUG are defined for Debug builds - target_compile_definitions(${CCS_TARGET} PRIVATE $<$,$>:DEBUG>) - target_compile_definitions(${CCS_TARGET} PRIVATE $<$,$>:_DEBUG>) # Enable -lineinfo for Release builds, for improved profiling output. # CMAKE >=3.19 required for multivalue CONFIG: target_compile_options(${CCS_TARGET} PRIVATE "$<$,$,$,$>>:-lineinfo>") @@ -231,20 +184,6 @@ function(CommonCompilerSettings) # Set an NVCC flag which allows host constexpr to be used on the device. target_compile_options(${CCS_TARGET} PRIVATE "$<$:--expt-relaxed-constexpr>") - # Prevent windows.h from defining max and min. - if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - target_compile_definitions(${CCS_TARGET} PRIVATE NOMINMAX) - endif() - - # Pass the SEATBELTS macro, which when set to off/0 (for non debug builds) removes expensive operations. - if (SEATBELTS) - # If on, all build configs have seatbelts - target_compile_definitions(${CCS_TARGET} PRIVATE SEATBELTS=1) - else() - # If off, debug builds have seatbelts, non debug builds do not. 
- target_compile_definitions(${CCS_TARGET} PRIVATE $,SEATBELTS=1,SEATBELTS=0>) - endif() - # MSVC handling of SYSTEM for external includes, present in 19.10+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # These flags don't currently have any effect on how CMake passes system-private includes to msvc (VS 2017+) @@ -262,36 +201,37 @@ function(CommonCompilerSettings) # If CUDA 11.2+, can build multiple architectures in parallel. # Note this will be multiplicative against the number of threads launched for parallel cmake build, which may lead to processes being killed, or excessive memory being consumed. - if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL "11.2" AND USE_NVCC_THREADS AND DEFINED NVCC_THREADS AND NVCC_THREADS GREATER_EQUAL 0) - target_compile_options(${CCS_TARGET} PRIVATE "$<$:SHELL:--threads ${NVCC_THREADS}>") + if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL "11.2" AND DEFINED FLAMEGPU_NVCC_THREADS) + set(FLAMEGPU_NVCC_THREADS_INTEGER -1}) + # If its a number GE 0, use that, this is false for truthy values + if(FLAMEGPU_NVCC_THREADS GREATER_EQUAL 0) + set(FLAMEGPU_NVCC_THREADS_INTEGER ${FLAMEGPU_NVCC_THREADS}) + # If it is not set, use a hardcoded sensible default 2. + elseif("${FLAMEGPU_NVCC_THREADS}" STREQUAL "") + set(FLAMEGPU_NVCC_THREADS_INTEGER ${DEFAULT_FLAMEGPU_NVCC_THREADS}) + # Otherwise, use 1, alternativel we could fatal error here. 
+ else() + set(FLAMEGPU_NVCC_THREADS_INTEGER 1) + endif() + if(FLAMEGPU_NVCC_THREADS_INTEGER GREATER_EQUAL 0) + target_compile_options(${CCS_TARGET} PRIVATE "$<$:SHELL:--threads ${FLAMEGPU_NVCC_THREADS_INTEGER}>") + endif() endif() # Enable verbose ptxas output if required - if(VERBOSE_PTXAS) + if(FLAMEGPU_VERBOSE_PTXAS) target_compile_options(${CCS_TARGET} PRIVATE "$<$:SHELL:-Xptxas -v>") endif() - # Request a specific curand engine - string(TOUPPER CURAND_ENGINE CURAND_ENGINE_UPPER) - if(${CURAND_ENGINE_UPPER} STREQUAL "MRG") - target_compile_definitions(${CCS_TARGET} PRIVATE CURAND_MRG32k3a) - elseif(${CURAND_ENGINE_UPPER} STREQUAL "PHILOX") - target_compile_definitions(${CCS_TARGET} PRIVATE CURAND_Philox4_32_10) - elseif(${CURAND_ENGINE_UPPER} STREQUAL "XORWOW") - target_compile_definitions(${CCS_TARGET} PRIVATE CURAND_XORWOW) - elseif(DEFINED CURAND_ENGINE) - message(FATAL_ERROR "${CURAND_ENGINE} is not a suitable value of CURAND_ENGINE\nOptions: \"MRG\", \"PHILOX\", \"XORWOW\"") - endif() - endfunction() # Function to mask some of the steps to create an executable which links against the static library -function(add_flamegpu_executable NAME SRC FLAMEGPU_ROOT PROJECT_ROOT IS_EXAMPLE) +function(flamegpu_add_executable NAME SRC FLAMEGPU_ROOT PROJECT_ROOT IS_EXAMPLE) # @todo - correctly set PUBLIC/PRIVATE/INTERFACE for executables created with this utility function # Parse optional arugments. cmake_parse_arguments( - ADD_FLAMEGPU_EXECUTABLE + FLAMEGPU_ADD_EXECUTABLE "" "" "LINT_EXCLUDE_FILTERS" @@ -317,12 +257,18 @@ function(add_flamegpu_executable NAME SRC FLAMEGPU_ROOT PROJECT_ROOT IS_EXAMPLE) add_executable(${NAME} ${SRC}) # Set target level warnings. 
- EnableFLAMEGPUCompilerWarnings(TARGET "${NAME}") + flamegpu_enable_compiler_warnings(TARGET "${NAME}") # Apply common compiler settings - CommonCompilerSettings(TARGET "${NAME}") - # Set the cuda gencodes, potentially using the user-provided CUDA_ARCH - SetCUDAGencodes(TARGET "${NAME}") - + flamegpu_common_compiler_settings(TARGET "${NAME}") + + # Set C++17 using modern CMake options + target_compile_features(${NAME} PUBLIC cxx_std_17) + target_compile_features(${NAME} PUBLIC cuda_std_17) + set_property(TARGET ${NAME} PROPERTY CXX_EXTENSIONS OFF) + set_property(TARGET ${NAME} PROPERTY CUDA_EXTENSIONS OFF) + set_property(TARGET ${NAME} PROPERTY CXX_STANDARD_REQUIRED ON) + set_property(TARGET ${NAME} PROPERTY CUDA_STANDARD_REQUIRED ON) + # Enable RDC for the target set_property(TARGET ${NAME} PROPERTY CUDA_SEPARABLE_COMPILATION ON) @@ -336,50 +282,26 @@ function(add_flamegpu_executable NAME SRC FLAMEGPU_ROOT PROJECT_ROOT IS_EXAMPLE) endif() # Activate visualisation if requested - if (VISUALISATION) - # Copy DLLs - # @todo clean this up. It would be much better if it were dynamic based on the visualisers's runtime dependencies too. - if(WIN32) - # sdl - # if(NOT sdl2_FOUND) - # Force finding this is disabled, as the cmake vars should already be set. - # set(SDL2_DIR ${VISUALISATION_BUILD}/) - # mark_as_advanced(FORCE SDL2_DIR) - # find_package(SDL2 REQUIRED) - # endif() - add_custom_command(TARGET "${NAME}" POST_BUILD # Adds a post-build event to MyTest - COMMAND ${CMAKE_COMMAND} -E copy_if_different # which executes "cmake - E copy_if_different..." - "${SDL2_RUNTIME_LIBRARIES}" # <--this is in-file - $) # <--this is out-file path - # glew - # if(NOT glew_FOUND) - # Force finding this is disabled, as the cmake vars should already be set. 
- # set(GLEW_DIR ${VISUALISATION_BUILD}/glew) - # mark_as_advanced(FORCE GLEW_DIR) - # find_package(GLEW REQUIRED) - # endif() - add_custom_command(TARGET "${NAME}" POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_if_different - "${GLEW_RUNTIME_LIBRARIES}" - $) - # DevIL - # if(NOT devil_FOUND) - # Force finding this is disabled, as the cmake vars should already be set. - # set(DEVIL_DIR ${VISUALISATION_BUILD}/devil) - # mark_as_advanced(FORCE DEVIL_DIR) - # find_package(DEVIL REQUIRED NO_MODULE) - # endif() - add_custom_command(TARGET "${NAME}" POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${IL_RUNTIME_LIBRARIES} - $) + if (FLAMEGPU_VISUALISATION) + # Copy DLLs / other Runtime dependencies + if(COMMAND flamegpu_visualiser_get_runtime_depenencies) + flamegpu_visualiser_get_runtime_depenencies(vis_runtime_dependencies) + # For each runtime dependency (dll) + foreach(vis_runtime_dependency ${vis_runtime_dependencies}) + # Add a post build comamnd which copies the dll to the directory of the binary if needed. + add_custom_command( + TARGET "${NAME}" POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_if_different + "${vis_runtime_dependency}" + $ + ) + endforeach() + unset(vis_runtime_dependencies) endif() - # @todo - this could be inherrited instead? 
- target_compile_definitions(${NAME} PRIVATE VISUALISATION) endif() # Flag the new linter target and the files to be linted, and pass optional exclusions filters (regex) - new_linter_target(${NAME} "${SRC}" EXCLUDE_FILTERS "${ADD_FLAMEGPU_EXECUTABLE_LINT_EXCLUDE_FILTERS}") + flamegpu_new_linter_target(${NAME} "${SRC}" EXCLUDE_FILTERS "${FLAMEGPU_ADD_EXECUTABLE_LINT_EXCLUDE_FILTERS}") # Setup Visual Studio (and eclipse) filters #src/.h @@ -406,22 +328,7 @@ function(add_flamegpu_executable NAME SRC FLAMEGPU_ROOT PROJECT_ROOT IS_EXAMPLE) # Put within Examples filter if(IS_EXAMPLE) - CMAKE_SET_TARGET_FOLDER(${NAME} "Examples") + flamegpu_set_target_folder(${NAME} "Examples") endif() endfunction() -#----------------------------------------------------------------------- -# a macro that only sets the FOLDER target property if it's -# "appropriate" -# Borrowed from cmake's own CMakeLists.txt -#----------------------------------------------------------------------- -macro(CMAKE_SET_TARGET_FOLDER tgt folder) - if(CMAKE_USE_FOLDERS) - set_property(GLOBAL PROPERTY USE_FOLDERS ON) - if(TARGET ${tgt}) # AND MSVC # AND MSVC stops all lint from being set with folder - set_property(TARGET "${tgt}" PROPERTY FOLDER "${folder}") - endif() - else() - set_property(GLOBAL PROPERTY USE_FOLDERS OFF) - endif() -endmacro() \ No newline at end of file diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake new file mode 100644 index 000000000..622ce4961 --- /dev/null +++ b/cmake/cpplint.cmake @@ -0,0 +1,98 @@ +# Only include this once +include_guard(GLOBAL) + +include(${CMAKE_CURRENT_LIST_DIR}/SetTargetFolder.cmake) + +# Find CPPLINT, storing in a variable CPPLINT_EXECUTABLE +find_file(CPPLINT_EXECUTABLE NAMES cpplint cpplint.exe) +# find_file sets a cache var, so mark as advanced to hide from the GUI by default +mark_as_advanced(CPPLINT_EXECUTABLE) + +# If CPPLINT_EXECUTABLE was not found, emit a warning. This should only be once due to the include guard. 
+if(NOT CPPLINT_EXECUTABLE) + # Only raises a STATUS not a WARNING/AUTHOR_WARNING for CI's benefit + message(STATUS + " cpplint: NOT FOUND!\n" + " Lint projects will not be generated.\n" + " Please install cpplint as described on https://pypi.python.org/pypi/cpplint.\n" + " In most cases command 'pip install cpplint' should be sufficient.") +endif() + +# Declare a function to create the all_lint target, if cpplint was found. + +# Create an all_lint target if cpplint is available +function(flamegpu_create_all_lint_target) + if(CPPLINT_EXECUTABLE AND NOT TARGET all_lint) + # Add a custom target to lint all child projects. Dependencies are specified in child projects. + add_custom_target(all_lint) + # Don't trigger this target on ALL_BUILD or Visual Studio 'Rebuild Solution' + set_target_properties(all_lint PROPERTIES EXCLUDE_FROM_ALL TRUE) + # set_target_properties(all_lint PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD TRUE) + # Put all_lint within Lint filter + flamegpu_set_target_folder(all_lint "Lint") + endif() +endfunction() + +# Define a function to add a lint target. +function(flamegpu_new_linter_target NAME SRC) + # if cpplint has not been found, do nothing + if(NOT CPPLINT_EXECUTABLE) + return() + endif() + # Create the all_lint meta target if it does not exist + flamegpu_create_all_lint_target() + # Define a cmake function for adding a new lint target. + cmake_parse_arguments( + NEW_LINTER_TARGET + "" + "" + "EXCLUDE_FILTERS" + ${ARGN}) + # Don't lint external files + list(FILTER SRC EXCLUDE REGEX "^${FLAMEGPU_ROOT}/externals/.*") + # Don't lint user provided list of regular expressions.
+ foreach(EXCLUDE_FILTER ${NEW_LINTER_TARGET_EXCLUDE_FILTERS}) + list(FILTER SRC EXCLUDE REGEX "${EXCLUDE_FILTER}") + endforeach() + + # Only lint accepted file type extensions h++, hxx, cuh, cu, c, c++, cxx, cc, hpp, h, cpp, hh + list(FILTER SRC INCLUDE REGEX ".*\\.(h\\+\\+|hxx|cuh|cu|c|c\\+\\+|cxx|cc|hpp|h|cpp|hh)$") + + # Build a list of arguments to pass to CPPLINT + LIST(APPEND CPPLINT_ARGS "") + + # Specify output format for msvc highlighting + if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + LIST(APPEND CPPLINT_ARGS "--output" "vs7") + endif() + # Set the --repository argument if included as a sub project. + if(NOT CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) + # Use find the repository root via git, to pass to cpplint. + execute_process(COMMAND git rev-parse --show-toplevel + WORKING_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}" + RESULT_VARIABLE git_repo_found + OUTPUT_VARIABLE abs_repo_root + OUTPUT_STRIP_TRAILING_WHITESPACE) + if(git_repo_found EQUAL 0) + LIST(APPEND CPPLINT_ARGS "--repository=${abs_repo_root}") + endif() + endif() + # Add the lint_ target + add_custom_target( + "lint_${PROJECT_NAME}" + COMMAND ${CPPLINT_EXECUTABLE} ${CPPLINT_ARGS} + ${SRC} + ) + + # Don't trigger this target on ALL_BUILD or Visual Studio 'Rebuild Solution' + set_target_properties("lint_${NAME}" PROPERTIES EXCLUDE_FROM_ALL TRUE) + # Add the custom target as a dependency of the global lint target + if(TARGET all_lint) + add_dependencies(all_lint lint_${NAME}) + endif() + # Put within Lint filter + if (CMAKE_USE_FOLDERS) + set_property(GLOBAL PROPERTY USE_FOLDERS ON) + set_property(TARGET "lint_${PROJECT_NAME}" PROPERTY FOLDER "Lint") + endif () +endfunction() diff --git a/cmake/cuda_arch.cmake b/cmake/cuda_arch.cmake deleted file mode 100644 index 47acf489d..000000000 --- a/cmake/cuda_arch.cmake +++ /dev/null @@ -1,134 +0,0 @@ -# Provides a per target function to set gencode compiler options. 
-# Function to suppress compiler warnings for a given target -# If the cmake variable CUDA_ARCH is set, to a non emtpy list or space separated string this will be used instead. -# @todo - find a way to warn about deprecated architectures once and only once (at cmake time?) Might need to just try compiling with old warnings and capture / post process the output. -# @todo - figure out how to do this once and only once as a function rather than a macro. -macro(SetCUDAGencodes) - # @todo - only get the available gencodes from nvcc once, rather than per target. - - # Parse the expected arguments, prefixing variables. - cmake_parse_arguments( - SCG - "" - "TARGET" - "" - ${ARGN} - ) - # Ensure that a target has been passed, and that it is a valid target. - if(NOT SCG_TARGET) - message( FATAL_ERROR "SetCUDAGencodes: 'TARGET' argument required." ) - elseif(NOT TARGET ${SCG_TARGET} ) - message( FATAL_ERROR "SetCUDAGencodes: TARGET '${SCG_TARGET}' is not a valid target" ) - endif() - - # CMAKE > 3.18 introduces CUDA_ARCHITECTURES as a cmake-native way of generating gencodes (Policy CMP0104). Set the value to OFF to prevent errors for it being not provided. - # We manually set gencode arguments, so we can (potentially) use LTO and are not restricted to CMake's availble options. - set_property(TARGET ${SCG_TARGET} PROPERTY CUDA_ARCHITECTURES OFF) - - # Define the default compute capabilites incase not provided by the user - set(DEFAULT_CUDA_ARCH "35;50;60;70;80;90;") - - # Determine if the user has provided a non default CUDA_ARCH value - string(LENGTH "${CUDA_ARCH}" CUDA_ARCH_LENGTH) - - # Query NVCC in order to filter the provided list. - # @todo only do this once, and re-use the output for a given cmake configure? - - # Get the valid options for the current compiler. - # Run nvcc --help to get the help string which contains all valid compute_ sm_ for that version. 
- if(NOT DEFINED SUPPORTED_CUDA_ARCH) - execute_process(COMMAND ${CMAKE_CUDA_COMPILER} "--help" OUTPUT_VARIABLE NVCC_HELP_STR ERROR_VARIABLE NVCC_HELP_STR) - # Match all comptue_XX or sm_XXs - string(REGEX MATCHALL "'(sm|compute)_[0-9]+'" SUPPORTED_CUDA_ARCH "${NVCC_HELP_STR}" ) - # Strip just the numeric component - string(REGEX REPLACE "'(sm|compute)_([0-9]+)'" "\\2" SUPPORTED_CUDA_ARCH "${SUPPORTED_CUDA_ARCH}" ) - # Remove dupes and sort to build the correct list of supported CUDA_ARCH. - list(REMOVE_DUPLICATES SUPPORTED_CUDA_ARCH) - list(REMOVE_ITEM SUPPORTED_CUDA_ARCH "") - list(SORT SUPPORTED_CUDA_ARCH) - - # Store the supported arch's once and only once. This could be a cache var given the cuda compiler should not be able to change without clearing th cache? - get_directory_property(hasParent PARENT_DIRECTORY) - if(hasParent) - set(SUPPORTED_CUDA_ARCH ${SUPPORTED_CUDA_ARCH} PARENT_SCOPE) - endif() - endif() - - - # Update defaults to only be those supported - # @todo might be better to instead do a dry run compilation with each gencode to validate? - foreach(ARCH IN LISTS DEFAULT_CUDA_ARCH) - if (NOT ARCH IN_LIST SUPPORTED_CUDA_ARCH) - list(REMOVE_ITEM DEFAULT_CUDA_ARCH "${ARCH}") - endif() - list(REMOVE_DUPLICATES CUDA_ARCH) - list(REMOVE_ITEM CUDA_ARCH "") - list(SORT CUDA_ARCH) - endforeach() - - if(NOT CUDA_ARCH_LENGTH EQUAL 0) - # Convert user provided string argument to a list. - string (REPLACE " " ";" CUDA_ARCH "${CUDA_ARCH}") - string (REPLACE "," ";" CUDA_ARCH "${CUDA_ARCH}") - - # Remove duplicates, empty items and sort. - list(REMOVE_DUPLICATES CUDA_ARCH) - list(REMOVE_ITEM CUDA_ARCH "") - list(SORT CUDA_ARCH) - - # Validate the list. 
- foreach(ARCH IN LISTS CUDA_ARCH) - if (NOT ARCH IN_LIST SUPPORTED_CUDA_ARCH) - message(WARNING - " CUDA_ARCH '${ARCH}' not supported by CUDA ${CMAKE_CUDA_COMPILER_VERSION} and is being ignored.\n" - " Choose from: ${SUPPORTED_CUDA_ARCH}") - list(REMOVE_ITEM CUDA_ARCH "${ARCH}") - endif() - endforeach() - endif() - - - # If the list is empty post validation, set it to the (validated) defaults - list(LENGTH CUDA_ARCH CUDA_ARCH_LENGTH) - if(CUDA_ARCH_LENGTH EQUAL 0) - set(CUDA_ARCH ${DEFAULT_CUDA_ARCH}) - endif() - - # Propagate the validated values to the parent scope, to reduce warning duplication. - get_directory_property(hasParent PARENT_DIRECTORY) - if(hasParent) - set(CUDA_ARCH ${CUDA_ARCH} PARENT_SCOPE) - endif() - - # If the list is somehow empty now, do not set any gencodes arguments, instead using the compiler defaults. - list(LENGTH CUDA_ARCH CUDA_ARCH_LENGTH) - if(NOT CUDA_ARCH_LENGTH EQUAL 0) - # Only do this if required.I.e. CUDA_ARCH is the same as the last time this file was included - if(NOT CUDA_ARCH_APPLIED EQUAL CUDA_ARCH) - message(STATUS "Generating Compute Capabilities: ${CUDA_ARCH}") - if(hasParent) - set(CUDA_ARCH_APPLIED "${CUDA_ARCH}" PARENT_SCOPE ) - endif() - endif() - set(MIN_CUDA_ARCH) - # Convert to gencode arguments - - foreach(ARCH IN LISTS CUDA_ARCH) - target_compile_options(${SCG_TARGET} PRIVATE "$<$:SHELL:-gencode arch=compute_${ARCH}$code=sm_${ARCH}>") - target_link_options(${SCG_TARGET} PRIVATE "$code=sm_${ARCH}>") - endforeach() - - # Add the last arch again as compute_, compute_ to enable forward looking JIT - list(GET CUDA_ARCH -1 LAST_ARCH) - target_compile_options(${SCG_TARGET} PRIVATE "$<$:SHELL:-gencode arch=compute_${LAST_ARCH}$code=compute_${LAST_ARCH}>") - target_link_options(${SCG_TARGET} PRIVATE "$code=compute_${LAST_ARCH}>") - - # Get the minimum device architecture to pass through to nvcc to enable graceful failure prior to cuda execution. 
- list(GET CUDA_ARCH 0 MIN_CUDA_ARCH) - - # Set the minimum arch flags for all compilers - target_compile_definitions(${SCG_TARGET} PRIVATE -DMIN_CUDA_ARCH=${MIN_CUDA_ARCH}) - else() - message(STATUS "Generating default CUDA Compute Capabilities ${CUDA_ARCH}") - endif() -endmacro() diff --git a/cmake/cxxstd.cmake b/cmake/cxxstd.cmake deleted file mode 100644 index dc327b4cb..000000000 --- a/cmake/cxxstd.cmake +++ /dev/null @@ -1,37 +0,0 @@ -# Select the CXX standard to use, FLAME GPU 2 is c++17 only -if(NOT FLAMEGPU_CXX_STD) - # No need to check CMake version, as our minimum (3.18) supports CUDA c++17 - - # Check the CUDA version, CUDA 11.0 adds CXX 17 support - if(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 11.0.0) - # Fatal Error. - message(FATAL_ERROR "CUDA ${CMAKE_CUDA_COMPILER_VERSION} does not support -std=c++17") - endif() - - # Check MSVC version, VS 2017 version 15.3 added /std:c++17 - 1911 - # Inside source code, __STDC_VERSION__ can be used on msvc, which will have a value such as 201710L for c++17 - if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.11) - message(FATAL_ERROR "MSVC ${CMAKE_CXX_COMPILER_VERSION} does not support -std=c++17 (>= 19.11 required)") - endif() - endif() - - # GCC 8 required for - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.1) - message(FATAL_ERROR "GCC >= 8.1 required for -std=c++17 ") - endif() - - # Set a cmake variable so this is only calcualted once, and can be applied afterwards. - set(FLAMEGPU_CXX_STD 17) -endif() - -# @future - set this on a per target basis using set_target_properties? 
-set(CMAKE_CXX_EXTENSIONS OFF) -if(NOT DEFINED CMAKE_CXX_STANDARD) - set(CMAKE_CXX_STANDARD ${FLAMEGPU_CXX_STD}) - set(CMAKE_CXX_STANDARD_REQUIRED true) -endif() -if(NOT DEFINED CMAKE_CUDA_STANDARD) - set(CMAKE_CUDA_STANDARD ${FLAMEGPU_CXX_STD}) - set(CMAKE_CUDA_STANDARD_REQUIRED True) -endif() diff --git a/cmake/dependencies/Jitify.cmake b/cmake/dependencies/Jitify.cmake index 5c076aa56..d5aa509c2 100644 --- a/cmake/dependencies/Jitify.cmake +++ b/cmake/dependencies/Jitify.cmake @@ -24,3 +24,11 @@ endif() set(CMAKE_PREFIX_PATH "${CMAKE_PREFIX_PATH};${jitify_SOURCE_DIR}/..") # Always find the package, even if jitify is already populated. find_package(Jitify REQUIRED) + +# Mark some CACHE vars advanced for a cleaner GUI +mark_as_advanced(FETCHCONTENT_SOURCE_DIR_JITIFY) +mark_as_advanced(FETCHCONTENT_QUIET) +mark_as_advanced(FETCHCONTENT_BASE_DIR) +mark_as_advanced(FETCHCONTENT_FULLY_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_JITIFY) \ No newline at end of file diff --git a/cmake/dependencies/Thrust.cmake b/cmake/dependencies/Thrust.cmake index 2697ef031..e485022b1 100644 --- a/cmake/dependencies/Thrust.cmake +++ b/cmake/dependencies/Thrust.cmake @@ -81,6 +81,13 @@ if(FETCH_THRUST_CUB) find_package(Thrust REQUIRED CONFIG) find_package(CUB REQUIRED CONFIG) endif() + # Mark some CACHE vars as advanced for a cleaner CMake GUI + mark_as_advanced(FETCHCONTENT_QUIET) + mark_as_advanced(FETCHCONTENT_BASE_DIR) + mark_as_advanced(FETCHCONTENT_FULLY_DISCONNECTED) + mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED) + mark_as_advanced(FETCHCONTENT_SOURCE_DIR_THRUST) + mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_THRUST) endif() # Unset temporary variables @@ -88,3 +95,7 @@ unset(FETCH_THRUST_CUB) unset(MIN_REQUIRED_THRUST_VERSION) unset(MIN_REQUIRED_CUB_VERSION) unset(THRUST_DOWNLOAD_VERSION) + +# Mark some CACHE vars as advanced for a cleaner CMake GUI +mark_as_advanced(CUB_DIR)
+mark_as_advanced(Thrust_DIR) \ No newline at end of file diff --git a/cmake/dependencies/Tinyxml2.cmake b/cmake/dependencies/Tinyxml2.cmake index 7382f8260..ad8cad118 100644 --- a/cmake/dependencies/Tinyxml2.cmake +++ b/cmake/dependencies/Tinyxml2.cmake @@ -68,10 +68,18 @@ if(NOT tinyxml2_POPULATED) # Suppress warnigns from this target. include(${CMAKE_CURRENT_LIST_DIR}/../warnings.cmake) - DisableCompilerWarnings(TARGET tinyxml2) + flamegpu_disable_compiler_warnings(TARGET tinyxml2) # Create an alias target for tinyxml2 to namespace it / make it more like other modern cmake add_library(Tinyxml2::tinyxml2 ALIAS tinyxml2) endif() endif() + +# Mark some CACHE vars advanced for a cleaner GUI +mark_as_advanced(FETCHCONTENT_SOURCE_DIR_TINYXML2) +mark_as_advanced(FETCHCONTENT_QUIET) +mark_as_advanced(FETCHCONTENT_BASE_DIR) +mark_as_advanced(FETCHCONTENT_FULLY_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_TINYXML2) \ No newline at end of file diff --git a/cmake/dependencies/doxygen.cmake b/cmake/dependencies/doxygen.cmake index b5ea21dba..82dd41c17 100644 --- a/cmake/dependencies/doxygen.cmake +++ b/cmake/dependencies/doxygen.cmake @@ -1,7 +1,7 @@ # Doxygen find_package(Doxygen OPTIONAL_COMPONENTS mscgen dia dot) if(DOXYGEN_FOUND) - option(BUILD_API_DOCUMENTATION "Enable building documentation (requires Doxygen)" ON) + option(FLAMEGPU_BUILD_API_DOCUMENTATION "Enable building documentation (requires Doxygen)" ON) else() if(CMAKE_CUDA_COMPILER STREQUAL NOTFOUND) message(FATAL_ERROR @@ -17,7 +17,7 @@ else() endif() endif() -function(create_doxygen_target FLAMEGPU_ROOT DOXY_OUT_DIR XML_PATH) +function(flamegpu_create_doxygen_target FLAMEGPU_ROOT DOXY_OUT_DIR XML_PATH) if(DOXYGEN_FOUND) # Modern method which generates unique doxyfile # These args taken from readme.md at time of commit @@ -62,7 +62,7 @@ function(create_doxygen_target FLAMEGPU_ROOT DOXY_OUT_DIR XML_PATH) set(DOXYGEN_WARN_IF_DOC_ERROR YES) 
set(DOXYGEN_WARN_IF_INCOMPLETE_DOC YES) set(DOXYGEN_WARN_NO_PARAMDOC YES) # Defaults off, unlike other warning settings - if(WARNINGS_AS_ERRORS) + if(FLAMEGPU_WARNINGS_AS_ERRORS) if(DOXYGEN_VERSION VERSION_GREATER_EQUAL 1.9.0) set(DOXYGEN_WARN_AS_ERROR FAIL_ON_WARNINGS) else() @@ -76,7 +76,7 @@ function(create_doxygen_target FLAMEGPU_ROOT DOXY_OUT_DIR XML_PATH) set(DOXYGEN_ENABLE_PREPROCESSING YES) set(DOXYGEN_MACRO_EXPANSION YES) set(DOXYGEN_EXPAND_ONLY_PREDEF YES) - set(DOXYGEN_PREDEFINED "DERIVED_FLAMEGPUException(name,default_message)=class name: public flamegpu::FLAMEGPUException { public: explicit name(const char *format = default_message)\; }" "VISUALISATION= ") + set(DOXYGEN_PREDEFINED "DERIVED_FLAMEGPUException(name,default_message)=class name: public flamegpu::FLAMEGPUException { public: explicit name(const char *format = default_message)\; }" "FLAMEGPU_VISUALISATION= ") set(DOXY_INPUT_FILES "${FLAMEGPU_ROOT}/include;${FLAMEGPU_ROOT}/src;${FLAMEGPU_ROOT}/README.md") # Do not generate a todo list page set(DOXYGEN_GENERATE_TODOLIST NO) @@ -88,9 +88,9 @@ function(create_doxygen_target FLAMEGPU_ROOT DOXY_OUT_DIR XML_PATH) set(DOXYGEN_HTML_OUTPUT docs) doxygen_add_docs("docs" "${DOXY_INPUT_FILES}") set_target_properties("docs" PROPERTIES EXCLUDE_FROM_ALL TRUE) - if(COMMAND CMAKE_SET_TARGET_FOLDER) + if(COMMAND flamegpu_set_target_folder) # Put within FLAMEGPU filter - CMAKE_SET_TARGET_FOLDER("docs" "FLAMEGPU") + flamegpu_set_target_folder("docs" "FLAMEGPU") endif() endif() else() @@ -100,9 +100,9 @@ function(create_doxygen_target FLAMEGPU_ROOT DOXY_OUT_DIR XML_PATH) set(DOXYGEN_XML_OUTPUT "${XML_PATH}") doxygen_add_docs("api_docs_xml" "${DOXY_INPUT_FILES}") set_target_properties("api_docs_xml" PROPERTIES EXCLUDE_FROM_ALL TRUE) - if(COMMAND CMAKE_SET_TARGET_FOLDER) + if(COMMAND flamegpu_set_target_folder) # Put within FLAMEGPU filter - CMAKE_SET_TARGET_FOLDER("api_docs_xml" "FLAMEGPU") + flamegpu_set_target_folder("api_docs_xml" "FLAMEGPU") endif() endif() 
endif() diff --git a/cmake/dependencies/flamegpu2-visualiser.cmake b/cmake/dependencies/flamegpu2-visualiser.cmake index 4f7270ea6..7ef7508ad 100644 --- a/cmake/dependencies/flamegpu2-visualiser.cmake +++ b/cmake/dependencies/flamegpu2-visualiser.cmake @@ -7,71 +7,81 @@ include(FetchContent) cmake_policy(SET CMP0079 NEW) # Set the visualiser repo and tag to use unless overridden by the user. -# @todo - If the git version has changed in this file, fetch again? -set(DEFAULT_VISUALISATION_GIT_VERSION "dcfeb74b9a3df60c44af64ab98af87aae525f795") -set(DEFAULT_VISUALISATION_REPOSITORY "https://github.com/FLAMEGPU/FLAMEGPU2-visualiser.git") +set(DEFAULT_FLAMEGPU_VISUALISATION_GIT_VERSION "b858b8626ec58654577d695018f80c66d127ca10") +set(DEFAULT_FLAMEGPU_VISUALISATION_REPOSITORY "https://github.com/FLAMEGPU/FLAMEGPU2-visualiser.git") -# If overridden by the user, attempt to use that -if (VISUALISATION_ROOT) - # @todo - we should make the visualisation package find_package() compatible, and check it exists if VISUALISATION_ROOT is set. +# Set a FLAMEGPU_VISUALISATION_ROOT cache entry so it is available in the GUI to override the location if required +if(NOT DEFINED CACHE{FLAMEGPU_VISUALISATION_ROOT}) + set(FLAMEGPU_VISUALISATION_ROOT "" CACHE STRING "Path to local copy of the FLAMEGPU2-visualiser repository, rather than CMake-based fetching") +endif() +# Detect if the user provided the visualisation root or not, by comparing to the fetch content source dir. +if (FLAMEGPU_VISUALISATION_ROOT) + # @todo - we should make the visualisation package find_package() compatible, and check it exists if FLAMEGPU_VISUALISATION_ROOT is set.
# Look for the main visualisation header to get the abs path, but only look relative to the hints/paths, no cmake defaults (for now) - set(VISUALISATION_INCLUDE_HEADER_FILE include/flamegpu/visualiser/FLAMEGPU_Visualisation.h) - find_path(VISUALISATION_ROOT_ABS + set(FLAMEGPU_VISUALISATION_INCLUDE_HEADER_FILE include/flamegpu/visualiser/FLAMEGPU_Visualisation.h) + find_path(FLAMEGPU_VISUALISATION_ROOT_ABS NAMES - ${VISUALISATION_INCLUDE_HEADER_FILE} + ${FLAMEGPU_VISUALISATION_INCLUDE_HEADER_FILE} HINTS - ${VISUALISATION_ROOT} + ${FLAMEGPU_VISUALISATION_ROOT} PATHS - ${VISUALISATION_ROOT} + ${FLAMEGPU_VISUALISATION_ROOT} NO_DEFAULT_PATH ) # If found, use the local vis, otherwise error. - if(VISUALISATION_ROOT_ABS) + if(FLAMEGPU_VISUALISATION_ROOT_ABS) # If the correct visualtiion root was found, output a successful status message - message(STATUS "Found VISUALISATION_ROOT: ${VISUALISATION_ROOT_ABS} (${VISUALISATION_ROOT})") + message(STATUS "Found FLAMEGPU_VISUALISATION_ROOT: ${FLAMEGPU_VISUALISATION_ROOT_ABS} (${FLAMEGPU_VISUALISATION_ROOT})") # update the value to the non abs version, in local and parent scope. - set(VISUALISATION_ROOT "${VISUALISATION_ROOT_ABS}") - set(VISUALISATION_ROOT "${VISUALISATION_ROOT_ABS}" PARENT_SCOPE) + set(FLAMEGPU_VISUALISATION_ROOT "${FLAMEGPU_VISUALISATION_ROOT_ABS}") + set(FLAMEGPU_VISUALISATION_ROOT "${FLAMEGPU_VISUALISATION_ROOT_ABS}" PARENT_SCOPE) # And set up the visualisation build - add_subdirectory(${VISUALISATION_ROOT_ABS} ${CMAKE_CURRENT_BINARY_DIR}/_deps/flamegpu_visualiser-build EXCLUDE_FROM_ALL) - # Set locally and for parent scope, which are mutually exclusive - set(VISUALISATION_BUILD ${flamegpu_visualiser_BINARY_DIR} CACHE INTERNAL "flamegpu_visualiser_BINARY_DIR") + add_subdirectory(${FLAMEGPU_VISUALISATION_ROOT_ABS} ${CMAKE_CURRENT_BINARY_DIR}/_deps/flamegpu_visualiser-build EXCLUDE_FROM_ALL) + # Set the cahce var too, to ensure it appears in the GUI. 
+ set(FLAMEGPU_VISUALISATION_ROOT "${FLAMEGPU_VISUALISATION_ROOT}" CACHE STRING "Path to local copy of the FLAMEGPU2-visualiser repository, rather than CMake-based fetching") + else() # Send a fatal error if the visualstion root passed is invalid. - message(FATAL_ERROR "Invalid VISUALISATION_ROOT '${VISUALISATION_ROOT}'.\nVISUALISATION_ROOT must be a valid directory containing '${VISUALISATION_INCLUDE_HEADER_FILE}'") + message(FATAL_ERROR "Invalid FLAMEGPU_VISUALISATION_ROOT '${FLAMEGPU_VISUALISATION_ROOT}'.\nFLAMEGPU_VISUALISATION_ROOT must be a valid directory containing '${FLAMEGPU_VISUALISATION_INCLUDE_HEADER_FILE}'") endif() - else() - # If a VISUALISATION_GIT_VERSION has not been defined, set it to the default option. - if(NOT DEFINED VISUALISATION_GIT_VERSION OR VISUALISATION_GIT_VERSION STREQUAL "") - set(VISUALISATION_GIT_VERSION "${DEFAULT_VISUALISATION_GIT_VERSION}" CACHE STRING "Git branch or tag to use for the FLAMEPGU2_visualiaer") + # If not using a user-specified FLAMEGPU_VISUALISATION_ROOT, fetch content via CMake + # If a FLAMEGPU_VISUALISATION_GIT_VERSION has not been defined, set it to the default option. + if(NOT DEFINED FLAMEGPU_VISUALISATION_GIT_VERSION OR FLAMEGPU_VISUALISATION_GIT_VERSION STREQUAL "") + set(FLAMEGPU_VISUALISATION_GIT_VERSION "${DEFAULT_FLAMEGPU_VISUALISATION_GIT_VERSION}" CACHE STRING "Git branch or tag to use for the FLAMEPGU2_visualiser") endif() + mark_as_advanced(FLAMEGPU_VISUALISATION_GIT_VERSION) # Allow users to switch to forks with relative ease.
- if(NOT DEFINED VISUALISATION_REPOSITORY OR VISUALISATION_REPOSITORY STREQUAL "") - set(VISUALISATION_REPOSITORY "${DEFAULT_VISUALISATION_REPOSITORY}" CACHE STRING "Remote Git Repository for the FLAMEPGU2_visualiaer") + if(NOT DEFINED FLAMEGPU_VISUALISATION_REPOSITORY OR FLAMEGPU_VISUALISATION_REPOSITORY STREQUAL "") + set(FLAMEGPU_VISUALISATION_REPOSITORY "${DEFAULT_FLAMEGPU_VISUALISATION_REPOSITORY}" CACHE STRING "Remote Git Repository for the FLAMEPGU2_visualiser") endif() + mark_as_advanced(FLAMEGPU_VISUALISATION_REPOSITORY) # Otherwise download. FetchContent_Declare( flamegpu_visualiser - GIT_REPOSITORY ${VISUALISATION_REPOSITORY} - GIT_TAG ${VISUALISATION_GIT_VERSION} + GIT_REPOSITORY ${FLAMEGPU_VISUALISATION_REPOSITORY} + GIT_TAG ${FLAMEGPU_VISUALISATION_GIT_VERSION} GIT_PROGRESS ON - # UPDATE_DISCONNECTED ON - ) - FetchContent_GetProperties(flamegpu_visualiser) + ) + FetchContent_GetProperties(flamegpu_visualiser) if(NOT flamegpu_visualiser_POPULATED) - message(STATUS "using flamegpu_visualiser ${VISUALISATION_GIT_VERSION} from ${VISUALISATION_REPOSITORY}") + message(STATUS "using flamegpu_visualiser ${FLAMEGPU_VISUALISATION_GIT_VERSION} from ${FLAMEGPU_VISUALISATION_REPOSITORY}") FetchContent_Populate(flamegpu_visualiser) - + # Add the project as a subdirectory add_subdirectory(${flamegpu_visualiser_SOURCE_DIR} ${flamegpu_visualiser_BINARY_DIR} EXCLUDE_FROM_ALL) - - # Set locally and for parent scope, which are mutually exclusive - set(VISUALISATION_ROOT ${flamegpu_visualiser_SOURCE_DIR} CACHE INTERNAL "flamegpu_visualiser_SOURCE_DIR") - set(VISUALISATION_BUILD ${flamegpu_visualiser_BINARY_DIR} CACHE INTERNAL "flamegpu_visualiser_BINARY_DIR") endif() endif() -unset(DEFAULT_VISUALISATION_GIT_VERSION) -unset(DEFAULT_VISUALISATION_REPOSITORY) +# Mark some CACHE vars advanced for a cleaner GUI +mark_as_advanced(FETCHCONTENT_SOURCE_DIR_FLAMEGPU_VISUALISER) +mark_as_advanced(FETCHCONTENT_QUIET) +mark_as_advanced(FETCHCONTENT_BASE_DIR) 
+mark_as_advanced(FETCHCONTENT_FULLY_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_FLAMEGPU_VISUALISER) +mark_as_advanced(FLAMEGPU_VISUALISATION_ROOT_ABS) +# Unset some variables to avoid scope leaking. +unset(DEFAULT_FLAMEGPU_VISUALISATION_GIT_VERSION) +unset(DEFAULT_FLAMEGPU_VISUALISATION_REPOSITORY) diff --git a/cmake/dependencies/glm.cmake b/cmake/dependencies/glm.cmake index 961eead7d..d328c9545 100644 --- a/cmake/dependencies/glm.cmake +++ b/cmake/dependencies/glm.cmake @@ -18,10 +18,32 @@ FetchContent_Declare( FetchContent_GetProperties(glm) if(NOT glm_POPULATED) FetchContent_Populate(glm) - # glm CMake wants to generate the find file in a system location, so handle it manually - set(CMAKE_PREFIX_PATH "${CMAKE_PREFIX_PATH};${glm_SOURCE_DIR}") + if (NOT TARGET glm::glm) + # glm CMake wants to generate the find file in a system location, so handle it manually + # Find the path, just in case + find_path(glm_INCLUDE_DIRS + NAMES + glm/glm.hpp + PATHS + ${glm_SOURCE_DIR} + NO_CACHE + ) + if(glm_INCLUDE_DIRS) + # Define an imported interface target + add_library(glm::glm INTERFACE IMPORTED) + # Specify the location of the headers (but actually the parent dir, so include can be used.) + target_include_directories(glm::glm INTERFACE "${glm_INCLUDE_DIRS}") + else() + message(FATAL_ERROR "Error during creation og `glm::glm` target.
Could not find glm/glm.hpp") + endif() + unset(glm_INCLUDE_DIRS) + endif() endif() -if (NOT glm_FOUND) - find_package(glm REQUIRED) - # Include path is ${glm_INCLUDE_DIRS} -endif() \ No newline at end of file + +# Mark some CACHE vars advanced for a cleaner GUI +mark_as_advanced(FETCHCONTENT_QUIET) +mark_as_advanced(FETCHCONTENT_BASE_DIR) +mark_as_advanced(FETCHCONTENT_FULLY_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_SOURCE_DIR_GLM) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_GLM) \ No newline at end of file diff --git a/cmake/dependencies/googletest.cmake b/cmake/dependencies/googletest.cmake index 89e454fe8..547d01214 100644 --- a/cmake/dependencies/googletest.cmake +++ b/cmake/dependencies/googletest.cmake @@ -19,16 +19,24 @@ if(NOT googletest_POPULATED) # Suppress installation target, as this makes a warning set(INSTALL_GTEST OFF CACHE BOOL "" FORCE) mark_as_advanced(FORCE INSTALL_GTEST) + set(BUILD_GMOCK OFF CACHE BOOL "Builds the googlemock subproject" FORCE) mark_as_advanced(FORCE BUILD_GMOCK) # Prevent overriding the parent project's compiler/linker # settings on Windows set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL) - CMAKE_SET_TARGET_FOLDER("gtest" "Tests/Dependencies") + flamegpu_set_target_folder("gtest" "Tests/Dependencies") # Suppress warnigns from this target. 
include(${CMAKE_CURRENT_LIST_DIR}/../warnings.cmake) if(TARGET gtest) - DisableCompilerWarnings(TARGET gtest) + flamegpu_disable_compiler_warnings(TARGET gtest) endif() endif() +# Mark some CACHE vars advanced for a cleaner GUI +mark_as_advanced(FETCHCONTENT_SOURCE_DIR_GOOGLETEST) +mark_as_advanced(FETCHCONTENT_QUIET) +mark_as_advanced(FETCHCONTENT_BASE_DIR) +mark_as_advanced(FETCHCONTENT_FULLY_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_GOOGLETEST) \ No newline at end of file diff --git a/cmake/dependencies/rapidjson.cmake b/cmake/dependencies/rapidjson.cmake index 579ec1b19..289411c3a 100644 --- a/cmake/dependencies/rapidjson.cmake +++ b/cmake/dependencies/rapidjson.cmake @@ -34,3 +34,12 @@ if(NOT rapidjson_POPULATED) endif() endif() + +# Mark some CACHE vars advanced for a cleaner GUI +mark_as_advanced(RapidJSON_DIR) +mark_as_advanced(FETCHCONTENT_SOURCE_DIR_RAPIDJSON) +mark_as_advanced(FETCHCONTENT_QUIET) +mark_as_advanced(FETCHCONTENT_BASE_DIR) +mark_as_advanced(FETCHCONTENT_FULLY_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_RAPIDJSON) \ No newline at end of file diff --git a/cmake/modules/Findglm.cmake b/cmake/modules/Findglm.cmake deleted file mode 100644 index 81407f83e..000000000 --- a/cmake/modules/Findglm.cmake +++ /dev/null @@ -1,43 +0,0 @@ -# CMake module to find glm headers/library -# -# Very basic. -# -# Usage: -# find_package( glm ) -# if(glm_FOUND) -# include_directories(${glm_INCLUDE_DIRS}) -# endif() -# -# Variables: -# glm_FOUND -# glm_INCLUDE_DIRS -# glm_VERSION -# -# Manually specify glm paths via -Dglm_ROOT=/path/to/glm - -include(FindPackageHandleStandardArgs) - -# Find the main Jitify header -find_path(glm_INCLUDE_DIRS - NAMES - glm/glm.hpp -) - -# if found, get the version number. 
-if(glm_INCLUDE_DIRS) - # glm nolonger has official releases, so there isn't a way to detect a version - set(glm_VERSION "VERSION_UNKNOWN") -endif() -# Apply standard cmake find package rules / variables. I.e. QUIET, glm_FOUND etc. -# Outside the if, so REQUIRED works. -find_package_handle_standard_args(glm - REQUIRED_VARS glm_INCLUDE_DIRS - VERSION_VAR glm_VERSION -) -if(NOT TARGET GLM::glm) - # Create a header only (INTERFACE) target which can be linked against to inherit include directories. Mark this as imported, because there are no build steps requred. - add_library(GLM::glm INTERFACE IMPORTED) - target_include_directories(GLM::glm INTERFACE ${glm_INCLUDE_DIRS}) -endif() -# Set returned values as advanced? -mark_as_advanced(glm_INCLUDE_DIRS glm_VERSION) diff --git a/cmake/version.cmake b/cmake/version.cmake index e81a48dca..b2ef15156 100644 --- a/cmake/version.cmake +++ b/cmake/version.cmake @@ -51,7 +51,7 @@ endif() # Extract the short hash from git to use in the BUILDMETADATA component of SemVer # Based on https://cmake.org/pipermail/cmake/2018-October/068388.html -macro(GET_COMMIT_HASH) +macro(flamegpu_get_commit_hash) # @todo - graceful error handling find_package(Git REQUIRED) if(Git_FOUND) @@ -78,7 +78,7 @@ macro(GET_COMMIT_HASH) endmacro() # Get the hash from git, used as build metadata in semver. -GET_COMMIT_HASH() +flamegpu_get_commit_hash() set(FLAMEGPU_VERSION_BUILDMETADATA "${FLAMEGPU_SHORT_HASH}") # Major.minor.patch version string, CMake if VERSION_ doesn't support non numeric components. @@ -131,7 +131,7 @@ if(CUDAToolkit_FOUND) endif() # If a vis build, also add the visualisation marker -if(VISUALISATION) +if(FLAMEGPU_VISUALISATION) list(APPEND FLAMEGPU_VERSION_PYTHON_LOCAL_SEGMENTS "vis") endif() @@ -145,7 +145,7 @@ endif() # Set the python version number to use, based on the local version flag. 
set(FLAMEGPU_VERSION_PYTHON ${FLAMEGPU_VERSION_PYTHON_PUBLIC}) -if(BUILD_SWIG_PYTHON_LOCALVERSION) +if(FLAMEGPU_BUILD_PYTHON_LOCALVERSION) set(FLAMEGPU_VERSION_PYTHON ${FLAMEGPU_VERSION_PYTHON_LOCAL}) endif() diff --git a/cmake/warnings.cmake b/cmake/warnings.cmake index cc90a2de8..fdda3640f 100644 --- a/cmake/warnings.cmake +++ b/cmake/warnings.cmake @@ -1,6 +1,8 @@ +include_guard(GLOBAL) + # Function to disable all (as many as possible) compiler warnings for a given target -if(NOT COMMAND DisableCompilerWarnings) - function(DisableCompilerWarnings) +if(NOT COMMAND flamegpu_disable_compiler_warnings) + function(flamegpu_disable_compiler_warnings) # Parse the expected arguments, prefixing variables. cmake_parse_arguments( DCW @@ -11,9 +13,9 @@ if(NOT COMMAND DisableCompilerWarnings) ) # Ensure that a target has been passed, and that it is a valid target. if(NOT DCW_TARGET) - message(FATAL_ERROR "DisableCompilerWarnings: 'TARGET' argument required.") + message(FATAL_ERROR "flamegpu_disable_compiler_warnings: 'TARGET' argument required.") elseif(NOT TARGET ${DCW_TARGET}) - message(FATAL_ERROR "DisableCompilerWarnings: TARGET '${DCW_TARGET}' is not a valid target") + message(FATAL_ERROR "flamegpu_disable_compiler_warnings: TARGET '${DCW_TARGET}' is not a valid target") endif() # By default, suppress all warnings, so that warnings are applied per-target. if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") @@ -28,8 +30,8 @@ endif() # Function to set a high level of compiler warnings for a target # Function to disable all (as many as possible) compiler warnings for a given target -if(NOT COMMAND SetHighWarningLevel) - function(SetHighWarningLevel) +if(NOT COMMAND flamegpu_set_high_warning_level) + function(flamegpu_set_high_warning_level) # Parse the expected arguments, prefixing variables. cmake_parse_arguments( SHWL @@ -40,9 +42,9 @@ if(NOT COMMAND SetHighWarningLevel) ) # Ensure that a target has been passed, and that it is a valid target. 
if(NOT SHWL_TARGET) - message(FATAL_ERROR "SetHighWarningLevel: 'TARGET' argument required.") + message(FATAL_ERROR "flamegpu_set_high_warning_level: 'TARGET' argument required.") elseif(NOT TARGET ${SHWL_TARGET}) - message(FATAL_ERROR "SetHighWarningLevel: TARGET '${SHWL_TARGET}' is not a valid target") + message(FATAL_ERROR "flamegpu_set_high_warning_level: TARGET '${SHWL_TARGET}' is not a valid target") endif() # Per host-compiler settings for high warning levels and opt-in warnings. @@ -69,8 +71,8 @@ if(NOT COMMAND SetHighWarningLevel) endif() # Function to apply warning suppressions to a given target, without changing the general warning level (This is so SWIG can have suppressions, with default warning levels) -if(NOT COMMAND SuppressSomeCompilerWarnings) - function(SuppressSomeCompilerWarnings) +if(NOT COMMAND flamegpu_suppress_some_compiler_warnings) + function(flamegpu_suppress_some_compiler_warnings) # Parse the expected arguments, prefixing variables. cmake_parse_arguments( SSCW @@ -81,9 +83,9 @@ if(NOT COMMAND SuppressSomeCompilerWarnings) ) # Ensure that a target has been passed, and that it is a valid target. if(NOT SSCW_TARGET) - message(FATAL_ERROR "SuppressSomeCompilerWarnings: 'TARGET' argument required.") + message(FATAL_ERROR "flamegpu_suppress_some_compiler_warnings: 'TARGET' argument required.") elseif(NOT TARGET ${SSCW_TARGET}) - message(FATAL_ERROR "SuppressSomeCompilerWarnings: TARGET '${SSCW_TARGET}' is not a valid target") + message(FATAL_ERROR "flamegpu_suppress_some_compiler_warnings: TARGET '${SSCW_TARGET}' is not a valid target") endif() # Per host-compiler/OS settings for suppressions @@ -105,7 +107,7 @@ if(NOT COMMAND SuppressSomeCompilerWarnings) # CUDA 11.6 deprecates __device__ cudaDeviceSynchronize, but does not provide an alternative. 
# This is used in cub/thrust, and windows still emits this warning from the third party library if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.6.0) - target_compile_definitions(${SSCW_TARGET} PRIVATE "$<$:__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING>") + target_compile_definitions(${SSCW_TARGET} PRIVATE "__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING") endif() else() # Linux specific warning suppressions @@ -131,9 +133,9 @@ if(NOT COMMAND SuppressSomeCompilerWarnings) endfunction() endif() -# Function to promote warnings to errors, controlled by the WARNINGS_AS_ERRORS CMake option. -if(NOT COMMAND EnableWarningsAsErrors) - function(EnableWarningsAsErrors) +# Function to promote warnings to errors, controlled by the FLAMEGPU_WARNINGS_AS_ERRORS CMake option. +if(NOT COMMAND flamegpu_enable_warnings_as_errors) + function(flamegpu_enable_warnings_as_errors) # Parse the expected arguments, prefixing variables. cmake_parse_arguments( EWAS @@ -144,13 +146,13 @@ if(NOT COMMAND EnableWarningsAsErrors) ) # Ensure that a target has been passed, and that it is a valid target. if(NOT EWAS_TARGET) - message(FATAL_ERROR "EnableWarningsAsErrors: 'TARGET' argument required.") + message(FATAL_ERROR "flamegpu_enable_warnings_as_errors: 'TARGET' argument required.") elseif(NOT TARGET ${EWAS_TARGET}) - message(FATAL_ERROR "EnableWarningsAsErrors: TARGET '${EWAS_TARGET}' is not a valid target") + message(FATAL_ERROR "flamegpu_enable_warnings_as_errors: TARGET '${EWAS_TARGET}' is not a valid target") endif() - # Check the WARNINGS_AS_ERRORS cmake option to optionally enable this. - if(WARNINGS_AS_ERRORS) + # Check the FLAMEGPU_WARNINGS_AS_ERRORS cmake option to optionally enable this. + if(FLAMEGPU_WARNINGS_AS_ERRORS) # OS Specific flags if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # Windows specific options @@ -183,8 +185,8 @@ endif() # + With some warnings suppressed # + Optionally promotes warnings to errors. # Also enables the treating of warnings as errors if required. 
-if(NOT COMMAND EnableFLAMEGPUCompilerWarnings) - function(EnableFLAMEGPUCompilerWarnings) +if(NOT COMMAND flamegpu_enable_compiler_warnings) + function(flamegpu_enable_compiler_warnings) # Parse the expected arguments, prefixing variables. cmake_parse_arguments( EFCW @@ -195,16 +197,16 @@ if(NOT COMMAND EnableFLAMEGPUCompilerWarnings) ) # Ensure that a target has been passed, and that it is a valid target. if(NOT EFCW_TARGET) - message(FATAL_ERROR "EnableFLAMEGPUCompilerWarnings: 'TARGET' argument required.") + message(FATAL_ERROR "flamegpu_enable_compiler_warnings: 'TARGET' argument required.") elseif(NOT TARGET ${EFCW_TARGET}) - message(FATAL_ERROR "EnableFLAMEGPUCompilerWarnings: TARGET '${EFCW_TARGET}' is not a valid target") + message(FATAL_ERROR "flamegpu_enable_compiler_warnings: TARGET '${EFCW_TARGET}' is not a valid target") endif() # Enable a high level of warnings - SetHighWarningLevel(TARGET ${EFCW_TARGET}) + flamegpu_set_high_warning_level(TARGET ${EFCW_TARGET}) # Suppress some warnings - SuppressSomeCompilerWarnings(TARGET ${EFCW_TARGET}) + flamegpu_suppress_some_compiler_warnings(TARGET ${EFCW_TARGET}) # Optionally promote warnings to errors. - EnableWarningsAsErrors(TARGET ${EFCW_TARGET}) + flamegpu_enable_warnings_as_errors(TARGET ${EFCW_TARGET}) endfunction() endif() diff --git a/examples/boids_bruteforce/CMakeLists.txt b/examples/boids_bruteforce/CMakeLists.txt index 2981f3ecb..e65eee714 100644 --- a/examples/boids_bruteforce/CMakeLists.txt +++ b/examples/boids_bruteforce/CMakeLists.txt @@ -1,12 +1,16 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) - -# Name the project and set languages -project(boids_bruteforce CUDA CXX) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. 
REALPATH) +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT boids_bruteforce) + +# Name the project and enable required languages +project(boids_bruteforce CXX CUDA) + # Include common rules. include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -26,7 +30,7 @@ SET(ALL_SRC ) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") diff --git a/examples/boids_bruteforce/src/main.cu b/examples/boids_bruteforce/src/main.cu index 98c63e862..c1f89da1c 100644 --- a/examples/boids_bruteforce/src/main.cu +++ b/examples/boids_bruteforce/src/main.cu @@ -382,7 +382,7 @@ int main(int argc, const char ** argv) { /** * Create visualisation */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis visualisation = cudaSimulation.getVisualisation(); { flamegpu::EnvironmentDescription env = model.Environment(); @@ -464,7 +464,7 @@ int main(int argc, const char ** argv) { */ // cudaSimulation.exportData("end.xml"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualisation.join(); #endif diff --git a/examples/boids_rtc_bruteforce/CMakeLists.txt b/examples/boids_rtc_bruteforce/CMakeLists.txt index 3048a4c73..65adf300a 100644 --- a/examples/boids_rtc_bruteforce/CMakeLists.txt +++ b/examples/boids_rtc_bruteforce/CMakeLists.txt @@ -1,12 +1,16 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) - -# Name the project and set languages -project(boids_rtc_bruteforce CUDA CXX) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of 
the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT boids_rtc_bruteforce) + +# Name the project and enable required languages +project(boids_rtc_bruteforce CXX CUDA) + # Include common rules. include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -26,7 +30,7 @@ SET(ALL_SRC ) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") \ No newline at end of file diff --git a/examples/boids_rtc_bruteforce/src/main.cu b/examples/boids_rtc_bruteforce/src/main.cu index 7d25abcd1..c58d90f7e 100644 --- a/examples/boids_rtc_bruteforce/src/main.cu +++ b/examples/boids_rtc_bruteforce/src/main.cu @@ -429,7 +429,7 @@ int main(int argc, const char ** argv) { /** * Create visualisation */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis visualisation = cudaSimulation.getVisualisation(); { flamegpu::EnvironmentDescription env = model.Environment(); @@ -511,7 +511,7 @@ int main(int argc, const char ** argv) { */ // cudaSimulation.exportData("end.xml"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualisation.join(); #endif diff --git a/examples/boids_rtc_spatial3D/CMakeLists.txt b/examples/boids_rtc_spatial3D/CMakeLists.txt index b9d4ded81..da3038fba 100644 --- a/examples/boids_rtc_spatial3D/CMakeLists.txt +++ b/examples/boids_rtc_spatial3D/CMakeLists.txt @@ -1,12 +1,16 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION
3.18 FATAL_ERROR) - -# Name the project and set languages -project(boids_rtc_spatial3D CUDA CXX) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT boids_rtc_spatial3D) + +# Name the project and enable required languages +project(boids_rtc_spatial3D CXX CUDA) + # Include common rules. include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -26,7 +30,7 @@ SET(ALL_SRC ) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") \ No newline at end of file diff --git a/examples/boids_rtc_spatial3D/src/main.cu b/examples/boids_rtc_spatial3D/src/main.cu index 3bd254a10..11f7416e2 100644 --- a/examples/boids_rtc_spatial3D/src/main.cu +++ b/examples/boids_rtc_spatial3D/src/main.cu @@ -432,7 +432,7 @@ int main(int argc, const char ** argv) { /** * Create visualisation */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis visualisation = cudaSimulation.getVisualisation(); { float envWidth = env.getProperty("MAX_POSITION") - env.getProperty("MIN_POSITION"); @@ -512,7 +512,7 @@ int main(int argc, const char ** argv) { */ // cudaSimulation.exportData("end.xml"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualisation.join(); #endif diff --git a/examples/boids_spatial3D/CMakeLists.txt b/examples/boids_spatial3D/CMakeLists.txt index 665cfebd2..6a7fd9d90 100644 --- 
a/examples/boids_spatial3D/CMakeLists.txt +++ b/examples/boids_spatial3D/CMakeLists.txt @@ -1,12 +1,16 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) - -# Name the project and set languages -project(boids_spatial3D CUDA CXX) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT boids_spatial3D) + +# Name the project and enable required languages +project(boids_spatial3D CXX CUDA) + # Include common rules. include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -26,7 +30,7 @@ SET(ALL_SRC ) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") diff --git a/examples/boids_spatial3D/src/main.cu b/examples/boids_spatial3D/src/main.cu index 97f10eac5..3164033d6 100644 --- a/examples/boids_spatial3D/src/main.cu +++ b/examples/boids_spatial3D/src/main.cu @@ -393,7 +393,7 @@ int main(int argc, const char ** argv) { /** * Create visualisation */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis visualisation = cudaSimulation.getVisualisation(); { flamegpu::EnvironmentDescription env = model.Environment(); @@ -481,7 +481,7 @@ int main(int argc, const char ** argv) { */ // cudaSimulation.exportData("end.xml"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualisation.join(); #endif diff --git 
a/examples/circles_bruteforce/CMakeLists.txt b/examples/circles_bruteforce/CMakeLists.txt index fef1d5282..4c1e2ac51 100644 --- a/examples/circles_bruteforce/CMakeLists.txt +++ b/examples/circles_bruteforce/CMakeLists.txt @@ -1,8 +1,15 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) -# Name the project and set languages -project(circles_bruteforce CUDA CXX) +# Set the location of the ROOT flame gpu project relative to this CMakeList.txt +get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) + +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT circles_bruteforce) + +# Name the project and enable required languages +project(circles_bruteforce CXX CUDA) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. 
REALPATH) @@ -26,10 +33,10 @@ SET(ALL_SRC ) # Option to enable/disable building the static library -option(VISUALISATION "Enable visualisation support" OFF) +option(FLAMEGPU_VISUALISATION "Enable visualisation support" OFF) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") diff --git a/examples/circles_bruteforce/src/main.cu b/examples/circles_bruteforce/src/main.cu index 9fd8748e0..c008b38ee 100644 --- a/examples/circles_bruteforce/src/main.cu +++ b/examples/circles_bruteforce/src/main.cu @@ -126,7 +126,7 @@ int main(int argc, const char ** argv) { /** * Create visualisation */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis m_vis = cudaSimulation.getVisualisation(); { const float INIT_CAM = ENV_MAX * 1.25F; @@ -167,7 +167,7 @@ int main(int argc, const char ** argv) { */ cudaSimulation.exportData("end.xml"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION m_vis.join(); #endif diff --git a/examples/circles_spatial3D/CMakeLists.txt b/examples/circles_spatial3D/CMakeLists.txt index 73810fbbe..616d65bed 100644 --- a/examples/circles_spatial3D/CMakeLists.txt +++ b/examples/circles_spatial3D/CMakeLists.txt @@ -1,12 +1,16 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) - -# Name the project and set languages -project(circles_spatial3D CUDA CXX) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. 
REALPATH) +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT circles_spatial3D) + +# Name the project and enable required languages +project(circles_spatial3D CXX CUDA) + # Include common rules. include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -26,10 +30,10 @@ SET(ALL_SRC ) # Option to enable/disable building the static library -option(VISUALISATION "Enable visualisation support" OFF) +option(FLAMEGPU_VISUALISATION "Enable visualisation support" OFF) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") diff --git a/examples/circles_spatial3D/src/main.cu b/examples/circles_spatial3D/src/main.cu index 814ee7a07..e91f6e6df 100644 --- a/examples/circles_spatial3D/src/main.cu +++ b/examples/circles_spatial3D/src/main.cu @@ -120,7 +120,7 @@ int main(int argc, const char ** argv) { /** * Create visualisation */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis m_vis = cudaSimulation.getVisualisation(); { const float INIT_CAM = ENV_MAX * 1.25F; @@ -188,7 +188,7 @@ int main(int argc, const char ** argv) { */ cudaSimulation.exportData("end.xml"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION m_vis.join(); #endif diff --git a/examples/diffusion/CMakeLists.txt b/examples/diffusion/CMakeLists.txt index bf8151611..2e2d370b3 100644 --- a/examples/diffusion/CMakeLists.txt +++ b/examples/diffusion/CMakeLists.txt @@ -1,12 +1,16 @@ # Set the minimum cmake version to that which supports cuda natively. 
-cmake_minimum_required(VERSION VERSION 3.12 FATAL_ERROR) - -# Name the project and set languages -project(diffusion CUDA CXX) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT diffusion) + +# Name the project and enable required languages +project(diffusion CXX CUDA) + # Include common rules. include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -26,10 +30,10 @@ SET(ALL_SRC ) # Option to enable/disable building the static library -option(VISUALISATION "Enable visualisation support" OFF) +option(FLAMEGPU_VISUALISATION "Enable visualisation support" OFF) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") diff --git a/examples/diffusion/src/main.cu b/examples/diffusion/src/main.cu index 841e7968a..58d60ebce 100644 --- a/examples/diffusion/src/main.cu +++ b/examples/diffusion/src/main.cu @@ -57,7 +57,7 @@ int main(int argc, const char ** argv) { flamegpu::AgentDescription agent = model.newAgent("cell"); agent.newVariable("pos"); agent.newVariable("value"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Redundant separate floating point position vars for vis agent.newVariable("x"); agent.newVariable("y"); @@ -125,7 +125,7 @@ int main(int argc, const char ** argv) { flamegpu::AgentVector::Agent instance = init_pop.back(); instance.setVariable("pos", { x, y }); instance.setVariable("value", 
dist(rng)); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Redundant separate floating point position vars for vis instance.setVariable("x", static_cast(x)); instance.setVariable("y", static_cast(y)); @@ -139,7 +139,7 @@ int main(int argc, const char ** argv) { * Create visualisation * @note FLAMEGPU2 doesn't currently have proper support for discrete/2d visualisations */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis visualisation = cudaSimulation.getVisualisation(); { visualisation.setBeginPaused(true); @@ -167,7 +167,7 @@ int main(int argc, const char ** argv) { /** * Export Pop */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualisation.join(); #endif diff --git a/examples/ensemble/CMakeLists.txt b/examples/ensemble/CMakeLists.txt index 837912a40..53f6d1c1d 100644 --- a/examples/ensemble/CMakeLists.txt +++ b/examples/ensemble/CMakeLists.txt @@ -1,12 +1,16 @@ # Set the minimum cmake version to that which supports cuda natively. -cmake_minimum_required(VERSION VERSION 3.12 FATAL_ERROR) - -# Name the project and set languages -project(ensemble CUDA CXX) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT ensemble) + +# Name the project and enable required languages +project(ensemble CXX CUDA) + # Include common rules. 
include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -26,10 +30,10 @@ SET(ALL_SRC ) # Option to enable/disable building the static library -# option(VISUALISATION "Enable visualisation support" OFF) # This example is unlikely to have a visualisation +# option(FLAMEGPU_VISUALISATION "Enable visualisation support" OFF) # This example is unlikely to have a visualisation # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") \ No newline at end of file diff --git a/examples/game_of_life/CMakeLists.txt b/examples/game_of_life/CMakeLists.txt index 8518972d3..62238e2c4 100644 --- a/examples/game_of_life/CMakeLists.txt +++ b/examples/game_of_life/CMakeLists.txt @@ -1,8 +1,15 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) -# Name the project and set languages -project(game_of_life CUDA CXX) +# Set the location of the ROOT flame gpu project relative to this CMakeList.txt +get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) + +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT game_of_life) + +# Name the project and enable required languages +project(game_of_life CXX CUDA) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. 
REALPATH) @@ -26,10 +33,10 @@ SET(ALL_SRC ) # Option to enable/disable building the static library -option(VISUALISATION "Enable visualisation support" OFF) +option(FLAMEGPU_VISUALISATION "Enable visualisation support" OFF) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") diff --git a/examples/game_of_life/src/main.cu b/examples/game_of_life/src/main.cu index fc5610b64..0e22848f9 100644 --- a/examples/game_of_life/src/main.cu +++ b/examples/game_of_life/src/main.cu @@ -46,7 +46,7 @@ int main(int argc, const char ** argv) { flamegpu::AgentDescription agent = model.newAgent("cell"); agent.newVariable("pos"); agent.newVariable("is_alive"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Redundant separate floating point position vars for vis agent.newVariable("x"); agent.newVariable("y"); @@ -100,7 +100,7 @@ int main(int argc, const char ** argv) { instance.setVariable("pos", { x, y }); char is_alive = dist(rng) < 0.4f ? 
1 : 0; instance.setVariable("is_alive", is_alive); // 40% Chance of being flamegpu::ALIVE -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Redundant separate floating point position vars for vis instance.setVariable("x", static_cast(x)); instance.setVariable("y", static_cast(y)); @@ -114,7 +114,7 @@ int main(int argc, const char ** argv) { * Create visualisation * @note FLAMEGPU2 doesn't currently have proper support for discrete/2d visualisations */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis visualisation = cudaSimulation.getVisualisation(); { visualisation.setBeginPaused(true); @@ -144,7 +144,7 @@ int main(int argc, const char ** argv) { /** * Export Pop */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualisation.join(); #endif diff --git a/examples/host_functions/CMakeLists.txt b/examples/host_functions/CMakeLists.txt index 8d76a2389..2129a20d3 100644 --- a/examples/host_functions/CMakeLists.txt +++ b/examples/host_functions/CMakeLists.txt @@ -1,12 +1,16 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) - -# Name the project and set languages -project(host_functions CUDA CXX) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT host_functions) + +# Name the project and enable required languages +project(host_functions CXX CUDA) + # Include common rules. 
include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -26,10 +30,10 @@ SET(ALL_SRC ) # Option to enable/disable building the static library -# option(VISUALISATION "Enable visualisation support" OFF) # This example is unlikely to have a visualisation +# option(FLAMEGPU_VISUALISATION "Enable visualisation support" OFF) # This example is unlikely to have a visualisation # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") \ No newline at end of file diff --git a/examples/python_boids_spatial3D_bounded/README.md b/examples/python_boids_spatial3D_bounded/README.md index a4827d1e2..99a921652 100644 --- a/examples/python_boids_spatial3D_bounded/README.md +++ b/examples/python_boids_spatial3D_bounded/README.md @@ -4,7 +4,7 @@ This FLAME GPU 2 example uses the `pyflamegpu` library. It uses Spatial 3D messa # Running the example -Running the model requires the pyflamegpu module to be installed. Ensure that you have installed the prerequisites listed in the main README.md and that you have build FLAME GPU using the `BUILD_SWIG_PYTHON` CMake option. This will build a virtual environment which you can activate before executing this script. E.g. +Running the model requires the pyflamegpu module to be installed. Ensure that you have installed the prerequisites listed in the main README.md and that you have build FLAME GPU using the `FLAMEGPU_BUILD_PYTHON` CMake option. This will build a virtual environment which you can activate before executing this script. E.g. 
`../../build/lib/Release/python/venv/Scripts/activate` diff --git a/examples/python_boids_spatial3D_wrapped_native/README.md b/examples/python_boids_spatial3D_wrapped_native/README.md index 14b4d5bfc..ec009ae7a 100644 --- a/examples/python_boids_spatial3D_wrapped_native/README.md +++ b/examples/python_boids_spatial3D_wrapped_native/README.md @@ -4,7 +4,7 @@ This FLAME GPU 2 example uses the `pyflamegpu` library. It uses Spatial 3D messa # Running the example -Running the model requires the pyflamegpu module to be installed. Ensure that you have installed the prerequisites listed in the main README.md and that you have build FLAME GPU using the `BUILD_SWIG_PYTHON` CMake option. This will build a virtual environment which you can activate before executing this script. E.g. +Running the model requires the pyflamegpu module to be installed. Ensure that you have installed the prerequisites listed in the main README.md and that you have build FLAME GPU using the `FLAMEGPU_BUILD_PYTHON` CMake option. This will build a virtual environment which you can activate before executing this script. E.g. `../../build/lib/Release/python/venv/Scripts/activate` diff --git a/examples/sugarscape/CMakeLists.txt b/examples/sugarscape/CMakeLists.txt index 3eb05ed57..a1a4560c8 100644 --- a/examples/sugarscape/CMakeLists.txt +++ b/examples/sugarscape/CMakeLists.txt @@ -1,8 +1,15 @@ # Set the minimum cmake version to that which supports cuda natively. -cmake_minimum_required(VERSION VERSION 3.12 FATAL_ERROR) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) -# Name the project and set languages -project(sugarscape CUDA CXX) +# Set the location of the ROOT flame gpu project relative to this CMakeList.txt +get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. 
REALPATH) + +# Handle CMAKE_CUDA_ARCHITECTURES gracefully +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures(PROJECT sugarscape) + +# Name the project and enable required languages +project(sugarscape CXX CUDA) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../.. REALPATH) @@ -26,10 +33,10 @@ SET(ALL_SRC ) # Option to enable/disable building the static library -option(VISUALISATION "Enable visualisation support" OFF) +option(FLAMEGPU_VISUALISATION "Enable visualisation support" OFF) # Add the executable and set required flags for the target -add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) +flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" TRUE) # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") diff --git a/examples/sugarscape/src/main.cu b/examples/sugarscape/src/main.cu index b3922e38b..f2b1abb84 100644 --- a/examples/sugarscape/src/main.cu +++ b/examples/sugarscape/src/main.cu @@ -222,7 +222,7 @@ flamegpu::AgentDescription makeCoreAgent(flamegpu::ModelDescription &model) { // environment specific var agent.newVariable("env_sugar_level"); agent.newVariable("env_max_sugar_level"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Redundant seperate floating point position vars for vis agent.newVariable("x"); agent.newVariable("y"); @@ -362,7 +362,7 @@ int main(int argc, const char ** argv) { * Create visualisation * @note FLAMEGPU2 doesn't currently have proper support for discrete/2d visualisations */ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis visualisation = cudaSimulation.getVisualisation(); { visualisation.setSimulationSpeed(2); @@ -463,7 +463,7 @@ int main(int argc, const 
char ** argv) { env_sugar_lvl = env_sugar_lvl < SUGAR_MAX_CAPACITY / 2 ? poor_env_sugar_dist(rng) : env_sugar_lvl; instance.setVariable("env_max_sugar_level", env_sugar_lvl); // All cells begin at their local max sugar instance.setVariable("env_sugar_level", env_sugar_lvl); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Redundant separate floating point position vars for vis instance.setVariable("x", static_cast(x)); instance.setVariable("y", static_cast(y)); @@ -484,7 +484,7 @@ int main(int argc, const char ** argv) { */ // cudaSimulation.exportData("end.xml"); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualisation.join(); #endif diff --git a/include/flamegpu/exception/FLAMEGPUDeviceException.cuh b/include/flamegpu/exception/FLAMEGPUDeviceException.cuh index 1b2b787b6..9182caa10 100644 --- a/include/flamegpu/exception/FLAMEGPUDeviceException.cuh +++ b/include/flamegpu/exception/FLAMEGPUDeviceException.cuh @@ -6,7 +6,7 @@ #include "flamegpu/gpu/CUDAScanCompaction.h" -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS #include "flamegpu/exception/FLAMEGPUDeviceException_device.cuh" @@ -51,9 +51,9 @@ class DeviceExceptionManager { } // namespace flamegpu #else /** - * Ignore the device error macro when SEATBELTS are off + * Ignore the device error macro when FLAMEGPU_SEATBELTS are off * These checks are costly to performance */ #define DTHROW(nop) -#endif // SEATBELTS=OFF +#endif // FLAMEGPU_SEATBELTS=OFF #endif // INCLUDE_FLAMEGPU_EXCEPTION_FLAMEGPUDEVICEEXCEPTION_CUH_ diff --git a/include/flamegpu/exception/FLAMEGPUDeviceException_device.cuh b/include/flamegpu/exception/FLAMEGPUDeviceException_device.cuh index 7c01aa924..a596fa345 100644 --- a/include/flamegpu/exception/FLAMEGPUDeviceException_device.cuh +++ b/include/flamegpu/exception/FLAMEGPUDeviceException_device.cuh @@ -11,7 +11,7 @@ namespace flamegpu { namespace exception { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) 
|| FLAMEGPU_SEATBELTS /** * This allows us to write DTHROW("My Error message: %d", 12); or similar to report an error in device code */ @@ -197,11 +197,11 @@ __device__ unsigned int DeviceException::getErrorCount() { #endif #else /** - * Ignore the device error macro when SEATBELTS is OFF + * Ignore the device error macro when FLAMEGPU_SEATBELTS is OFF * These checks are costly to performance */ #define DTHROW(nop) -#endif // SEATBELTS=OFF +#endif // FLAMEGPU_SEATBELTS=OFF } // namespace exception } // namespace flamegpu diff --git a/include/flamegpu/flamegpu.h b/include/flamegpu/flamegpu.h index 7898e76fb..631781265 100644 --- a/include/flamegpu/flamegpu.h +++ b/include/flamegpu/flamegpu.h @@ -1,7 +1,7 @@ #ifndef INCLUDE_FLAMEGPU_FLAMEGPU_H_ #define INCLUDE_FLAMEGPU_FLAMEGPU_H_ -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM #ifdef __CUDACC__ #ifdef __NVCC_DIAG_PRAGMA_SUPPORT__ #pragma nv_diag_suppress = esa_on_defaulted_function_ignored @@ -40,7 +40,7 @@ #include "flamegpu/util/cleanup.h" #include "flamegpu/io/Telemetry.h" -// This include has no impact if VISUALISATION is not defined +// This include has no impact if FLAMEGPU_VISUALISATION is not defined #include "flamegpu/visualiser/visualiser_api.h" #endif // INCLUDE_FLAMEGPU_FLAMEGPU_H_ diff --git a/include/flamegpu/gpu/CUDAAgent.h b/include/flamegpu/gpu/CUDAAgent.h index 560a4e11a..37e3dd358 100644 --- a/include/flamegpu/gpu/CUDAAgent.h +++ b/include/flamegpu/gpu/CUDAAgent.h @@ -30,9 +30,9 @@ class HostAPI; * However it does not own these buffers, they are owned by it's parent CUDAFatAgent, as buffers are shared with all mapped agents too. 
*/ class CUDAAgent : public AgentInterface { -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION friend struct visualiser::AgentVisData; -#endif // VISUALISATION +#endif // FLAMEGPU_VISUALISATION public: /** diff --git a/include/flamegpu/gpu/CUDAMacroEnvironment.h b/include/flamegpu/gpu/CUDAMacroEnvironment.h index 380737b80..4f8e2df95 100644 --- a/include/flamegpu/gpu/CUDAMacroEnvironment.h +++ b/include/flamegpu/gpu/CUDAMacroEnvironment.h @@ -117,7 +117,7 @@ class CUDAMacroEnvironment { */ void registerCurveVariables(detail::curve::HostCurve &curve) const; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * Reset the flags used by seatbelts to catch potential race conditions * @param streams Streams to async reset over @@ -180,7 +180,7 @@ HostMacroProperty CUDAMacroEnvironment::getProperty(const std::st "in HostEnvironment::getMacroProperty()\n", name.c_str(), I, J, K, W, prop->second.elements[0], prop->second.elements[1], prop->second.elements[2], prop->second.elements[3]); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS const unsigned int flags = getDeviceWriteFlag(name); if (flags & (1 << 1)) { THROW flamegpu::exception::InvalidOperation("Environment macro property '%s' was written to by an agent function in the same layer, " @@ -218,7 +218,7 @@ HostMacroProperty_swig CUDAMacroEnvironment::getProperty_swig(const std::stri "in HostEnvironment::getMacroProperty()\n", name.c_str(), std::type_index(typeid(T)).name(), prop->second.type.name()); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS const unsigned int flags = getDeviceWriteFlag(name); if (flags & (1 << 1)) { THROW flamegpu::exception::InvalidOperation("Environment macro property '%s' was written to by an agent function in the same layer, " diff --git a/include/flamegpu/gpu/CUDASimulation.h b/include/flamegpu/gpu/CUDASimulation.h index eaa7df848..1dd0439d3 100644 
--- a/include/flamegpu/gpu/CUDASimulation.h +++ b/include/flamegpu/gpu/CUDASimulation.h @@ -18,7 +18,7 @@ #include "flamegpu/gpu/CUDAMacroEnvironment.h" #include "flamegpu/runtime/utility/EnvironmentManager.cuh" -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION #include "flamegpu/visualiser/ModelVis.h" #endif @@ -52,7 +52,7 @@ class CUDASimulation : public Simulation { friend class HostAgentAPI; friend class SimRunner; friend class CUDAEnsemble; -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION friend struct visualiser::ModelVisData; #endif /** @@ -291,7 +291,7 @@ class CUDASimulation : public Simulation { * Returns a reference to the current exit log */ const RunLog &getRunLog() const override; -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION /** * Creates (on first call) and returns the visualisation configuration options for this model instance */ @@ -526,7 +526,7 @@ class CUDASimulation : public Simulation { * Held here for tracking when to release cuda memory */ std::shared_ptr environment; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * Provides buffers for device error checking */ @@ -598,7 +598,7 @@ class CUDASimulation : public Simulation { */ AgentDataMap agentData; void initOffsetsAndMap(); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION /** * Empty if getVisualisation() hasn't been called */ diff --git a/include/flamegpu/runtime/AgentFunction.cuh b/include/flamegpu/runtime/AgentFunction.cuh index 45fdb2ec9..7dd10c321 100644 --- a/include/flamegpu/runtime/AgentFunction.cuh +++ b/include/flamegpu/runtime/AgentFunction.cuh @@ -19,7 +19,7 @@ namespace flamegpu { enum AGENT_STATUS { ALIVE = 1, DEAD = 0 }; typedef void(AgentFunctionWrapper)( -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS exception::DeviceExceptionBuffer *error_buffer, #endif #ifndef __CUDACC_RTC__ @@ -38,7 +38,7 @@ typedef void(AgentFunctionWrapper)( /** * Wrapper function for 
launching agent functions * Initialises FLAMEGPU_API instance - * @param error_buffer Buffer used for detecting and reporting exception::DeviceErrors (flamegpu must be built with SEATBELTS enabled for this to be used) + * @param error_buffer Buffer used for detecting and reporting exception::DeviceErrors (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for this to be used) * @param d_curve_table Pointer to curve hash table in device memory * @param d_env_buffer Pointer to env buffer in device memory * @param d_agent_output_nextID If agent output is enabled, this points to a global memory src of the next suitable agent id, this will be atomically incremented at birth @@ -55,7 +55,7 @@ typedef void(AgentFunctionWrapper)( */ template __global__ void agent_function_wrapper( -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS exception::DeviceExceptionBuffer *error_buffer, #endif #ifndef __CUDACC_RTC__ @@ -73,7 +73,7 @@ __global__ void agent_function_wrapper( // We place these at the start of shared memory, so we can locate it anywhere in device code without a reference using detail::sm; if (threadIdx.x == 0) { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS sm()->device_exception = error_buffer; #endif #ifndef __CUDACC_RTC__ @@ -104,7 +104,7 @@ __global__ void agent_function_wrapper( if (scanFlag_agentDeath) { // (scan flags will not be processed unless agent death has been requested in model definition) scanFlag_agentDeath[DeviceAPI::getIndex()] = flag; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS } else if (flag == DEAD) { DTHROW("Agent death must be enabled per agent function when defining the model.\n"); #endif diff --git a/include/flamegpu/runtime/AgentFunctionCondition.cuh b/include/flamegpu/runtime/AgentFunctionCondition.cuh index 671907b01..b100d9123 100644 --- a/include/flamegpu/runtime/AgentFunctionCondition.cuh +++ 
b/include/flamegpu/runtime/AgentFunctionCondition.cuh @@ -14,7 +14,7 @@ namespace flamegpu { // ! FLAMEGPU function return type typedef void(AgentFunctionConditionWrapper)( -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS exception::DeviceExceptionBuffer *error_buffer, #endif #ifndef __CUDACC_RTC__ @@ -28,7 +28,7 @@ typedef void(AgentFunctionConditionWrapper)( /** * Wrapper function for launching agent functions * Initialises FLAMEGPU_API instance - * @param error_buffer Buffer used for detecting and reporting exception::DeviceErrors (flamegpu must be built with SEATBELTS enabled for this to be used) + * @param error_buffer Buffer used for detecting and reporting exception::DeviceErrors (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for this to be used) * @param d_curve_table Pointer to curve hash table in device memory * @param d_env_buffer Pointer to env buffer in device memory * @param popNo Total number of agents exeucting the function (number of threads launched) @@ -39,7 +39,7 @@ typedef void(AgentFunctionConditionWrapper)( */ template __global__ void agent_function_condition_wrapper( -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS exception::DeviceExceptionBuffer *error_buffer, #endif #ifndef __CUDACC_RTC__ @@ -52,7 +52,7 @@ __global__ void agent_function_condition_wrapper( // We place these at the start of shared memory, so we can locate it anywhere in device code without a reference using detail::sm; if (threadIdx.x == 0) { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS sm()->device_exception = error_buffer; #endif #ifndef __CUDACC_RTC__ diff --git a/include/flamegpu/runtime/DeviceAPI.cuh b/include/flamegpu/runtime/DeviceAPI.cuh index 8dece74e3..ea3edb5b0 100644 --- a/include/flamegpu/runtime/DeviceAPI.cuh +++ b/include/flamegpu/runtime/DeviceAPI.cuh @@ -18,7 +18,7 @@ #include 
"flamegpu/runtime/AgentFunctionCondition.cuh" #include "flamegpu/defines.h" -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM #ifdef __CUDACC__ #ifdef __NVCC_DIAG_PRAGMA_SUPPORT__ #pragma nv_diag_suppress = esa_on_defaulted_function_ignored @@ -27,7 +27,7 @@ #endif // __NVCC_DIAG_PRAGMA_SUPPORT__ #endif // __CUDACC__ #include -#endif // USE_GLM +#endif // FLAMEGPU_USE_GLM namespace flamegpu { @@ -40,7 +40,7 @@ class ReadOnlyDeviceAPI { // Friends have access to TID() & TS_ID() template friend __global__ void agent_function_condition_wrapper( -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS exception::DeviceExceptionBuffer *, #endif #ifndef __CUDACC_RTC__ @@ -62,8 +62,8 @@ class ReadOnlyDeviceAPI { * @param variable_name name used for accessing the variable, this value should be a string literal e.g. "foobar" * @tparam T Type of the agent variable being accessed * @tparam N Length of variable name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[N]) const; @@ -74,9 +74,9 @@ class ReadOnlyDeviceAPI { * @tparam T Type of the agent variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be 
implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -121,7 +121,7 @@ class ReadOnlyDeviceAPI { + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId;*/ -#ifdef SEATBELTS +#ifdef FLAMEGPU_SEATBELTS assert(blockDim.y == 1); assert(blockDim.z == 1); assert(gridDim.y == 1); @@ -145,7 +145,7 @@ class DeviceAPI { // Friends have access to TID() & TS_ID() template friend __global__ void agent_function_wrapper( -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS exception::DeviceExceptionBuffer *, #endif #ifndef __CUDACC_RTC__ @@ -182,8 +182,8 @@ class DeviceAPI { * @tparam N Variable name length, this should be ignored as it is implicitly set * @note Any agent variables not set will remain as their default values * @note Calling AgentOut::setVariable() or AgentOut::getID() will trigger agent output - * @throws exception::DeviceError If 
name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ void setVariable(const char(&variable_name)[N], T value) const; @@ -197,9 +197,9 @@ class DeviceAPI { * @tparam M Variable name length, this should be ignored as it is implicitly set * @note Any agent variables not set will remain as their default values * @note Calling AgentOut::setVariable() or AgentOut::getID() will trigger agent output - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for 
device error checking) */ template __device__ void setVariable(const char(&variable_name)[M], unsigned int index, T value) const; @@ -254,8 +254,8 @@ class DeviceAPI { * @param variable_name name used for accessing the variable, this value should be a string literal e.g. "foobar" * @tparam T Type of the agent variable being accessed * @tparam N Length of variable name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[N]) const; @@ -266,9 +266,9 @@ class DeviceAPI { * @tparam T Type of the agent variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is 
not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -278,8 +278,8 @@ class DeviceAPI { * @param value The value to set the variable * @tparam T The type of the variable, as set within the model description hierarchy * @tparam N variable_name length, this should be ignored as it is implicitly set - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ void setVariable(const char(&variable_name)[N], T value); @@ -291,9 +291,9 @@ class DeviceAPI { * @tparam T The type of the variable, as set within the model description hierarchy * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M variable_name length, this should be ignored as it is implicitly set - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled 
for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ void setVariable(const char(&variable_name)[M], unsigned int index, T value); @@ -328,7 +328,7 @@ class DeviceAPI { + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId;*/ -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS assert(blockDim.y == 1); assert(blockDim.z == 1); assert(gridDim.y == 1); @@ -417,7 +417,7 @@ template template __device__ void DeviceAPI::setVariable(const char(&variable_name)[N], T value) { if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in DeviceAPI::setVariable().\n", variable_name); #endif return; // Fail silently @@ -431,7 +431,7 @@ template template __device__ void DeviceAPI::setVariable(const char(&variable_name)[M], const unsigned int array_index, const T value) { if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' 
are reserved for internal use, with '%s', in DeviceAPI::setVariable().\n", variable_name); #endif return; // Fail silently @@ -448,7 +448,7 @@ template __device__ void DeviceAPI::AgentOut::setVariable(const char(&variable_name)[N], T value) const { if (nextID) { if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in AgentOut::setVariable().\n", variable_name); #endif return; // Fail silently @@ -461,7 +461,7 @@ __device__ void DeviceAPI::AgentOut::setVariable(const ch // Mark scan flag genID(); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS } else { DTHROW("Agent output must be enabled per agent function when defining the model.\n"); #endif @@ -472,7 +472,7 @@ template __device__ void DeviceAPI::AgentOut::setVariable(const char(&variable_name)[M], const unsigned int array_index, T value) const { if (nextID) { if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in AgentOut::setVariable().\n", variable_name); #endif return; // Fail silently @@ -485,7 +485,7 @@ __device__ void DeviceAPI::AgentOut::setVariable(const ch // Mark scan flag genID(); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS } else { DTHROW("Agent output must be enabled per agent function when defining the model.\n"); #endif @@ -498,7 +498,7 @@ __device__ id_t DeviceAPI::AgentOut::getID() const { genID(); return this->id; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Agent output must be enabled per agent function when defining the model.\n"); #endif return ID_NOT_SET; diff --git a/include/flamegpu/runtime/detail/SharedBlock.h 
b/include/flamegpu/runtime/detail/SharedBlock.h index c205c62cb..b1ba3c15d 100644 --- a/include/flamegpu/runtime/detail/SharedBlock.h +++ b/include/flamegpu/runtime/detail/SharedBlock.h @@ -16,14 +16,14 @@ struct SharedBlock { #ifndef __CUDACC_RTC__ curve::Curve::VariableHash curve_hashes[curve::Curve::MAX_VARIABLES]; char* curve_variables[curve::Curve::MAX_VARIABLES]; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS unsigned int curve_type_size[curve::Curve::MAX_VARIABLES]; unsigned int curve_elements[curve::Curve::MAX_VARIABLES]; unsigned int curve_count[curve::Curve::MAX_VARIABLES]; #endif const char* env_buffer; #endif -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS exception::DeviceExceptionBuffer *device_exception; #endif }; diff --git a/include/flamegpu/runtime/detail/curve/DeviceCurve.cuh b/include/flamegpu/runtime/detail/curve/DeviceCurve.cuh index aaeaa70a8..97d01b17e 100644 --- a/include/flamegpu/runtime/detail/curve/DeviceCurve.cuh +++ b/include/flamegpu/runtime/detail/curve/DeviceCurve.cuh @@ -6,7 +6,7 @@ #include "flamegpu/exception/FLAMEGPUDeviceException_device.cuh" #include "flamegpu/util/type_decode.h" -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM #ifdef __CUDACC__ #ifdef __NVCC_DIAG_PRAGMA_SUPPORT__ #pragma nv_diag_suppress = esa_on_defaulted_function_ignored @@ -15,7 +15,7 @@ #endif // __NVCC_DIAG_PRAGMA_SUPPORT__ #endif // __CUDACC__ #include -#endif // USE_GLM +#endif // FLAMEGPU_USE_GLM namespace flamegpu { namespace detail { @@ -52,11 +52,11 @@ class DeviceCurve { * @param variableName A constant char array (C string) variable name. * @param namespace_hash Curve namespace hash for the variable. * @param offset an offset into the variable's buffer in bytes (offset is normally variable index * sizeof(T) * N). - * @tparam T The return type requested of the variable (only used for type-checking when SEATBELTS==ON). 
- * @tparam N The variable array length, 1 for non array variables (only used for type-checking when SEATBELTS==ON). + * @tparam T The return type requested of the variable (only used for type-checking when FLAMEGPU_SEATBELTS==ON). + * @tparam N The variable array length, 1 for non array variables (only used for type-checking when FLAMEGPU_SEATBELTS==ON). * @tparam M The length of the string literal passed to variableName. This parameter should always be implicit, and does not need to be provided. * @return A generic pointer to the variable value. Will be nullptr if there is an error and a DeviceError has been raised. - * @throws exception::DeviceError (Only when SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. + * @throws exception::DeviceError (Only when FLAMEGPU_SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. */ template __device__ __forceinline__ static char* getVariablePtr(const char(&variableName)[M], VariableHash namespace_hash, unsigned int offset); @@ -75,7 +75,7 @@ class DeviceCurve { * @tparam T The return type requested of the variable. * @tparam N The variable array length, 1 for non array variables. * @tparam M The length of the string literal passed to variableName. This parameter should always be implicit, and does not need to be provided. - * @throws exception::DeviceError (Only when SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. + * @throws exception::DeviceError (Only when FLAMEGPU_SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. */ template __device__ __forceinline__ static T getVariable(const char(&variableName)[M], VariableHash namespace_hash, unsigned int agent_index = 0, unsigned int array_index = 0); @@ -96,7 +96,7 @@ class DeviceCurve { * @tparam T The type variable to be stored. 
* @tparam N The variable array length, 1 for non array variables. * @tparam M The length of the string literal passed to variableName. This parameter should always be implicit, and does not need to be provided. - * @throws exception::DeviceError (Only when SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. + * @throws exception::DeviceError (Only when FLAMEGPU_SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. */ template __device__ __forceinline__ static void setVariable(const char(&variableName)[M], VariableHash namespace_hash, T value, unsigned int agent_index = 0, unsigned int array_index = 0); @@ -110,7 +110,7 @@ class DeviceCurve { for (int idx = threadIdx.x; idx < Curve::MAX_VARIABLES; idx += blockDim.x) { sm()->curve_variables[idx] = d_curve_table->variables[idx]; sm()->curve_hashes[idx] = d_curve_table->hashes[idx]; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS sm()->curve_type_size[idx] = d_curve_table->type_size[idx]; sm()->curve_elements[idx] = d_curve_table->elements[idx]; sm()->curve_count[idx] = d_curve_table->count[idx]; @@ -128,7 +128,7 @@ class DeviceCurve { * @tparam T The return type requested of the variable. * @tparam M The length of the string literal passed to variableName. This parameter should always be implicit, and does not need to be provided. * @return The requested variable - * @throws exception::DeviceError (Only when SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. + * @throws exception::DeviceError (Only when FLAMEGPU_SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. 
*/ template __device__ __forceinline__ static T getAgentVariable(const char(&variableName)[M], unsigned int index); @@ -181,7 +181,7 @@ class DeviceCurve { * @param index The index of the variable in the named variable vector. This corresponds to the agent/message agent/message/new-agent index within the agent/message/new-agent population. * @tparam T The return type requested of the variable. * @tparam M The length of the string literal passed to variableName. This parameter should always be implicit, and does not need to be provided. - * @throws exception::DeviceError (Only when SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. + * @throws exception::DeviceError (Only when FLAMEGPU_SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. */ template __device__ __forceinline__ static void setAgentVariable(const char(&variableName)[M], T variable, unsigned int index); @@ -218,14 +218,14 @@ class DeviceCurve { * @tparam T The return type requested of the property. * @tparam M The length of the string literal passed to variableName. This parameter should always be implicit, and does not need to be provided. * @return The requested property - * @throws exception::DeviceError (Only when SEATBELTS==ON) If the specified property is not found in the cuRVE hashtable, or it's details are invalid. + * @throws exception::DeviceError (Only when FLAMEGPU_SEATBELTS==ON) If the specified property is not found in the cuRVE hashtable, or it's details are invalid. */ template __device__ __forceinline__ static T getEnvironmentProperty(const char(&propertyName)[M]); /** * @copydoc DeviceCurve::getEnvironmentProperty() * @param array_index The index of the element in the named variable array. 
- * @tparam N (Optional) Length of the array variable specified by variableName, available for parity with other APIs, checked if provided (flamegpu must be built with SEATBELTS enabled for device error checking). + * @tparam N (Optional) Length of the array variable specified by variableName, available for parity with other APIs, checked if provided (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking). */ template __device__ __forceinline__ static T getEnvironmentArrayProperty(const char(&propertyName)[M], unsigned int array_index); @@ -240,7 +240,7 @@ class DeviceCurve { * @tparam K The length of the 3rd dimension of the environment macro property, default 1. * @tparam W The length of the 4th dimension of the environment macro property, default 1. * @tparam M The length of the string literal passed to variableName. This parameter should always be implicit, and does not need to be provided. - * @throws exception::DeviceError (Only when SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. + * @throws exception::DeviceError (Only when FLAMEGPU_SEATBELTS==ON) If the specified variable is not found in the cuRVE hashtable, or it's details are invalid. 
*/ template __device__ __forceinline__ static char *getEnvironmentMacroProperty(const char(&name)[M]); @@ -264,7 +264,7 @@ template __device__ __forceinline__ char* DeviceCurve::getVariablePtr(const char(&variableName)[M], const VariableHash namespace_hash, const unsigned int offset) { using detail::sm; const Variable cv = getVariableIndex(Curve::variableHash(variableName) + namespace_hash); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (cv == UNKNOWN_VARIABLE) { DTHROW("Curve variable with name '%s' was not found.\n", variableName); return nullptr; @@ -291,7 +291,7 @@ __device__ __forceinline__ T DeviceCurve::getVariable(const char(&variableName)[ const unsigned int buffer_offset = agent_index * static_cast(sizeof(T)) * N + array_index * sizeof(typename type_decode::type_t); T *value_ptr = reinterpret_cast(getVariablePtr(variableName, namespace_hash, buffer_offset)); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!value_ptr) return {}; #endif @@ -302,7 +302,7 @@ __device__ __forceinline__ T DeviceCurve::getVariable_ldg(const char(&variableNa const unsigned int buffer_offset = agent_index * static_cast(sizeof(T)) * N + array_index * sizeof(typename type_decode::type_t); T *value_ptr = reinterpret_cast(getVariablePtr(variableName, namespace_hash, buffer_offset)); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!value_ptr) return {}; #endif @@ -313,7 +313,7 @@ __device__ __forceinline__ void DeviceCurve::setVariable(const char(&variableNam const unsigned int buffer_offset = agent_index * static_cast(sizeof(T)) * N + array_index * sizeof(typename type_decode::type_t); T* value_ptr = reinterpret_cast(getVariablePtr(variableName, namespace_hash, buffer_offset)); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!value_ptr) return; #endif diff --git 
a/include/flamegpu/runtime/messaging/MessageArray/MessageArrayDevice.cuh b/include/flamegpu/runtime/messaging/MessageArray/MessageArrayDevice.cuh index 9866577b7..896bf2879 100644 --- a/include/flamegpu/runtime/messaging/MessageArray/MessageArrayDevice.cuh +++ b/include/flamegpu/runtime/messaging/MessageArray/MessageArrayDevice.cuh @@ -40,7 +40,7 @@ class MessageArray::In { * @note See member variable documentation for their purposes */ __device__ Message(const MessageArray::In &parent, const size_type _index) : _parent(parent), index(_index) {} -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null message which always returns the message at index 0 */ @@ -78,9 +78,9 @@ class MessageArray::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS 
enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -177,9 +177,9 @@ class MessageArray::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -244,7 +244,7 @@ class MessageArray::In { * @param _radius Search radius */ inline __device__ WrapFilter(const size_type _length, const size_type x, const size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -254,7 +254,7 @@ class MessageArray::In { * Returns an iterator to the start of the message list subset about the search 
origin */ inline __device__ iterator begin(void) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!this->length) return iterator(*this, radius); #endif @@ -376,9 +376,9 @@ class MessageArray::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -444,7 +444,7 @@ class MessageArray::In { */ inline __device__ Filter(size_type _length, size_type x, size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -507,7 +507,7 @@ class MessageArray::In { * @note The location x must be within the bounds of the 
message list */ inline __device__ WrapFilter wrap(const size_type x, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("Invalid radius %u for accessing array messagelist of length %u\n", radius, length); return WrapFilter(); @@ -540,7 +540,7 @@ class MessageArray::In { * @note The location x must be within the bounds of the message list */ inline __device__ Filter operator() (const size_type x, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("Invalid radius %u for accessing array messagelist of length %u\n", radius, length); return Filter(); @@ -559,7 +559,7 @@ class MessageArray::In { return length; } __device__ Message at(const size_type index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (index >= length) { DTHROW("Index is out of bounds for Array messagelist (%u >= %u).\n", index, length); return Message(*this); @@ -588,7 +588,7 @@ class MessageArray::Out { */ __device__ Out(const void *_metadata, unsigned int *scan_flag_messageOutput) : scan_flag(scan_flag_messageOutput) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS , metadata(reinterpret_cast(_metadata)) #else , metadata(nullptr) @@ -616,9 +616,9 @@ class MessageArray::Out { * @tparam T The type of the variable, as set within the model description hierarchy * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M variable_name length, this should be ignored as it is implicitly set - * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be 
built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ void setVariable(const char(&variable_name)[M], unsigned int index, T value) const; @@ -636,7 +636,7 @@ class MessageArray::Out { template __device__ T MessageArray::In::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index >= this->_parent.length) { DTHROW("Invalid Array message, unable to get variable '%s'.\n", variable_name); @@ -649,7 +649,7 @@ __device__ T MessageArray::In::Message::getVariable(const char(&variable_name)[N template __device__ T MessageArray::In::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (index >= this->_parent.length) { DTHROW("Invalid Array message, unable to get variable '%s'.\n", variable_name); @@ -662,7 +662,7 @@ T MessageArray::In::Message::getVariable(const char(&variable_name)[M], const un } template __device__ T MessageArray::In::WrapFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.length) { DTHROW("Invalid Array message, unable to get variable '%s'.\n", variable_name); @@ -675,7 +675,7 @@ __device__ T MessageArray::In::WrapFilter::Message::getVariable(const char(&vari template __device__ T MessageArray::In::WrapFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.length) { DTHROW("Invalid Array message, unable to get variable '%s'.\n", variable_name); @@ -688,7 +688,7 @@ T MessageArray::In::WrapFilter::Message::getVariable(const char(&variable_name)[ } template __device__ T MessageArray::In::Filter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (index_1d >= this->_parent.length) { DTHROW("Invalid Array message, unable to get variable '%s'.\n", variable_name); @@ -701,7 +701,7 @@ __device__ T MessageArray::In::Filter::Message::getVariable(const char(&variable template __device__ T MessageArray::In::Filter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.length) { DTHROW("Invalid Array message, unable to get variable '%s'.\n", variable_name); @@ -716,7 +716,7 @@ T MessageArray::In::Filter::Message::getVariable(const char(&variable_name)[M], template __device__ void MessageArray::Out::setVariable(const char(&variable_name)[N], T value) const { // message name or variable name if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in MessageArray::Out::setVariable().\n", variable_name); #endif return; // Fail silently @@ -731,7 +731,7 @@ __device__ void MessageArray::Out::setVariable(const char(&variable_name)[N], T template __device__ void MessageArray::Out::setVariable(const char(&variable_name)[M], const unsigned int array_index, T value) const { if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in MessageArray::Out::setVariable().\n", variable_name); #endif return; // Fail silently @@ -750,7 +750,7 @@ __device__ void MessageArray::Out::setVariable(const char(&variable_name)[M], co __device__ void MessageArray::Out::setIndex(const size_type id) const { unsigned int index = (blockDim.x * blockIdx.x) + threadIdx.x; 
-#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (id >= metadata->length) { DTHROW("MessageArray index [%u] is out of bounds [%u]\n", id, metadata->length); } @@ -767,7 +767,7 @@ __device__ MessageArray::In::WrapFilter::WrapFilter(const size_type _length, con , length(_length) { loc = x; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray::In::WrapFilter::WrapFilter() : radius(0) , length(0) { @@ -775,7 +775,7 @@ __device__ inline MessageArray::In::WrapFilter::WrapFilter() } #endif __device__ MessageArray::In::WrapFilter::Message& MessageArray::In::WrapFilter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.length) return *this; #endif @@ -794,7 +794,7 @@ __device__ MessageArray::In::Filter::Filter(const size_type _length, const size_ min_cell = static_cast(x) - static_cast(_radius) < 0 ? -static_cast(x) : -static_cast(_radius); max_cell = x + _radius >= _length ? 
static_cast(_length) - 1 - static_cast(x) : static_cast(_radius); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray::In::Filter::Filter() : length(0) { loc = 0; @@ -803,7 +803,7 @@ __device__ inline MessageArray::In::Filter::Filter() } #endif __device__ MessageArray::In::Filter::Message& MessageArray::In::Filter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.length) return *this; #endif diff --git a/include/flamegpu/runtime/messaging/MessageArray2D/MessageArray2DDevice.cuh b/include/flamegpu/runtime/messaging/MessageArray2D/MessageArray2DDevice.cuh index ee15b8d38..e5c0c5dae 100644 --- a/include/flamegpu/runtime/messaging/MessageArray2D/MessageArray2DDevice.cuh +++ b/include/flamegpu/runtime/messaging/MessageArray2D/MessageArray2DDevice.cuh @@ -36,7 +36,7 @@ class MessageArray2D::In { * @note See member variable documentation for their purposes */ __device__ Message(const MessageArray2D::In &parent, const size_type _index) : _parent(parent), index(_index) {} -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null message which always returns the message at index 0 */ @@ -74,9 +74,9 @@ class MessageArray2D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array 
specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -191,9 +191,9 @@ class MessageArray2D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with 
FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -259,7 +259,7 @@ class MessageArray2D::In { * @param _radius Search radius */ inline __device__ WrapFilter(const MetaData *_metadata, size_type x, size_type y, size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -269,7 +269,7 @@ class MessageArray2D::In { * Returns an iterator to the start of the message list subset about the search origin */ inline __device__ iterator begin(void) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!this->metadata) return iterator(*this, radius, radius); #endif @@ -406,9 +406,9 @@ class MessageArray2D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError 
If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -474,7 +474,7 @@ class MessageArray2D::In { * @param _radius Search radius */ inline __device__ Filter(const MetaData *_metadata, size_type x, size_type y, size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -624,9 +624,9 @@ class MessageArray2D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -692,7 
+692,7 @@ class MessageArray2D::In { * @param _radius Search radius */ inline __device__ VonNeumannWrapFilter(const MetaData *_metadata, size_type x, size_type y, size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -702,7 +702,7 @@ class MessageArray2D::In { * Returns an iterator to the start of the message list subset about the search origin */ inline __device__ iterator begin(void) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!this->metadata) return iterator(*this, radius, radius); #endif @@ -839,9 +839,9 @@ class MessageArray2D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) 
*/ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -907,7 +907,7 @@ class MessageArray2D::In { * @param _radius Search radius */ inline __device__ VonNeumannFilter(const MetaData *_metadata, size_type x, size_type y, size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -975,7 +975,7 @@ class MessageArray2D::In { * @note The location [x, y] must be within the bounds of the message list */ inline __device__ WrapFilter wrap(const size_type x, const size_type y, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("%u is not a valid radius for accessing Array2D message lists.\n", radius); } else if ((radius * 2) + 1 > metadata->dimensions[0] || @@ -1010,7 +1010,7 @@ class MessageArray2D::In { * @note The location [x, y] must be within the bounds of the message list */ inline __device__ Filter operator() (const size_type x, const size_type y, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("%u is not a valid radius for accessing Array2D message lists.\n", radius); return Filter(); @@ -1039,7 +1039,7 @@ class MessageArray2D::In { * @note The location [x, y] must be within the bounds of the message list */ inline __device__ VonNeumannWrapFilter vn_wrap(const size_type x, const size_type y, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("%u is not a valid radius for accessing Array2D message lists.\n", radius); } else if ((radius * 2) + 1 > metadata->dimensions[0] || @@ -1074,7 +1074,7 @@ class MessageArray2D::In { * @note The location [x, y] must be within the bounds of the message list */ 
inline __device__ VonNeumannFilter vn(const size_type x, const size_type y, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("%u is not a valid radius for accessing Array2D message lists.\n", radius); return VonNeumannFilter(); @@ -1107,7 +1107,7 @@ class MessageArray2D::In { return metadata->length; } __device__ Message at(const size_type x, const size_type y) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (x >= metadata->dimensions[0] || y >= metadata->dimensions[1]) { DTHROW("Index is out of bounds for Array2D messagelist ([%u, %u] >= [%u, %u]).\n", x, y, metadata->dimensions[0], metadata->dimensions[1]); return Message(*this); @@ -1164,9 +1164,9 @@ class MessageArray2D::Out { * @tparam T The type of the variable, as set within the model description hierarchy * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M variable_name length, this should be ignored as it is implicitly set - * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If 
index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ void setVariable(const char(&variable_name)[M], unsigned int index, T value) const; @@ -1186,7 +1186,7 @@ class MessageArray2D::Out { template __device__ T MessageArray2D::In::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1198,7 +1198,7 @@ __device__ T MessageArray2D::In::Message::getVariable(const char(&variable_name) } template __device__ T MessageArray2D::In::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1211,7 +1211,7 @@ T MessageArray2D::In::Message::getVariable(const char(&variable_name)[M], const } template __device__ T MessageArray2D::In::WrapFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1223,7 +1223,7 @@ __device__ T MessageArray2D::In::WrapFilter::Message::getVariable(const char(&va } template __device__ T MessageArray2D::In::WrapFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1236,7 +1236,7 @@ T MessageArray2D::In::WrapFilter::Message::getVariable(const char(&variable_name } template __device__ T MessageArray2D::In::Filter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1248,7 +1248,7 @@ __device__ T MessageArray2D::In::Filter::Message::getVariable(const char(&variab } template __device__ T MessageArray2D::In::Filter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1261,7 +1261,7 @@ T MessageArray2D::In::Filter::Message::getVariable(const char(&variable_name)[M] } template __device__ T MessageArray2D::In::VonNeumannWrapFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1273,7 +1273,7 @@ __device__ T MessageArray2D::In::VonNeumannWrapFilter::Message::getVariable(cons } template __device__ T MessageArray2D::In::VonNeumannWrapFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1286,7 +1286,7 @@ T MessageArray2D::In::VonNeumannWrapFilter::Message::getVariable(const char(&var } template __device__ T MessageArray2D::In::VonNeumannFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1298,7 +1298,7 @@ __device__ T MessageArray2D::In::VonNeumannFilter::Message::getVariable(const ch } template __device__ T MessageArray2D::In::VonNeumannFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array2D message, unable to get variable '%s'.\n", variable_name); @@ -1313,7 +1313,7 @@ T MessageArray2D::In::VonNeumannFilter::Message::getVariable(const char(&variabl template __device__ void MessageArray2D::Out::setVariable(const char(&variable_name)[N], T value) const { // message name or variable name if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in MessageArray2D::Out::setVariable().\n", variable_name); #endif return; // Fail silently @@ -1328,7 +1328,7 @@ __device__ void MessageArray2D::Out::setVariable(const char(&variable_name)[N], template __device__ void MessageArray2D::Out::setVariable(const char(&variable_name)[M], const unsigned int array_index, T value) const { if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in MessageArray2D::Out::setVariable().\n", variable_name); #endif return; // Fail silently @@ -1350,7 +1350,7 @@ __device__ void MessageArray2D::Out::setIndex(const size_type x, const size_type size_type index_1d = y * metadata->dimensions[0] + x; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (x >= 
metadata->dimensions[0] || y >= metadata->dimensions[1]) { DTHROW("MessageArray2D index [%u, %u] is out of bounds [%u, %u]\n", x, y, metadata->dimensions[0], metadata->dimensions[1]); @@ -1369,7 +1369,7 @@ __device__ MessageArray2D::In::WrapFilter::WrapFilter(const MetaData* _metadata, loc[0] = x; loc[1] = y; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray2D::In::WrapFilter::WrapFilter() : radius(0) , metadata(nullptr) { @@ -1378,7 +1378,7 @@ __device__ inline MessageArray2D::In::WrapFilter::WrapFilter() } #endif __device__ MessageArray2D::In::WrapFilter::Message& MessageArray2D::In::WrapFilter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.metadata) return *this; #endif @@ -1410,7 +1410,7 @@ __device__ MessageArray2D::In::Filter::Filter(const MetaData *_metadata, const max_cell[0] = x + _radius >= _metadata->dimensions[0] ? static_cast(_metadata->dimensions[0]) - 1 - static_cast(x) : static_cast(_radius); max_cell[1] = y + _radius >= _metadata->dimensions[1] ? 
static_cast(_metadata->dimensions[1]) - 1 - static_cast(y) : static_cast(_radius); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray2D::In::Filter::Filter() : metadata(nullptr) { loc[0] = 0; @@ -1422,7 +1422,7 @@ __device__ inline MessageArray2D::In::Filter::Filter() } #endif __device__ MessageArray2D::In::Filter::Message& MessageArray2D::In::Filter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.metadata) return *this; #endif @@ -1453,7 +1453,7 @@ __device__ MessageArray2D::In::VonNeumannWrapFilter::VonNeumannWrapFilter(const loc[0] = x; loc[1] = y; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray2D::In::VonNeumannWrapFilter::VonNeumannWrapFilter() : radius(0) , metadata(nullptr) { @@ -1462,7 +1462,7 @@ __device__ inline MessageArray2D::In::VonNeumannWrapFilter::VonNeumannWrapFilter } #endif __device__ MessageArray2D::In::VonNeumannWrapFilter::Message& MessageArray2D::In::VonNeumannWrapFilter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.metadata) return *this; #endif @@ -1502,7 +1502,7 @@ __device__ MessageArray2D::In::VonNeumannFilter::VonNeumannFilter(const MetaData max_cell[0] = x + _radius >= _metadata->dimensions[0] ? static_cast(_metadata->dimensions[0]) - 1 - static_cast(x) : static_cast(_radius); max_cell[1] = y + _radius >= _metadata->dimensions[1] ? 
static_cast(_metadata->dimensions[1]) - 1 - static_cast(y) : static_cast(_radius); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray2D::In::VonNeumannFilter::VonNeumannFilter() : radius(0) , metadata(nullptr) { @@ -1515,7 +1515,7 @@ __device__ inline MessageArray2D::In::VonNeumannFilter::VonNeumannFilter() } #endif __device__ MessageArray2D::In::VonNeumannFilter::Message& MessageArray2D::In::VonNeumannFilter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.metadata) return *this; #endif diff --git a/include/flamegpu/runtime/messaging/MessageArray3D/MessageArray3DDevice.cuh b/include/flamegpu/runtime/messaging/MessageArray3D/MessageArray3DDevice.cuh index cc1f1de2d..42999b110 100644 --- a/include/flamegpu/runtime/messaging/MessageArray3D/MessageArray3DDevice.cuh +++ b/include/flamegpu/runtime/messaging/MessageArray3D/MessageArray3DDevice.cuh @@ -42,7 +42,7 @@ class MessageArray3D::In { * @note See member variable documentation for their purposes */ __device__ Message(const MessageArray3D::In &parent, const size_type _index) : _parent(parent), index(_index) {} -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null message which always returns the message at index 0 */ @@ -80,9 +80,9 @@ class MessageArray3D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device 
error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -212,9 +212,9 @@ class MessageArray3D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is 
out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -281,7 +281,7 @@ class MessageArray3D::In { * @param _radius Search radius */ inline __device__ WrapFilter(const MetaData *_metadata, size_type x, size_type y, size_type z, size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -291,7 +291,7 @@ class MessageArray3D::In { * Returns an iterator to the start of the message list subset about the search origin */ inline __device__ iterator begin(void) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!this->metadata) return iterator(*this, radius, radius, radius); #endif @@ -442,9 +442,9 @@ class MessageArray3D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must 
be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -511,7 +511,7 @@ class MessageArray3D::In { * @param _radius Search radius */ inline __device__ Filter(const MetaData* _metadata, const size_type x, const size_type y, const size_type z, const size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -676,9 +676,9 @@ class MessageArray3D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS 
enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -745,7 +745,7 @@ class MessageArray3D::In { * @param _radius Search radius */ inline __device__ VonNeumannWrapFilter(const MetaData *_metadata, size_type x, size_type y, size_type z, size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -755,7 +755,7 @@ class MessageArray3D::In { * Returns an iterator to the start of the message list subset about the search origin */ inline __device__ iterator begin(void) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!this->metadata) return iterator(*this, radius, radius, radius); #endif @@ -906,9 +906,9 @@ class MessageArray3D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws 
exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -975,7 +975,7 @@ class MessageArray3D::In { * @param _radius Search radius */ inline __device__ VonNeumannFilter(const MetaData* _metadata, size_type x, size_type y, size_type z, size_type _radius); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * A null filter which always returns 0 messages */ @@ -1044,7 +1044,7 @@ class MessageArray3D::In { * @note The location [x, y, z] must be within the bounds of the message list */ inline __device__ WrapFilter wrap(const size_type x, const size_type y, const size_type z, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("%u is not a valid radius for accessing Array3D message lists.\n", radius); return WrapFilter(); @@ -1085,7 +1085,7 @@ class MessageArray3D::In { * @note The location [x, y, z] must be within the bounds of the message list */ inline __device__ Filter operator()(const size_type x, const size_type y, const size_type z, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("%u is not a valid radius for accessing Array3D message lists.\n", radius); return Filter(); @@ -1116,7 +1116,7 @@ class MessageArray3D::In { * @note The location [x, y, z] must be within the bounds of the message list */ inline __device__ VonNeumannWrapFilter vn_wrap(const size_type x, const size_type y, const size_type z, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("%u is not a valid radius for accessing Array3D 
message lists.\n", radius); return VonNeumannWrapFilter(); @@ -1157,7 +1157,7 @@ class MessageArray3D::In { * @note The location [x, y, z] must be within the bounds of the message list */ inline __device__ VonNeumannFilter vn(const size_type x, const size_type y, const size_type z, const size_type radius = 1) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (radius == 0) { DTHROW("%u is not a valid radius for accessing Array3D message lists.\n", radius); return VonNeumannFilter(); @@ -1197,7 +1197,7 @@ class MessageArray3D::In { return metadata->length; } __device__ Message at(const size_type x, const size_type y, const size_type z) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (x >= metadata->dimensions[0] || y >= metadata->dimensions[1] || z >= metadata->dimensions[2]) { DTHROW("Index is out of bounds for Array3D messagelist ([%u, %u, %u] >= [%u, %u, %u]).\n", x, y, z, metadata->dimensions[0], metadata->dimensions[1], metadata->dimensions[2]); return Message(*this); @@ -1255,9 +1255,9 @@ class MessageArray3D::Out { * @tparam T The type of the variable, as set within the model description hierarchy * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M variable_name length, this should be ignored as it is implicitly set - * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message 
(flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ void setVariable(const char(&variable_name)[M], unsigned int index, T value) const; @@ -1275,7 +1275,7 @@ class MessageArray3D::Out { template __device__ T MessageArray3D::In::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1288,7 +1288,7 @@ __device__ T MessageArray3D::In::Message::getVariable(const char(&variable_name) template __device__ T MessageArray3D::In::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1301,7 +1301,7 @@ T MessageArray3D::In::Message::getVariable(const char(&variable_name)[M], const } template __device__ T MessageArray3D::In::WrapFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1314,7 +1314,7 @@ __device__ T MessageArray3D::In::WrapFilter::Message::getVariable(const char(&va template __device__ T MessageArray3D::In::WrapFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1327,7 +1327,7 @@ T MessageArray3D::In::WrapFilter::Message::getVariable(const char(&variable_name } template __device__ T MessageArray3D::In::Filter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1340,7 +1340,7 @@ __device__ T MessageArray3D::In::Filter::Message::getVariable(const char(&variab template __device__ T MessageArray3D::In::Filter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1353,7 +1353,7 @@ T MessageArray3D::In::Filter::Message::getVariable(const char(&variable_name)[M] } template __device__ T MessageArray3D::In::VonNeumannWrapFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1366,7 +1366,7 @@ __device__ T MessageArray3D::In::VonNeumannWrapFilter::Message::getVariable(cons template __device__ T MessageArray3D::In::VonNeumannWrapFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1379,7 +1379,7 @@ T MessageArray3D::In::VonNeumannWrapFilter::Message::getVariable(const char(&var } template __device__ T MessageArray3D::In::VonNeumannFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1392,7 +1392,7 @@ __device__ T MessageArray3D::In::VonNeumannFilter::Message::getVariable(const ch template __device__ T MessageArray3D::In::VonNeumannFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index_1d >= this->_parent.metadata->length) { DTHROW("Invalid Array3D message, unable to get variable '%s'.\n", variable_name); @@ -1407,7 +1407,7 @@ T MessageArray3D::In::VonNeumannFilter::Message::getVariable(const char(&variabl template __device__ void MessageArray3D::Out::setVariable(const char(&variable_name)[N], T value) const { // message name or variable name if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in MessageArray3D::Out::setVariable().\n", variable_name); #endif return; // Fail silently @@ -1422,7 +1422,7 @@ __device__ void MessageArray3D::Out::setVariable(const char(&variable_name)[N], template __device__ void MessageArray3D::Out::setVariable(const char(&variable_name)[M], const unsigned int array_index, T value) const { if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in MessageArray3D::Out::setVariable().\n", variable_name); #endif return; // Fail silently @@ -1444,7 +1444,7 @@ __device__ inline void MessageArray3D::Out::setIndex(const size_type x, const si z * metadata->dimensions[0] * metadata->dimensions[1] + y * 
metadata->dimensions[0] + x; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (x >= metadata->dimensions[0] || y >= metadata->dimensions[1] || z >= metadata->dimensions[2]) { @@ -1467,7 +1467,7 @@ __device__ inline MessageArray3D::In::WrapFilter::WrapFilter(const MetaData *_me loc[1] = y; loc[2] = z; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray3D::In::WrapFilter::WrapFilter() : radius(0) , metadata(nullptr) { @@ -1477,7 +1477,7 @@ __device__ inline MessageArray3D::In::WrapFilter::WrapFilter() } #endif __device__ inline MessageArray3D::In::WrapFilter::Message& MessageArray3D::In::WrapFilter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.metadata) return *this; #endif @@ -1519,7 +1519,7 @@ __device__ inline MessageArray3D::In::Filter::Filter(const MetaData* _metadata, max_cell[1] = y + _radius >= _metadata->dimensions[1] ? static_cast(_metadata->dimensions[1]) - 1 - static_cast(y) : static_cast(_radius); max_cell[2] = z + _radius >= _metadata->dimensions[2] ? 
static_cast(_metadata->dimensions[2]) - 1 - static_cast(z) : static_cast(_radius); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray3D::In::Filter::Filter() : metadata(nullptr) { loc[0] = 0; @@ -1534,7 +1534,7 @@ __device__ inline MessageArray3D::In::Filter::Filter() } #endif __device__ inline MessageArray3D::In::Filter::Message& MessageArray3D::In::Filter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.metadata) return *this; #endif @@ -1577,7 +1577,7 @@ __device__ inline MessageArray3D::In::VonNeumannWrapFilter::VonNeumannWrapFilter loc[1] = y; loc[2] = z; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray3D::In::VonNeumannWrapFilter::VonNeumannWrapFilter() : radius(0) , metadata(nullptr) { @@ -1587,7 +1587,7 @@ __device__ inline MessageArray3D::In::VonNeumannWrapFilter::VonNeumannWrapFilter } #endif __device__ inline MessageArray3D::In::VonNeumannWrapFilter::Message& MessageArray3D::In::VonNeumannWrapFilter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.metadata) return *this; #endif @@ -1637,7 +1637,7 @@ __device__ inline MessageArray3D::In::VonNeumannFilter::VonNeumannFilter(const M max_cell[1] = y + _radius >= _metadata->dimensions[1] ? static_cast(_metadata->dimensions[1]) - 1 - static_cast(y) : static_cast(_radius); max_cell[2] = z + _radius >= _metadata->dimensions[2] ? 
static_cast(_metadata->dimensions[2]) - 1 - static_cast(z) : static_cast(_radius); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ inline MessageArray3D::In::VonNeumannFilter::VonNeumannFilter() : radius(0) , metadata(nullptr) { @@ -1653,7 +1653,7 @@ __device__ inline MessageArray3D::In::VonNeumannFilter::VonNeumannFilter() } #endif __device__ inline MessageArray3D::In::VonNeumannFilter::Message& MessageArray3D::In::VonNeumannFilter::Message::operator++() { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!_parent.metadata) return *this; #endif diff --git a/include/flamegpu/runtime/messaging/MessageBruteForce/MessageBruteForceDevice.cuh b/include/flamegpu/runtime/messaging/MessageBruteForce/MessageBruteForceDevice.cuh index f71032ba7..7d09a11b2 100644 --- a/include/flamegpu/runtime/messaging/MessageBruteForce/MessageBruteForceDevice.cuh +++ b/include/flamegpu/runtime/messaging/MessageBruteForce/MessageBruteForceDevice.cuh @@ -108,8 +108,8 @@ class MessageBruteForce::In { * @tparam T type of the variable * @tparam N Length of variable name (this should be implicit if a string literal is passed to variable name) * @return The specified variable, else 0x0 if an error occurs - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the agent (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T 
getVariable(const char(&variable_name)[N]) const; @@ -120,9 +120,9 @@ class MessageBruteForce::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -205,9 +205,9 @@ class MessageBruteForce::Out { * @tparam T The type of the variable, as set within the model description hierarchy * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M variable_name length, this should be ignored as it is implicitly set - * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of
variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ void setVariable(const char(&variable_name)[M], unsigned int index, T value) const; @@ -221,7 +221,7 @@ class MessageBruteForce::Out { template __device__ T MessageBruteForce::In::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index >= this->_parent.len) { DTHROW("Brute force message index exceeds messagelist length, unable to get variable '%s'.\n", variable_name); @@ -229,7 +229,7 @@ __device__ T MessageBruteForce::In::Message::getVariable(const char(&variable_na } #endif // get the value from curve using the message index. 
-#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM T value = detail::curve::DeviceCurve::getMessageVariable(variable_name, index); #else T value = detail::curve::DeviceCurve::getMessageVariable_ldg(variable_name, index); @@ -240,7 +240,7 @@ template __device__ T MessageBruteForce::In::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { // simple indexing assumes index is the thread number (this may change later) const unsigned int index = (blockDim.x * blockIdx.x) + threadIdx.x; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (index >= this->_parent.len) { DTHROW("Brute force message index exceeds messagelist length, unable to get variable '%s'.\n", variable_name); @@ -248,7 +248,7 @@ T MessageBruteForce::In::Message::getVariable(const char(&variable_name)[M], con } #endif // get the value from curve using the message index. -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM T value = detail::curve::DeviceCurve::getMessageArrayVariable(variable_name, index, array_index); #else T value = detail::curve::DeviceCurve::getMessageArrayVariable_ldg(variable_name, index, array_index); @@ -259,7 +259,7 @@ T MessageBruteForce::In::Message::getVariable(const char(&variable_name)[M], con template __device__ void MessageBruteForce::Out::setVariable(const char(&variable_name)[N], T value) const { // message name or variable name if (variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in MessageBruteForce::Out::setVariable().\n", variable_name); #endif return; // Fail silently @@ -277,7 +277,7 @@ __device__ void MessageBruteForce::Out::setVariable(const char(&variable_name)[N template __device__ void MessageBruteForce::Out::setVariable(const char(&variable_name)[M], const unsigned int array_index, T value) const { if 
(variable_name[0] == '_') { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS DTHROW("Variable names starting with '_' are reserved for internal use, with '%s', in MessageBruteForce::Out::setVariable().\n", variable_name); #endif return; // Fail silently diff --git a/include/flamegpu/runtime/messaging/MessageBucket/MessageBucketDevice.cuh b/include/flamegpu/runtime/messaging/MessageBucket/MessageBucketDevice.cuh index c1a3578d2..ac53b83f5 100644 --- a/include/flamegpu/runtime/messaging/MessageBucket/MessageBucketDevice.cuh +++ b/include/flamegpu/runtime/messaging/MessageBucket/MessageBucketDevice.cuh @@ -84,9 +84,9 @@ class MessageBucket::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T
getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -152,7 +152,7 @@ class MessageBucket::In { * @param endKey Exclusive final bucket of range to access, this is the final bucket + 1 */ inline __device__ Filter(const MetaData *_metadata, const IntT &beginKey, const IntT &endKey); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * Creates a null filter which always returns 0 messages */ @@ -205,7 +205,7 @@ class MessageBucket::In { * @param key The bucket to access */ inline __device__ Filter operator() (const IntT &key) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS { if (key < metadata->min) { DTHROW("Bucket messaging iterator key %d is lower than minimum key (%d).\n", key, metadata->min); @@ -226,7 +226,7 @@ class MessageBucket::In { * @param endKey The bin beyond the last bin to access messages from */ inline __device__ Filter operator() (const IntT &beginKey, const IntT &endKey) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS { if (beginKey < metadata->min) { DTHROW("Bucket messaging iterator begin key %d is lower than minimum key (%d).\n", beginKey, metadata->min); @@ -265,7 +265,7 @@ class MessageBucket::Out : public MessageBruteForce::Out { */ __device__ Out(const void *_metadata, unsigned int *scan_flag_messageOutput) : MessageBruteForce::Out(nullptr, scan_flag_messageOutput) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS , metadata(reinterpret_cast(_metadata)) #else , metadata(nullptr) @@ -293,7 +293,7 @@ __device__ MessageBucket::In::Filter::Filter(const MetaData* _metadata, const In bucket_end = metadata->PBM[endKey - metadata->min]; } } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS __device__ MessageBucket::In::Filter::Filter() : bucket_begin(0) , bucket_end(0) @@ -303,7 +303,7 @@ 
__device__ MessageBucket::In::Filter::Filter() __device__ void MessageBucket::Out::setKey(const IntT &key) const { unsigned int index = (blockDim.x * blockIdx.x) + threadIdx.x; // + d_message_count; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (key < metadata->min || key >= metadata->max) { DTHROW("MessageArray key %u is out of range [%d, %d).\n", key, metadata->min, metadata->max); return; @@ -318,7 +318,7 @@ __device__ void MessageBucket::Out::setKey(const IntT &key) const { template __device__ T MessageBucket::In::Filter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (cell_index >= _parent.bucket_end) { DTHROW("Bucket message index exceeds bin length, unable to get variable '%s'.\n", variable_name); @@ -331,7 +331,7 @@ __device__ T MessageBucket::In::Filter::Message::getVariable(const char(&variabl } template __device__ T MessageBucket::In::Filter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (cell_index >= _parent.bucket_end) { DTHROW("Bucket message index exceeds bin length, unable to get variable '%s'.\n", variable_name); diff --git a/include/flamegpu/runtime/messaging/MessageSpatial2D/MessageSpatial2DDevice.cuh b/include/flamegpu/runtime/messaging/MessageSpatial2D/MessageSpatial2DDevice.cuh index 028616ed0..2d6136a5b 100644 --- a/include/flamegpu/runtime/messaging/MessageSpatial2D/MessageSpatial2DDevice.cuh +++ b/include/flamegpu/runtime/messaging/MessageSpatial2D/MessageSpatial2DDevice.cuh @@ -111,9 +111,9 @@ class MessageSpatial2D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -319,9 +319,9 @@ class MessageSpatial2D::In { * @tparam T Type
of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -487,7 +487,7 @@ class MessageSpatial2D::In { * @note Unlike the regular iterator, this iterator will not return messages outside of the search radius.
The wrapped distance can be returned via WrapFilter::Message::distance() */ inline __device__ WrapFilter wrap(const float x, const float y) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (x > metadata->max[0] || y > metadata->max[1] || x < metadata->min[0] || @@ -543,7 +543,7 @@ class MessageSpatial2D::Out : public MessageBruteForce::Out { template __device__ T MessageSpatial2D::In::Filter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (relative_cell >= 2) { DTHROW("MessageSpatial2D in invalid bin, unable to get variable '%s'.\n", variable_name); @@ -556,7 +556,7 @@ __device__ T MessageSpatial2D::In::Filter::Message::getVariable(const char(&vari } template __device__ T MessageSpatial2D::In::Filter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (relative_cell >= 2) { DTHROW("MessageSpatial2D in invalid bin, unable to get variable '%s'.\n", variable_name); @@ -569,7 +569,7 @@ T MessageSpatial2D::In::Filter::Message::getVariable(const char(&variable_name)[ } template __device__ T MessageSpatial2D::In::WrapFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (relative_cell[0] >= 2) { DTHROW("MessageSpatial2D in invalid bin, unable to get variable '%s'.\n", variable_name); @@ -582,7 +582,7 @@ __device__ T MessageSpatial2D::In::WrapFilter::Message::getVariable(const char(& } template __device__ T MessageSpatial2D::In::WrapFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (relative_cell[0] >= 2) { DTHROW("MessageSpatial2D in invalid bin, unable to get variable '%s'.\n", variable_name); diff --git a/include/flamegpu/runtime/messaging/MessageSpatial3D/MessageSpatial3DDevice.cuh b/include/flamegpu/runtime/messaging/MessageSpatial3D/MessageSpatial3DDevice.cuh index 6e07b2015..e8d35a9db 100644 --- a/include/flamegpu/runtime/messaging/MessageSpatial3D/MessageSpatial3DDevice.cuh +++ b/include/flamegpu/runtime/messaging/MessageSpatial3D/MessageSpatial3DDevice.cuh @@ -119,9 +119,9 @@ class MessageSpatial3D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws
exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -338,9 +338,9 @@ class MessageSpatial3D::In { * @tparam T Type of the message variable being accessed * @tparam N The length of the array variable, as set within the model description hierarchy * @tparam M Length of variable_name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid variable within the agent (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid variable within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of variable 'name' within the message (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the variable array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ T getVariable(const char(&variable_name)[M], unsigned int index) const; @@ -525,7 +525,7 @@ class MessageSpatial3D::In { * @note Unlike the regular iterator, this iterator will not return messages outside of
the search radius. The wrapped distance can be returned via WrapFilter::Message::distance() */ inline __device__ WrapFilter wrap(const float x, const float y, const float z) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (x > metadata->max[0] || y > metadata->max[1] || z > metadata->max[2] || @@ -584,7 +584,7 @@ class MessageSpatial3D::Out : public MessageBruteForce::Out { template __device__ T MessageSpatial3D::In::Filter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (relative_cell[0] >= 2) { DTHROW("MessageSpatial3D in invalid bin, unable to get variable '%s'.\n", variable_name); @@ -597,7 +597,7 @@ __device__ T MessageSpatial3D::In::Filter::Message::getVariable(const char(&vari } template __device__ T MessageSpatial3D::In::Filter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (relative_cell[0] >= 2) { DTHROW("MessageSpatial3D in invalid bin, unable to get variable '%s'.\n", variable_name); @@ -610,7 +610,7 @@ T MessageSpatial3D::In::Filter::Message::getVariable(const char(&variable_name)[ } template __device__ T MessageSpatial3D::In::WrapFilter::Message::getVariable(const char(&variable_name)[N]) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. 
if (relative_cell[0] >= 2) { DTHROW("MessageSpatial3D in invalid bin, unable to get variable '%s'.\n", variable_name); @@ -623,7 +623,7 @@ __device__ T MessageSpatial3D::In::WrapFilter::Message::getVariable(const char(& } template __device__ T MessageSpatial3D::In::WrapFilter::Message::getVariable(const char(&variable_name)[M], const unsigned int array_index) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Ensure that the message is within bounds. if (relative_cell[0] >= 2) { DTHROW("MessageSpatial3D in invalid bin, unable to get variable '%s'.\n", variable_name); diff --git a/include/flamegpu/runtime/utility/AgentRandom.cuh b/include/flamegpu/runtime/utility/AgentRandom.cuh index b990bad9c..4c6196cba 100644 --- a/include/flamegpu/runtime/utility/AgentRandom.cuh +++ b/include/flamegpu/runtime/utility/AgentRandom.cuh @@ -104,7 +104,7 @@ __forceinline__ __device__ double AgentRandom::logNormal(const double mean, cons template __forceinline__ __device__ T AgentRandom::uniform(T min, T max) const { static_assert(util::detail::StaticAssert::_Is_IntType::value, "Invalid template argument for AgentRandom::uniform(T lowerBound, T max)"); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (min > max) { DTHROW("Invalid arguments passed to AgentRandom::uniform(), %lld > %lld\n", static_cast(min), static_cast(max)); } @@ -113,7 +113,7 @@ __forceinline__ __device__ T AgentRandom::uniform(T min, T max) const { } template<> __forceinline__ __device__ int64_t AgentRandom::uniform(const int64_t min, const int64_t max) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (min > max) { DTHROW("Invalid arguments passed to AgentRandom::uniform(), %lld > %lld\n", static_cast(min), static_cast(max)); } @@ -122,7 +122,7 @@ __forceinline__ __device__ int64_t AgentRandom::uniform(const int64_t min, const } template<> __forceinline__ 
__device__ uint64_t AgentRandom::uniform(const uint64_t min, const uint64_t max) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (min > max) { DTHROW("Invalid arguments passed to AgentRandom::uniform(), %lld > %lld\n", static_cast(min), static_cast(max)); } @@ -131,7 +131,7 @@ __forceinline__ __device__ uint64_t AgentRandom::uniform(const uint64_t min, con } template<> __forceinline__ __device__ float AgentRandom::uniform(const float min, const float max) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (min > max) { DTHROW("Invalid arguments passed to AgentRandom::uniform(), %f > %f\n", min, max); } @@ -140,7 +140,7 @@ __forceinline__ __device__ float AgentRandom::uniform(const float min, const flo } template<> __forceinline__ __device__ double AgentRandom::uniform(const double min, const double max) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (min > max) { DTHROW("Invalid arguments passed to AgentRandom::uniform(), %f > %f\n", min, max); } diff --git a/include/flamegpu/runtime/utility/DeviceEnvironment.cuh b/include/flamegpu/runtime/utility/DeviceEnvironment.cuh index af4b43b70..8f044a997 100644 --- a/include/flamegpu/runtime/utility/DeviceEnvironment.cuh +++ b/include/flamegpu/runtime/utility/DeviceEnvironment.cuh @@ -30,8 +30,8 @@ class ReadOnlyDeviceEnvironment { * @param name name used for accessing the property, this value should be a string literal e.g. 
"foobar" * @tparam T Type of the environment property being accessed * @tparam M Length of property name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid property within the environment (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of the environment property specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If name is not a valid property within the environment (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of the environment property specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ __forceinline__ T getProperty(const char(&name)[M]) const; @@ -42,10 +42,10 @@ class ReadOnlyDeviceEnvironment { * @tparam T Type of the environment property being accessed * @tparam N (Optional) Length of the environment property array, available for parity with other APIs, checked if provided * @tparam M Length of property name, this should always be implicit if passing a string literal - * @throws exception::DeviceError If name is not a valid property within the environment (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If T is not the type of the environment property specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If index is out of bounds for the environment property array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) - * @throws exception::DeviceError If N does not match the length of the environment property array specified by name (flamegpu must be built with SEATBELTS enabled for device error checking) + 
* @throws exception::DeviceError If name is not a valid property within the environment (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If T is not the type of the environment property specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If index is out of bounds for the environment property array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) + * @throws exception::DeviceError If N does not match the length of the environment property array specified by name (flamegpu must be built with FLAMEGPU_SEATBELTS enabled for device error checking) */ template __device__ __forceinline__ T getProperty(const char(&name)[M], unsigned int index) const; @@ -99,7 +99,7 @@ __device__ __forceinline__ T ReadOnlyDeviceEnvironment::getProperty(const char(& template __device__ __forceinline__ ReadOnlyDeviceMacroProperty ReadOnlyDeviceEnvironment::getMacroProperty(const char(&name)[N]) const { char * d_ptr = detail::curve::DeviceCurve::getEnvironmentMacroProperty(name); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!d_ptr) { return ReadOnlyDeviceMacroProperty(nullptr, nullptr); } @@ -113,7 +113,7 @@ __device__ __forceinline__ ReadOnlyDeviceMacroProperty ReadOnlyDe template __device__ __forceinline__ DeviceMacroProperty DeviceEnvironment::getMacroProperty(const char(&name)[N]) const { char* d_ptr = detail::curve::DeviceCurve::getEnvironmentMacroProperty(name); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (!d_ptr) { return DeviceMacroProperty(nullptr, nullptr); } diff --git a/include/flamegpu/runtime/utility/DeviceMacroProperty.cuh b/include/flamegpu/runtime/utility/DeviceMacroProperty.cuh index aa93e1b12..eeec5ecb7 100644 --- 
a/include/flamegpu/runtime/utility/DeviceMacroProperty.cuh +++ b/include/flamegpu/runtime/utility/DeviceMacroProperty.cuh @@ -39,7 +39,7 @@ template class DeviceMacroProperty : public ReadOnlyDeviceMacroProperty { public: -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS /** * Constructor * @param _ptr Pointer to buffer @@ -188,7 +188,7 @@ class DeviceMacroProperty : public ReadOnlyDeviceMacroProperty { __device__ __forceinline__ T exchange(T val); }; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS template __device__ __forceinline__ ReadOnlyDeviceMacroProperty::ReadOnlyDeviceMacroProperty(T* _ptr, unsigned int* _rwf) : ptr(_ptr) @@ -228,7 +228,7 @@ __device__ __forceinline__ DeviceMacroProperty::DeviceMacroPrope #endif template __device__ __forceinline__ ReadOnlyDeviceMacroProperty ReadOnlyDeviceMacroProperty::operator[](unsigned int i) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I == 1 && J == 1 && K == 1 && W == 1) { DTHROW("Indexing error, property has less dimensions.\n"); return ReadOnlyDeviceMacroProperty(nullptr, nullptr); @@ -240,7 +240,7 @@ __device__ __forceinline__ ReadOnlyDeviceMacroProperty ReadOnlyDe } #endif // (i * J * K * W) + (j * K * W) + (k * W) + w -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS return ReadOnlyDeviceMacroProperty(this->ptr + (i * J * K * W), this->read_write_flag); #else return DeviceMacroProperty(this->ptr + (i * J * K * W)); @@ -248,7 +248,7 @@ __device__ __forceinline__ ReadOnlyDeviceMacroProperty ReadOnlyDe } template __device__ __forceinline__ DeviceMacroProperty DeviceMacroProperty::operator[](unsigned int i) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I == 1 && J == 1 && K == 1 && W == 1) { DTHROW("Indexing error, property has less dimensions.\n"); return 
DeviceMacroProperty(nullptr, nullptr); @@ -260,7 +260,7 @@ __device__ __forceinline__ DeviceMacroProperty DeviceMacroPropert } #endif // (i * J * K * W) + (j * K * W) + (k * W) + w -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS return DeviceMacroProperty(this->ptr + (i * J * K * W), this->read_write_flag); #else return DeviceMacroProperty(this->ptr + (i * J * K * W)); @@ -268,7 +268,7 @@ __device__ __forceinline__ DeviceMacroProperty DeviceMacroPropert } template __device__ __forceinline__ ReadOnlyDeviceMacroProperty::operator T() const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; @@ -286,7 +286,7 @@ __device__ __forceinline__ DeviceMacroProperty& DeviceMacroProper std::is_same::value || std::is_same::value || std::is_same::value, "atomic add only supports the types int32_t/uint32_t/uint64_t/float/double."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return *this; @@ -301,7 +301,7 @@ __device__ __forceinline__ DeviceMacroProperty& DeviceMacroProper template __device__ __forceinline__ DeviceMacroProperty& DeviceMacroProperty::operator-=(const T val) { static_assert(std::is_same::value || std::is_same::value, "atomic subtract only supports the types int32_t/uint32_t."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return *this; @@ -315,7 +315,7 @@ __device__ __forceinline__ DeviceMacroProperty& DeviceMacroProper } template __device__ __forceinline__ T DeviceMacroProperty::operator+(const T val) const { -#if !defined(SEATBELTS) || SEATBELTS +#if 
!defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; @@ -328,7 +328,7 @@ __device__ __forceinline__ T DeviceMacroProperty::operator+(const } template __device__ __forceinline__ T DeviceMacroProperty::operator-(const T val) const { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; @@ -342,7 +342,7 @@ __device__ __forceinline__ T DeviceMacroProperty::operator-(const template __device__ __forceinline__ T DeviceMacroProperty::operator++() { static_assert(std::is_same::value, "atomic increment only supports the type uint32_t."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return *this; @@ -358,7 +358,7 @@ __device__ __forceinline__ T DeviceMacroProperty::operator++() { template __device__ __forceinline__ T DeviceMacroProperty::operator--() { static_assert(std::is_same::value, "atomic decrement only supports the type uint32_t."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return *this; @@ -373,7 +373,7 @@ __device__ __forceinline__ T DeviceMacroProperty::operator--() { template __device__ __forceinline__ T DeviceMacroProperty::operator++(int) { static_assert(std::is_same::value, "atomic increment only supports the type uint32_t."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; @@ -388,7 +388,7 @@ __device__ __forceinline__ T 
DeviceMacroProperty::operator++(int) template __device__ __forceinline__ T DeviceMacroProperty::operator--(int) { static_assert(std::is_same::value, "atomic decrement only supports the type uint32_t."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; @@ -404,7 +404,7 @@ __device__ __forceinline__ T DeviceMacroProperty::min(T val) { static_assert(std::is_same::value || std::is_same::value || std::is_same::value, "atomic min only supports the types int32_t/uint32_t/uint64_t."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; @@ -420,7 +420,7 @@ __device__ __forceinline__ T DeviceMacroProperty::max(T val) { static_assert(std::is_same::value || std::is_same::value || std::is_same::value, "atomic max only supports the types int32_t/uint32_t/uint64_t."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; @@ -437,7 +437,7 @@ __device__ __forceinline__ T DeviceMacroProperty::CAS(T compare, std::is_same::value || std::is_same::value || std::is_same::value, "atomic compare and swap only supports the types int32_t/uint32_t/uint64_t/uint16_t."); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; @@ -465,7 +465,7 @@ __device__ __forceinline__ T DeviceMacroProperty::exchange(T val) std::is_same::value || std::is_same::value, "atomic exchange only supports the types int32_t/int64_t/uint32_t/uint64_t/float/double."); static_assert(sizeof(uint64_t) == 
sizeof(unsigned long long int), "uint64_t != unsigned long long int."); // NOLINT(runtime/int) -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (I != 1 || J != 1 || K != 1 || W != 1) { DTHROW("Indexing error, property has more dimensions.\n"); return { }; diff --git a/include/flamegpu/runtime/utility/HostEnvironment.cuh b/include/flamegpu/runtime/utility/HostEnvironment.cuh index 6bf48547a..b5b15b4ac 100644 --- a/include/flamegpu/runtime/utility/HostEnvironment.cuh +++ b/include/flamegpu/runtime/utility/HostEnvironment.cuh @@ -2,7 +2,7 @@ #define INCLUDE_FLAMEGPU_RUNTIME_UTILITY_HOSTENVIRONMENT_CUH_ #include -#include // Required for SEATBELTS=OFF builds for some reason. +#include // Required for FLAMEGPU_SEATBELTS=OFF builds for some reason. #include #include diff --git a/include/flamegpu/runtime/utility/HostMacroProperty.cuh b/include/flamegpu/runtime/utility/HostMacroProperty.cuh index 719ec13cb..a78cd9268 100644 --- a/include/flamegpu/runtime/utility/HostMacroProperty.cuh +++ b/include/flamegpu/runtime/utility/HostMacroProperty.cuh @@ -45,7 +45,7 @@ struct HostMacroProperty_MetaData { */ void upload() { if (h_base_ptr && has_changed) { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (device_read_flag) { THROW flamegpu::exception::InvalidEnvProperty("The environment macro property '%s' was not found, " "in HostMacroProperty_MetaData::upload()\n", @@ -287,7 +287,7 @@ void HostMacroProperty::zero() { memset(reinterpret_cast(metadata->h_base_ptr) + offset, 0, I * J * K * W * metadata->type_size); metadata->has_changed = true; } else { -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS if (metadata->device_read_flag) { THROW flamegpu::exception::InvalidEnvProperty("The environment macro property '%s' was not found, " "in HostMacroProperty::zero()\n", diff --git a/include/flamegpu/sim/LogFrame.h b/include/flamegpu/sim/LogFrame.h 
index 0893638aa..2e22d7924 100644 --- a/include/flamegpu/sim/LogFrame.h +++ b/include/flamegpu/sim/LogFrame.h @@ -186,7 +186,7 @@ struct RunLog { */ int cuda_version; /** - * True if FLAME GPU was built with SEATBELTS enabled at CMake configure time + * True if FLAME GPU was built with FLAMEGPU_SEATBELTS enabled at CMake configure time * For maximum performance, this value should be false */ bool seatbelts; diff --git a/include/flamegpu/sim/Simulation.h b/include/flamegpu/sim/Simulation.h index c76a0c3a8..2f0e456e0 100644 --- a/include/flamegpu/sim/Simulation.h +++ b/include/flamegpu/sim/Simulation.h @@ -45,7 +45,7 @@ class Simulation { timing = other.timing; silence_unknown_args = other.silence_unknown_args; telemetry = other.telemetry; -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION console_mode = other.console_mode; #endif } @@ -60,7 +60,7 @@ class Simulation { bool timing = false; bool silence_unknown_args = false; bool telemetry = false; -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION bool console_mode = false; #else const bool console_mode = true; diff --git a/include/flamegpu/util/detail/compute_capability.cuh b/include/flamegpu/util/detail/compute_capability.cuh index 82f00005e..cdfd6c2dd 100644 --- a/include/flamegpu/util/detail/compute_capability.cuh +++ b/include/flamegpu/util/detail/compute_capability.cuh @@ -21,14 +21,14 @@ int getComputeCapability(int deviceIndex); /** * Get the minimum compute capability which this file was compiled for. - * Specified via the MIN_ARCH macro, as __CUDA_ARCH__ is only defined for device compilation. + * Specified via the FLAMEGPU_MIN_CUDA_ARCH macro, as __CUDA_ARCH__ is only defined for device compilation. */ int minimumCompiledComputeCapability(); /** * Check that the current executable has been built with a low enough compute capability for the current device. * This assumes JIT support is enabled for future (major) architectures. 
- * If the compile time flag MIN_ARCH was not specified, no decision can be made so it is assumed to be successful. + * If the compile time macro FLAMEGPU_MIN_CUDA_ARCH was not specified or incorrectly detected, no decision can be made so it is assumed to be successful. * @param deviceIndex the index of the device to be checked. * @return boolean indicating if the executable can run on the specified device. */ diff --git a/include/flamegpu/util/detail/curand.cuh b/include/flamegpu/util/detail/curand.cuh index 79273bef3..c07ad2ed9 100644 --- a/include/flamegpu/util/detail/curand.cuh +++ b/include/flamegpu/util/detail/curand.cuh @@ -11,11 +11,11 @@ namespace flamegpu { namespace util { namespace detail { -#if defined(CURAND_MRG32k3a) +#if defined(FLAMEGPU_CURAND_MRG32k3a) typedef curandStateMRG32k3a_t curandState; -#elif defined(CURAND_XORWOW) +#elif defined(FLAMEGPU_CURAND_XORWOW) typedef curandStateXORWOW_t curandState; -#else // defined(CURAND_Philox4_32_10) +#else // defined(FLAMEGPU_CURAND_Philox4_32_10) typedef curandStatePhilox4_32_10_t curandState; #endif diff --git a/include/flamegpu/util/type_decode.h b/include/flamegpu/util/type_decode.h index 4a5e7f779..b4d8bf29a 100644 --- a/include/flamegpu/util/type_decode.h +++ b/include/flamegpu/util/type_decode.h @@ -12,7 +12,7 @@ struct type_decode { typedef T type_t; }; -#if defined(USE_GLM) || defined(GLM_VERSION) +#if defined(FLAMEGPU_USE_GLM) || defined(GLM_VERSION) #ifndef GLM_VERSION #ifdef __CUDACC__ #ifdef __NVCC_DIAG_PRAGMA_SUPPORT__ diff --git a/include/flamegpu/visualiser/AgentStateVis.h b/include/flamegpu/visualiser/AgentStateVis.h index bc66f93d7..48a07e6a1 100644 --- a/include/flamegpu/visualiser/AgentStateVis.h +++ b/include/flamegpu/visualiser/AgentStateVis.h @@ -1,6 +1,6 @@ #ifndef INCLUDE_FLAMEGPU_VISUALISER_AGENTSTATEVIS_H_ #define INCLUDE_FLAMEGPU_VISUALISER_AGENTSTATEVIS_H_ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION #include #include @@ -115,5 +115,5 @@ class AgentStateVis { } // 
namespace visualiser } // namespace flamegpu -#endif // VISUALISATION +#endif // FLAMEGPU_VISUALISATION #endif // INCLUDE_FLAMEGPU_VISUALISER_AGENTSTATEVIS_H_ diff --git a/include/flamegpu/visualiser/AgentVis.h b/include/flamegpu/visualiser/AgentVis.h index 0def77b55..99b90bda7 100644 --- a/include/flamegpu/visualiser/AgentVis.h +++ b/include/flamegpu/visualiser/AgentVis.h @@ -1,6 +1,6 @@ #ifndef INCLUDE_FLAMEGPU_VISUALISER_AGENTVIS_H_ #define INCLUDE_FLAMEGPU_VISUALISER_AGENTVIS_H_ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // #include #include @@ -594,5 +594,5 @@ class AgentVis { } // namespace visualiser } // namespace flamegpu -#endif // VISUALISATION +#endif // FLAMEGPU_VISUALISATION #endif // INCLUDE_FLAMEGPU_VISUALISER_AGENTVIS_H_ diff --git a/include/flamegpu/visualiser/LineVis.h b/include/flamegpu/visualiser/LineVis.h index c7c6e9546..06892ed0b 100644 --- a/include/flamegpu/visualiser/LineVis.h +++ b/include/flamegpu/visualiser/LineVis.h @@ -1,6 +1,6 @@ #ifndef INCLUDE_FLAMEGPU_VISUALISER_LINEVIS_H_ #define INCLUDE_FLAMEGPU_VISUALISER_LINEVIS_H_ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION #include @@ -56,5 +56,5 @@ class LineVis { } // namespace visualiser } // namespace flamegpu -#endif // VISUALISATION +#endif // FLAMEGPU_VISUALISATION #endif // INCLUDE_FLAMEGPU_VISUALISER_LINEVIS_H_ diff --git a/include/flamegpu/visualiser/ModelVis.h b/include/flamegpu/visualiser/ModelVis.h index dc5dcd88e..e3fafa5ca 100644 --- a/include/flamegpu/visualiser/ModelVis.h +++ b/include/flamegpu/visualiser/ModelVis.h @@ -1,6 +1,6 @@ #ifndef INCLUDE_FLAMEGPU_VISUALISER_MODELVIS_H_ #define INCLUDE_FLAMEGPU_VISUALISER_MODELVIS_H_ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION #include #include @@ -269,5 +269,5 @@ class ModelVis { } // namespace visualiser } // namespace flamegpu -#endif // VISUALISATION +#endif // FLAMEGPU_VISUALISATION #endif // INCLUDE_FLAMEGPU_VISUALISER_MODELVIS_H_ diff --git a/include/flamegpu/visualiser/visualiser_api.h 
b/include/flamegpu/visualiser/visualiser_api.h index 38092f9f2..f5c5e2745 100644 --- a/include/flamegpu/visualiser/visualiser_api.h +++ b/include/flamegpu/visualiser/visualiser_api.h @@ -1,7 +1,7 @@ #ifndef INCLUDE_FLAMEGPU_VISUALISER_VISUALISER_API_H_ #define INCLUDE_FLAMEGPU_VISUALISER_VISUALISER_API_H_ -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION #include "flamegpu/visualiser/ModelVis.h" #include "flamegpu/visualiser/AgentVis.h" #include "flamegpu/visualiser/AgentStateVis.h" @@ -11,6 +11,6 @@ #include "flamegpu/visualiser/color/StaticColor.h" #include "flamegpu/visualiser/color/DiscreteColor.h" #include "flamegpu/visualiser/color/HSVInterpolation.h" -#endif // VISUALISATION +#endif // FLAMEGPU_VISUALISATION #endif // INCLUDE_FLAMEGPU_VISUALISER_VISUALISER_API_H_ diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fa4925324..40bcf2355 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,5 +1,5 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/.. REALPATH) @@ -7,20 +7,25 @@ get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/.. REALPATH) # Include versioning CMake logic, to set the project version include(${FLAMEGPU_ROOT}/cmake/version.cmake) +# Include CMake Logic and call a method to record user-provided CMAKE_CUDA_ARCHITECTURES +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) +flamegpu_init_cuda_architectures() + # Set the project with no languages iniitailly, in case of doxygen only builds. project(flamegpu LANGUAGES NONE VERSION ${FLAMEGPU_VERSION}) # See what languages are supported. Must be called within a project. 
# This is primarily here incase someone targets src/CMakeFiles.txt directly include(CheckLanguage) +include(CheckCXXCompilerFlag) check_language(CXX) check_language(CUDA) if(CMAKE_CUDA_COMPILER STREQUAL NOTFOUND) #Not able to build code, so just make docs message("Suitable compilers for building code not found.\n" "Attempting generation of minimal documentation only project.") include(${FLAMEGPU_ROOT}/cmake/dependencies/doxygen.cmake) - if(${BUILD_API_DOCUMENTATION}) - create_doxygen_target("${FLAMEGPU_ROOT}" "${CMAKE_CURRENT_BINARY_DIR}" "") + if(${FLAMEGPU_BUILD_API_DOCUMENTATION}) + flamegpu_create_doxygen_target("${FLAMEGPU_ROOT}" "${CMAKE_CURRENT_BINARY_DIR}" "") endif() return() endif() @@ -30,6 +35,9 @@ enable_language(C) enable_language(CXX) enable_language(CUDA) +# Set CMAKE_CUDA_ARCHITECTURES now CUDA is enabled +flamegpu_set_cuda_architectures() + # Ensure this is not an in-source build include(${FLAMEGPU_ROOT}/cmake/OutOfSourceOnly.cmake) @@ -67,14 +75,20 @@ unset(FLAMEGPU_TELEMETRY_TEST_MODE_DEFAULT) # Option to enable/disable runtime checks which may impact performance # This will primarily prevent device code from reporting errors -option(SEATBELTS "Enable runtime checks which harm performance for release/profile builds.\nThis should only be disabled after a model is known to be correct." ON) +option(FLAMEGPU_SEATBELTS "Enable runtime checks which harm performance for release/profile builds.\nThis should only be disabled after a model is known to be correct." ON) + +# Option to enable/disable building the flamegpu visualiser into the flamegpu +option(FLAMEGPU_VISUALISATION "Enable FLAMEGPU visualisation support" OFF) # Option to enable/disable the default status of JitifyCache -option(RTC_DISK_CACHE "Enable caching of RTC kernels to disk by default (this can still be overridden programatically)." ON) +option(FLAMEGPU_RTC_DISK_CACHE "Enable caching of RTC kernels to disk by default (this can still be overridden programatically)." 
ON) + +# Option to enable/disable logging of dynamic RTC files to disk +option(FLAMEGPU_RTC_EXPORT_SOURCES "Export RTC source files to disk at runtime" OFF) # Option to make put glm on the include path -option(USE_GLM "Experimental: Make GLM available to flamegpu2 projects on the include path" OFF) -mark_as_advanced(USE_GLM) +option(FLAMEGPU_ENABLE_GLM "Experimental: Make GLM available to flamegpu2 projects on the include path" OFF) +mark_as_advanced(FLAMEGPU_ENABLE_GLM) # Include common rules. include(${FLAMEGPU_ROOT}/cmake/common.cmake) @@ -82,7 +96,7 @@ include(${FLAMEGPU_ROOT}/cmake/common.cmake) include(${FLAMEGPU_ROOT}/cmake/dependencies/doxygen.cmake) # Include the visualiser cmake if requested -if(VISUALISATION) +if(FLAMEGPU_VISUALISATION) include(${FLAMEGPU_ROOT}/cmake/dependencies/flamegpu2-visualiser.cmake) endif() @@ -100,14 +114,14 @@ configure_file(${DYNAMIC_VERSION_SRC_SRC} ${DYNAMIC_VERSION_SRC_DEST} @ONLY) if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) # If top level project SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib/${CMAKE_BUILD_TYPE}/) - if(${BUILD_API_DOCUMENTATION}) - create_doxygen_target("${FLAMEGPU_ROOT}" "${CMAKE_CURRENT_BINARY_DIR}" "") + if(${FLAMEGPU_BUILD_API_DOCUMENTATION}) + flamegpu_create_doxygen_target("${FLAMEGPU_ROOT}" "${CMAKE_CURRENT_BINARY_DIR}" "") endif() else() # If called via add_subdirectory() SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../lib/${CMAKE_BUILD_TYPE}/) - if(${BUILD_API_DOCUMENTATION}) - create_doxygen_target("${FLAMEGPU_ROOT}" "${CMAKE_CURRENT_BINARY_DIR}/.." "") + if(${FLAMEGPU_BUILD_API_DOCUMENTATION}) + flamegpu_create_doxygen_target("${FLAMEGPU_ROOT}" "${CMAKE_CURRENT_BINARY_DIR}/.." 
"") endif() endif() @@ -362,7 +376,7 @@ set(SRC_FLAMEGPU_VISUALISER ${FLAMEGPU_ROOT}/src/flamegpu/visualiser/color/ViridisInterpolation.cpp ) # If visualisation is enabled, the the visualiser inc/src files must be in the appropriate lists -if (VISUALISATION) +if (FLAMEGPU_VISUALISATION) SET(SRC_INCLUDE ${SRC_INCLUDE} ${SRC_INCLUDE_VISUALISER} @@ -404,17 +418,59 @@ source_group(TREE ${FLAMEGPU_ROOT}/include PREFIX external FILES ${T_SRC_EXTERNA # Define which source files are required for the target executable add_library(${PROJECT_NAME} STATIC ${ALL_SRC}) +# Require C++17 as a public target property for C++ and CUDA, with no extensions, and the standard is required +target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_17) +target_compile_features(${PROJECT_NAME} PUBLIC cuda_std_17) +set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_EXTENSIONS OFF) +set_property(TARGET ${PROJECT_NAME} PROPERTY CUDA_EXTENSIONS OFF) +set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) +set_property(TARGET ${PROJECT_NAME} PROPERTY CUDA_STANDARD_REQUIRED ON) + # Add the library headers as public so they are forwarded on. target_include_directories(${PROJECT_NAME} PUBLIC "${FLAMEGPU_ROOT}/include") # Add any private headers. target_include_directories(${PROJECT_NAME} PRIVATE "${FLAMEGPU_ROOT}/src") # Set target level warnings. -EnableFLAMEGPUCompilerWarnings(TARGET "${PROJECT_NAME}") +flamegpu_enable_compiler_warnings(TARGET "${PROJECT_NAME}") # Apply common compiler settings -CommonCompilerSettings(TARGET "${PROJECT_NAME}") -# Set the cuda gencodes, potentially using the user-provided CUDA_ARCH -SetCUDAGencodes(TARGET "${PROJECT_NAME}") +flamegpu_common_compiler_settings(TARGET "${PROJECT_NAME}") + +# Ensure that DEBUG macros and -G are set for debug NVCC builds, as public properties so they are inherrited by consumers. Used in include files. 
+target_compile_options(${PROJECT_NAME} PUBLIC "$<$,$>:-G>") +target_compile_definitions(${PROJECT_NAME} PUBLIC $<$:DEBUG>) +target_compile_definitions(${PROJECT_NAME} PUBLIC $<$:_DEBUG>) + +# Prevent windows.h from defining max and min, this can be private as windows.h is only included in src files (directly, or via nvrtc/jitify) +if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + target_compile_definitions(${PROJECT_NAME} PRIVATE NOMINMAX) +endif() + +# If FLAMEGPU_SEATBELTS is enabled, set it as a public compile defition (i.e it's in the headers) +if (FLAMEGPU_SEATBELTS) + # If on, all build configs have seatbelts + target_compile_definitions(${PROJECT_NAME} PUBLIC FLAMEGPU_SEATBELTS=1) +else() + # If off, debug builds have seatbelts, non debug builds do not. + target_compile_definitions(${PROJECT_NAME} PUBLIC $,FLAMEGPU_SEATBELTS=1,FLAMEGPU_SEATBELTS=0>) +endif() + +# Detect and forward the minimum CMAKE_CUDA_ARCHITECTURES value to the compiler, if possible. This is only used in a library source file so can be private. 
+flamegpu_get_minimum_cuda_architecture(min_cuda_arch) +target_compile_definitions(${PROJECT_NAME} PRIVATE FLAMEGPU_MIN_CUDA_ARCH=${min_cuda_arch}) + +# Select the curand engine to use for FLAMEGPU, used in include, so must match the library build (PUBLIC) +string(TOUPPER FLAMEGPU_CURAND_ENGINE FLAMEGPU_CURAND_ENGINE_UPPER) +if(${FLAMEGPU_CURAND_ENGINE_UPPER} STREQUAL "MRG") + target_compile_definitions(${PROJECT_NAME} PUBLIC FLAMEGPU_CURAND_MRG32k3a) +elseif(${FLAMEGPU_CURAND_ENGINE_UPPER} STREQUAL "PHILOX") + target_compile_definitions(${PROJECT_NAME} PUBLIC FLAMEGPU_CURAND_Philox4_32_10) +elseif(${FLAMEGPU_CURAND_ENGINE_UPPER} STREQUAL "XORWOW") + target_compile_definitions(${PROJECT_NAME} PUBLIC FLAMEGPU_CURAND_XORWOW) +elseif(DEFINED FLAMEGPU_CURAND_ENGINE) + message(FATAL_ERROR "${FLAMEGPU_CURAND_ENGINE} is not a suitable value of FLAMEGPU_CURAND_ENGINE\nOptions: \"MRG\", \"PHILOX\", \"XORWOW\"") +endif() +unset(FLAMEGPU_CURAND_ENGINE_UPPER) # Telemetry (perform this check here are not in common as it only effects the library build) if (FLAMEGPU_SHARE_USAGE_STATISTICS) @@ -445,32 +501,36 @@ endif() set_property(TARGET ${PROJECT_NAME} PROPERTY POSITION_INDEPENDENT_CODE ON) # Activate visualisation if requested -if (VISUALISATION) +if (FLAMEGPU_VISUALISATION) # @todo - these could/should be private, but must be PUBLIC for swig to build. Alternatively it could be PRIVATE but also set for swig. target_link_libraries(${PROJECT_NAME} PUBLIC flamegpu_visualiser) - CMAKE_SET_TARGET_FOLDER(flamegpu_visualiser "FLAMEGPU") - target_compile_definitions(${PROJECT_NAME} PUBLIC VISUALISATION) + flamegpu_set_target_folder(flamegpu_visualiser "FLAMEGPU") + target_compile_definitions(${PROJECT_NAME} PUBLIC FLAMEGPU_VISUALISATION) endif() -# Make the visualisers GLM accessible via include +# Make GLM accessible via include. PUBLIC so this is usable by downstream projects # @todo - make the vis cmake/glm create a target to use. 
-if (USE_GLM) - if(glm_FOUND) - target_include_directories(${PROJECT_NAME} PUBLIC "${glm_INCLUDE_DIRS}") - target_compile_definitions(${PROJECT_NAME} PUBLIC GLM_PATH="${glm_INCLUDE_DIRS}") +if (FLAMEGPU_ENABLE_GLM) + if(TARGET glm::glm) + # Get the interface include directories property from the glm target to forward onto nvrtc, which is a bit grim (i.e. not portable/installable) + target_link_libraries(${PROJECT_NAME} PUBLIC glm::glm) + target_compile_definitions(${PROJECT_NAME} PUBLIC FLAMEGPU_USE_GLM) + get_target_property(glm_inc_path glm::glm INTERFACE_INCLUDE_DIRECTORIES) + target_compile_definitions(${PROJECT_NAME} PUBLIC FLAMEGPU_GLM_PATH="${glm_inc_path}") + unset(glm_inc_path) else() - message(WARNING "USE_GLM enabled, but glm_FOUND is False.") + message(WARNING "FLAMEGPU_ENABLE_GLM enabled, but glm was not found") endif() - target_compile_definitions(${PROJECT_NAME} PRIVATE USE_GLM) endif() -if (NOT RTC_DISK_CACHE) + +if (NOT FLAMEGPU_RTC_DISK_CACHE) # Only used in source files, so can be private - target_compile_definitions(${PROJECT_NAME} PRIVATE DISABLE_RTC_DISK_CACHE) + target_compile_definitions(${PROJECT_NAME} PRIVATE FLAMEGPU_DISABLE_RTC_DISK_CACHE) endif() -if (EXPORT_RTC_SOURCES) +if (FLAMEGPU_RTC_EXPORT_SOURCES) # Only used in source files, so can be private - target_compile_definitions(${PROJECT_NAME} PRIVATE OUTPUT_RTC_DYNAMIC_FILES) + target_compile_definitions(${PROJECT_NAME} PRIVATE FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES) endif () # Enable RDC @@ -511,7 +571,7 @@ target_link_libraries(${PROJECT_NAME} PUBLIC CUDA::nvrtc) target_link_libraries(${PROJECT_NAME} PUBLIC CUDA::cuda_driver) -if(USE_NVTX AND TARGET NVTX::nvtx) +if(FLAMEGPU_ENABLE_NVTX AND TARGET NVTX::nvtx) target_link_libraries(${PROJECT_NAME} PUBLIC NVTX::nvtx) # Get the version to set a definition value # Interface targets only support the version property from 3.19 @@ -521,12 +581,13 @@ if(USE_NVTX AND TARGET NVTX::nvtx) # fallback to the old cmake var. 
set(nvtxversion ${NVTX_VERSION}) endif() - target_compile_definitions(${PROJECT_NAME} PUBLIC "$<$:FLAMEGPU_USE_NVTX=${nvtxversion}>") + target_compile_definitions(${PROJECT_NAME} PUBLIC "FLAMEGPU_USE_NVTX=${nvtxversion}") unset(nvtxversion) endif() target_link_libraries(${PROJECT_NAME} PUBLIC Jitify::jitify) -target_compile_definitions(${PROJECT_NAME} PUBLIC "$<$:JITIFY_PRINT_LOG>") +# jitify is included in public headers, so this definiition must be public too. +target_compile_definitions(${PROJECT_NAME} PUBLIC "JITIFY_PRINT_LOG") # If gcc < 9, needs to link against stdc++fs if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0) @@ -538,33 +599,23 @@ if(CMAKE_DL_LIBS) target_link_libraries(${PROJECT_NAME} PUBLIC ${CMAKE_DL_LIBS}) endif() -# Make GLM accessible via include. PUBLIC so this is usable by downstream projects -if (USE_GLM) - if(TARGET GLM::glm) - target_link_libraries(${PROJECT_NAME} PUBLIC GLM::glm) - target_compile_definitions(${PROJECT_NAME} PUBLIC USE_GLM) - else() - message(WARNING "USE_GLM enabled, but glm was not found") - endif() -endif() - # Ensure we link against pthread / windows equilvalent. This was previously implied set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) target_link_libraries(${PROJECT_NAME} PRIVATE Threads::Threads) # Flag the new linter target and the files to be linted. Ensure vis sources are linted even if vis is not enabled (mostly for CI) -if (VISUALISATION) +if (FLAMEGPU_VISUALISATION) # If vis is enabled, just use ALL_SRC - new_linter_target(${PROJECT_NAME} "${ALL_SRC}") + flamegpu_new_linter_target(${PROJECT_NAME} "${ALL_SRC}") else() # If vis is not enabled, pass in the vis source files too. 
- new_linter_target(${PROJECT_NAME} "${ALL_SRC};${SRC_INCLUDE_VISUALISER};${SRC_FLAMEGPU_VISUALISER}") + flamegpu_new_linter_target(${PROJECT_NAME} "${ALL_SRC};${SRC_INCLUDE_VISUALISER};${SRC_FLAMEGPU_VISUALISER}") endif() # Put within FLAMEGPU filter -CMAKE_SET_TARGET_FOLDER(${PROJECT_NAME} "FLAMEGPU") +flamegpu_set_target_folder(${PROJECT_NAME} "FLAMEGPU") # Put the tinyxml2 in the folder -CMAKE_SET_TARGET_FOLDER("tinyxml2" "FLAMEGPU/Dependencies") +flamegpu_set_target_folder("tinyxml2" "FLAMEGPU/Dependencies") # Emit some warnings that should only be issued once and are related to this file (but not this target) # @todo - move these author warnigns elsewhere, so they will be emitted if not building FLAMEGPU iteself? Common but with some form of guard? diff --git a/src/flamegpu/exception/FLAMEGPUDeviceException.cu b/src/flamegpu/exception/FLAMEGPUDeviceException.cu index 9bf0a9529..99f978b7d 100644 --- a/src/flamegpu/exception/FLAMEGPUDeviceException.cu +++ b/src/flamegpu/exception/FLAMEGPUDeviceException.cu @@ -3,7 +3,7 @@ #include "flamegpu/gpu/detail/CUDAErrorChecking.cuh" #include "flamegpu/util/detail/cuda.cuh" -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS namespace flamegpu { namespace exception { @@ -212,4 +212,4 @@ std::string DeviceExceptionManager::getErrorString(const DeviceExceptionBuffer & } // namespace exception } // namespace flamegpu -#endif // SEATBELTS are off +#endif // FLAMEGPU_SEATBELTS are off diff --git a/src/flamegpu/gpu/CUDAAgent.cu b/src/flamegpu/gpu/CUDAAgent.cu index 90b120a0b..3511125ac 100644 --- a/src/flamegpu/gpu/CUDAAgent.cu +++ b/src/flamegpu/gpu/CUDAAgent.cu @@ -500,8 +500,8 @@ void CUDAAgent::addInstantitateRTCFunction(const AgentFunctionData& func, const // get the dynamically generated header from curve rtc const std::string curve_dynamic_header = curve_header.getDynamicHeader(env->getBufferLen()); - // output to disk if OUTPUT_RTC_DYNAMIC_FILES macro is set -#ifdef 
OUTPUT_RTC_DYNAMIC_FILES + // output to disk if FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES macro is set +#ifdef FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES // create string for agent function implementation std::string func_impl = std::string(func.rtc_func_name).append("_impl"); // curve diff --git a/src/flamegpu/gpu/CUDAMacroEnvironment.cu b/src/flamegpu/gpu/CUDAMacroEnvironment.cu index e8fd2619c..8da72f230 100644 --- a/src/flamegpu/gpu/CUDAMacroEnvironment.cu +++ b/src/flamegpu/gpu/CUDAMacroEnvironment.cu @@ -24,7 +24,7 @@ void CUDAMacroEnvironment::init(cudaStream_t stream) { * prop.second.elements[1] * prop.second.elements[2] * prop.second.elements[3]; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS buffer_size += sizeof(unsigned int); // Extra uint is used as read-write flag by seatbelts #endif gpuErrchk(cudaMalloc(&prop.second.d_ptr, buffer_size)); @@ -46,7 +46,7 @@ void CUDAMacroEnvironment::init(const SubEnvironmentData& mapping, const CUDAMac * prop.second.elements[1] * prop.second.elements[2] * prop.second.elements[3]; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS buffer_size += sizeof(unsigned int); // Extra uint is used as read-write flag by seatbelts #endif gpuErrchk(cudaMalloc(&prop.second.d_ptr, buffer_size)); @@ -96,7 +96,7 @@ void CUDAMacroEnvironment::unmapRTCVariables(detail::curve::CurveRTCHost& curve_ curve_header.unregisterEnvMacroProperty(p.first.c_str()); } } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS void CUDAMacroEnvironment::resetFlagsAsync(const std::vector &streams) { unsigned int i = 0; for (const auto& prop : properties) { diff --git a/src/flamegpu/gpu/CUDAScatter.cu b/src/flamegpu/gpu/CUDAScatter.cu index ed0e9bb2f..b27166ec9 100644 --- a/src/flamegpu/gpu/CUDAScatter.cu +++ b/src/flamegpu/gpu/CUDAScatter.cu @@ -516,7 +516,7 @@ __global__ void reorder_array_messages( const unsigned int threadCount, const 
unsigned int array_length, const unsigned int *d_position, -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS unsigned int *d_write_flag, #endif CUDAScatter::ScatterData *scatter_data, @@ -533,7 +533,7 @@ __global__ void reorder_array_messages( for (unsigned int i = 0; i < scatter_len; ++i) { memcpy(scatter_data[i].out + (output_index * scatter_data[i].typeLen), scatter_data[i].in + (index * scatter_data[i].typeLen), scatter_data[i].typeLen); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Set err check flag atomicInc(d_write_flag + output_index, UINT_MAX); #endif @@ -579,7 +579,7 @@ void CUDAScatter::arrayMessageReorder( assert(d_position); // Not an array message, lacking ___INDEX var size_t t_data_len = 0; { // Decide per-stream resource memory requirements based on curve data, and potentially cub temp memory -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Query cub to find the number of temporary bytes required. 
gpuErrchk(cub::DeviceReduce::Max(nullptr, t_data_len, d_write_flag, d_position, array_length, stream)); #endif @@ -598,12 +598,12 @@ void CUDAScatter::arrayMessageReorder( reorder_array_messages <<>> ( itemCount, array_length, d_position, -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS d_write_flag, #endif streamResources[streamResourceId].d_data, static_cast(sd.size())); gpuErrchkLaunch(); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Check d_write_flag for dupes gpuErrchk(cub::DeviceReduce::Max(streamResources[streamResourceId].d_data, t_data_len, d_write_flag, d_position, array_length, stream)); unsigned int maxBinSize = 0; diff --git a/src/flamegpu/gpu/CUDASimulation.cu b/src/flamegpu/gpu/CUDASimulation.cu index 8efb9efdb..e0fc6edcb 100644 --- a/src/flamegpu/gpu/CUDASimulation.cu +++ b/src/flamegpu/gpu/CUDASimulation.cu @@ -30,7 +30,7 @@ #include "flamegpu/version.h" #include "flamegpu/model/AgentFunctionDescription.h" #include "flamegpu/io/Telemetry.h" -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION #include "flamegpu/visualiser/FLAMEGPU_Visualisation.h" #endif @@ -199,7 +199,7 @@ CUDASimulation::~CUDASimulation() { submodel_map.clear(); host_api.reset(); macro_env.free(); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualisation.reset(); // Might want to force destruct this, as user could hold a ModelVis that has shared ptr #endif @@ -474,7 +474,7 @@ void CUDASimulation::spatialSortAgent_async(const std::string& funcName, const s gridSize = (state_list_size + blockSize - 1) / blockSize; unsigned int sm_size = 0; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS auto *error_buffer = this->singletons->exception.getDevicePtr(streamId, stream); sm_size = sizeof(error_buffer); #endif @@ -696,7 +696,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const un // Agent function condition kernel 
wrapper args util::detail::curandState *t_rng = d_rng + totalThreads; unsigned int *scanFlag_agentDeath = this->singletons->scatter.Scan().Config(CUDAScanCompaction::Type::AGENT_DEATH, streamIdx).d_ptrs.scan_flag; -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS auto *error_buffer = this->singletons->exception.getDevicePtr(streamIdx, this->getStream(streamIdx)); #endif // switch between normal and RTC agent function condition @@ -707,7 +707,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const un //! Round up according to CUDAAgent state list size gridSize = (state_list_size + blockSize - 1) / blockSize; (func_des->condition) << getStream(streamIdx) >> > ( -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS error_buffer, #endif cuda_agent.getCurve(func_des->name + "_condition").getDevicePtr(), @@ -727,7 +727,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const un gridSize = (state_list_size + blockSize - 1) / blockSize; // launch the kernel CUresult a = instance.configure(gridSize, blockSize, 0, this->getStream(streamIdx)).launch({ -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS reinterpret_cast(&error_buffer), #endif const_cast(reinterpret_cast(&state_list_size)), @@ -766,7 +766,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const un continue; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Error check after unmap vars this->singletons->exception.checkError("condition " + func_des->name, streamIdx, this->getStream(streamIdx)); #endif @@ -920,7 +920,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const un unsigned int *scanFlag_agentDeath = func_des->has_agent_death ? 
this->singletons->scatter.Scan().Config(CUDAScanCompaction::Type::AGENT_DEATH, streamIdx).d_ptrs.scan_flag : nullptr; unsigned int *scanFlag_messageOutput = this->singletons->scatter.Scan().Config(CUDAScanCompaction::Type::MESSAGE_OUTPUT, streamIdx).d_ptrs.scan_flag; unsigned int *scanFlag_agentOutput = this->singletons->scatter.Scan().Config(CUDAScanCompaction::Type::AGENT_OUTPUT, streamIdx).d_ptrs.scan_flag; - #if !defined(SEATBELTS) || SEATBELTS + #if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS auto *error_buffer = this->singletons->exception.getDevicePtr(streamIdx, this->getStream(streamIdx)); #endif @@ -931,7 +931,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const un gridSize = (state_list_size + blockSize - 1) / blockSize; (func_des->func) << getStream(streamIdx) >> > ( - #if !defined(SEATBELTS) || SEATBELTS + #if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS error_buffer, #endif cuda_agent.getCurve(func_des->name).getDevicePtr(), @@ -955,7 +955,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const un gridSize = (state_list_size + blockSize - 1) / blockSize; // launch the kernel CUresult a = instance.configure(gridSize, blockSize, 0, this->getStream(streamIdx)).launch({ -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS reinterpret_cast(&error_buffer), #endif reinterpret_cast(&d_agentOut_nextID), @@ -1023,7 +1023,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const un output_agent.releaseNewBuffer(*func_des); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Error check after unmap vars // This means that curve is cleaned up before we throw exception (mostly prevents curve being polluted if we catch and handle errors) this->singletons->exception.checkError(func_des->name, streamIdx, this->getStream(streamIdx)); @@ -1039,7 +1039,7 @@ void CUDASimulation::stepLayer(const std::shared_ptr& layer, const 
un // Execute the host functions. layerHostFunctions(layer, layerIndex); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS // Reset macro-environment read-write flags // Note this does not synchronise threads, it relies on synchronizeAllStreams() post host fns macro_env.resetFlagsAsync(streams); @@ -1105,7 +1105,7 @@ bool CUDASimulation::stepExitConditions() { // Execute exit conditions for (auto &exitCdns : model->exitConditions) { if (exitCdns(this->host_api.get()) == EXIT) { - #ifdef VISUALISATION + #ifdef FLAMEGPU_VISUALISATION if (visualisation) { visualisation->updateBuffers(step_count+1); } @@ -1119,7 +1119,7 @@ bool CUDASimulation::stepExitConditions() { if (!exitConditionExit) { for (auto &exitCdns : model->exitConditionCallbacks) { if (exitCdns->run(this->host_api.get()) == EXIT) { - #ifdef VISUALISATION + #ifdef FLAMEGPU_VISUALISATION if (visualisation) { visualisation->updateBuffers(step_count+1); } @@ -1137,7 +1137,7 @@ bool CUDASimulation::stepExitConditions() { processHostAgentCreation(0); } - #ifdef VISUALISATION + #ifdef FLAMEGPU_VISUALISATION if (visualisation) { visualisation->updateBuffers(step_count+1); } @@ -1191,7 +1191,7 @@ void CUDASimulation::simulate() { resetLog(); processStepLog(this->elapsedSecondsRTCInitialisation + this->elapsedSecondsInitFunctions); - #ifdef VISUALISATION + #ifdef FLAMEGPU_VISUALISATION // Pre step-loop visualisation update if (visualisation) { visualisation->updateBuffers(); @@ -1206,7 +1206,7 @@ void CUDASimulation::simulate() { if (!continueSimulation) { break; } - #ifdef VISUALISATION + #ifdef FLAMEGPU_VISUALISATION // Special case, if steps == 0 and visualisation has been closed if (getSimulationConfig().steps == 0 && @@ -1221,7 +1221,7 @@ void CUDASimulation::simulate() { this->exitFunctions(); // Sync visualistaion after the exit functions - #ifdef VISUALISATION + #ifdef FLAMEGPU_VISUALISATION if (visualisation) { visualisation->updateBuffers(); } @@ -1356,7 +1356,7 
@@ void CUDASimulation::setPopulationData(AgentVector& population, const std::strin } // This call hierarchy validates agent desc matches and state is valid it->second->setPopulationData(population, state_name, this->singletons->scatter, 0, getStream(0)); // Streamid shouldn't matter here -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION if (visualisation) { visualisation->updateBuffers(); } @@ -1457,7 +1457,7 @@ void CUDASimulation::applyConfig_derived() { flamegpu::util::nvtx::Range range{"applyConfig_derived"}; // Handle console_mode -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION if (visualisation) { visualiser::ModelVis mv(visualisation); if (getSimulationConfig().console_mode) { @@ -1575,7 +1575,7 @@ void CUDASimulation::initialiseSingletons() { // Store the WDDM/TCC driver mode status, for timer class decisions. Result is cached in the anon namespace to avoid multiple queries deviceUsingWDDM = util::detail::wddm::deviceIsWDDM(); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION if (visualisation) { visualisation->updateRandomSeed(); // Incase user hasn't triggered applyConfig() visualisation->registerEnvProperties(); @@ -1651,7 +1651,7 @@ CUDASimulation::Config &CUDASimulation::CUDAConfig() { const CUDASimulation::Config &CUDASimulation::getCUDAConfig() const { return config; } -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION visualiser::ModelVis CUDASimulation::getVisualisation() { if (!visualisation) visualisation = std::make_shared(*this); @@ -1809,7 +1809,7 @@ void CUDASimulation::resetLog() { gpuErrchk(cudaDeviceGetAttribute(&run_log->performance_specs.device_cc_major, cudaDevAttrComputeCapabilityMajor, CUDAConfig().device_id)); gpuErrchk(cudaDeviceGetAttribute(&run_log->performance_specs.device_cc_minor, cudaDevAttrComputeCapabilityMinor, CUDAConfig().device_id)); gpuErrchk(cudaRuntimeGetVersion(&run_log->performance_specs.cuda_version)); -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS 
run_log->performance_specs.seatbelts = true; #else run_log->performance_specs.seatbelts = false; diff --git a/src/flamegpu/io/JSONStateReader.cpp b/src/flamegpu/io/JSONStateReader.cpp index f64b4d7b4..9814f602a 100644 --- a/src/flamegpu/io/JSONStateReader.cpp +++ b/src/flamegpu/io/JSONStateReader.cpp @@ -308,7 +308,7 @@ class JSONStateReader_agentsize_counter : public rapidjson::BaseReaderHandlerSimulationConfig().verbosity = static_cast(val); } else if (lastKey == "console_mode") { -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION sim_instance->SimulationConfig().console_mode = static_cast(val); #else if (static_cast(val) == false) { diff --git a/src/flamegpu/io/JSONStateWriter.cpp b/src/flamegpu/io/JSONStateWriter.cpp index b86b25969..eb7e39a4b 100644 --- a/src/flamegpu/io/JSONStateWriter.cpp +++ b/src/flamegpu/io/JSONStateWriter.cpp @@ -68,7 +68,7 @@ void JSONStateWriter::doWrite(T &writer) { // Timing Output writer.Key("timing"); writer.Bool(sim_cfg.timing); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Console mode writer.Key("console_mode"); writer.Bool(sim_cfg.console_mode); diff --git a/src/flamegpu/io/Telemetry.cpp b/src/flamegpu/io/Telemetry.cpp index 130204e11..b88802235 100644 --- a/src/flamegpu/io/Telemetry.cpp +++ b/src/flamegpu/io/Telemetry.cpp @@ -187,7 +187,7 @@ std::string Telemetry::generateData(std::string event_name, std::mapSimulationConfig().timing = static_cast(stoll(val)); } } else if (key == "console_mode") { -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION for (auto& c : val) c = static_cast(::tolower(c)); if (val == "true") { diff --git a/src/flamegpu/io/XMLStateWriter.cpp b/src/flamegpu/io/XMLStateWriter.cpp index c75a44780..ecdbce35e 100644 --- a/src/flamegpu/io/XMLStateWriter.cpp +++ b/src/flamegpu/io/XMLStateWriter.cpp @@ -126,7 +126,7 @@ int XMLStateWriter::writeStates(bool prettyPrint) { pListElement = doc.NewElement("timing"); pListElement->SetText(sim_cfg.timing); pSimCfg->InsertEndChild(pListElement); -#ifdef 
VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Console Mode pListElement = doc.NewElement("console_mode"); pListElement->SetText(sim_cfg.console_mode); diff --git a/src/flamegpu/model/AgentFunctionDescription.cpp b/src/flamegpu/model/AgentFunctionDescription.cpp index 3d2def9ce..35d72b8df 100644 --- a/src/flamegpu/model/AgentFunctionDescription.cpp +++ b/src/flamegpu/model/AgentFunctionDescription.cpp @@ -456,12 +456,12 @@ void AgentFunctionDescription::setRTCFunctionCondition(std::string func_cond_src // append jitify program string and include std::string func_cond_src_str = std::string(func_cond_name + "_program\n"); -#ifdef OUTPUT_RTC_DYNAMIC_FILES +#ifdef FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES func_cond_src_str.append("#line 1 \"").append(function->rtc_func_name).append("_impl_condition.cu\"\n"); #endif func_cond_src_str.append("#include \"flamegpu/runtime/DeviceAPI.cuh\"\n"); - // Append line pragma to correct file/line number in same format as OUTPUT_RTC_DYNAMIC_FILES -#ifndef OUTPUT_RTC_DYNAMIC_FILES + // Append line pragma to correct file/line number in same format as FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES +#ifndef FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES func_cond_src_str.append("#line 1 \"").append(function->rtc_func_name).append("_impl_condition.cu\"\n"); #endif // If src begins (\r)\n, trim that @@ -533,7 +533,7 @@ AgentFunctionDescription AgentDescription::newRTCFunction(const std::string& fun } // set the runtime agent function source in agent function data std::string func_src_str = std::string(function_name + "_program\n"); -#ifdef OUTPUT_RTC_DYNAMIC_FILES +#ifdef FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES func_src_str.append("#line 1 \"").append(code_func_name).append("_impl.cu\"\n"); #endif func_src_str.append("#include \"flamegpu/runtime/DeviceAPI.cuh\"\n"); @@ -545,8 +545,8 @@ AgentFunctionDescription AgentDescription::newRTCFunction(const std::string& fun std::string out_type_include_name = out_type_name.substr(out_type_name.find_last_of("::") + 1); func_src_str = 
func_src_str.append("#include \"flamegpu/runtime/messaging/"+ out_type_include_name + "/" + out_type_include_name + "Device.cuh\"\n"); } - // Append line pragma to correct file/line number in same format as OUTPUT_RTC_DYNAMIC_FILES -#ifndef OUTPUT_RTC_DYNAMIC_FILES + // Append line pragma to correct file/line number in same format as FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES +#ifndef FLAMEGPU_OUTPUT_RTC_DYNAMIC_FILES func_src_str.append("#line 1 \"").append(code_func_name).append("_impl.cu\"\n"); #endif // If src begins (\r)\n, trim that diff --git a/src/flamegpu/runtime/detail/curve/curve_rtc.cpp b/src/flamegpu/runtime/detail/curve/curve_rtc.cpp index 01525404a..d0261e75b 100644 --- a/src/flamegpu/runtime/detail/curve/curve_rtc.cpp +++ b/src/flamegpu/runtime/detail/curve/curve_rtc.cpp @@ -383,7 +383,7 @@ void CurveRTCHost::initHeaderEnvironment(const size_t env_buffer_len) { RTCEnvVariableProperties props = element.second; { getEnvVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getEnvVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getEnvVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getEnvVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; getEnvVariableImpl << " DTHROW(\"Environment property '%s' type mismatch.\\n\", name);\n"; getEnvVariableImpl << " return {};\n"; @@ -396,7 +396,7 @@ void CurveRTCHost::initHeaderEnvironment(const size_t env_buffer_len) { getEnvVariableImpl << " };\n"; } } - getEnvVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getEnvVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getEnvVariableImpl << " DTHROW(\"Environment property '%s' was not found.\\n\", name);\n"; getEnvVariableImpl << "#endif\n"; getEnvVariableImpl << " return {};\n"; @@ -409,7 +409,7 @@ void CurveRTCHost::initHeaderEnvironment(const size_t env_buffer_len) { RTCEnvVariableProperties props = element.second; if (props.elements > 
1) { getEnvArrayVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getEnvArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getEnvArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getEnvArrayVariableImpl << " const unsigned int t_index = type_decode::len_t * index + type_decode::len_t;\n"; getEnvArrayVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; getEnvArrayVariableImpl << " DTHROW(\"Environment array property '%s' type mismatch.\\n\", name);\n"; @@ -426,7 +426,7 @@ void CurveRTCHost::initHeaderEnvironment(const size_t env_buffer_len) { getEnvArrayVariableImpl << " };\n"; } } - getEnvArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getEnvArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getEnvArrayVariableImpl << " DTHROW(\"Environment array property '%s' was not found.\\n\", name);\n"; getEnvArrayVariableImpl << "#endif\n"; getEnvArrayVariableImpl << " return {};\n"; @@ -439,7 +439,7 @@ void CurveRTCHost::initHeaderEnvironment(const size_t env_buffer_len) { for (std::pair element : RTCEnvMacroProperties) { RTCEnvMacroPropertyProperties props = element.second; getMacroPropertyImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getMacroPropertyImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMacroPropertyImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMacroPropertyImpl << " if(sizeof(T) != " << element.second.type_size << ") {\n"; getMacroPropertyImpl << " DTHROW(\"Environment macro property '%s' type mismatch.\\n\", name);\n"; getMacroPropertyImpl << " } else if (I != " << element.second.dimensions[0] << " ||\n"; @@ -459,7 +459,7 @@ void CurveRTCHost::initHeaderEnvironment(const size_t env_buffer_len) { getMacroPropertyImpl << " };\n"; ++ct; } - getMacroPropertyImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMacroPropertyImpl << "#if 
!defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMacroPropertyImpl << " DTHROW(\"Environment macro property '%s' was not found.\\n\", name);\n"; getMacroPropertyImpl << " return ReadOnlyDeviceMacroProperty(nullptr, nullptr);\n"; getMacroPropertyImpl << "#else\n"; @@ -474,7 +474,7 @@ void CurveRTCHost::initHeaderEnvironment(const size_t env_buffer_len) { for (std::pair element : RTCEnvMacroProperties) { RTCEnvMacroPropertyProperties props = element.second; getMacroPropertyImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getMacroPropertyImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMacroPropertyImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMacroPropertyImpl << " if(sizeof(T) != " << element.second.type_size << ") {\n"; getMacroPropertyImpl << " DTHROW(\"Environment macro property '%s' type mismatch.\\n\", name);\n"; getMacroPropertyImpl << " } else if (I != " << element.second.dimensions[0] << " ||\n"; @@ -494,7 +494,7 @@ void CurveRTCHost::initHeaderEnvironment(const size_t env_buffer_len) { getMacroPropertyImpl << " };\n"; ++ct; } - getMacroPropertyImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMacroPropertyImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMacroPropertyImpl << " DTHROW(\"Environment macro property '%s' was not found.\\n\", name);\n"; getMacroPropertyImpl << " return DeviceMacroProperty(nullptr, nullptr);\n"; getMacroPropertyImpl << "#else\n"; @@ -512,7 +512,7 @@ void CurveRTCHost::initHeaderSetters() { RTCVariableProperties props = element.second; if (props.write) { setAgentVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - setAgentVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setAgentVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setAgentVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; setAgentVariableImpl << " DTHROW(\"Agent variable '%s' 
type mismatch during setVariable().\\n\", name);\n"; setAgentVariableImpl << " return;\n"; @@ -526,7 +526,7 @@ void CurveRTCHost::initHeaderSetters() { setAgentVariableImpl << " }\n"; } else { ++ct; } } - setAgentVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setAgentVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setAgentVariableImpl << " DTHROW(\"Agent variable '%s' was not found during setVariable().\\n\", name);\n"; setAgentVariableImpl << "#endif\n"; setHeaderPlaceholder("$DYNAMIC_SETAGENTVARIABLE_IMPL", setAgentVariableImpl.str()); @@ -539,7 +539,7 @@ void CurveRTCHost::initHeaderSetters() { RTCVariableProperties props = element.second; if (props.write) { setMessageVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - setMessageVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setMessageVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setMessageVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; setMessageVariableImpl << " DTHROW(\"Message variable '%s' type mismatch during setVariable().\\n\", name);\n"; setMessageVariableImpl << " return;\n"; @@ -553,7 +553,7 @@ void CurveRTCHost::initHeaderSetters() { setMessageVariableImpl << " }\n"; } else { ++ct; } } - setMessageVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setMessageVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setMessageVariableImpl << " DTHROW(\"Message variable '%s' was not found during setVariable().\\n\", name);\n"; setMessageVariableImpl << "#endif\n"; setHeaderPlaceholder("$DYNAMIC_SETMESSAGEVARIABLE_IMPL", setMessageVariableImpl.str()); @@ -566,7 +566,7 @@ void CurveRTCHost::initHeaderSetters() { RTCVariableProperties props = element.second; if (props.write) { setNewAgentVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - setNewAgentVariableImpl << "#if !defined(SEATBELTS) || 
SEATBELTS\n"; + setNewAgentVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setNewAgentVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; setNewAgentVariableImpl << " DTHROW(\"New agent variable '%s' type mismatch during setVariable().\\n\", name);\n"; setNewAgentVariableImpl << " return;\n"; @@ -580,7 +580,7 @@ void CurveRTCHost::initHeaderSetters() { setNewAgentVariableImpl << " }\n"; } else { ++ct; } } - setNewAgentVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setNewAgentVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setNewAgentVariableImpl << " DTHROW(\"New agent variable '%s' was not found during setVariable().\\n\", name);\n"; setNewAgentVariableImpl << "#endif\n"; setHeaderPlaceholder("$DYNAMIC_SETNEWAGENTVARIABLE_IMPL", setNewAgentVariableImpl.str()); @@ -595,7 +595,7 @@ void CurveRTCHost::initHeaderSetters() { RTCVariableProperties props = element.second; if (props.write && props.elements > 1) { setAgentArrayVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - setAgentArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setAgentArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setAgentArrayVariableImpl << " const unsigned int t_index = type_decode::len_t * array_index + type_decode::len_t;\n"; setAgentArrayVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; setAgentArrayVariableImpl << " DTHROW(\"Agent array variable '%s' type mismatch during setVariable().\\n\", name);\n"; @@ -613,7 +613,7 @@ void CurveRTCHost::initHeaderSetters() { setAgentArrayVariableImpl << " }\n"; } else { ++ct; } } - setAgentArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setAgentArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setAgentArrayVariableImpl << " DTHROW(\"Agent array variable '%s' was not found during 
setVariable().\\n\", name);\n"; setAgentArrayVariableImpl << "#endif\n"; setHeaderPlaceholder("$DYNAMIC_SETAGENTARRAYVARIABLE_IMPL", setAgentArrayVariableImpl.str()); @@ -628,7 +628,7 @@ void CurveRTCHost::initHeaderSetters() { RTCVariableProperties props = element.second; if (props.write && props.elements > 1) { setMessageArrayVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - setMessageArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setMessageArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setMessageArrayVariableImpl << " const unsigned int t_index = type_decode::len_t * array_index + type_decode::len_t;\n"; setMessageArrayVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; setMessageArrayVariableImpl << " DTHROW(\"Message array variable '%s' type mismatch during setVariable().\\n\", name);\n"; @@ -646,7 +646,7 @@ void CurveRTCHost::initHeaderSetters() { setMessageArrayVariableImpl << " }\n"; } else { ++ct; } } - setMessageArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setMessageArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setMessageArrayVariableImpl << " DTHROW(\"Message array variable '%s' was not found during setVariable().\\n\", name);\n"; setMessageArrayVariableImpl << "#endif\n"; setHeaderPlaceholder("$DYNAMIC_SETMESSAGEARRAYVARIABLE_IMPL", setMessageArrayVariableImpl.str()); @@ -661,7 +661,7 @@ void CurveRTCHost::initHeaderSetters() { RTCVariableProperties props = element.second; if (props.write && props.elements > 1) { setNewAgentArrayVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - setNewAgentArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setNewAgentArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setNewAgentArrayVariableImpl << " const unsigned int t_index = type_decode::len_t * array_index + 
type_decode::len_t;\n"; setNewAgentArrayVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; setNewAgentArrayVariableImpl << " DTHROW(\"New agent array variable '%s' type mismatch during setVariable().\\n\", name);\n"; @@ -679,7 +679,7 @@ void CurveRTCHost::initHeaderSetters() { setNewAgentArrayVariableImpl << " }\n"; } else { ++ct; } } - setNewAgentArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + setNewAgentArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; setNewAgentArrayVariableImpl << " DTHROW(\"New agent array variable '%s' was not found during setVariable().\\n\", name);\n"; setNewAgentArrayVariableImpl << "#endif\n"; setHeaderPlaceholder("$DYNAMIC_SETNEWAGENTARRAYVARIABLE_IMPL", setNewAgentArrayVariableImpl.str()); @@ -694,7 +694,7 @@ void CurveRTCHost::initHeaderGetters() { RTCVariableProperties props = element.second; if (props.read) { getAgentVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getAgentVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getAgentVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getAgentVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; getAgentVariableImpl << " DTHROW(\"Agent variable '%s' type mismatch during getVariable().\\n\", name);\n"; getAgentVariableImpl << " return {};\n"; @@ -707,7 +707,7 @@ void CurveRTCHost::initHeaderGetters() { getAgentVariableImpl << " }\n"; } else { ++ct; } } - getAgentVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getAgentVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getAgentVariableImpl << " DTHROW(\"Agent variable '%s' was not found during getVariable().\\n\", name);\n"; getAgentVariableImpl << "#endif\n"; getAgentVariableImpl << " return {};\n"; @@ -721,7 +721,7 @@ void CurveRTCHost::initHeaderGetters() { RTCVariableProperties props = element.second; if 
(props.read) { getMessageVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getMessageVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMessageVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMessageVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; getMessageVariableImpl << " DTHROW(\"Message variable '%s' type mismatch during getVariable().\\n\", name);\n"; getMessageVariableImpl << " return {};\n"; @@ -734,7 +734,7 @@ void CurveRTCHost::initHeaderGetters() { getMessageVariableImpl << " }\n"; } else { ++ct; } } - getMessageVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMessageVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMessageVariableImpl << " DTHROW(\"Message variable '%s' was not found during getVariable().\\n\", name);\n"; getMessageVariableImpl << "#endif\n"; getMessageVariableImpl << " return {};\n"; @@ -748,7 +748,7 @@ void CurveRTCHost::initHeaderGetters() { RTCVariableProperties props = element.second; if (props.read && props.elements == 1) { // GLM does not support __ldg() so should not use this getAgentVariableLDGImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getAgentVariableLDGImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getAgentVariableLDGImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getAgentVariableLDGImpl << " if(sizeof(T) != " << element.second.type_size * element.second.elements << ") {\n"; getAgentVariableLDGImpl << " DTHROW(\"Agent variable '%s' type mismatch during getVariable().\\n\", name);\n"; getAgentVariableLDGImpl << " return {};\n"; @@ -759,7 +759,7 @@ void CurveRTCHost::initHeaderGetters() { ++ct; // Prev was part of the return line, but don't want confusion } else { ++ct; } } - getAgentVariableLDGImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getAgentVariableLDGImpl << "#if !defined(FLAMEGPU_SEATBELTS) || 
FLAMEGPU_SEATBELTS\n"; getAgentVariableLDGImpl << " DTHROW(\"Agent variable '%s' was not found during getVariable().\\n\", name);\n"; getAgentVariableLDGImpl << "#endif\n"; getAgentVariableLDGImpl << " return {};\n"; @@ -773,7 +773,7 @@ void CurveRTCHost::initHeaderGetters() { RTCVariableProperties props = element.second; if (props.read && props.elements == 1) { // GLM does not support __ldg() so should not use this getMessageVariableLDGImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getMessageVariableLDGImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMessageVariableLDGImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMessageVariableLDGImpl << " if(sizeof(T) != " << element.second.type_size << ") {\n"; getMessageVariableLDGImpl << " DTHROW(\"Message variable '%s' type mismatch during getVariable().\\n\", name);\n"; getMessageVariableLDGImpl << " return {};\n"; @@ -784,7 +784,7 @@ void CurveRTCHost::initHeaderGetters() { ++ct; // Prev was part of the return line, but don't want confusion } else { ++ct; } } - getMessageVariableLDGImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMessageVariableLDGImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMessageVariableLDGImpl << " DTHROW(\"Message variable '%s' was not found during getVariable().\\n\", name);\n"; getMessageVariableLDGImpl << "#endif\n"; getMessageVariableLDGImpl << " return {};\n"; @@ -800,7 +800,7 @@ void CurveRTCHost::initHeaderGetters() { RTCVariableProperties props = element.second; if (props.read && props.elements > 1) { getAgentArrayVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getAgentArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getAgentArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getAgentArrayVariableImpl << " const unsigned int t_index = type_decode::len_t * array_index + type_decode::len_t;\n"; getAgentArrayVariableImpl << " 
if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; getAgentArrayVariableImpl << " DTHROW(\"Agent array variable '%s' type mismatch during getVariable().\\n\", name);\n"; @@ -817,7 +817,7 @@ void CurveRTCHost::initHeaderGetters() { getAgentArrayVariableImpl << " };\n"; } else { ++ct; } } - getAgentArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getAgentArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getAgentArrayVariableImpl << " DTHROW(\"Agent array variable '%s' was not found during getVariable().\\n\", name);\n"; getAgentArrayVariableImpl << "#endif\n"; getAgentArrayVariableImpl << " return {};\n"; @@ -833,7 +833,7 @@ void CurveRTCHost::initHeaderGetters() { RTCVariableProperties props = element.second; if (props.read && props.elements > 1) { getMessageArrayVariableImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getMessageArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMessageArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMessageArrayVariableImpl << " const unsigned int t_index = type_decode::len_t * array_index + type_decode::len_t;\n"; getMessageArrayVariableImpl << " if(sizeof(type_decode::type_t) != " << element.second.type_size << ") {\n"; getMessageArrayVariableImpl << " DTHROW(\"Message array variable '%s' type mismatch during getVariable().\\n\", name);\n"; @@ -850,7 +850,7 @@ void CurveRTCHost::initHeaderGetters() { getMessageArrayVariableImpl << " };\n"; } else { ++ct; } } - getMessageArrayVariableImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMessageArrayVariableImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMessageArrayVariableImpl << " DTHROW(\"Message array variable '%s' was not found during getVariable().\\n\", name);\n"; getMessageArrayVariableImpl << "#endif\n"; getMessageArrayVariableImpl << " return {};\n"; @@ -866,7 +866,7 @@ void 
CurveRTCHost::initHeaderGetters() { RTCVariableProperties props = element.second; if (props.read && props.elements > 1) { // GLM does not support __ldg() so should not use this getAgentArrayVariableLDGImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getAgentArrayVariableLDGImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getAgentArrayVariableLDGImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getAgentArrayVariableLDGImpl << " if(sizeof(T) != " << element.second.type_size << ") {\n"; getAgentArrayVariableLDGImpl << " DTHROW(\"Agent array variable '%s' type mismatch during getVariable().\\n\", name);\n"; getAgentArrayVariableLDGImpl << " return {};\n"; @@ -883,7 +883,7 @@ void CurveRTCHost::initHeaderGetters() { ++ct; // Prev was part of the return line, but don't want confusion } else { ++ct; } } - getAgentArrayVariableLDGImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getAgentArrayVariableLDGImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getAgentArrayVariableLDGImpl << " DTHROW(\"Agent array variable '%s' was not found during getVariable().\\n\", name);\n"; getAgentArrayVariableLDGImpl << "#endif\n"; getAgentArrayVariableLDGImpl << " return {};\n"; @@ -899,7 +899,7 @@ void CurveRTCHost::initHeaderGetters() { RTCVariableProperties props = element.second; if (props.read && props.elements > 1) { // GLM does not support __ldg() so should not use this getMessageArrayVariableLDGImpl << " if (strings_equal(name, \"" << element.first << "\")) {\n"; - getMessageArrayVariableLDGImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMessageArrayVariableLDGImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMessageArrayVariableLDGImpl << " if(sizeof(T) != " << element.second.type_size << ") {\n"; getMessageArrayVariableLDGImpl << " DTHROW(\"Message array variable '%s' type mismatch during getVariable().\\n\", name);\n"; getMessageArrayVariableLDGImpl << " return {};\n"; @@ -916,7 +916,7 @@ 
void CurveRTCHost::initHeaderGetters() { ++ct; // Prev was part of the return line, but don't want confusion } else { ++ct; } } - getMessageArrayVariableLDGImpl << "#if !defined(SEATBELTS) || SEATBELTS\n"; + getMessageArrayVariableLDGImpl << "#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS\n"; getMessageArrayVariableLDGImpl << " DTHROW(\"Message array variable '%s' was not found during getVariable().\\n\", name);\n"; getMessageArrayVariableLDGImpl << "#endif\n"; getMessageArrayVariableLDGImpl << " return {};\n"; diff --git a/src/flamegpu/sim/Simulation.cu b/src/flamegpu/sim/Simulation.cu index beccdbf9f..4da2bf6e6 100644 --- a/src/flamegpu/sim/Simulation.cu +++ b/src/flamegpu/sim/Simulation.cu @@ -297,7 +297,7 @@ int Simulation::checkArgs(int argc, const char** argv) { config.truncate_log_files = true; continue; } -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // -c/--console, Renders the visualisation inert if (arg.compare("--console") == 0 || arg.compare("-c") == 0) { config.console_mode = true; @@ -331,7 +331,7 @@ void Simulation::printHelp(const char* executable) { printf(line_fmt, "-v, --verbose", "Print config, progress and timing (-t) information to console."); printf(line_fmt, "-t, --timing", "Output timing information to stdout"); printf(line_fmt, "-u, --silence-unknown-args", "Silence warnings for unknown arguments passed after this flag."); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION printf(line_fmt, "-c, --console", "Console mode, disable the visualisation"); #endif printHelp_derived(); diff --git a/src/flamegpu/util/detail/JitifyCache.cu b/src/flamegpu/util/detail/JitifyCache.cu index f34a23b51..269f0d18a 100644 --- a/src/flamegpu/util/detail/JitifyCache.cu +++ b/src/flamegpu/util/detail/JitifyCache.cu @@ -291,21 +291,21 @@ std::unique_ptr JitifyCache::compileKernel(const std::strin for (const auto &p : getIncludeDirs()) options.push_back(std::string("-I" + p.generic_string())); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM // GLM 
headers increase build time ~5x, so only enable glm if user is using it if (kernel_src.find("glm") != std::string::npos) { - options.push_back(std::string("-I") + GLM_PATH); - options.push_back(std::string("-DUSE_GLM")); + options.push_back(std::string("-I") + FLAMEGPU_GLM_PATH); + options.push_back(std::string("-DFLAMEGPU_USE_GLM")); } #endif // Forward the curand Engine request -#if defined(CURAND_MRG32k3a) - options.push_back(std::string("-DCURAND_MRG32k3a")); -#elif defined(CURAND_Philox4_32_10) - options.push_back(std::string("-DCURAND_Philox4_32_10")); -#elif defined(CURAND_XORWOW) - options.push_back(std::string("-DCURAND_XORWOW")); +#if defined(FLAMEGPU_CURAND_MRG32k3a) + options.push_back(std::string("-DFLAMEGPU_CURAND_MRG32k3a")); +#elif defined(FLAMEGPU_CURAND_Philox4_32_10) + options.push_back(std::string("-DFLAMEGPU_CURAND_Philox4_32_10")); +#elif defined(FLAMEGPU_CURAND_XORWOW) + options.push_back(std::string("-DFLAMEGPU_CURAND_XORWOW")); #endif // Set the cuda compuate capability architecture to optimize / generate for, based on the values supported by the current dynamiclaly linked nvrtc and the device in question. @@ -348,11 +348,11 @@ std::unique_ptr JitifyCache::compileKernel(const std::strin options.push_back("--std=c++17"); #endif - // If SEATBELTS is defined and false, forward it as off, otherwise forward it as on. -#if !defined(SEATBELTS) || SEATBELTS - options.push_back("--define-macro=SEATBELTS=1"); + // If FLAMEGPU_SEATBELTS is defined and false, forward it as off, otherwise forward it as on. +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS + options.push_back("--define-macro=FLAMEGPU_SEATBELTS=1"); #else - options.push_back("--define-macro=SEATBELTS=0"); + options.push_back("--define-macro=FLAMEGPU_SEATBELTS=0"); #endif // cuda.h @@ -465,7 +465,7 @@ std::unique_ptr JitifyCache::loadKernel(const std::string & const std::string arch = std::to_string((status == cudaSuccess) ? 
compute_capability::getComputeCapability(currentDeviceIdx) : 0); status = cudaRuntimeGetVersion(¤tDeviceIdx); const std::string cuda_version = std::to_string((status == cudaSuccess) ? currentDeviceIdx : 0); - const std::string seatbelts = std::to_string(SEATBELTS); + const std::string seatbelts = std::to_string(FLAMEGPU_SEATBELTS); // Cat kernel, dynamic header, header version const std::string long_reference = kernel_src + dynamic_header; // Don't need to include rest, they are explicit in short reference/filename // Generate short reference string @@ -475,14 +475,14 @@ std::unique_ptr JitifyCache::loadKernel(const std::string & arch + "_" + seatbelts + "_" + std::string(flamegpu::VERSION_FULL) + "_" + -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM "glm_" + #endif -#if defined(CURAND_MRG32k3a) +#if defined(FLAMEGPU_CURAND_MRG32k3a) "MRG_" + -#elif defined(CURAND_Philox4_32_10) +#elif defined(FLAMEGPU_CURAND_Philox4_32_10) "PHILOX_" + -#elif defined(CURAND_XORWOW) +#elif defined(FLAMEGPU_CURAND_XORWOW) "XORWOW_" + #endif // Use jitify hash methods for consistent hashing between OSs @@ -569,7 +569,7 @@ void JitifyCache::clearDiskCache() { } JitifyCache::JitifyCache() : use_memory_cache(true) -#ifndef DISABLE_RTC_DISK_CACHE +#ifndef FLAMEGPU_DISABLE_RTC_DISK_CACHE , use_disk_cache(true) { } #else , use_disk_cache(false) { } diff --git a/src/flamegpu/util/detail/compute_capability.cu b/src/flamegpu/util/detail/compute_capability.cu index 266cb459b..85a1086c0 100644 --- a/src/flamegpu/util/detail/compute_capability.cu +++ b/src/flamegpu/util/detail/compute_capability.cu @@ -35,8 +35,8 @@ int compute_capability::getComputeCapability(int deviceIndex) { } int compute_capability::minimumCompiledComputeCapability() { - #if defined(MIN_CUDA_ARCH) - return MIN_CUDA_ARCH; + #if defined(FLAMEGPU_MIN_CUDA_ARCH) + return FLAMEGPU_MIN_CUDA_ARCH; #else // Return 0 as a default minimum? 
return 0; diff --git a/src/flamegpu/visualiser/color/DiscreteColor.cpp b/src/flamegpu/visualiser/color/DiscreteColor.cpp index 96e373ec9..656830f4d 100644 --- a/src/flamegpu/visualiser/color/DiscreteColor.cpp +++ b/src/flamegpu/visualiser/color/DiscreteColor.cpp @@ -1,4 +1,3 @@ -// @todo - ifdef VISUALISTION? #include "flamegpu/visualiser/color/DiscreteColor.h" #include "flamegpu/visualiser/color/Palette.h" diff --git a/src/flamegpu/visualiser/color/HSVInterpolation.cpp b/src/flamegpu/visualiser/color/HSVInterpolation.cpp index 487a0dcaa..50ac86b0a 100644 --- a/src/flamegpu/visualiser/color/HSVInterpolation.cpp +++ b/src/flamegpu/visualiser/color/HSVInterpolation.cpp @@ -1,4 +1,4 @@ -// @todo - ifdef VISUALISATION +// @todo - ifdef FLAMEGPU_VISUALISATION #include "flamegpu/visualiser/color/HSVInterpolation.h" #include diff --git a/src/flamegpu/visualiser/color/StaticColor.cpp b/src/flamegpu/visualiser/color/StaticColor.cpp index c45f07546..ffe7f9416 100644 --- a/src/flamegpu/visualiser/color/StaticColor.cpp +++ b/src/flamegpu/visualiser/color/StaticColor.cpp @@ -1,4 +1,4 @@ -// @todo - ifdef VISUALISATION +// @todo - ifdef FLAMEGPU_VISUALISATION #include "flamegpu/visualiser/color/StaticColor.h" #include diff --git a/src/flamegpu/visualiser/color/ViridisInterpolation.cpp b/src/flamegpu/visualiser/color/ViridisInterpolation.cpp index 6b3a05cfa..24c2d2eee 100644 --- a/src/flamegpu/visualiser/color/ViridisInterpolation.cpp +++ b/src/flamegpu/visualiser/color/ViridisInterpolation.cpp @@ -1,4 +1,4 @@ -// @todo - #ifdef VISUALISATION +// @todo - #ifdef FLAMEGPU_VISUALISATION #include "flamegpu/visualiser/color/ViridisInterpolation.h" #include diff --git a/swig/CMakeLists.txt b/swig/CMakeLists.txt index c39270f66..902071824 100644 --- a/swig/CMakeLists.txt +++ b/swig/CMakeLists.txt @@ -1,5 +1,5 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) 
include(FetchContent) @@ -152,6 +152,14 @@ if(NOT SWIG_FOUND) find_package(SWIG ${SWIG_MINIMUM_SUPPORTED_VERSION} REQUIRED) endif() +# Mark some CACHE vars advanced for a cleaner GUI +mark_as_advanced(FETCHCONTENT_SOURCE_DIR_SWIG) +mark_as_advanced(FETCHCONTENT_QUIET) +mark_as_advanced(FETCHCONTENT_BASE_DIR) +mark_as_advanced(FETCHCONTENT_FULLY_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED) +mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_SWIG) + # Set flags for SWIG. set(CMAKE_SWIG_FLAGS) diff --git a/swig/python/CMakeLists.txt b/swig/python/CMakeLists.txt index dfffc64db..d365b39ab 100644 --- a/swig/python/CMakeLists.txt +++ b/swig/python/CMakeLists.txt @@ -1,11 +1,19 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 # for multi-config generators, CMake must be >= 3.20 for python bindings due to use of generator expressions in outputs/byproducts -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) # Defines multiple CMake targets and custom commands to build swig bindings, create a python wheel and (optionally) install it into a venv. # defines `pyflamegpu` - the user-facing target which creates the wheel. # It depends on `pyflamegpu_swig` which performs the expensive swig phase (i.e. pyflamepgu.dll/so/pyd). +# Define a few advanced options which are only relevant to this file +# By default, build into a venv, otherwise it is set to ON anyway. +option(FLAMEGPU_BUILD_PYTHON_VENV "Enable the use of a venv for swig/python installation" ON) +mark_as_advanced(FLAMEGPU_BUILD_PYTHON_VENV) +# Add option to embed the cuda version in the python local version, for pseudo downstream releases / to differentiate wheel files. +option(FLAMEGPU_BUILD_PYTHON_LOCALVERSION "Embed CUDA version for the build in the local information" ON) +mark_as_advanced(FLAMEGPU_BUILD_PYTHON_LOCALVERSION) + # Get the root of the repository to find other CMake files etc. 
get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../ REALPATH) @@ -14,10 +22,6 @@ find_package(CUDAToolkit REQUIRED) # include CMake function to enable setting of warning suppressions include(${FLAMEGPU_ROOT}/cmake/warnings.cmake) -# include CMake function to enable setting of gencode flags -include(${FLAMEGPU_ROOT}/cmake/cuda_arch.cmake) -# Set the C++ and CUDA standard to use -include(${FLAMEGPU_ROOT}/cmake/cxxstd.cmake) # Get FLAMEGPU Version information include(${FLAMEGPU_ROOT}/cmake/version.cmake) @@ -97,14 +101,19 @@ set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} PROPERTY CUDA_SEPARABLE_COMPILATI # Build with the SWIG pre-processor macro enabled set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS "SWIG") -# Set the gencode arguments for the target -SetCUDAGencodes(TARGET "${PYTHON_SWIG_TARGET_NAME}") +# Require C++17 as a public target property for C++ and CUDA, with no extensions, and the standard is required +target_compile_features(${PYTHON_SWIG_TARGET_NAME} PUBLIC cxx_std_17) +target_compile_features(${PYTHON_SWIG_TARGET_NAME} PUBLIC cuda_std_17) +set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} PROPERTY CXX_EXTENSIONS OFF) +set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} PROPERTY CUDA_EXTENSIONS OFF) +set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) +set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} PROPERTY CUDA_STANDARD_REQUIRED ON) # Disable all compiler warnings, as we do not control the generated code -DisableCompilerWarnings(TARGET "${PYTHON_SWIG_TARGET_NAME}") +flamegpu_disable_compiler_warnings(TARGET "${PYTHON_SWIG_TARGET_NAME}") # Suppress additional warnings -SuppressSomeCompilerWarnings(TARGET "${PYTHON_SWIG_TARGET_NAME}") +flamegpu_suppress_some_compiler_warnings(TARGET "${PYTHON_SWIG_TARGET_NAME}") # Set bigobj property for windows compiler, required by the very large generated swig file if(MSVC) @@ -114,46 +123,14 @@ endif() # set include 
directories for module build target_include_directories(${PYTHON_SWIG_TARGET_NAME} PRIVATE ${Python3_INCLUDE_DIRS}) -# Handle user options -# This does not use common/CommonCompilerSettings as it appears to rely on swig specific properties (although this is untested.) -if (VISUALISATION) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS VISUALISATION) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS VISUALISATION) -endif() -if (SEATBELTS) - # Debug always has seatbelts. - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS SEATBELTS=1) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS SEATBELTS=1) -else() - # Debug builds still have seatbelts. Non debug builds do not. - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS $,SEATBELTS=1,SEATBELTS=0>) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS $,SEATBELTS=1,SEATBELTS=0>) -endif() -# Build SWIG for the corresponding CURAND -string(TOUPPER CURAND_ENGINE CURAND_ENGINE_UPPER) -if(${CURAND_ENGINE_UPPER} STREQUAL "MRG") - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS CURAND_MRG32k3a) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS CURAND_MRG32k3a) -elseif(${CURAND_ENGINE_UPPER} STREQUAL "PHILOX") - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS CURAND_Philox4_32_10) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS CURAND_Philox4_32_10) -elseif(${CURAND_ENGINE_UPPER} STREQUAL "XORWOW") - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS CURAND_XORWOW) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY 
SWIG_GENERATED_COMPILE_DEFINITIONS CURAND_XORWOW) endif() -if (NOT RTC_DISK_CACHE) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS DISABLE_RTC_DISK_CACHE) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS DISABLE_RTC_DISK_CACHE) -endif() -if (EXPORT_RTC_SOURCES) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS OUTPUT_RTC_DYNAMIC_FILES) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS OUTPUT_RTC_DYNAMIC_FILES) -endif () -# Block windows.h min and max defines. -if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS NOMINMAX) - set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS NOMINMAX) -endif() +# Get the list of compile definitions from the FLAMEGPU target, and also set them for swig generation and swig compilation +# This removes duplication of setting FLAMEGPU compiler definitions +get_target_property(flamegpu_compile_defines flamegpu COMPILE_DEFINITIONS) +foreach(flamegpu_definition ${flamegpu_compile_defines}) + set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_COMPILE_DEFINITIONS "${flamegpu_definition}") + set_property(TARGET ${PYTHON_SWIG_TARGET_NAME} APPEND PROPERTY SWIG_GENERATED_COMPILE_DEFINITIONS "${flamegpu_definition}") +endforeach() +unset(flamegpu_compile_defines) # link with the static flamegpu, inherriting links on nvrtc and the cuda driver api etc target_link_libraries(${PYTHON_SWIG_TARGET_NAME} PRIVATE flamegpu) @@ -195,10 +172,24 @@ endforeach() # Build a list of OS specific python package_data entries. set(FLAMEGPU_PYTHON_PACKAGE_DATA_OS_SPECIFIC "") -if(WIN32) - # @todo - See if there is a dynamic way to achieve this through target properties?
- set(FLAMEGPU_PYTHON_PACKAGE_DATA_OS_SPECIFIC "${FLAMEGPU_PYTHON_PACKAGE_DATA_OS_SPECIFIC}'glew32.dll', 'SDL2.dll', 'DevIL.dll'") +if (FLAMEGPU_VISUALISATION) + if(COMMAND flamegpu_visualiser_get_runtime_depenencies) + flamegpu_visualiser_get_runtime_depenencies(vis_runtime_dependencies) + foreach(vis_runtime_dependency ${vis_runtime_dependencies}) + # get the filename from the abs filepath + get_filename_component(vis_runtime_dependency_fname ${vis_runtime_dependency} NAME) + list(APPEND FLAMEGPU_PYTHON_PACKAGE_DATA_OS_SPECIFIC "'${vis_runtime_dependency_fname}'") + unset(vis_runtime_dependency_fname) + endforeach() + unset(vis_runtime_dependencies) + endif() +endif() +# Join the cmake list with commas to form the value for conf.py +if(FLAMEGPU_PYTHON_PACKAGE_DATA_OS_SPECIFIC) + string (REPLACE ";" ", " FLAMEGPU_PYTHON_PACKAGE_DATA_OS_SPECIFIC "${FLAMEGPU_PYTHON_PACKAGE_DATA_OS_SPECIFIC}") endif() + + # configure the python setup.py and __init__.py files for packaging and output to the final lib output folder. Must configure into a temp dir then call file(GENERATE) due to use of generator expression in PYTHON_LIB_OUTPUT_DIRECTORY configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in @@ -217,7 +208,7 @@ file(GENERATE INPUT ${PYTHON_LIB_TEMP_DIRECTORY}/__init__.py.in) # Function to find if python module MODULE_NAME is available, and error if it is not available. -function(search_python_module MODULE_NAME) +function(flamegpu_search_python_module MODULE_NAME) execute_process( COMMAND ${Python3_EXECUTABLE} -c "import ${MODULE_NAME}; print(${MODULE_NAME}.__version__) if hasattr(${MODULE_NAME}, '__version__') else print('Unknown');" RESULT_VARIABLE _RESULT @@ -236,9 +227,9 @@ function(search_python_module MODULE_NAME) endfunction() # Look for required python modules to build the python module, error if they are not found.
-search_python_module(setuptools) -search_python_module(wheel) -search_python_module(build) +flamegpu_search_python_module(setuptools) +flamegpu_search_python_module(wheel) +flamegpu_search_python_module(build) ## ------ # Define custom commands to produce files in the current cmake directory, a custom target which the user invokes to build the python wheel with appropraite dependencies configured, and any post-build steps required. @@ -291,49 +282,28 @@ foreach(PYTHON_CODEGEN_FILE IN LISTS PYTHON_CODEGEN_SRC_FILES) endforeach() # Copy the visualisation dlls if required, this must occur before the wheel is built -if (VISUALISATION) - # Copy DLLs - if(WIN32) - # sdl - get_filename_component(SDL2_RUNTIME_LIB_NAME ${SDL2_RUNTIME_LIBRARIES} NAME) - set(SDL2_PYTHON_OUTPUT_DESTINATION "${PYTHON_FLAMEGPU_LIB_OUTPUT_MODULE_DIR}/${SDL2_RUNTIME_LIB_NAME}") - add_custom_command( - OUTPUT ${SDL2_PYTHON_OUTPUT_DESTINATION} - DEPENDS ${SDL2_RUNTIME_LIBRARIES} - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${SDL2_RUNTIME_LIBRARIES} ${SDL2_PYTHON_OUTPUT_DESTINATION} - COMMENT "Copying ${SDL2_RUNTIME_LIB_NAME} to ${SDL2_PYTHON_OUTPUT_DESTINATION}" - ) - list(APPEND PYTHON_MODULE_TARGET_NAME_DEPENDS "${SDL2_PYTHON_OUTPUT_DESTINATION}") - unset(SDL2_PYTHON_OUTPUT_DESTINATION) - unset(SDL2_RUNTIME_LIB_NAME) - # glew - get_filename_component(GLEW_RUNTIME_LIB_NAME ${GLEW_RUNTIME_LIBRARIES} NAME) - set(GLEW_PYTHON_OUTPUT_DESTINATION "${PYTHON_FLAMEGPU_LIB_OUTPUT_MODULE_DIR}/${GLEW_RUNTIME_LIB_NAME}") - add_custom_command( - OUTPUT ${GLEW_PYTHON_OUTPUT_DESTINATION} - DEPENDS ${GLEW_RUNTIME_LIBRARIES} - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${GLEW_RUNTIME_LIBRARIES} ${GLEW_PYTHON_OUTPUT_DESTINATION} - COMMENT "Copying ${GLEW_RUNTIME_LIB_NAME} to ${GLEW_PYTHON_OUTPUT_DESTINATION}" - ) - list(APPEND PYTHON_MODULE_TARGET_NAME_DEPENDS "${GLEW_PYTHON_OUTPUT_DESTINATION}") - unset(GLEW_PYTHON_OUTPUT_DESTINATION) - unset(GLEW_RUNTIME_LIB_NAME) - # DevIL - 
get_filename_component(IL_RUNTIME_LIB_NAME ${IL_RUNTIME_LIBRARIES} NAME) - set(IL_PYTHON_OUTPUT_DESTINATION "${PYTHON_FLAMEGPU_LIB_OUTPUT_MODULE_DIR}/${IL_RUNTIME_LIB_NAME}") - add_custom_command( - OUTPUT ${IL_PYTHON_OUTPUT_DESTINATION} - DEPENDS ${IL_RUNTIME_LIBRARIES} - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${IL_RUNTIME_LIBRARIES} ${IL_PYTHON_OUTPUT_DESTINATION} - COMMENT "Copying ${IL_RUNTIME_LIB_NAME} to ${IL_PYTHON_OUTPUT_DESTINATION}" - ) - list(APPEND PYTHON_MODULE_TARGET_NAME_DEPENDS "${IL_PYTHON_OUTPUT_DESTINATION}") - unset(IL_PYTHON_OUTPUT_DESTINATION) - unset(IL_RUNTIME_LIB_NAME) +if (FLAMEGPU_VISUALISATION) + if(COMMAND flamegpu_visualiser_get_runtime_depenencies) + flamegpu_visualiser_get_runtime_depenencies(vis_runtime_dependencies) + foreach(vis_runtime_dependency ${vis_runtime_dependencies}) + # get the filename from the abs filepath + get_filename_component(vis_runtime_dependency_fname ${vis_runtime_dependency} NAME) + set(vis_runtime_dependency_dest "${PYTHON_FLAMEGPU_LIB_OUTPUT_MODULE_DIR}/${vis_runtime_dependency_fname}") + add_custom_command( + OUTPUT ${vis_runtime_dependency_dest} + DEPENDS ${vis_runtime_dependency} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${vis_runtime_dependency} ${vis_runtime_dependency_dest} + COMMENT "Copying ${vis_runtime_dependency_fname} to ${vis_runtime_dependency_dest}" + ) + # Add the file to the list of dependencies for the python module target + list(APPEND PYTHON_MODULE_TARGET_NAME_DEPENDS "${vis_runtime_dependency_dest}") + unset(vis_runtime_dependency_fname) + unset(vis_runtime_dependency_dest) + endforeach() + unset(vis_runtime_dependencies) endif() endif() - # Define a custom commmand to copy the swig generated .py file to the correct destination for wheel generation. This output is then appended to the list of dependencies for the target which builds the wheel.
set(PYTHON_FLAMEGPU_LIB_OUTPUT_MODULE_PY "${PYTHON_LIB_OUTPUT_DIRECTORY}/${PYTHON_MODULE_NAME}/${PYTHON_MODULE_NAME}.py") set(PYTHON_FLAMEGPU_TEMP_MODULE_PY ${PYTHON_LIB_TEMP_DIRECTORY}/${PYTHON_MODULE_NAME}/${PYTHON_MODULE_NAME}.py) @@ -378,9 +348,9 @@ add_custom_target(${PYTHON_MODULE_TARGET_NAME} add_dependencies(${PYTHON_MODULE_TARGET_NAME} ${PYTHON_SWIG_TARGET_NAME}) # Build Virtual Environment for python testing and install the packaged wheel -if(BUILD_SWIG_PYTHON_VENV) +if(FLAMEGPU_BUILD_PYTHON_VENV) # Look for python module venv, error if not found - search_python_module(venv) + flamegpu_search_python_module(venv) # Testing using a virtual environment set(VENV_EXECUTABLE ${Python3_EXECUTABLE} -m venv) set(VENV_DIR ${PYTHON_LIB_OUTPUT_DIRECTORY}/venv) diff --git a/swig/python/flamegpu.i b/swig/python/flamegpu.i index 5c7392c2c..068af868a 100644 --- a/swig/python/flamegpu.i +++ b/swig/python/flamegpu.i @@ -550,7 +550,7 @@ class ModelDescription; // For DependencyGraph circular dependency. } // If visualisation is enabled, then CUDASimulation provides access to the visualisation class. This requires a forward declaraiton to place it in the correct namespace. -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION namespace flamegpu { namespace visualiser { class ModelVis; @@ -987,7 +987,7 @@ TEMPLATE_VARIABLE_INSTANTIATE_FLOATS(logNormal, flamegpu::HostRandom::logNormal) // Include visualisation support if enabled. 
-#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION // Include relevant headers in the generated c++ // @todo - Need to put the vis repo into a subfolder for more sensible include paths %{ @@ -1093,8 +1093,8 @@ TEMPLATE_VARIABLE_INSTANTIATE_FLOATS(logNormal, flamegpu::HostRandom::logNormal) TEMPLATE_VARIABLE_ARRAY_INSTANTIATE_INTS(newEnvironmentPropertyToggle, flamegpu::visualiser::PanelVis::newEnvironmentPropertyToggle) - // Redefine the value to ensure it makes it into the python modules - #undef VISUALISATION + // Redefine the value to ensure it makes it into the python modules (without the FLAMEGPU_ prefix) + #undef FLAMEGPU_VISUALISATION #define VISUALISATION true #else // Define in the python module as false. @@ -1102,11 +1102,11 @@ TEMPLATE_VARIABLE_INSTANTIATE_FLOATS(logNormal, flamegpu::HostRandom::logNormal) #endif // Define pyflamegpu.SEATBELTS as true or false as appropriate, so tests can be disabled / enabled -#if defined(SEATBELTS) && SEATBELTS - #undef SEATBELTS +#if defined(FLAMEGPU_SEATBELTS) && FLAMEGPU_SEATBELTS + #undef FLAMEGPU_SEATBELTS #define SEATBELTS true #elif defined(SEATBELTS) - #undef SEATBELTS + #undef FLAMEGPU_SEATBELTS #define SEATBELTS false #else #define SEATBELTS false diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d65376af9..2453d46cf 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,9 +1,12 @@ # Minimum CMake version 3.18 for CUDA --std=c++17 -cmake_minimum_required(VERSION VERSION 3.18 FATAL_ERROR) +cmake_minimum_required(VERSION 3.18...3.25 FATAL_ERROR) -# Only Do anything if BUILD_TESTS or BUILD_TESTS_DEV is set. -if(NOT (BUILD_TESTS OR BUILD_TESTS_DEV)) - message(FATAL_ERROR "${CMAKE_CURRENT_LIST_FILE} requires BUILD_TESTS or BUILD_TESTS_DEV to be ON") +# Option to enable GTEST_DISCOVER if tests or tests_dev are enabled. Defaults to off due to runtime increase +cmake_dependent_option(FLAMEGPU_ENABLE_GTEST_DISCOVER "Enable GTEST_DISCOVER for more detailed ctest output without -VV. 
This dramatically increases test suite runtime due to CUDA context initialisation." OFF "FLAMEGPU_BUILD_TESTS OR FLAMEGPU_BUILD_TESTS_DEV" OFF) + +# Only do anything if FLAMEGPU_BUILD_TESTS or FLAMEGPU_BUILD_TESTS_DEV is set. +if(NOT (FLAMEGPU_BUILD_TESTS OR FLAMEGPU_BUILD_TESTS_DEV)) + message(FATAL_ERROR "${CMAKE_CURRENT_LIST_FILE} requires FLAMEGPU_BUILD_TESTS or FLAMEGPU_BUILD_TESTS_DEV to be ON") endif() # Define the source files early, prior to projects. @@ -103,16 +106,21 @@ SET(HELPERS_SRC # Set the location of the ROOT flame gpu project relative to this CMakeList.txt get_filename_component(FLAMEGPU_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/.. REALPATH) +# Include CMake for managing CMAKE_CUDA_ARCHITECTURES +include(${FLAMEGPU_ROOT}/cmake/CUDAArchitectures.cmake) + # Include googletest as a dependency. include(${FLAMEGPU_ROOT}/cmake/dependencies/googletest.cmake) # If CTest's GoogleTest integreation is required, include GoogleTest. -if((BUILD_TESTS OR BUILD_TESTS_DEV) AND USE_GTEST_DISCOVER) +if((FLAMEGPU_BUILD_TESTS OR FLAMEGPU_BUILD_TESTS_DEV) AND FLAMEGPU_ENABLE_GTEST_DISCOVER) include(GoogleTest) endif() -if(BUILD_TESTS) +if(FLAMEGPU_BUILD_TESTS) enable_testing() + # Handle CMAKE_CUDA_ARCHITECTURES and inject code into the tests project() command + flamegpu_init_cuda_architectures(PROJECT tests) # Name the project and set languages project(tests CUDA CXX) # Include common rules.
@@ -131,13 +139,16 @@ if(BUILD_TESTS) SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../bin/${CMAKE_BUILD_TYPE}/) endif() # Add the executable and set required flags for the target - add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" FALSE) + flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" FALSE) # Add the tests directory to the include path, target_include_directories("${PROJECT_NAME}" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") # Add the targets we depend on (this does link and include) target_link_libraries("${PROJECT_NAME}" PRIVATE GTest::gtest) + # Pass a tests specific compiler definition for checking CMAKE_CUDA_ARCHITECTURES behaviour + flamegpu_get_minimum_cuda_architecture(min_cuda_arch) + target_compile_definitions(${PROJECT_NAME} PRIVATE FLAMEGPU_TEST_MIN_CUDA_ARCH=${min_cuda_arch}) # Put Within Tests filter - CMAKE_SET_TARGET_FOLDER("${PROJECT_NAME}" "Tests") + flamegpu_set_target_folder("${PROJECT_NAME}" "Tests") # Also set as startup project (if top level project) set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" PROPERTY VS_STARTUP_PROJECT "${PROJECT_NAME}") # Set the default (visual studio) debugger configure_file @@ -145,7 +156,7 @@ if(BUILD_TESTS) VS_DEBUGGER_COMMAND_ARGUMENTS "$<$:--gtest_catch_exceptions=0> --gtest_filter=*") # Add the tests target as to ctest, optionally using the gtest_discover integration. - if(USE_GTEST_DISCOVER) + if(FLAMEGPU_ENABLE_GTEST_DISCOVER) # If GTEST_DISCOVER is enabled, add the unit test executable using it. This results in very long test exeuction times due to CUDA context initialisation per test gtest_discover_tests( "${PROJECT_NAME}" @@ -163,8 +174,10 @@ endif() # If the tests_dev target is requirest, create it. 
-if(BUILD_TESTS_DEV) +if(FLAMEGPU_BUILD_TESTS_DEV) enable_testing() + # Handle CMAKE_CUDA_ARCHITECTURES and inject code into the tests project() command + flamegpu_init_cuda_architectures(PROJECT tests_dev) # DEVELOPMENT TESTING THING (Compact repeated version of above) project(tests_dev CUDA CXX) # Include common rules. @@ -183,19 +196,22 @@ if(BUILD_TESTS_DEV) SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../bin/${CMAKE_BUILD_TYPE}/) endif() # Add the executable and set required flags for the target - add_flamegpu_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" FALSE) + flamegpu_add_executable("${PROJECT_NAME}" "${ALL_SRC}" "${FLAMEGPU_ROOT}" "${PROJECT_BINARY_DIR}" FALSE) # Add the tests directory to the include path, target_include_directories("${PROJECT_NAME}" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") # Add the targets we depend on (this does link and include) target_link_libraries("${PROJECT_NAME}" PRIVATE GTest::gtest) + # Pass a tests specific compiler definition for checking CMAKE_CUDA_ARCHITECTURES behaviour + flamegpu_get_minimum_cuda_architecture(min_cuda_arch) + target_compile_definitions(${PROJECT_NAME} PRIVATE FLAMEGPU_TEST_MIN_CUDA_ARCH=${min_cuda_arch}) # Put Within Tests filter - CMAKE_SET_TARGET_FOLDER("${PROJECT_NAME}" "Tests") + flamegpu_set_target_folder("${PROJECT_NAME}" "Tests") # Set the default (visual studio) debugger configure_file set_target_properties("${PROJECT_NAME}" PROPERTIES VS_DEBUGGER_WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" VS_DEBUGGER_COMMAND_ARGUMENTS "$<$:--gtest_catch_exceptions=0> --gtest_filter=*") # Add the tests target as to ctest, optionally using the gtest_discover integration. - if(USE_GTEST_DISCOVER) + if(FLAMEGPU_ENABLE_GTEST_DISCOVER) # If GTEST_DISCOVER is enabled, add the unit test executable using it. 
This results in very long test exeuction times due to CUDA context initialisation per test gtest_discover_tests( "${PROJECT_NAME}" diff --git a/tests/helpers/host_reductions_common.cu b/tests/helpers/host_reductions_common.cu index dd592038d..1c617ab54 100644 --- a/tests/helpers/host_reductions_common.cu +++ b/tests/helpers/host_reductions_common.cu @@ -12,7 +12,7 @@ uint32_t uint32_t_out = 0; int32_t int32_t_out = 0; uint64_t uint64_t_out = 0; int64_t int64_t_out = 0; -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM glm::vec3 vec3_t_out = glm::vec3(0); #endif std::pair mean_sd_out; diff --git a/tests/helpers/host_reductions_common.h b/tests/helpers/host_reductions_common.h index be78b5fc0..81b40afee 100644 --- a/tests/helpers/host_reductions_common.h +++ b/tests/helpers/host_reductions_common.h @@ -35,7 +35,7 @@ extern uint32_t uint32_t_out; extern int32_t int32_t_out; extern uint64_t uint64_t_out; extern int64_t int64_t_out; -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM extern glm::vec3 vec3_t_out; #endif extern std::pair mean_sd_out; @@ -58,7 +58,7 @@ class MiniSim { agent.newVariable("int32_t"); agent.newVariable("uint64_t"); agent.newVariable("int64_t"); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3"); #endif population = new AgentVector(agent, TEST_LEN); diff --git a/tests/test_cases/exception/test_device_exception.cu b/tests/test_cases/exception/test_device_exception.cu index 1228bf569..089bf8a19 100644 --- a/tests/test_cases/exception/test_device_exception.cu +++ b/tests/test_cases/exception/test_device_exception.cu @@ -5,8 +5,8 @@ namespace flamegpu { -// These tests wont work if built with SEATBELTS=OFF, so mark them all as disabled instead -#if defined(SEATBELTS) && !SEATBELTS +// These tests wont work if built with FLAMEGPU_SEATBELTS=OFF, so mark them all as disabled instead +#if defined(FLAMEGPU_SEATBELTS) && !FLAMEGPU_SEATBELTS #undef TEST_F #define TEST_F(test_fixture, test_name)\ GTEST_TEST_(test_fixture, DISABLED_ ## test_name, test_fixture, 
\ diff --git a/tests/test_cases/exception/test_rtc_device_exception.cu b/tests/test_cases/exception/test_rtc_device_exception.cu index d3828f4ee..78fe487cb 100644 --- a/tests/test_cases/exception/test_rtc_device_exception.cu +++ b/tests/test_cases/exception/test_rtc_device_exception.cu @@ -4,8 +4,8 @@ namespace flamegpu { -// These tests wont work if built with SEATBELTS=OFF, so mark them all as disabled instead -#if defined(SEATBELTS) && !SEATBELTS +// These tests wont work if built with FLAMEGPU_SEATBELTS=OFF, so mark them all as disabled instead +#if defined(FLAMEGPU_SEATBELTS) && !FLAMEGPU_SEATBELTS #undef TEST #define TEST(test_suite_name, test_name) GTEST_TEST(test_suite_name, DISABLED_ ## test_name) #endif diff --git a/tests/test_cases/gpu/test_cuda_simulation.cu b/tests/test_cases/gpu/test_cuda_simulation.cu index 03dc1a864..033c16ee7 100644 --- a/tests/test_cases/gpu/test_cuda_simulation.cu +++ b/tests/test_cases/gpu/test_cuda_simulation.cu @@ -757,7 +757,7 @@ FLAMEGPU_HOST_FUNCTION(Check_setEnvironmentProperty) { std::array t_check = { 6, 7, 8 }; EXPECT_EQ(t, t_check); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM const std::array ivec3_3_check2 = { glm::ivec3{ 41, 42, 43 }, glm::ivec3{44, 45, 46}, glm::ivec3{47, 48, 49} }; EXPECT_EQ(FLAMEGPU->environment.getProperty("ivec3"), glm::ivec3(31, 32, 33)); @@ -773,7 +773,7 @@ TEST(TestCUDASimulation, setEnvironmentProperty) { m.Environment().newProperty("int", 2); m.Environment().newProperty("int2", { -1, 1 }); m.Environment().newProperty("int3", t_check); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM const glm::ivec3 ivec3_1_check = glm::ivec3{ 1, 2, 3 }; const std::array ivec3_2_check = { glm::ivec3{4, 5, 6}, glm::ivec3{7, 8, 9} }; const std::array ivec3_3_check = @@ -791,7 +791,7 @@ TEST(TestCUDASimulation, setEnvironmentProperty) { EXPECT_EQ((s.getEnvironmentProperty)("int3"), t_check); EXPECT_EQ(s.getEnvironmentProperty("int2", 0), -1); EXPECT_EQ(s.getEnvironmentProperty("int2", 1), 1); -#ifdef USE_GLM +#ifdef 
FLAMEGPU_USE_GLM EXPECT_EQ(s.getEnvironmentProperty("ivec3"), ivec3_1_check); EXPECT_EQ((s.getEnvironmentProperty)("ivec33"), ivec3_3_check); EXPECT_EQ(s.getEnvironmentProperty("ivec32", 0), ivec3_2_check[0]); @@ -802,7 +802,7 @@ TEST(TestCUDASimulation, setEnvironmentProperty) { s.setEnvironmentProperty("int3", { 6, 7, 8 }); s.setEnvironmentProperty("int2", 0, 1); s.setEnvironmentProperty("int2", 1, -1); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM s.setEnvironmentProperty("ivec3", glm::ivec3{ 31, 32, 33 }); const std::array ivec3_3_check2 = { glm::ivec3{ 41, 42, 43 }, glm::ivec3{44, 45, 46}, glm::ivec3{47, 48, 49} }; @@ -826,7 +826,7 @@ TEST(TestCUDASimulation, setEnvironmentProperty) { EXPECT_THROW((s.CUDASimulation::getEnvironmentProperty)("int3"), exception::OutOfBoundsException); // Bad length EXPECT_THROW((s.CUDASimulation::getEnvironmentProperty)("int3"), exception::InvalidEnvPropertyType); // Bad type EXPECT_THROW((s.CUDASimulation::getEnvironmentProperty)("int3", 4), exception::OutOfBoundsException); // Out of bounds -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW((s.CUDASimulation::setEnvironmentProperty)("ivec32", 3, {}), exception::OutOfBoundsException); // Out of bounds EXPECT_THROW((s.CUDASimulation::setEnvironmentProperty)("ivec33", 4, {}), exception::OutOfBoundsException); // Out of bounds EXPECT_THROW((s.CUDASimulation::getEnvironmentProperty)("ivec32", 3), exception::OutOfBoundsException); // Out of bounds diff --git a/tests/test_cases/gpu/test_cuda_simulation_concurrency.cu b/tests/test_cases/gpu/test_cuda_simulation_concurrency.cu index 2a355fb28..d9a908285 100644 --- a/tests/test_cases/gpu/test_cuda_simulation_concurrency.cu +++ b/tests/test_cases/gpu/test_cuda_simulation_concurrency.cu @@ -25,7 +25,7 @@ namespace flamegpu { #endif // if seatbelts and not debug, run the test, otherwise disable. 
-#if defined(SEATBELTS) && SEATBELTS && !defined(_DEBUG) +#if defined(FLAMEGPU_SEATBELTS) && FLAMEGPU_SEATBELTS && !defined(_DEBUG) #define RELEASE_ONLY_SEATBELTS_TEST(TestSuiteName, TestName)\ TEST(TestSuiteName, TestName) #else @@ -1179,7 +1179,7 @@ RELEASE_ONLY_TEST(TestCUDASimulationConcurrency, DISABLED_FastConditionConcurren } /** - * If SEATBELTS are on, try to get a device exception from parallel agent functions. + * If FLAMEGPU_SEATBELTS are on, try to get a device exception from parallel agent functions. */ RELEASE_ONLY_SEATBELTS_TEST(TestCUDASimulationConcurrency, LayerConcurrencyDeviceException) { // Define a model with multiple agent types diff --git a/tests/test_cases/io/test_io.cu b/tests/test_cases/io/test_io.cu index 6a9f60edf..a8433d7e7 100644 --- a/tests/test_cases/io/test_io.cu +++ b/tests/test_cases/io/test_io.cu @@ -216,7 +216,7 @@ class MiniSim { am.SimulationConfig().steps = 123; am.SimulationConfig().verbosity = Verbosity::Quiet; am.SimulationConfig().timing = true; -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION am.SimulationConfig().console_mode = true; #endif am.CUDAConfig().device_id = 0; @@ -234,7 +234,7 @@ class MiniSim { am.SimulationConfig().steps = 0; am.SimulationConfig().verbosity = Verbosity::Verbose; am.SimulationConfig().timing = false; -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION am.SimulationConfig().console_mode = false; #endif am.CUDAConfig().device_id = 1000; @@ -252,7 +252,7 @@ class MiniSim { EXPECT_EQ(am.getSimulationConfig().steps, 123u); EXPECT_EQ(am.getSimulationConfig().verbosity, Verbosity::Quiet); EXPECT_EQ(am.getSimulationConfig().timing, true); -#ifdef VISUALISATION +#ifdef FLAMEGPU_VISUALISATION EXPECT_EQ(am.getSimulationConfig().console_mode, true); #endif EXPECT_EQ(am.getCUDAConfig().device_id, 0); diff --git a/tests/test_cases/model/test_agent.cu b/tests/test_cases/model/test_agent.cu index c26ab7f33..c5abf6394 100644 --- a/tests/test_cases/model/test_agent.cu +++ 
b/tests/test_cases/model/test_agent.cu @@ -86,7 +86,7 @@ TEST(AgentDescriptionTest, variables) { EXPECT_EQ(sizeof(int16_t), a.getVariableSize(VARIABLE_NAME2)); EXPECT_EQ(std::type_index(typeid(float)), a.getVariableType(VARIABLE_NAME1)); EXPECT_EQ(std::type_index(typeid(int16_t)), a.getVariableType(VARIABLE_NAME2)); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM // Can create variable with GLM types a.newVariable("vec3"); a.newVariable("uvec4"); @@ -130,7 +130,7 @@ TEST(AgentDescriptionTest, variables_array) { EXPECT_EQ(sizeof(int16_t), a.getVariableSize(VARIABLE_NAME2)); EXPECT_EQ(std::type_index(typeid(float)), a.getVariableType(VARIABLE_NAME1)); EXPECT_EQ(std::type_index(typeid(int16_t)), a.getVariableType(VARIABLE_NAME2)); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM // Can create variable array with GLM types a.newVariable("vec3_5"); a.newVariable("uvec4_2"); diff --git a/tests/test_cases/model/test_environment_description.cu b/tests/test_cases/model/test_environment_description.cu index 7c3423d2d..2c1886b86 100644 --- a/tests/test_cases/model/test_environment_description.cu +++ b/tests/test_cases/model/test_environment_description.cu @@ -36,7 +36,7 @@ void AddGet_SetGet_test() { EXPECT_EQ(ed.setProperty("a", c), b); EXPECT_EQ(ed.getProperty("a"), c); } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM template void AddGet_SetGet_vec_test() { ModelDescription model("test"); @@ -80,7 +80,7 @@ void AddGet_SetGet_array_test() { EXPECT_EQ(a[i], c[i]); } } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM template void AddGet_SetGet_array_vec_test() { ModelDescription model("test"); @@ -132,7 +132,7 @@ void AddGet_SetGet_array_element_test() { EXPECT_EQ(ed.getProperty("a", i), c[i]); } } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM template void AddGet_SetGet_array_element_vec_test() { ModelDescription model("test"); @@ -244,7 +244,7 @@ TEST(EnvironmentDescriptionTest, AddGet_SetGetint64_t) { TEST(EnvironmentDescriptionTest, AddGet_SetGetuint64_t) { AddGet_SetGet_test(); } -#ifdef USE_GLM +#ifdef 
FLAMEGPU_USE_GLM TEST(EnvironmentDescriptionTest, AddGet_SetGetvec3) { AddGet_SetGet_vec_test(); } @@ -283,7 +283,7 @@ TEST(EnvironmentDescriptionTest, AddGet_SetGetarray_int64_t) { TEST(EnvironmentDescriptionTest, AddGet_SetGetarray_uint64_t) { AddGet_SetGet_array_test(); } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM TEST(EnvironmentDescriptionTest, AddGet_SetGetrray_vec3) { AddGet_SetGet_array_vec_test(); } @@ -322,7 +322,7 @@ TEST(EnvironmentDescriptionTest, AddGet_SetGetarray_element_int64_t) { TEST(EnvironmentDescriptionTest, AddGet_SetGetarray_element_uint64_t) { AddGet_SetGet_array_element_test(); } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM TEST(EnvironmentDescriptionTest, AddGet_SetGetrray_element_vec3) { AddGet_SetGet_array_element_vec_test(); } @@ -429,7 +429,7 @@ TEST(EnvironmentDescriptionTest, ExceptionPropertyRange_int64_t) { TEST(EnvironmentDescriptionTest, ExceptionPropertyRange_uint64_t) { ExceptionPropertyRange_test(); } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM TEST(EnvironmentDescriptionTest, Exception_array_glm) { EnvironmentDescription ed; std::array b; diff --git a/tests/test_cases/model/test_message.cu b/tests/test_cases/model/test_message.cu index 56134378e..45fc7a1b1 100644 --- a/tests/test_cases/model/test_message.cu +++ b/tests/test_cases/model/test_message.cu @@ -34,7 +34,7 @@ TEST(MessageDescriptionTest, variables) { EXPECT_EQ(sizeof(int16_t), m.getVariableSize(VARIABLE_NAME2)); EXPECT_EQ(std::type_index(typeid(int16_t)), m.getVariableType(VARIABLE_NAME2)); EXPECT_EQ(1u, m.getVariableLength(VARIABLE_NAME2)); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM // Can create variable with GLM types m.newVariable("vec3"); m.newVariable("uvec4"); @@ -74,7 +74,7 @@ TEST(MessageDescriptionTest, variables_array) { EXPECT_EQ(std::type_index(typeid(int16_t)), m.getVariableType(VARIABLE_NAME2)); EXPECT_EQ(2u, m.getVariableLength(VARIABLE_NAME1)); EXPECT_EQ(2u, m.getVariableLength(VARIABLE_NAME2)); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM // Can create variable 
array with GLM types m.newVariable("vec3_5"); m.newVariable("uvec4_2"); diff --git a/tests/test_cases/pop/test_agent_instance.cu b/tests/test_cases/pop/test_agent_instance.cu index 2c1dca54b..85b229f61 100644 --- a/tests/test_cases/pop/test_agent_instance.cu +++ b/tests/test_cases/pop/test_agent_instance.cu @@ -10,14 +10,14 @@ TEST(AgentInstanceTest, constructor) { AgentDescription agent = model.newAgent("agent"); agent.newVariable("int", 1); agent.newVariable("uint", 2u); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(4.0f, 5.0f, 6.0f)); #endif AgentInstance ai(agent); // New AgentInstance is default init ASSERT_EQ(ai.getVariable("int"), 1); ASSERT_EQ(ai.getVariable("uint"), 2u); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai.getVariable("vec3"), glm::vec3(4.0f, 5.0f, 6.0f)); #endif } @@ -27,7 +27,7 @@ TEST(AgentInstanceTest, copy_constructor) { agent.newVariable("int", 1); agent.newVariable("uint3", {2u, 3u, 4u}); const std::array ai_uint3_ref = { 0u, 1u, 2u }; -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(4.0f, 5.0f, 6.0f)); #endif // Copying an agent instance retains the values @@ -38,7 +38,7 @@ TEST(AgentInstanceTest, copy_constructor) { ASSERT_EQ(ai2.getVariable("int"), 12); auto ai2_uint3_check = ai2.getVariable("uint3"); ASSERT_EQ(ai2_uint3_check, ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai2.getVariable("vec3"), glm::vec3(4.0f, 5.0f, 6.0f)); #endif // Copying an agent instance from an AgentVector::Agent retains values @@ -46,7 +46,7 @@ TEST(AgentInstanceTest, copy_constructor) { AgentVector::Agent ava = av.front(); ava.setVariable("int", 12); ava.setVariable("uint3", ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ava.setVariable("vec3", glm::vec3(6.0f, 5.0f, 4.0f)); #endif AgentInstance ai3(ava); @@ -54,7 +54,7 @@ TEST(AgentInstanceTest, copy_constructor) { auto ai2_uint3_check2 = ai3.getVariable("uint3"); ASSERT_EQ(ai2_uint3_check2, ai_uint3_ref); 
ASSERT_EQ(ai2_uint3_check, ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai3.getVariable("vec3"), glm::vec3(6.0f, 5.0f, 4.0f)); #endif } @@ -64,21 +64,21 @@ TEST(AgentInstanceTest, move_constructor) { agent.newVariable("int", 1); agent.newVariable("uint3", { 2u, 3u, 4u }); const std::array ai_uint3_ref = { 0u, 1u, 2u }; -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(4.0f, 5.0f, 6.0f)); #endif // Moving an agent instance retains the values AgentInstance ai(agent); ai.setVariable("int", 12); ai.setVariable("uint3", ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ai.setVariable("vec3", glm::vec3(6.0f, 5.0f, 4.0f)); #endif AgentInstance ai2(std::move(ai)); ASSERT_EQ(ai2.getVariable("int"), 12); auto ai2_uint3_check = ai2.getVariable("uint3"); ASSERT_EQ(ai2_uint3_check, ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai2.getVariable("vec3"), glm::vec3(6.0f, 5.0f, 4.0f)); #endif } @@ -88,7 +88,7 @@ TEST(AgentInstanceTest, copy_assignment_operator) { AgentDescription agent2 = model.newAgent("agent2"); agent.newVariable("int", 1); agent.newVariable("uint3", { 2u, 3u, 4u }); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(4.0f, 5.0f, 6.0f)); #endif const std::array ai_uint3_ref = { 0u, 1u, 2u }; @@ -96,7 +96,7 @@ TEST(AgentInstanceTest, copy_assignment_operator) { AgentInstance ai(agent); ai.setVariable("int", 12); ai.setVariable("uint3", ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ai.setVariable("vec3", glm::vec3(16.0f, 15.0f, 14.0f)); #endif AgentInstance ai2(agent2); @@ -104,7 +104,7 @@ TEST(AgentInstanceTest, copy_assignment_operator) { ASSERT_EQ(ai2.getVariable("int"), 12); auto ai2_uint3_check = ai2.getVariable("uint3"); ASSERT_EQ(ai2_uint3_check, ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai2.getVariable("vec3"), glm::vec3(16.0f, 15.0f, 14.0f)); #endif // Copying an agent instance from an AgentVector::Agent retains values @@ 
-112,7 +112,7 @@ TEST(AgentInstanceTest, copy_assignment_operator) { AgentVector::Agent ava = av.front(); ava.setVariable("int", 12); ava.setVariable("uint3", ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ava.setVariable("vec3", glm::vec3(6.0f, 5.0f, 4.0f)); #endif AgentInstance ai3(agent2); @@ -120,7 +120,7 @@ TEST(AgentInstanceTest, copy_assignment_operator) { ASSERT_EQ(ai3.getVariable("int"), 12); auto ai2_uint3_check2 = ai3.getVariable("uint3"); ASSERT_EQ(ai2_uint3_check2, ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai3.getVariable("vec3"), glm::vec3(6.0f, 5.0f, 4.0f)); #endif } @@ -130,7 +130,7 @@ TEST(AgentInstanceTest, move_assignment_operator) { AgentDescription agent2 = model.newAgent("agent2"); agent.newVariable("int", 1); agent.newVariable("uint3", { 2u, 3u, 4u }); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(4.0f, 5.0f, 6.0f)); #endif const std::array ai_uint3_ref = { 0u, 1u, 2u }; @@ -138,7 +138,7 @@ TEST(AgentInstanceTest, move_assignment_operator) { AgentInstance ai(agent); ai.setVariable("int", 12); ai.setVariable("uint3", ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ai.setVariable("vec3", glm::vec3(16.0f, 15.0f, 14.0f)); #endif AgentInstance ai2(agent2); @@ -146,7 +146,7 @@ TEST(AgentInstanceTest, move_assignment_operator) { ASSERT_EQ(ai2.getVariable("int"), 12); auto ai2_uint3_check = ai2.getVariable("uint3"); ASSERT_EQ(ai2_uint3_check, ai_uint3_ref); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai2.getVariable("vec3"), glm::vec3(16.0f, 15.0f, 14.0f)); #endif } @@ -159,7 +159,7 @@ TEST(AgentInstanceTest, getsetVariable) { agent.newVariable("int3", { 2, 3, 4 }); agent.newVariable("int2", { 5, 6 }); agent.newVariable("float", 15.0f); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(2.0f, 4.0f, 6.0f)); agent.newVariable("ivec3_3", {glm::ivec3(12, 14, 16), glm::ivec3(2, 4, 6), glm::ivec3(22, 24, 26)}); agent.newVariable("ivec3_3b", 
{glm::ivec3(12, 14, 16), glm::ivec3(2, 4, 6), glm::ivec3(22, 24, 26)}); @@ -168,7 +168,7 @@ TEST(AgentInstanceTest, getsetVariable) { // Create pop, variables are as expected AgentInstance ai(agent); const std::array int3_ref = { 2, 3, 4 }; -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM const std::array vec_array_check = {glm::ivec3(12, 14, 16), glm::ivec3(2, 4, 6), glm::ivec3(22, 24, 26)}; #endif { @@ -178,7 +178,7 @@ TEST(AgentInstanceTest, getsetVariable) { ASSERT_EQ(ai.getVariable("int2", 0), 5); ASSERT_EQ(ai.getVariable("int2", 1), 6); ASSERT_EQ(ai.getVariable("float"), 15.0f); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); const auto vec_array_test = ai.getVariable("ivec3_3"); ASSERT_EQ(vec_array_test, vec_array_check); @@ -193,7 +193,7 @@ TEST(AgentInstanceTest, getsetVariable) { ai.setVariable("int2", 0, 5 + static_cast(i)); ai.setVariable("int2", 1, 6 + static_cast(i)); ai.setVariable("float", 15.0f + static_cast(i)); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ai.setVariable("vec3", glm::vec3(2.0f + static_cast(i), 4.0f + static_cast(i), 6.0f + static_cast(i))); ai.setVariable("ivec3_3", {glm::ivec3(12, 14, 16) + glm::ivec3(static_cast(i)), glm::ivec3(2, 4, 6) + glm::ivec3(static_cast(i)), glm::ivec3(22, 24, 26) + glm::ivec3(static_cast(i))}); ai.setVariable("ivec3_3b", 1, glm::ivec3(2, 4, 6) + glm::ivec3(static_cast(i) * 3)); @@ -210,7 +210,7 @@ TEST(AgentInstanceTest, getsetVariable) { ASSERT_EQ(ai.getVariable("int2", 0), 5 + static_cast(i)); ASSERT_EQ(ai.getVariable("int2", 1), 6 + static_cast(i)); ASSERT_EQ(ai.getVariable("float"), 15.0f + static_cast(i)); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai.getVariable("vec3"), glm::vec3(2.0f + static_cast(i), 4.0f + static_cast(i), 6.0f + static_cast(i))); const std::array vec_array_check2 = {glm::ivec3(12, 14, 16) + glm::ivec3(static_cast(i)), glm::ivec3(2, 4, 6) + glm::ivec3(static_cast(i)), glm::ivec3(22, 24, 26) + glm::ivec3(static_cast(i))}; 
const std::array vec_array_test = ai.getVariable("ivec3_3"); @@ -227,7 +227,7 @@ TEST(AgentInstanceTest, getsetVariable) { EXPECT_THROW(ai.setVariable("wrong", 1), exception::InvalidAgentVar); // Array passed to non-array method EXPECT_THROW(ai.setVariable("int2", 1), exception::InvalidVarType); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW(ai.setVariable("float", {}), exception::InvalidVarType); #endif // Wrong type @@ -249,7 +249,7 @@ TEST(AgentInstanceTest, getsetVariable) { // Index out of bounds EXPECT_THROW(ai.setVariable("int2", 2, 1), exception::OutOfBoundsException); EXPECT_THROW(ai.setVariable("float", 1, 1), exception::OutOfBoundsException); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW(ai.setVariable("ivec3_3", 4, {}), exception::OutOfBoundsException); EXPECT_THROW(ai.setVariable("int3", 1, {}), exception::OutOfBoundsException); #endif @@ -261,7 +261,7 @@ TEST(AgentInstanceTest, getsetVariable) { EXPECT_THROW(ai.getVariable("wrong"), exception::InvalidAgentVar); // Array passed to non-array method EXPECT_THROW(ai.getVariable("int2"), exception::InvalidVarType); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW(ai.getVariable("float"), exception::InvalidVarType); #endif // Wrong type @@ -281,7 +281,7 @@ TEST(AgentInstanceTest, getsetVariable) { // Index out of bounds EXPECT_THROW(ai.getVariable("int2", 2), exception::OutOfBoundsException); EXPECT_THROW(ai.getVariable("float", 1), exception::OutOfBoundsException); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW(ai.getVariable("ivec3_3", 4), exception::OutOfBoundsException); EXPECT_THROW(ai.getVariable("int3", 1), exception::OutOfBoundsException); #endif diff --git a/tests/test_cases/pop/test_agent_vector.cu b/tests/test_cases/pop/test_agent_vector.cu index a7cfb4900..e27a75ffe 100644 --- a/tests/test_cases/pop/test_agent_vector.cu +++ b/tests/test_cases/pop/test_agent_vector.cu @@ -15,7 +15,7 @@ TEST(AgentVectorTest, constructor) { agent.newVariable("uint", 2u); 
agent.newVariable("float", 3.0f); agent.newVariable("double", 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(2.0f, 4.0f, 6.0f)); #endif @@ -32,7 +32,7 @@ TEST(AgentVectorTest, constructor) { ASSERT_EQ(instance.getVariable("uint"), 2u); ASSERT_EQ(instance.getVariable("float"), 3.0f); ASSERT_EQ(instance.getVariable("double"), 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(instance.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); #endif } @@ -46,7 +46,7 @@ TEST(AgentVectorTest, copy_constructor) { agent.newVariable("uint", 2u); agent.newVariable("float", 3.0f); agent.newVariable("double", 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(2.0f, 4.0f, 6.0f)); #endif @@ -65,7 +65,7 @@ TEST(AgentVectorTest, copy_constructor) { ASSERT_EQ(instance.getVariable("uint"), 2u); ASSERT_EQ(instance.getVariable("float"), 3.0f); ASSERT_EQ(instance.getVariable("double"), 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(instance.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); #endif } @@ -79,7 +79,7 @@ TEST(AgentVectorTest, move_constructor) { agent.newVariable("uint", 2u); agent.newVariable("float", 3.0f); agent.newVariable("double", 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(2.0f, 4.0f, 6.0f)); #endif @@ -98,7 +98,7 @@ TEST(AgentVectorTest, move_constructor) { ASSERT_EQ(instance.getVariable("uint"), 2u); ASSERT_EQ(instance.getVariable("float"), 3.0f); ASSERT_EQ(instance.getVariable("double"), 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(instance.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); #endif } @@ -112,7 +112,7 @@ TEST(AgentVectorTest, copy_assignment_operator) { agent.newVariable("uint", 2u); agent.newVariable("float", 3.0f); agent.newVariable("double", 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(2.0f, 4.0f, 6.0f)); #endif @@ -132,7 +132,7 @@ TEST(AgentVectorTest, copy_assignment_operator) { 
ASSERT_EQ(instance.getVariable("uint"), 2u); ASSERT_EQ(instance.getVariable("float"), 3.0f); ASSERT_EQ(instance.getVariable("double"), 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(instance.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); #endif } @@ -146,7 +146,7 @@ TEST(AgentVectorTest, move_assignment_operator) { agent.newVariable("uint", 2u); agent.newVariable("float", 3.0f); agent.newVariable("double", 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(2.0f, 4.0f, 6.0f)); #endif @@ -166,7 +166,7 @@ TEST(AgentVectorTest, move_assignment_operator) { ASSERT_EQ(instance.getVariable("uint"), 2u); ASSERT_EQ(instance.getVariable("float"), 3.0f); ASSERT_EQ(instance.getVariable("double"), 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(instance.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); #endif } @@ -180,7 +180,7 @@ TEST(AgentVectorTest, at) { agent.newVariable("uint", 2u); agent.newVariable("float", 3.0f); agent.newVariable("double", 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(2.0f, 4.0f, 6.0f)); #endif @@ -193,7 +193,7 @@ TEST(AgentVectorTest, at) { ASSERT_EQ(instance.getVariable("uint"), 2u); ASSERT_EQ(instance.getVariable("float"), 3.0f); ASSERT_EQ(instance.getVariable("double"), 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(instance.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); #endif } @@ -207,7 +207,7 @@ TEST(AgentVectorTest, at) { ASSERT_EQ(instance.getVariable("uint"), 2u); ASSERT_EQ(instance.getVariable("float"), 3.0f); ASSERT_EQ(instance.getVariable("double"), 4.0); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(instance.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); #endif } @@ -371,7 +371,7 @@ TEST(AgentVectorTest, iterator) { } ASSERT_EQ(i, 0u); } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM TEST(AgentVectorTest, iterator_GLM) { const unsigned int POP_SIZE = 10; // Test correctness of AgentVector array iterator, and the member functions 
for creating them. @@ -1356,7 +1356,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { agent.newVariable("int3", {2, 3, 4}); agent.newVariable("int2", { 5, 6 }); agent.newVariable("float", 15.0f); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM agent.newVariable("vec3", glm::vec3(2.0f, 4.0f, 6.0f)); agent.newVariable("ivec3_3", {glm::ivec3(12, 14, 16), glm::ivec3(2, 4, 6), glm::ivec3(22, 24, 26)}); agent.newVariable("ivec3_3b", {glm::ivec3(12, 14, 16), glm::ivec3(2, 4, 6), glm::ivec3(22, 24, 26)}); @@ -1365,7 +1365,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { // Create pop, variables are as expected AgentVector pop(agent, POP_SIZE); const std::array int3_ref = { 2, 3, 4 }; -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM const std::array vec_array_check = {glm::ivec3(12, 14, 16), glm::ivec3(2, 4, 6), glm::ivec3(22, 24, 26)}; #endif for (unsigned int i = 0; i < POP_SIZE; ++i) { @@ -1378,7 +1378,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { ASSERT_EQ(ai.getVariable("float"), 15.0f); // check index value is as expected ASSERT_EQ(ai.getIndex(), i); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai.getVariable("vec3"), glm::vec3(2.0f, 4.0f, 6.0f)); const auto vec_array_test = ai.getVariable("ivec3_3"); ASSERT_EQ(vec_array_test, vec_array_check); @@ -1397,7 +1397,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { ai.setVariable("int2", 0, 5 + static_cast(i)); ai.setVariable("int2", 1, 6 + static_cast(i)); ai.setVariable("float", 15.0f + static_cast(i)); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ai.setVariable("vec3", glm::vec3(2.0f + static_cast(i), 4.0f + static_cast(i), 6.0f + static_cast(i))); ai.setVariable("ivec3_3", {glm::ivec3(12, 14, 16) + glm::ivec3(static_cast(i)), glm::ivec3(2, 4, 6) + glm::ivec3(static_cast(i)), glm::ivec3(22, 24, 26) + glm::ivec3(static_cast(i))}); // Don't update ivec3_3b index 0 @@ -1416,7 +1416,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { ASSERT_EQ(ai.getVariable("int2", 0), 5 + static_cast(i)); ASSERT_EQ(ai.getVariable("int2", 1), 6 + 
static_cast(i)); ASSERT_EQ(ai.getVariable("float"), 15.0f + static_cast(i)); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ASSERT_EQ(ai.getVariable("vec3"), glm::vec3(2.0f + static_cast(i), 4.0f + static_cast(i), 6.0f + static_cast(i))); const std::array vec_array_check2 = {glm::ivec3(12, 14, 16) + glm::ivec3(static_cast(i)), glm::ivec3(2, 4, 6) + glm::ivec3(static_cast(i)), glm::ivec3(22, 24, 26) + glm::ivec3(static_cast(i))}; const std::array vec_array_test = ai.getVariable("ivec3_3"); @@ -1434,7 +1434,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { EXPECT_THROW(ai.setVariable("wrong", 1), exception::InvalidAgentVar); // Array passed to non-array method EXPECT_THROW(ai.setVariable("int2", 1), exception::InvalidVarType); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW(ai.setVariable("float", {}), exception::InvalidVarType); #endif // Wrong type @@ -1456,7 +1456,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { // Index out of bounds EXPECT_THROW(ai.setVariable("int2", 2, 1), exception::OutOfBoundsException); EXPECT_THROW(ai.setVariable("float", 1, 1), exception::OutOfBoundsException); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW(ai.setVariable("ivec3_3", 4, {}), exception::OutOfBoundsException); EXPECT_THROW(ai.setVariable("int3", 1, {}), exception::OutOfBoundsException); #endif @@ -1468,7 +1468,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { EXPECT_THROW(ai.getVariable("wrong"), exception::InvalidAgentVar); // Array passed to non-array method EXPECT_THROW(ai.getVariable("int2"), exception::InvalidVarType); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW(ai.getVariable("float"), exception::InvalidVarType); #endif // Wrong type @@ -1488,7 +1488,7 @@ TEST(AgentVectorTest, AgentVector_Agent) { // Index out of bounds EXPECT_THROW(ai.getVariable("int2", 2), exception::OutOfBoundsException); EXPECT_THROW(ai.getVariable("float", 1), exception::OutOfBoundsException); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW(ai.getVariable("ivec3_3", 4), 
exception::OutOfBoundsException); EXPECT_THROW(ai.getVariable("int3", 1), exception::OutOfBoundsException); #endif diff --git a/tests/test_cases/runtime/host_reduction/test_reduce.cu b/tests/test_cases/runtime/host_reduction/test_reduce.cu index 59b4495b6..dec91bf29 100644 --- a/tests/test_cases/runtime/host_reduction/test_reduce.cu +++ b/tests/test_cases/runtime/host_reduction/test_reduce.cu @@ -172,7 +172,7 @@ TEST_F(HostReductionTest, CustomReduceUnsignedInt64) { ms->run(); EXPECT_EQ(uint64_t_out, *std::max_element(in.begin(), in.end())); } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM FLAMEGPU_CUSTOM_REDUCTION(customMax_glm, a, b) { return glm::max(a, b); } diff --git a/tests/test_cases/runtime/host_reduction/test_transform_reduce.cu b/tests/test_cases/runtime/host_reduction/test_transform_reduce.cu index e6c8fd465..f0ade6c65 100644 --- a/tests/test_cases/runtime/host_reduction/test_transform_reduce.cu +++ b/tests/test_cases/runtime/host_reduction/test_transform_reduce.cu @@ -195,7 +195,7 @@ TEST_F(HostReductionTest, CustomTransformReduceUnsignedInt64) { std::transform(in.begin(), in.end(), inTransform.begin(), customTransform_impl::unary_function()); EXPECT_EQ(int32_t_out, std::count(inTransform.begin(), inTransform.end(), static_cast(1))); } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM FLAMEGPU_CUSTOM_REDUCTION(customMax2_glm, a, b) { return glm::max(a, b); } diff --git a/tests/test_cases/runtime/messaging/test_array.cu b/tests/test_cases/runtime/messaging/test_array.cu index db1572c77..4f2c15810 100644 --- a/tests/test_cases/runtime/messaging/test_array.cu +++ b/tests/test_cases/runtime/messaging/test_array.cu @@ -291,7 +291,7 @@ TEST(TestMessage_Array, Moore2W) { } } // Exception tests -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array, DuplicateOutputException) { #else TEST(TestMessage_Array, DISABLED_DuplicateOutputException) { @@ -388,7 +388,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWrapOutOfBoundsX, 
MessageArray, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array, MooreWrap_InitOutOfBoundsX) { #else TEST(TestMessage_Array, DISABLED_MooreWrap_InitOutOfBoundsX) { @@ -428,7 +428,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWrapBadRadius1, MessageArray, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array, MooreWrap_BadRadius1) { #else TEST(TestMessage_Array, DISABLED_MooreWrap_BadRadius1) { @@ -468,7 +468,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWrapBadRadius2, MessageArray, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array, MooreWrap_BadRadius2) { #else TEST(TestMessage_Array, DISABLED_MooreWrap_BadRadius2) { @@ -508,7 +508,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreOutOfBoundsX, MessageArray, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array, Moore_InitOutOfBoundsX) { #else TEST(TestMessage_Array, DISABLED_Moore_InitOutOfBoundsX) { @@ -548,7 +548,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreBadRadius, MessageArray, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array, Moore_BadRadius) { #else TEST(TestMessage_Array, DISABLED_Moore_BadRadius) { @@ -664,9 +664,9 @@ void test_moore_wrap_comradius( ASSERT_EQ(expected_count, message_read); } } else { - // If the comradius would lead to double message reads, a device error is thrown when SEATBELTS is enabled + // If the comradius would lead to double message reads, a device error is thrown when FLAMEGPU_SEATBELTS is enabled // Behaviour is otherwise undefined -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS 
EXPECT_THROW(simulation.step(), flamegpu::exception::DeviceError); #endif } @@ -889,7 +889,7 @@ TEST(TestRTCMessage_Array, ArrayVariable) { } } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM FLAMEGPU_AGENT_FUNCTION(ArrayOut_glm, MessageNone, MessageArray) { const unsigned int index = FLAMEGPU->getVariable("index"); glm::uvec3 t = glm::uvec3(index * 3, index * 7, index * 11); diff --git a/tests/test_cases/runtime/messaging/test_array_2d.cu b/tests/test_cases/runtime/messaging/test_array_2d.cu index 27220d4ef..268f79344 100644 --- a/tests/test_cases/runtime/messaging/test_array_2d.cu +++ b/tests/test_cases/runtime/messaging/test_array_2d.cu @@ -314,7 +314,7 @@ TEST(TestMessage_Array2D, Moore2W) { } // Exception tests -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array2D, DuplicateOutputException) { #else TEST(TestMessage_Array2D, DISABLED_DuplicateOutputException) { @@ -414,7 +414,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWOutOfBoundsX, MessageArray2D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array2D, MooreW_InitOutOfBoundsX) { #else TEST(TestMessage_Array2D, DISABLED_MooreW_InitOutOfBoundsX) { @@ -454,7 +454,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWOutOfBoundsY, MessageArray2D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array2D, MooreW_InitOutOfBoundsY) { #else TEST(TestMessage_Array2D, DISABLED_MooreW_InitOutOfBoundsY) { @@ -494,7 +494,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWBadRadius1, MessageArray2D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array2D, MooreW_BadRadius1) { #else TEST(TestMessage_Array2D, DISABLED_MooreW_BadRadius1) { @@ -534,7 +534,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWBadRadius2, MessageArray2D, 
MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array2D, MooreW_BadRadius2) { #else TEST(TestMessage_Array2D, DISABLED_MooreW_BadRadius2) { @@ -574,7 +574,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreOutOfBoundsX, MessageArray2D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array2D, Moore_InitOutOfBoundsX) { #else TEST(TestMessage_Array2D, DISABLED_Moore_InitOutOfBoundsX) { @@ -614,7 +614,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreOutOfBoundsY, MessageArray2D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array2D, Moore_InitOutOfBoundsY) { #else TEST(TestMessage_Array2D, DISABLED_Moore_InitOutOfBoundsY) { @@ -654,7 +654,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreBadRadius, MessageArray2D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array2D, Moore_BadRadius) { #else TEST(TestMessage_Array2D, DISABLED_Moore_BadRadius) { @@ -796,9 +796,9 @@ void test_moore_wrap_comradius( ASSERT_EQ(value_success, 1u); } } else { - // If the comradius would lead to double message reads, a device error is thrown when SEATBELTS is enabled + // If the comradius would lead to double message reads, a device error is thrown when FLAMEGPU_SEATBELTS is enabled // Behaviour is otherwise undefined -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS EXPECT_THROW(simulation.step(), flamegpu::exception::DeviceError); #endif } @@ -1082,7 +1082,7 @@ TEST(TestRTCMessage_Array2D, ArrayVariable) { } } -#if defined(USE_GLM) +#if defined(FLAMEGPU_USE_GLM) FLAMEGPU_AGENT_FUNCTION(ArrayOut_glm, MessageNone, MessageArray2D) { const unsigned int x = FLAMEGPU->getVariable("index", 0); const unsigned int y 
= FLAMEGPU->getVariable("index", 1); diff --git a/tests/test_cases/runtime/messaging/test_array_3d.cu b/tests/test_cases/runtime/messaging/test_array_3d.cu index 66c914475..191fd8668 100644 --- a/tests/test_cases/runtime/messaging/test_array_3d.cu +++ b/tests/test_cases/runtime/messaging/test_array_3d.cu @@ -326,7 +326,7 @@ TEST(TestMessage_Array3D, Moore2W) { } // Exception tests -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, DuplicateOutputException) { #else TEST(TestMessage_Array3D, DISABLED_DuplicateOutputException) { @@ -428,7 +428,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWrapOutOfBoundsX, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, MooreWrap_InitOutOfBoundsX) { #else TEST(TestMessage_Array3D, DISABLED_MooreWrap_InitOutOfBoundsX) { @@ -468,7 +468,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWrapOutOfBoundsY, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, MooreWrap_InitOutOfBoundsY) { #else TEST(TestMessage_Array3D, DISABLED_MooreWrap_InitOutOfBoundsY) { @@ -508,7 +508,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWrapOutOfBoundsZ, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, MooreWrap_InitOutOfBoundsZ) { #else TEST(TestMessage_Array3D, DISABLED_MooreWrap_InitOutOfBoundsZ) { @@ -548,7 +548,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreWrapBadRadius1, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, MooreWrap_BadRadius1) { #else TEST(TestMessage_Array3D, DISABLED_MooreWrap_BadRadius1) { @@ -588,7 +588,7 @@ 
FLAMEGPU_AGENT_FUNCTION(InMooreWrapBadRadius2, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, MooreWrap_BadRadius2) { #else TEST(TestMessage_Array3D, DISABLED_MooreWrap_BadRadius2) { @@ -628,7 +628,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreOutOfBoundsX, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, Moore_InitOutOfBoundsX) { #else TEST(TestMessage_Array3D, DISABLED_Moore_InitOutOfBoundsX) { @@ -668,7 +668,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreOutOfBoundsY, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, Moore_InitOutOfBoundsY) { #else TEST(TestMessage_Array3D, DISABLED_Moore_InitOutOfBoundsY) { @@ -708,7 +708,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreOutOfBoundsZ, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, Moore_InitOutOfBoundsZ) { #else TEST(TestMessage_Array3D, DISABLED_Moore_InitOutOfBoundsZ) { @@ -748,7 +748,7 @@ FLAMEGPU_AGENT_FUNCTION(InMooreBadRadius, MessageArray3D, MessageNone) { } return ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(TestMessage_Array3D, Moore_BadRadius) { #else TEST(TestMessage_Array3D, DISABLED_Moore_BadRadius) { @@ -898,9 +898,9 @@ void test_moore_wrapped_comradius( ASSERT_EQ(value_success, 1u); } } else { - // If the comradius would lead to double message reads, a device error is thrown when SEATBELTS is enabled + // If the comradius would lead to double message reads, a device error is thrown when FLAMEGPU_SEATBELTS is enabled // Behaviour is otherwise undefined -#if !defined(SEATBELTS) || SEATBELTS +#if 
!defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS EXPECT_THROW(simulation.step(), flamegpu::exception::DeviceError); #endif } @@ -1213,7 +1213,7 @@ TEST(TestRTCMessage_Array3D, ArrayVariable) { } } -#if defined(USE_GLM) +#if defined(FLAMEGPU_USE_GLM) FLAMEGPU_AGENT_FUNCTION(ArrayOut_glm, MessageNone, MessageArray3D) { const unsigned int x = FLAMEGPU->getVariable("index", 0); const unsigned int y = FLAMEGPU->getVariable("index", 1); diff --git a/tests/test_cases/runtime/messaging/test_brute_force.cu b/tests/test_cases/runtime/messaging/test_brute_force.cu index 3b5206b7e..a8ea41f78 100644 --- a/tests/test_cases/runtime/messaging/test_brute_force.cu +++ b/tests/test_cases/runtime/messaging/test_brute_force.cu @@ -663,7 +663,7 @@ TEST(TestRTCMessage_BruteForce, ArrayVariable) { } } -#if defined(USE_GLM) +#if defined(FLAMEGPU_USE_GLM) FLAMEGPU_AGENT_FUNCTION(ArrayOut_glm, MessageNone, MessageBruteForce) { const unsigned int index = FLAMEGPU->getVariable("index"); FLAMEGPU->message_out.setVariable("index", index); diff --git a/tests/test_cases/runtime/messaging/test_bucket.cu b/tests/test_cases/runtime/messaging/test_bucket.cu index 11d1babd6..0a100b618 100644 --- a/tests/test_cases/runtime/messaging/test_bucket.cu +++ b/tests/test_cases/runtime/messaging/test_bucket.cu @@ -792,7 +792,7 @@ TEST(TestRTCMessage_Bucket, ArrayVariable) { } } -#if defined(USE_GLM) +#if defined(FLAMEGPU_USE_GLM) FLAMEGPU_AGENT_FUNCTION(ArrayOut_glm, MessageNone, MessageBucket) { const unsigned int index = FLAMEGPU->getVariable("index"); glm::uvec3 t = glm::uvec3(index * 3, index * 7, index * 11); diff --git a/tests/test_cases/runtime/messaging/test_spatial_2d.cu b/tests/test_cases/runtime/messaging/test_spatial_2d.cu index fc7c8d6ec..aaf8fbb8f 100644 --- a/tests/test_cases/runtime/messaging/test_spatial_2d.cu +++ b/tests/test_cases/runtime/messaging/test_spatial_2d.cu @@ -639,7 +639,7 @@ TEST(RTCSpatial2DMessageTest, ArrayVariable) { } } -#if defined(USE_GLM) +#if defined(FLAMEGPU_USE_GLM) 
FLAMEGPU_AGENT_FUNCTION(ArrayOut_glm, MessageNone, MessageSpatial2D) { const unsigned int x = FLAMEGPU->getVariable("index", 0); const unsigned int y = FLAMEGPU->getVariable("index", 1); @@ -903,8 +903,8 @@ TEST(Spatial2DMessageTest, Wrapped6) { TEST(Spatial2DMessageTest, Wrapped7) { wrapped_2d_test(141.4f, -540.7f); } -#if !defined(SEATBELTS) || SEATBELTS -// Test that SEATBELTS catches out of bounds messages +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS +// Test that FLAMEGPU_SEATBELTS catches out of bounds messages TEST(Spatial2DMessageTest, Wrapped_OutOfBounds) { EXPECT_THROW(wrapped_2d_test(141.0f, -540.0f, 200.0f), exception::DeviceError); } diff --git a/tests/test_cases/runtime/messaging/test_spatial_3d.cu b/tests/test_cases/runtime/messaging/test_spatial_3d.cu index 2e63bd581..6a736c169 100644 --- a/tests/test_cases/runtime/messaging/test_spatial_3d.cu +++ b/tests/test_cases/runtime/messaging/test_spatial_3d.cu @@ -696,7 +696,7 @@ TEST(RTCSpatial3DMessageTest, ArrayVariable) { } } -#if defined(USE_GLM) +#if defined(FLAMEGPU_USE_GLM) FLAMEGPU_AGENT_FUNCTION(ArrayOut_glm, MessageNone, MessageSpatial3D) { const unsigned int x = FLAMEGPU->getVariable("index", 0); const unsigned int y = FLAMEGPU->getVariable("index", 1); @@ -972,8 +972,8 @@ TEST(Spatial3DMessageTest, Wrapped2) { TEST(Spatial3DMessageTest, Wrapped3) { wrapped_3d_test(-1401.5f, 5640.3f, -2008.8f); } -#if !defined(SEATBELTS) || SEATBELTS -// Test that SEATBELTS catches out of bounds messages +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS +// Test that FLAMEGPU_SEATBELTS catches out of bounds messages TEST(Spatial3DMessageTest, Wrapped_OutOfBounds) { EXPECT_THROW(wrapped_3d_test(141.0f, -540.0f, 0.0f, 200.0f), exception::DeviceError); } diff --git a/tests/test_cases/runtime/test_device_agent_creation.cu b/tests/test_cases/runtime/test_device_agent_creation.cu index 81fd1890f..a3a723020 100644 --- a/tests/test_cases/runtime/test_device_agent_creation.cu +++ 
b/tests/test_cases/runtime/test_device_agent_creation.cu @@ -1479,7 +1479,7 @@ TEST(DeviceAgentCreationTest, Output_Array) { EXPECT_EQ(is_1, AGENT_COUNT); // Original agents (unchanged) EXPECT_EQ(is_12, AGENT_COUNT); // New agents } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM FLAMEGPU_AGENT_FUNCTION(ArrayVarDeviceBirth_DefaultWorks_glm, MessageNone, MessageNone) { unsigned int i = FLAMEGPU->getVariable("id") * 3; FLAMEGPU->agent_out.setVariable("id", i); diff --git a/tests/test_cases/runtime/test_device_api.cu b/tests/test_cases/runtime/test_device_api.cu index 2d07daad6..5942ed2ad 100644 --- a/tests/test_cases/runtime/test_device_api.cu +++ b/tests/test_cases/runtime/test_device_api.cu @@ -165,7 +165,7 @@ TEST(DeviceAPITest, ArrayGet) { EXPECT_EQ(instance.getVariable("a4"), 16 + j); } } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM FLAMEGPU_AGENT_FUNCTION(agent_fn_da_set_glm, MessageNone, MessageNone) { // Read array from `array_var` // Store it's values back in `a1` -> `a4` diff --git a/tests/test_cases/runtime/test_device_environment.cu b/tests/test_cases/runtime/test_device_environment.cu index a7d577e17..e75f3e7a9 100644 --- a/tests/test_cases/runtime/test_device_environment.cu +++ b/tests/test_cases/runtime/test_device_environment.cu @@ -450,7 +450,7 @@ FLAMEGPU_AGENT_FUNCTION(get_array_shorthand, MessageNone, MessageNone) { } TEST_F(DeviceEnvironmentTest, Get_array_shorthand) { // It's no longer necessary to specify env property array length in agent functions when retrieiving them - // This test is better ran with SEATBELTS=ON, to catch the seatbelts checking + // This test is better ran with FLAMEGPU_SEATBELTS=ON, to catch the seatbelts checking // Setup agent fn ms->agent.newVariable("k"); AgentFunctionDescription deviceFn = ms->agent.newFunction("device_function", get_array_shorthand); @@ -492,7 +492,7 @@ TEST(RTCDeviceEnvironmentTest, get_array_shorthand) { const std::array t_out = population.at(0).getVariable("k"); ASSERT_EQ(t_in, t_out); } -#ifdef USE_GLM 
+#ifdef FLAMEGPU_USE_GLM FLAMEGPU_AGENT_FUNCTION(get_array_glm, MessageNone, MessageNone) { glm::vec3 t = FLAMEGPU->environment.getProperty("k"); FLAMEGPU->setVariable("k", 0, t[0]); diff --git a/tests/test_cases/runtime/test_device_macro_property.cu b/tests/test_cases/runtime/test_device_macro_property.cu index 045b61b5c..77e0d75b1 100644 --- a/tests/test_cases/runtime/test_device_macro_property.cu +++ b/tests/test_cases/runtime/test_device_macro_property.cu @@ -1,7 +1,7 @@ /** * Tests of class: DeviceMacroProperty - * WriteRead: Check that SEATBELTS catches a read after write in same agent fn - * ReadWrite: Check that SEATBELTS catches a write after read in same agent fn + * WriteRead: Check that FLAMEGPU_SEATBELTS catches a read after write in same agent fn + * ReadWrite: Check that FLAMEGPU_SEATBELTS catches a write after read in same agent fn * add: Use DeviceAPI operator+=, then read the value back in a subsequent agent function * add2: Use DeviceAPI operator+, and read the returned result * sub: Use DeviceAPI operator-=, then read the value back in a subsequent agent function @@ -34,7 +34,7 @@ FLAMEGPU_AGENT_FUNCTION(WriteRead, flamegpu::MessageNone, flamegpu::MessageNone) FLAMEGPU->setVariable("b", FLAMEGPU->environment.getMacroProperty("int")); return flamegpu::ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(DeviceMacroPropertyTest, WriteRead) { #else TEST(DeviceMacroPropertyTest, DISABLED_WriteRead) { @@ -62,7 +62,7 @@ FLAMEGPU_AGENT_FUNCTION(ReadWrite, flamegpu::MessageNone, flamegpu::MessageNone) FLAMEGPU->setVariable("b", t); return flamegpu::ALIVE; } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(DeviceMacroPropertyTest, ReadWrite) { #else TEST(DeviceMacroPropertyTest, DISABLED_ReadWrite) { @@ -737,7 +737,7 @@ FLAMEGPU_AGENT_FUNCTION(WriteRead, flamegpu::MessageNone, flamegpu::MessageNone) return flamegpu::ALIVE; } )###"; -#if 
!defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(DeviceMacroPropertyTest, RTC_WriteRead) { #else TEST(DeviceMacroPropertyTest, DISABLED_RTC_WriteRead) { diff --git a/tests/test_cases/runtime/test_host_agent_creation.cu b/tests/test_cases/runtime/test_host_agent_creation.cu index 5879350b3..baeb4a9b9 100644 --- a/tests/test_cases/runtime/test_host_agent_creation.cu +++ b/tests/test_cases/runtime/test_host_agent_creation.cu @@ -787,7 +787,7 @@ TEST(HostAgentCreationTest, AgentID_MultipleAgents) { } ASSERT_EQ(ids_b.size(), 2 * POP_SIZE); // No collisions } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM FLAMEGPU_STEP_FUNCTION(ArrayVarHostBirthSetGet_glm) { auto t = FLAMEGPU->agent("agent_name"); for (int i = 0; i < static_cast(AGENT_COUNT); ++i) { diff --git a/tests/test_cases/runtime/test_host_environment.cu b/tests/test_cases/runtime/test_host_environment.cu index 37c246a2e..0d31d95a8 100644 --- a/tests/test_cases/runtime/test_host_environment.cu +++ b/tests/test_cases/runtime/test_host_environment.cu @@ -41,7 +41,7 @@ class MiniSim { ed.newProperty("uint64_t_", static_cast(TEST_VALUE)); ed.newProperty("read_only", static_cast(TEST_VALUE), true); ed.newProperty("bool", true); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ed.newProperty("vec3_", static_cast(TEST_VALUE)); #endif @@ -57,7 +57,7 @@ class MiniSim { ed.newProperty("uint64_t_a_", makeInit()); ed.newProperty("read_only_a", makeInit(), true); ed.newProperty("bool_a", {true, false, true}); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM ed.newProperty("vec3_a_", makeInit()); #endif } @@ -1117,7 +1117,7 @@ TEST_F(HostEnvironmentTest, BoolWorks) { ms->run(1); } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM FLAMEGPU_STEP_FUNCTION(get_set_vec3_t) { // Test Set + Get (Description set value) EXPECT_EQ(FLAMEGPU->environment.setProperty("vec3_", glm::vec3(static_cast(TEST_VALUE) * 2)), glm::vec3(static_cast(TEST_VALUE))); diff --git a/tests/test_cases/runtime/test_host_macro_property.cu 
b/tests/test_cases/runtime/test_host_macro_property.cu index 2e95334ed..bb1fdf099 100644 --- a/tests/test_cases/runtime/test_host_macro_property.cu +++ b/tests/test_cases/runtime/test_host_macro_property.cu @@ -488,7 +488,7 @@ TEST(HostMacroPropertyTest, ArithmeticTest) { } /* These tests, test functionality which is not exposed unless LayerDescription allows agent fn and host fn in the same layer -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(HostMacroPropertyTest, ReadSameLayerAsAgentWrite) { #else TEST(HostMacroPropertyTest, DISABLED_ReadSameLayerAsAgentWrite) { @@ -517,7 +517,7 @@ TEST(HostMacroPropertyTest, DISABLED_ReadSameLayerAsAgentWrite) { cudaSimulation.setPopulationData(population); ASSERT_THROW(cudaSimulation.simulate(), flamegpu::exception::InvalidOperation); } -#if !defined(SEATBELTS) || SEATBELTS +#if !defined(FLAMEGPU_SEATBELTS) || FLAMEGPU_SEATBELTS TEST(HostMacroPropertyTest, WriteSameLayerAsAgentRead) { #else TEST(HostMacroPropertyTest, DISABLED_WriteSameLayerAsAgentRead) { diff --git a/tests/test_cases/runtime/test_rtc_device_api.cu b/tests/test_cases/runtime/test_rtc_device_api.cu index 9978069b4..e634e2adc 100644 --- a/tests/test_cases/runtime/test_rtc_device_api.cu +++ b/tests/test_cases/runtime/test_rtc_device_api.cu @@ -330,7 +330,7 @@ TEST(DeviceRTCAPITest, AgentFunction_array_set) { } } -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM const char* rtc_array_get_agent_func_glm = R"###( FLAMEGPU_AGENT_FUNCTION(rtc_test_func, flamegpu::MessageNone, flamegpu::MessageNone) { // Read array from `array_var` diff --git a/tests/test_cases/sim/test_RunPlan.cu b/tests/test_cases/sim/test_RunPlan.cu index 03230da8d..307de04ef 100644 --- a/tests/test_cases/sim/test_RunPlan.cu +++ b/tests/test_cases/sim/test_RunPlan.cu @@ -80,7 +80,7 @@ TEST(TestRunPlan, setProperty) { environment.newProperty("f_a", {-1.0f, 0.0f, 1.0f}); environment.newProperty("i_a", {-1, 0, 1 }); environment.newProperty("u_a", {0, 1, 2 }); 
-#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM environment.newProperty("ivec3", {}); environment.newProperty("ivec32", {}); environment.newProperty("ivec33", {}); @@ -102,7 +102,7 @@ TEST(TestRunPlan, setProperty) { plan.setProperty("u_a", 0, 3u); plan.setProperty("u_a", 1, 4u); plan.setProperty("u_a", 2, 5u); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM const glm::ivec3 ivec3_1_check = glm::ivec3{ 1, 2, 3 }; const std::array ivec3_2_check = { glm::ivec3{4, 5, 6}, glm::ivec3{7, 8, 9} }; const std::array ivec3_3_check = @@ -121,7 +121,7 @@ TEST(TestRunPlan, setProperty) { EXPECT_EQ(plan.getProperty("u_a", 0), 3u); EXPECT_EQ(plan.getProperty("u_a", 1), 4u); EXPECT_EQ(plan.getProperty("u_a", 2), 5u); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_EQ(plan.getProperty("ivec3"), ivec3_1_check); EXPECT_EQ((plan.getProperty)("ivec33"), ivec3_3_check); EXPECT_EQ(plan.getProperty("ivec32", 0), ivec3_2_check[0]); @@ -142,7 +142,7 @@ TEST(TestRunPlan, setProperty) { plan.setProperty("u_a", 0, 13u); plan.setProperty("u_a", 1, 14u); plan.setProperty("u_a", 2, 15u); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM plan.setProperty("ivec3", glm::ivec3{ 31, 32, 33 }); const std::array ivec3_3_check2 = { glm::ivec3{ 41, 42, 43 }, glm::ivec3{44, 45, 46}, glm::ivec3{47, 48, 49} }; @@ -160,7 +160,7 @@ TEST(TestRunPlan, setProperty) { EXPECT_EQ(plan.getProperty("u_a", 0), 13u); EXPECT_EQ(plan.getProperty("u_a", 1), 14u); EXPECT_EQ(plan.getProperty("u_a", 2), 15u); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_EQ(plan.getProperty("ivec3"), glm::ivec3(31, 32, 33)); EXPECT_EQ((plan.getProperty)("ivec33"), ivec3_3_check2); EXPECT_EQ(plan.getProperty("ivec32", 0), ivec3_2_check[1]); @@ -202,7 +202,7 @@ TEST(TestRunPlan, setProperty) { EXPECT_THROW((plan.getProperty("u_a", 0u)), flamegpu::exception::InvalidEnvPropertyType); EXPECT_THROW((plan.getProperty("i_a", static_cast(-1))), exception::OutOfBoundsException); EXPECT_THROW((plan.getProperty("i_a", 4u)), exception::OutOfBoundsException); -#ifdef USE_GLM 
+#ifdef FLAMEGPU_USE_GLM EXPECT_THROW((plan.setProperty)("ivec32", 3u, {}), exception::OutOfBoundsException); // Out of bounds EXPECT_THROW((plan.setProperty)("ivec33", 4u, {}), exception::OutOfBoundsException); // Out of bounds EXPECT_THROW((plan.getProperty)("ivec32", 3u), exception::OutOfBoundsException); // Out of bounds diff --git a/tests/test_cases/sim/test_RunPlanVector.cu b/tests/test_cases/sim/test_RunPlanVector.cu index 4ab89c2f5..c1e021a3b 100644 --- a/tests/test_cases/sim/test_RunPlanVector.cu +++ b/tests/test_cases/sim/test_RunPlanVector.cu @@ -101,7 +101,7 @@ TEST(TestRunPlanVector, setProperty) { environment.newProperty("i", iOriginal); environment.newProperty("u3", u3Original); environment.newProperty("d3", d3Original); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM environment.newProperty("ivec3", {}); environment.newProperty("ivec32", {}); environment.newProperty("ivec33", {}); @@ -126,7 +126,7 @@ TEST(TestRunPlanVector, setProperty) { plans.setProperty("d3", 0, d3New[0]); plans.setProperty("d3", 1, d3New[1]); plans.setProperty("d3", 2, d3New[2]); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM const glm::ivec3 ivec3_1_check = glm::ivec3{ 1, 2, 3 }; const std::array ivec3_2_check = { glm::ivec3{4, 5, 6}, glm::ivec3{7, 8, 9} }; const std::array ivec3_3_check = @@ -142,7 +142,7 @@ TEST(TestRunPlanVector, setProperty) { // Extra brackets allow template commas in macros. 
EXPECT_EQ((plan.getProperty("u3")), u3New); EXPECT_EQ((plan.getProperty("d3")), d3New); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_EQ(plan.getProperty("ivec3"), ivec3_1_check); EXPECT_EQ((plan.getProperty)("ivec33"), ivec3_3_check); EXPECT_EQ(plan.getProperty("ivec32", 0), ivec3_2_check[0]); @@ -168,7 +168,7 @@ TEST(TestRunPlanVector, setProperty) { EXPECT_THROW((plans.setProperty("u3", 0u, 3.f)), flamegpu::exception::InvalidEnvPropertyType); EXPECT_THROW((plans.setProperty("d3", static_cast(-1), 3)), exception::OutOfBoundsException); EXPECT_THROW((plans.setProperty("d3", 4u, 3)), exception::OutOfBoundsException); -#ifdef USE_GLM +#ifdef FLAMEGPU_USE_GLM EXPECT_THROW((plans.setProperty)("ivec32", 3u, {}), exception::OutOfBoundsException); // Out of bounds EXPECT_THROW((plans.setProperty)("ivec33", 4u, {}), exception::OutOfBoundsException); // Out of bounds #endif diff --git a/tests/test_cases/util/test_compute_capability.cu b/tests/test_cases/util/test_compute_capability.cu index 528ff5b66..c28db23ba 100644 --- a/tests/test_cases/util/test_compute_capability.cu +++ b/tests/test_cases/util/test_compute_capability.cu @@ -35,8 +35,8 @@ TEST(TestUtilComputeCapability, getComputeCapability) { // Test getting the minimum compiled cuda capabillity. TEST(TestUtilComputeCapability, minimumCompiledComputeCapability) { // If the macro is defined, the returned value should match, otherwise it should be 0. - #if defined(MIN_CUDA_ARCH) - EXPECT_EQ(util::detail::compute_capability::minimumCompiledComputeCapability(), MIN_CUDA_ARCH); + #if defined(FLAMEGPU_TEST_MIN_CUDA_ARCH) + EXPECT_EQ(util::detail::compute_capability::minimumCompiledComputeCapability(), FLAMEGPU_TEST_MIN_CUDA_ARCH); #else EXPECT_EQ(util::detail::compute_capability::minimumCompiledComputeCapability(), 0); #endif