Commit 7e1d2b1
Initial config and linux-x86_64-cmake build job only
Summary: This commit is the first in a series in an attempt to incrementally enable all jobs currently performed by CircleCI. It includes the main configuration files provided by the GitHub team, plus one build.

Original PR: facebookresearch#3325

Reviewed By: junjieqi

Differential Revision: D56671582

fbshipit-source-id: c8a21cd69aabaf86134eb86753e90b1facf51bc3
1 parent c5599a0 commit 7e1d2b1

File tree

3 files changed: +220 −0 lines changed
.github/actions/build_cmake/action.yml

+103
@@ -0,0 +1,103 @@
name: Build cmake
inputs:
  opt_level:
    description: 'The optimization level'
    required: false
    default: generic
  gpu:
    description: 'The GPU to use'
    required: false
    default: OFF
  raft:
    description: 'The raft to use'
    required: false
    default: OFF
runs:
  using: composite
  steps:
    - name: Setup miniconda
      uses: conda-incubator/setup-miniconda@v3.0.3
      with:
        python-version: '3.11'
        miniconda-version: latest
    - name: Set up environment
      shell: bash
      run: |
        conda config --set solver libmamba
        conda update -y -q conda
    - name: Install env using main channel
      if: inputs.raft == 'OFF'
      shell: bash
      run: |
        conda install -y -q python=3.11 cmake make swig mkl=2023 mkl-devel=2023 numpy scipy pytest gxx_linux-64=11.2 sysroot_linux-64
    - name: Install env using conda-forge channel
      if: inputs.raft == 'ON'
      shell: bash
      run: |
        conda install -y -q python=3.11 cmake make swig mkl=2023 mkl-devel=2023 numpy scipy pytest gxx_linux-64=11.2 sysroot_linux-64=2.28 libraft cuda-version=11.8 cuda-toolkit -c rapidsai-nightly -c "nvidia/label/cuda-11.8.0" -c conda-forge
    - name: Install CUDA
      if: inputs.gpu == 'ON' && inputs.raft == 'OFF'
      shell: bash
      run: |
        conda install -y -q cuda-toolkit -c "nvidia/label/cuda-11.8.0"
    - name: Build all targets
      shell: bash
      run: |
        eval "$(conda shell.bash hook)"
        conda activate
        cmake -B build \
              -DBUILD_TESTING=ON \
              -DBUILD_SHARED_LIBS=ON \
              -DFAISS_ENABLE_GPU=${{ inputs.gpu }} \
              -DFAISS_ENABLE_RAFT=${{ inputs.raft }} \
              -DFAISS_OPT_LEVEL=${{ inputs.opt_level }} \
              -DFAISS_ENABLE_C_API=ON \
              -DPYTHON_EXECUTABLE=$CONDA/bin/python \
              -DCMAKE_BUILD_TYPE=Release \
              -DBLA_VENDOR=Intel10_64_dyn \
              -DCMAKE_CUDA_FLAGS="-gencode arch=compute_75,code=sm_75" \
              .
        make -k -C build -j$(nproc)
    - name: C++ tests
      shell: bash
      run: |
        export GTEST_OUTPUT="xml:$(realpath .)/test-results/googletest/"
        make -C build test
    - name: Install Python extension
      shell: bash
      working-directory: build/faiss/python
      run: |
        $CONDA/bin/python setup.py install
    - name: Install pytest
      shell: bash
      run: |
        conda install -y pytest
        echo "$CONDA/bin" >> $GITHUB_PATH
    - name: Python tests (CPU only)
      if: inputs.gpu == 'OFF'
      shell: bash
      run: |
        conda install -y -q pytorch -c pytorch
        pytest --junitxml=test-results/pytest/results.xml tests/test_*.py
        pytest --junitxml=test-results/pytest/results-torch.xml tests/torch_*.py
    - name: Python tests (CPU + GPU)
      if: inputs.gpu == 'ON'
      shell: bash
      run: |
        conda install -y -q pytorch pytorch-cuda=11.8 -c pytorch -c nvidia/label/cuda-11.8.0
        pytest --junitxml=test-results/pytest/results.xml tests/test_*.py
        pytest --junitxml=test-results/pytest/results-torch.xml tests/torch_*.py
        cp tests/common_faiss_tests.py faiss/gpu/test
        pytest --junitxml=test-results/pytest/results-gpu.xml faiss/gpu/test/test_*.py
        pytest --junitxml=test-results/pytest/results-gpu-torch.xml faiss/gpu/test/torch_*.py
    - name: Test avx2 loading
      if: inputs.opt_level == 'avx2'
      shell: bash
      run: |
        FAISS_DISABLE_CPU_FEATURES=AVX2 LD_DEBUG=libs $CONDA/bin/python -c "import faiss" 2>&1 | grep faiss.so
        LD_DEBUG=libs $CONDA/bin/python -c "import faiss" 2>&1 | grep faiss_avx2.so
    - name: Upload test results
      uses: actions/upload-artifact@v4.3.1
      with:
        name: test-results-${{ inputs.opt_level }}-${{ inputs.gpu }}-${{ inputs.raft }}
        path: test-results
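Note: the workflow added in this commit calls this composite action only with its defaults. For illustration, a later job could override the inputs via `with:` to exercise the avx2 path; the job name and values below are a hypothetical sketch, not part of this commit:

  linux-x86_64-avx2-cmake:            # hypothetical job, shown for illustration only
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4.1.1
      - uses: ./.github/actions/build_cmake
        with:
          opt_level: avx2             # non-default input; triggers the "Test avx2 loading" step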
.github/actions/build_conda/action.yml

+98
@@ -0,0 +1,98 @@
name: Build conda
description: Build conda
inputs:
  label:
    description: "Label"
    default: ""
    required: false
  cuda:
    description: "cuda"
    default: ""
    required: false
  raft:
    description: "raft"
    default: ""
    required: false
  compiler_version:
    description: "compiler_version"
    default: ""
    required: false
runs:
  using: composite
  steps:
    - name: Choose shell
      shell: bash
      id: choose_shell
      run: |
        # if runner.os != 'Windows' use bash, else use pwsh
        if [ "${{ runner.os }}" != "Windows" ]; then
          echo "shell=bash" >> "$GITHUB_OUTPUT"
        else
          echo "shell=pwsh" >> "$GITHUB_OUTPUT"
        fi
    - name: Setup miniconda
      uses: conda-incubator/setup-miniconda@v3.0.3
      with:
        python-version: '3.11'
        miniconda-version: latest
    - name: Install conda build tools
      shell: ${{ steps.choose_shell.outputs.shell }}
      run: |
        # conda config --set solver libmamba
        # conda config --set verbosity 3
        conda update -y -q conda
        conda install -y -q conda-build
    - name: Enable anaconda uploads
      if: inputs.label != ''
      shell: ${{ steps.choose_shell.outputs.shell }}
      env:
        PACKAGE_TYPE: inputs.label
      run: |
        conda install -y -q anaconda-client
        conda config --set anaconda_upload yes
    - name: Conda build (CPU)
      if: inputs.label == '' && inputs.cuda == ''
      shell: ${{ steps.choose_shell.outputs.shell }}
      working-directory: conda
      run: |
        conda build faiss --python 3.11 -c pytorch
    - name: Conda build (CPU) w/ anaconda upload
      if: inputs.label != '' && inputs.cuda == ''
      shell: ${{ steps.choose_shell.outputs.shell }}
      working-directory: conda
      env:
        PACKAGE_TYPE: inputs.label
      run: |
        conda build faiss --user pytorch --label ${{ inputs.label }} -c pytorch
    - name: Conda build (GPU)
      if: inputs.label == '' && inputs.cuda != '' && inputs.raft == ''
      shell: ${{ steps.choose_shell.outputs.shell }}
      working-directory: conda
      run: |
        conda build faiss-gpu --variants '{ "cudatoolkit": "${{ inputs.cuda }}", "c_compiler_version": "${{ inputs.compiler_version }}", "cxx_compiler_version": "${{ inputs.compiler_version }}" }' \
          -c pytorch -c nvidia/label/cuda-${{ inputs.cuda }} -c nvidia
    - name: Conda build (GPU) w/ anaconda upload
      if: inputs.label != '' && inputs.cuda != '' && inputs.raft == ''
      shell: ${{ steps.choose_shell.outputs.shell }}
      working-directory: conda
      env:
        PACKAGE_TYPE: inputs.label
      run: |
        conda build faiss-gpu --variants '{ "cudatoolkit": "${{ inputs.cuda }}", "c_compiler_version": "${{ inputs.compiler_version }}", "cxx_compiler_version": "${{ inputs.compiler_version }}" }' \
          --user pytorch --label ${{ inputs.label }} -c pytorch -c nvidia/label/cuda-${{ inputs.cuda }} -c nvidia
    - name: Conda build (GPU w/ RAFT)
      if: inputs.label == '' && inputs.cuda != '' && inputs.raft != ''
      shell: ${{ steps.choose_shell.outputs.shell }}
      working-directory: conda
      run: |
        conda build faiss-gpu-raft --variants '{ "cudatoolkit": "${{ inputs.cuda }}", "c_compiler_version": "${{ inputs.compiler_version }}", "cxx_compiler_version": "${{ inputs.compiler_version }}" }' \
          -c pytorch -c nvidia/label/cuda-${{ inputs.cuda }} -c nvidia -c rapidsai -c rapidsai-nightly -c conda-forge
    - name: Conda build (GPU w/ RAFT) w/ anaconda upload
      if: inputs.label != '' && inputs.cuda != '' && inputs.raft != ''
      shell: ${{ steps.choose_shell.outputs.shell }}
      working-directory: conda
      env:
        PACKAGE_TYPE: inputs.label
      run: |
        conda build faiss-gpu-raft --variants '{ "cudatoolkit": "${{ inputs.cuda }}", "c_compiler_version": "${{ inputs.compiler_version }}", "cxx_compiler_version": "${{ inputs.compiler_version }}" }' \
          --user pytorch --label ${{ inputs.label }} -c pytorch -c nvidia/label/cuda-${{ inputs.cuda }} -c nvidia -c rapidsai -c rapidsai-nightly -c conda-forge
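Note: no workflow job calls this packaging action yet in this commit. As a minimal sketch of how one could, with the label routing the build to the anaconda-upload variant and the cuda/compiler inputs forwarded to conda build as variants (job name and values are hypothetical examples only):

  linux-x86_64-gpu-packages:          # hypothetical job, shown for illustration only
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4.1.1
      - uses: ./.github/actions/build_conda
        with:
          label: nightly              # non-empty label selects the "w/ anaconda upload" steps
          cuda: "11.4.4"              # passed through as the cudatoolkit conda-build variant
          compiler_version: "11.2"    # passed through as c/cxx_compiler_version variants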

.github/workflows/build.yml

+19
@@ -0,0 +1,19 @@
name: Build
on:
  workflow_dispatch:
  pull_request:
    branches:
      - main
  push:
    tags:
      - 'v*'
env:
  OMP_NUM_THREADS: '10'
  MKL_THREADING_LAYER: GNU
jobs:
  linux-x86_64-cmake:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4.1.1
      - uses: ./.github/actions/build_cmake
