---
name: "Business Tests"

on:
  pull_request:
    paths-ignore:
      - docs/**
    branches:
      - develop
      - release/**
      - main
  workflow_dispatch:

jobs:
  business-test:
    runs-on: ubuntu-latest
    steps:
      ##############
      ### Set-Up ###
      ##############
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: recursive
      -
        name: Set-Up JDK 11
        uses: actions/setup-java@v3.4.0
        with:
          java-version: '11'
          distribution: 'adopt'
          cache: 'maven'
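      # Note: setup-java's built-in 'cache: maven' caches ~/.m2/repository
      # between runs, keyed on the checked-in pom files, so the Maven builds
      # below do not re-download unchanged dependencies.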
      -
        name: Cache ContainerD Image Layers
        uses: actions/cache@v3
        with:
          path: /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs
          key: ${{ runner.os }}-io.containerd.snapshotter.v1.overlayfs
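      # actions/cache restores this directory on the runner host; the kind node
      # created below bind-mounts it (see extraMounts), so image layers pulled
      # in earlier runs do not have to be downloaded again.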
      -
        name: Set-Up Kubectl
        uses: azure/setup-kubectl@v3.0
      -
        name: Helm Set-Up
        uses: azure/setup-helm@v3.3
        with:
          version: v3.8.1
      -
        name: Create k8s Kind Cluster configuration (kind.config.yaml)
        run: |-
          export MAVEN_REPOSITORY=$(./mvnw help:evaluate -Dexpression=settings.localRepository -q -DforceStdout)
          cat << EOF > kind.config.yaml
          ---
          kind: Cluster
          apiVersion: kind.x-k8s.io/v1alpha4
          nodes:
          - role: control-plane
            extraMounts:
            - hostPath: ${PWD}
              containerPath: /srv/product-edc
            - hostPath: ${MAVEN_REPOSITORY}
              containerPath: /srv/m2-repository
            - hostPath: /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs
              containerPath: /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs
          EOF
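      # The extraMounts make three host directories visible inside the kind node:
      # the repository checkout (/srv/product-edc), the local Maven repository
      # (/srv/m2-repository), and the containerd snapshotter directory holding
      # the cached image layers restored above.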
      -
        name: Create k8s Kind Cluster
        uses: helm/kind-action@v1.3.0
        with:
          config: kind.config.yaml
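      # helm/kind-action creates the cluster under its default name (assumed to
      # be "chart-testing" unless overridden), which is why later steps look the
      # name up via 'kind get clusters' instead of hard-coding it.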
      ##############################################
      ### Build and load recent images into KinD ###
      ##############################################
      -
        name: Build edc with Gradle to get the latest snapshots
        run: ./gradlew publishToMavenLocal
        working-directory: edc
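      # Publishing the edc submodule to the local Maven repository lets the
      # Maven builds below resolve the freshly built EDC snapshot artifacts
      # instead of fetching them remotely.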
      -
        name: Build edc-controlplane-postgresql-hashicorp-vault
        run: |-
          ./mvnw -s settings.xml -B -pl .,edc-controlplane/edc-controlplane-postgresql-hashicorp-vault -am package -Dmaven.test.skip=true -Pwith-docker-image
        env:
          GITHUB_PACKAGE_USERNAME: ${{ github.actor }}
          GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
      -
        name: Build edc-dataplane-hashicorp-vault
        run: |-
          ./mvnw -s settings.xml -B -pl .,edc-dataplane/edc-dataplane-hashicorp-vault -am package -Dmaven.test.skip=true -Pwith-docker-image
        env:
          GITHUB_PACKAGE_USERNAME: ${{ github.actor }}
          GITHUB_PACKAGE_PASSWORD: ${{ secrets.CXNG_GHCR_PAT }}
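      # -pl restricts the Maven reactor to the root pom plus the listed module,
      # -am additionally builds the module's dependencies, and the
      # with-docker-image profile produces the :latest container images that are
      # loaded into kind in the next step.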
      -
        name: Load images into KinD
        run: |-
          kind get clusters | xargs -n1 kind load docker-image edc-controlplane-postgresql-hashicorp-vault:latest edc-dataplane-hashicorp-vault:latest --name
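          # xargs appends each cluster name after --name; with the assumed
          # default kind-action cluster this expands to, e.g.:
          #   kind load docker-image edc-controlplane-postgresql-hashicorp-vault:latest edc-dataplane-hashicorp-vault:latest --name chart-testing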

      ############################################
      ### Prepare And Install Test Environment ###
      ############################################
      -
        name: Define test environment variables
        run: |-
          # Define endpoints
          echo "SOKRATES_DATA_MANAGEMENT_URL=http://sokrates-edc-controlplane:8181/data" | tee -a ${GITHUB_ENV}
          echo "SOKRATES_IDS_URL=http://sokrates-edc-controlplane:8282/api/v1/ids" | tee -a ${GITHUB_ENV}
          echo "SOKRATES_DATA_PLANE_URL=http://sokrates-edc-dataplane:8185/api/public" | tee -a ${GITHUB_ENV}
          echo "PLATO_DATA_MANAGEMENT_URL=http://plato-edc-controlplane:8181/data" | tee -a ${GITHUB_ENV}
          echo "PLATO_IDS_URL=http://plato-edc-controlplane:8282/api/v1/ids" | tee -a ${GITHUB_ENV}
          echo "PLATO_DATA_PLANE_URL=http://plato-edc-dataplane:8185/api/public" | tee -a ${GITHUB_ENV}
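          # Appending KEY=VALUE lines to $GITHUB_ENV exports them to every later
          # step, so the test pod below can read e.g. ${SOKRATES_DATA_MANAGEMENT_URL}
          # from the step environment; tee -a also echoes each line to the job log.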
      -
        name: Install test environment via Helm
        run: |-
          # Update helm dependencies
          helm dependency update edc-tests/src/main/resources/deployment/helm/all-in-one

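          # The environment comes up in two phases: first only the supporting
          # infrastructure (DAPS, Vaults, PostgreSQL), then a 'helm upgrade
          # --install' that additionally enables the Control-/DataPlanes and
          # backend applications on top of it.
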
          # Install the all-in-one supporting infrastructure environment (daps, vault, pgsql)
          helm install test-environment edc-tests/src/main/resources/deployment/helm/all-in-one \
            --set platoedccontrolplane.image.tag=latest \
            --set sokratesedccontrolplane.image.tag=latest \
            --set platoedcdataplane.image.tag=latest \
            --set sokratesedcdataplane.image.tag=latest \
            --set idsdaps.enabled=true \
            --set platovault.enabled=true \
            --set platopostgresql.enabled=true \
            --set sokratesvault.enabled=true \
            --set sokratespostgresql.enabled=true \
            --set platoedccontrolplane.enabled=false \
            --set platoedcdataplane.enabled=false \
            --set platobackendapplication.enabled=false \
            --set sokratesedccontrolplane.enabled=false \
            --set sokratesedcdataplane.enabled=false \
            --set sokratesbackendapplication.enabled=false \
            --set sokrates-backend-application.persistence.enabled=false \
            --set plato-backend-application.persistence.enabled=false \
            --wait-for-jobs --timeout=120s

          # GH runners are CPU-constrained, so give Helm some time to register all resources w/ k8s
          sleep 5s

          # Wait for the supporting infrastructure to become ready (DAPS, Vaults, PostgreSQL)
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=idsdaps --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=idsdaps && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=sokratesvault --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=sokratesvault && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=platovault --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=platovault && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=sokratespostgresql --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=sokratespostgresql && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=platopostgresql --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=platopostgresql && exit 1 )
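          # The '|| ( kubectl logs ... && exit 1 )' idiom dumps the pod logs
          # before failing the job whenever a readiness wait times out.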

          # Install the all-in-one Control-/DataPlanes and backend-services
          helm upgrade --install test-environment edc-tests/src/main/resources/deployment/helm/all-in-one \
            --set platoedccontrolplane.image.tag=latest \
            --set sokratesedccontrolplane.image.tag=latest \
            --set platoedcdataplane.image.tag=latest \
            --set sokratesedcdataplane.image.tag=latest \
            --set idsdaps.enabled=true \
            --set platovault.enabled=true \
            --set platopostgresql.enabled=true \
            --set sokratesvault.enabled=true \
            --set sokratespostgresql.enabled=true \
            --set platoedccontrolplane.enabled=true \
            --set platoedcdataplane.enabled=true \
            --set platobackendapplication.enabled=true \
            --set sokratesedccontrolplane.enabled=true \
            --set sokratesedcdataplane.enabled=true \
            --set sokratesbackendapplication.enabled=true \
            --set sokrates-backend-application.persistence.enabled=true \
            --set plato-backend-application.persistence.enabled=true \
            --wait-for-jobs --timeout=120s

          # GH runners are CPU-constrained, so give Helm some time to register all resources w/ k8s
          sleep 5s

          # Wait for Control-/DataPlane and backend-service to become ready
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=sokratesbackendapplication --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=sokratesbackendapplication && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=platobackendapplication --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=platobackendapplication && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=sokratesedcdataplane --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=sokratesedcdataplane && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=platoedcdataplane --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=platoedcdataplane && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=sokratesedccontrolplane --timeout=600s || ( kubectl logs -l app.kubernetes.io/name=sokratesedccontrolplane && exit 1 )
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=platoedccontrolplane --timeout=600s || ( kubectl logs -l app.kubernetes.io/name=platoedccontrolplane && exit 1 )

      ##############################################
      ### Run Business Tests inside kind cluster ###
      ##############################################
      -
        name: Run Business Tests
        run: |-
          cat << EOF > pod.json
          {
            "apiVersion": "v1",
            "kind": "Pod",
            "spec": {
              "containers": [
                {
                  "args": [
                    "-c",
                    "cd /product-edc && ./mvnw -s settings.xml -B -Pbusiness-tests -pl edc-tests test -Dtest=net.catenax.edc.tests.features.RunCucumberTest"
                  ],
                  "command": [
                    "/bin/sh"
                  ],
          EOF

          # Ugly hack to get env vars passed into the k8s run: if '--overrides' is set, '--env' is ignored :(
          cat << EOF >> pod.json
                  "env": [
                    {"name": "SOKRATES_DATA_MANAGEMENT_API_AUTH_KEY", "value": "${SOKRATES_DATA_MANAGEMENT_API_AUTH_KEY}"},
                    {"name": "PLATO_DATA_MANAGEMENT_API_AUTH_KEY", "value": "${PLATO_DATA_MANAGEMENT_API_AUTH_KEY}"},
                    {"name": "SOKRATES_DATA_MANAGEMENT_URL", "value": "${SOKRATES_DATA_MANAGEMENT_URL}"},
                    {"name": "SOKRATES_IDS_URL", "value": "${SOKRATES_IDS_URL}"},
                    {"name": "SOKRATES_DATA_PLANE_URL", "value": "${SOKRATES_DATA_PLANE_URL}"},
                    {"name": "PLATO_DATA_MANAGEMENT_URL", "value": "${PLATO_DATA_MANAGEMENT_URL}"},
                    {"name": "PLATO_IDS_URL", "value": "${PLATO_IDS_URL}"},
                    {"name": "PLATO_DATA_PLANE_URL", "value": "${PLATO_DATA_PLANE_URL}"}
                  ],
          EOF

          cat << EOF >> pod.json
                  "image": "openjdk:11-jdk-slim",
                  "name": "edc-tests",
                  "volumeMounts": [
                    {
                      "mountPath": "/product-edc",
                      "name": "product-edc"
                    },
                    {
                      "mountPath": "/root/.m2/repository",
                      "name": "m2-repository"
                    }
                  ]
                }
              ],
              "dnsPolicy": "ClusterFirst",
              "restartPolicy": "Never",
              "volumes": [
                {
                  "hostPath": {
                    "path": "/srv/product-edc"
                  },
                  "name": "product-edc"
                },
                {
                  "hostPath": {
                    "path": "/srv/m2-repository"
                  },
                  "name": "m2-repository"
                }
              ]
            }
          }
          EOF
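          # The resulting pod mounts /srv/product-edc and /srv/m2-repository from
          # the kind node (the directories exposed via extraMounts in
          # kind.config.yaml), so the tests run against the checked-out sources
          # and the pre-populated local Maven repository.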

          kubectl run -i --image=openjdk:11-jdk-slim --restart=Never --rm edc-tests --overrides="$(cat pod.json)"
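          # -i attaches to the container so test output streams into the job log,
          # and a failing container should make this step fail; --rm deletes the
          # pod afterwards.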
| 251 | +
|
| 252 | + ################# |
| 253 | + ### Tear Down ### |
| 254 | + ################# |
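      # 'if: always()' makes sure the cluster is torn down even when an earlier step failed.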
      -
        name: Destroy the kind cluster
        if: always()
        run: >-
          kind get clusters | xargs -n1 kind delete cluster --name