From b67df83faf3174f74e2dfcb413edf8915f26f1f2 Mon Sep 17 00:00:00 2001 From: Tuncay Tunc Date: Tue, 21 Mar 2023 14:52:00 +0100 Subject: [PATCH 01/92] Generate OpenApi Spec --- .../control-plane-adapter/build.gradle.kts | 1 + .../edc/cp/adapter/HttpController.java | 2 + .../openapi/yaml/control-plane-adapter.yaml | 40 +++++++++++++++++++ 3 files changed, 43 insertions(+) create mode 100644 resources/openapi/yaml/control-plane-adapter.yaml diff --git a/edc-extensions/control-plane-adapter/build.gradle.kts b/edc-extensions/control-plane-adapter/build.gradle.kts index 715b5da74..a6ba7f28e 100644 --- a/edc-extensions/control-plane-adapter/build.gradle.kts +++ b/edc-extensions/control-plane-adapter/build.gradle.kts @@ -2,6 +2,7 @@ plugins { `java-library` `maven-publish` + id("io.swagger.core.v3.swagger-gradle-plugin") } dependencies { diff --git a/edc-extensions/control-plane-adapter/src/main/java/org/eclipse/tractusx/edc/cp/adapter/HttpController.java b/edc-extensions/control-plane-adapter/src/main/java/org/eclipse/tractusx/edc/cp/adapter/HttpController.java index 1d6bbc3fa..111d57068 100644 --- a/edc-extensions/control-plane-adapter/src/main/java/org/eclipse/tractusx/edc/cp/adapter/HttpController.java +++ b/edc-extensions/control-plane-adapter/src/main/java/org/eclipse/tractusx/edc/cp/adapter/HttpController.java @@ -16,6 +16,7 @@ import static java.util.Objects.isNull; +import io.swagger.v3.oas.annotations.tags.Tag; import jakarta.ws.rs.*; import jakarta.ws.rs.core.MediaType; import jakarta.ws.rs.core.Response; @@ -35,6 +36,7 @@ @Produces({MediaType.APPLICATION_JSON}) @Path("/adapter/asset") @RequiredArgsConstructor +@Tag(name = "Control Plane Adapter") public class HttpController { private final Monitor monitor; private final ResultService resultService; diff --git a/resources/openapi/yaml/control-plane-adapter.yaml b/resources/openapi/yaml/control-plane-adapter.yaml new file mode 100644 index 000000000..c54839524 --- /dev/null +++ b/resources/openapi/yaml/control-plane-adapter.yaml @@ -0,0 +1,40 @@ +openapi: 3.0.1 +paths: + /adapter/asset/sync/{assetId}: + get: + operationId: getAssetSynchronous + parameters: + - in: path + name: assetId + required: true + schema: + type: string + example: null + - in: query + name: providerUrl + schema: + type: string + example: null + - in: query + name: contractAgreementId + schema: + type: string + example: null + - in: query + name: contractAgreementReuse + schema: + type: boolean + default: true + example: null + - in: query + name: timeout + schema: + type: string + example: null + responses: + default: + content: + application/json: {} + description: default response + tags: + - Control Plane Adapter From 5ea8fb48d48ded4206063b41e9aff63e51f82374 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Mon, 27 Mar 2023 16:07:25 +0200 Subject: [PATCH 02/92] feat(baseImage): replace alpine with temurin as base image for running java application --- .github/workflows/build.yaml | 8 ++++---- .github/workflows/business-tests.yaml | 2 +- .github/workflows/draft-new-release.yaml | 2 +- .github/workflows/publish-new-release.yml | 4 ++-- .github/workflows/veracode.yaml | 6 +++--- .github/workflows/verify.yaml | 12 ++++++------ .../src/main/docker/Dockerfile | 7 +------ .../src/main/docker/Dockerfile | 7 +------ .../src/main/docker/Dockerfile | 7 +------ .../src/main/docker/Dockerfile | 7 +------ .../src/main/docker/Dockerfile | 7 +------ .../src/main/docker/Dockerfile | 7 +------ 12 files changed, 23 insertions(+), 53 deletions(-) diff --git 
a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 78b91b6f6..a3afa87e0 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -77,7 +77,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' # Build - @@ -121,7 +121,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' # Build - @@ -188,7 +188,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' # Build - @@ -243,7 +243,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Import GPG Key uses: crazy-max/ghaction-import-gpg@v1 diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index f55d3d6ba..e2135cf07 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -56,7 +56,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Cache ContainerD Image Layers diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index 9c4e888c8..955284359 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -37,7 +37,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Bump version in gradle.properties diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index d2064264f..88c5fe041 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -61,7 +61,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Import GPG Key @@ -181,7 +181,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Merge main back into develop and set new snapshot version diff --git a/.github/workflows/veracode.yaml b/.github/workflows/veracode.yaml index 722458663..0bfaac8b5 100644 --- a/.github/workflows/veracode.yaml +++ b/.github/workflows/veracode.yaml @@ -34,7 +34,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Verify proper formatting @@ -63,7 +63,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' # Build - @@ -112,7 +112,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' # Build - diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index adfeb5558..1ba38e785 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -69,7 +69,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Verify proper formatting @@ -91,7 +91,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Run Unit tests @@ -108,7 +108,7 @@ jobs: uses: 
actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Run Integration tests @@ -125,7 +125,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Run API tests @@ -142,7 +142,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Run E2E tests @@ -165,7 +165,7 @@ jobs: uses: actions/setup-java@v3.10.0 with: java-version: '11' - distribution: 'adopt' + distribution: 'temurin' cache: 'gradle' - name: Cache SonarCloud packages diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile index c7c6d2c81..229c44868 100644 --- a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile @@ -26,17 +26,12 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM alpine:3.17.1 - +FROM eclipse-temurin:11.0.18_10-jre-alpine ARG JAR ARG APP_USER=docker ARG APP_UID=10100 -RUN apk update && \ - apk add openjdk11-jre-headless=11.0.18_p10-r0 --no-cache && \ - rm -rf /var/cache/apk/* - RUN addgroup --system "$APP_USER" RUN adduser \ diff --git a/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile index 3f9a9806b..b3e04fac7 100644 --- a/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile @@ -26,17 +26,12 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM alpine:3.17.2 - +FROM eclipse-temurin:11.0.18_10-jre-alpine ARG JAR ARG APP_USER=docker ARG APP_UID=10100 -RUN apk update && \ - apk add openjdk11-jre-headless=11.0.18_p10-r0 --no-cache && \ - rm -rf /var/cache/apk/* - RUN addgroup --system "$APP_USER" RUN adduser \ diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile index 3f9a9806b..b3e04fac7 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile @@ -26,17 +26,12 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM alpine:3.17.2 - +FROM eclipse-temurin:11.0.18_10-jre-alpine ARG JAR ARG APP_USER=docker ARG APP_UID=10100 -RUN apk update && \ - apk add openjdk11-jre-headless=11.0.18_p10-r0 --no-cache && \ - rm -rf /var/cache/apk/* - RUN addgroup --system "$APP_USER" RUN adduser \ diff --git a/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile index 3f9a9806b..b3e04fac7 100644 --- a/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile @@ -26,17 +26,12 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM alpine:3.17.2 - +FROM eclipse-temurin:11.0.18_10-jre-alpine ARG JAR ARG APP_USER=docker ARG APP_UID=10100 -RUN apk update && \ - apk add 
openjdk11-jre-headless=11.0.18_p10-r0 --no-cache && \ - rm -rf /var/cache/apk/* - RUN addgroup --system "$APP_USER" RUN adduser \ diff --git a/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile b/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile index 605a6d03c..5c3b12f11 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile +++ b/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile @@ -26,17 +26,12 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM alpine:3.17.2 - +FROM eclipse-temurin:11.0.18_10-jre-alpine ARG JAR ARG APP_USER=docker ARG APP_UID=10100 -RUN apk update && \ - apk add openjdk11-jre-headless=11.0.18_p10-r0 --no-cache && \ - rm -rf /var/cache/apk/* - RUN addgroup --system "$APP_USER" RUN adduser \ diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile b/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile index 605a6d03c..5c3b12f11 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile @@ -26,17 +26,12 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM alpine:3.17.2 - +FROM eclipse-temurin:11.0.18_10-jre-alpine ARG JAR ARG APP_USER=docker ARG APP_UID=10100 -RUN apk update && \ - apk add openjdk11-jre-headless=11.0.18_p10-r0 --no-cache && \ - rm -rf /var/cache/apk/* - RUN addgroup --system "$APP_USER" RUN adduser \ From 84d58ee5ac51b98af749057801770bef04da596d Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Wed, 15 Mar 2023 17:02:41 +0100 Subject: [PATCH 03/92] Lint and refactor mostly all *.md files --- .github/ISSUE_TEMPLATE/bug_report.md | 13 +- .github/ISSUE_TEMPLATE/feature_request.md | 4 +- CHANGELOG.md | 112 +++++++--------- CODE_OF_CONDUCT.md | 22 +-- CONTRIBUTING.md | 16 +-- NOTICE.md | 14 +- README.md | 41 +++--- SECURITY.md | 3 +- charts/README.md | 12 +- charts/edc-controlplane/Chart.yaml | 2 +- charts/edc-controlplane/README.md | 5 +- charts/edc-controlplane/README.md.gotmpl | 1 + charts/edc-dataplane/Chart.yaml | 2 +- charts/edc-dataplane/README.md | 5 +- charts/edc-dataplane/README.md.gotmpl | 1 + docs/README.md | 27 +--- docs/development/Release.md | 11 +- docs/migration/Version_0.0.x_0.1.x.md | 87 +++--------- docs/migration/Version_0.1.0_0.1.1.md | 23 +--- docs/release-notes/Version 0.1.0.md | 10 +- docs/release-notes/Version 0.1.1.md | 11 +- docs/release-notes/Version 0.1.2.md | 3 +- edc-controlplane/README.md | 14 +- .../edc-controlplane-base/README.md | 2 +- .../edc-controlplane-memory/README.md | 86 ++++++------ .../README.md | 124 ++++++++--------- .../edc-controlplane-postgresql/README.md | 126 +++++++++--------- edc-dataplane/README.md | 3 +- .../edc-dataplane-azure-vault/README.md | 48 +++---- edc-dataplane/edc-dataplane-base/README.md | 2 +- .../edc-dataplane-hashicorp-vault/README.md | 48 +++---- .../business-partner-validation/README.md | 16 +-- edc-extensions/cx-oauth2/README.md | 24 ++-- edc-extensions/data-encryption/README.md | 16 +-- .../README.md | 13 +- edc-extensions/hashicorp-vault/README.md | 27 ++-- edc-extensions/postgresql-migration/README.md | 2 +- edc-tests/cucumber/README.md | 5 +- .../deployment/helm/omejdn/README.md | 3 +- 39 files changed, 465 insertions(+), 519 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index c0f8fe3b0..4f74bf45e 100644 
--- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -8,28 +8,35 @@ assignees: '' --- ## Describe the bug + _A clear and concise description of what the bug is._ ### To Reproduce + _Steps to reproduce the behavior:_ + 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error ### Expected behavior + _A clear and concise description of what you expected to happen._ ### Screenshots/Error Messages + _If applicable, add screenshots and/or error messages to help explain your problem._ ## Context Informations + _Add any other context about the probleme here._ - Used version: [e.g. Commit Hash] -- OS: [e.g. Mac OS (M1), Windows, Linux] -- Docker Version: [e.g. 20.10.12] -- `java --version`: +- OS: [e.g. Mac OS (M1), Windows, Linux] +- Docker Version: [e.g. 20.10.12] +- `java --version`: ## Possible Implementation + _You already know the root cause of the erroneous state and how to fix it? Feel free to share your thoughts._ diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 1b6f25b87..62c89ee8c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -7,8 +7,8 @@ assignees: '' --- -_If you are missing a feature or have an idea how to improve this project that should first be -discussed, please feel free to open up a [discussion](https://github.com/catenax-ng/catena-x-edc/discussions/categories/ideas)._ +_If you are missing a feature or have an idea how to improve this project that should first be +discussed, please feel free to open up a [discussion](https://github.com/eclipse-tractusx/tractusx-edc/discussions/categories/ideas)._ **Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]_ diff --git a/CHANGELOG.md b/CHANGELOG.md index 173739461..3960f0c9c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -130,41 +130,31 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). - Bump actions/setup-java from 3.8.0 to 3.9.0 (#605) - Bump s3 from 2.18.35 to 2.18.39 (#606) - -## [0.2.0] - 2022-12-15 +## [0.1.6] - 2023-02-20 ### Fixed -- Fixed Json LD serialization bug which prevented multi-BPN policies to be defined and used. Checkout the [docs](https://github.com/catenax-ng/product-edc/blob/0.2.0/edc-extensions/business-partner-validation/README.md) for more info. 
- -## [0.1.3] - 2022-11-30 - -### Added - -- New Postman collection for developers `/docs/development/postman` -- New EDC Image with HashiCorp Vault and InMemory Storage -- (Experimental) Simplified deployment of the EDC in `/charts/tractusx-connector` - -### Changed +- SQL leakage issue +- Catalog pagination -- Set EDC version to `0.0.1-20221006-SNAPSHOT` -- Business Partner Number Extension no longer supports the 'IN' constraint operator -- HashiCorp Vault Extension now allows sub directories for secrets -- Update package structure/namespace from `net.catenax` to `org.eclipse.tractusx` +## [0.1.5] - 2023-02-13 ### Fixed -- S3 Data Transfer +- Use patched EDC version: 0.0.1-20220922.2-SNAPSHOT to fix catalog pagination bug +- Data Encryption extension: fixed usage of a blocking algorithm ## [0.1.2] - 2022-09-30 ### Added -- Introduced DEPENDENCIES file +- Introduced DEPENDENCIES file ### Changed -- Moved helm charts from `deployment/helm` to `charts` +- Moved helm charts from `deployment/helm` to `charts` +- Replaced distroless image with alpine in all docker images +- Update EDC commit to `740c100ac162bc41b1968c232ad81f7d739aefa9` ## [0.1.1] - 2022-09-04 @@ -172,16 +162,16 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). ### Added -- Control-Plane Extension ([cx-oauth2](/edc-extensions/cx-oauth2/README.md)) +- Control-Plane Extension ([cx-oauth2](/edc-extensions/cx-oauth2/README.md)) ### Changed -- Introduced git submodule to import EDC dependencies (instead of snapshot- or milestone artifact) -- Helm Charts: TLS secret name is now configurable +- Introduced git submodule to import EDC dependencies (instead of snapshot- or milestone artifact) +- Helm Charts: TLS secret name is now configurable ### Fixed -- Connectors with Azure Vault extension are now starting again [link](https://github.com/eclipse-edc/Connector/issues/1892) +- Connectors with Azure Vault extension are now starting again [link](https://github.com/eclipse-edc/Connector/issues/1892) ## [0.1.0] - 2022-08-19 @@ -190,64 +180,64 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). 
### Added -- Control-Plane extension ([data-plane-selector-client](https://github.com/eclipse-edc/Connector/tree/v0.0.1-milestone-5/extensions/data-plane-selector/selector-client)) - - run the EDC with multiple data planes at once -- Control-Plane extension ([dataplane-selector-configuration](edc-extensions/dataplane-selector-configuration)) - - add data plane instances to the control plane by configuration -- Data-Plane extension ([s3-data-plane](https://github.com/eclipse-edc/Connector/tree/main/extensions/aws/data-plane-s3)) - - transfer from and to AWS S3 buckets -- Control-Plane extension ([data-encryption](edc-extensions/data-encryption)) - - Data-Plane authentication attribute transmitted during data-plane-transfer can be encrypted symmetrically (AES) +- Control-Plane extension ([data-plane-selector-client](https://github.com/eclipse-edc/Connector/tree/v0.0.1-milestone-5/extensions/data-plane-selector/selector-client)) + - run the EDC with multiple data planes at once +- Control-Plane extension ([dataplane-selector-configuration](edc-extensions/dataplane-selector-configuration)) + - add data plane instances to the control plane by configuration +- Data-Plane extension ([s3-data-plane](https://github.com/eclipse-edc/Connector/tree/main/extensions/aws/data-plane-s3)) + - transfer from and to AWS S3 buckets +- Control-Plane extension ([data-encryption](edc-extensions/data-encryption)) + - Data-Plane authentication attribute transmitted during data-plane-transfer can be encrypted symmetrically (AES) ### Changed -- Update setting name (`edc.dataplane.token.validation.endpoint` -> `edc.dataplane.token.validation.endpoint`) -- EDC has been updated to version [0.0.1-20220818-SNAPSHOT](https://oss.sonatype.org/#nexus-search;gav~org.eclipse.dataspaceconnector~~0.0.1-20220818-SNAPSHOT~~) - implications to the behavior of the connector have been covered in the [corresponding migration guide](docs/migration/Version_0.0.x_0.1.x.md) +- Update setting name (`edc.dataplane.token.validation.endpoint` -> `edc.dataplane.token.validation.endpoint`) +- EDC has been updated to version [0.0.1-20220818-SNAPSHOT](https://oss.sonatype.org/#nexus-search;gav~org.eclipse.dataspaceconnector~~0.0.1-20220818-SNAPSHOT~~) - implications to the behavior of the connector have been covered in the [corresponding migration guide](docs/migration/Version_0.0.x_0.1.x.md) ### Fixed -- Contract-Offer-Receiving-Connectors must also pass the ContractPolicy of the ContractDefinition before receiving offers([issue](https://github.com/eclipse-edc/Connector/issues/1331)) -- Deletion of Asset becomes impossible when Contract Negotiation exists([issue](https://github.com/eclipse-edc/Connector/issues/1403)) -- Deletion of Policy becomes impossible when Contract Definition exists([issue](https://github.com/eclipse-edc/Connector/issues/1410)) +- Contract-Offer-Receiving-Connectors must also pass the ContractPolicy of the ContractDefinition before receiving offers([issue](https://github.com/eclipse-edc/Connector/issues/1331)) +- Deletion of Asset becomes impossible when Contract Negotiation exists([issue](https://github.com/eclipse-edc/Connector/issues/1403)) +- Deletion of Policy becomes impossible when Contract Definition exists([issue](https://github.com/eclipse-edc/Connector/issues/1410)) ## [0.0.6] - 2022-07-29 ### Fixed -- Fixes [release 0.0.5](https://github.com/catenax-ng/product-edc/releases/tag/0.0.5), which introduced classpath issues due to usage of 
[net.jodah:failsafe:2.4.3](https://search.maven.org/artifact/net.jodah/failsafe/2.4.3/jar) library +- Fixes [release 0.0.5](https://github.com/eclipse-tractusx/tractusx-edc/releases/tag/0.0.5), which introduced classpath issues due to usage of [net.jodah:failsafe:2.4.3](https://search.maven.org/artifact/net.jodah/failsafe/2.4.3/jar) library ## [0.0.5] - 2022-07-28 ### Added -- EDC Health Checks for HashiCorp Vault +- EDC Health Checks for HashiCorp Vault ### Changed -- BusinessPartnerNumber constraint supports List structure -- Helm: Confidential EDC settings can be set using k8s secrets -- HashiCorp Vault API path configurable +- BusinessPartnerNumber constraint supports List structure +- Helm: Confidential EDC settings can be set using k8s secrets +- HashiCorp Vault API path configurable ## [0.0.4] - 2022-06-27 ### Added -- HashiCorp Vault Extension -- Control Plane with HashiCorp Vault and PostgreSQL support +- HashiCorp Vault Extension +- Control Plane with HashiCorp Vault and PostgreSQL support ### Changed -- Release Worklow now publishes Product EDC Extensions as Maven Artifacts +- Release Workflow now publishes Product EDC Extensions as Maven Artifacts ### Fixed -- [#1515](https://github.com/eclipse-edc/Connector/issues/1515) SQL: Connector sends out 50 +- [#1515](https://github.com/eclipse-edc/Connector/issues/1515) SQL: Connector sends out 50 contract offers max. ### Removed -- CosmosDB Control Plane -- Control API Extension for all Control Planes +- CosmosDB Control Plane +- Control API Extension for all Control Planes ## [0.0.3] - 2022-05-23 @@ -255,28 +245,26 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). ## [0.0.1] - 2022-05-13 -[Unreleased]: https://github.com/catenax-ng/product-edc/compare/0.3.0...HEAD - -[0.3.0]: https://github.com/catenax-ng/product-edc/compare/0.2.0...0.3.0 +[Unreleased]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.1.6...HEAD -[0.2.0]: https://github.com/catenax-ng/product-edc/compare/0.1.3...0.2.0 +[0.1.6]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.1.5...0.1.6 -[0.1.3]: https://github.com/catenax-ng/product-edc/compare/0.1.2...0.1.3 +[0.1.5]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.1.2...0.1.5 -[0.1.2]: https://github.com/catenax-ng/product-edc/compare/0.1.1...0.1.2 +[0.1.2]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.1.1...0.1.2 -[0.1.1]: https://github.com/catenax-ng/product-edc/compare/0.1.0...0.1.1 +[0.1.1]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.1.0...0.1.1 -[0.1.0]: https://github.com/catenax-ng/product-edc/compare/0.0.6...0.1.0 +[0.1.0]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.0.6...0.1.0 -[0.0.6]: https://github.com/catenax-ng/product-edc/compare/0.0.5...0.0.6 +[0.0.6]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.0.5...0.0.6 -[0.0.5]: https://github.com/catenax-ng/product-edc/compare/0.0.4...0.0.5 +[0.0.5]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.0.4...0.0.5 -[0.0.4]: https://github.com/catenax-ng/product-edc/compare/0.0.3...0.0.4 +[0.0.4]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.0.3...0.0.4 -[0.0.3]: https://github.com/catenax-ng/product-edc/compare/0.0.2...0.0.3 +[0.0.3]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.0.2...0.0.3 -[0.0.2]: https://github.com/catenax-ng/product-edc/compare/0.0.1...0.0.2 +[0.0.2]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.0.1...0.0.2 -[0.0.1]: 
https://github.com/catenax-ng/product-edc/compare/a02601306fed39a88a3b3b18fae98b80791157b9...0.0.1 +[0.0.1]: https://github.com/eclipse-tractusx/tractusx-edc/compare/a02601306fed39a88a3b3b18fae98b80791157b9...0.0.1 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 14db7e6fa..651d7656a 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -11,19 +11,19 @@ In the interest of fostering an open and welcoming environment, we as community Examples of behavior that contributes to creating a positive environment include: -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities @@ -43,4 +43,4 @@ Project committers or leaders who do not follow the Code of Conduct in good fait ## Attribution -This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org) , version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct.html](https://www.contributor-covenant.org/version/1/4/code-of-conduct/) \ No newline at end of file +This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org) , version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct.html](https://www.contributor-covenant.org/version/1/4/code-of-conduct/) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 39dd5bdba..7163eaf9b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,33 +14,33 @@ where these companies will be able to participate quickly and with little IT infrastructure investment. Tractus-X is meant to be the PoC project of the Catena-X alliance focusing on parts traceability. -* https://projects.eclipse.org/projects/automotive.tractusx +* ## Developer resources Information regarding source code management, builds, coding standards, and more. -* https://projects.eclipse.org/projects/automotive.tractusx/developer +* The project maintains the source code repositories in the following GitHub organization: -* https://github.com/eclipse-tractusx/ +* ## Eclipse Development Process This Eclipse Foundation open project is governed by the Eclipse Foundation Development Process and operates under the terms of the Eclipse IP Policy. 
-* https://eclipse.org/projects/dev_process -* https://www.eclipse.org/org/documents/Eclipse_IP_Policy.pdf +* +* ## Eclipse Contributor Agreement In order to be able to contribute to Eclipse Foundation projects you must electronically sign the Eclipse Contributor Agreement (ECA). -* http://www.eclipse.org/legal/ECA.php +* The ECA provides the Eclipse Foundation with a permanent record that you agree that each of your contributions will comply with the commitments documented in @@ -49,10 +49,10 @@ the email address matching the "Author" field of your contribution's Git commits fulfills the DCO's requirement that you sign-off on your contributions. For more information, please see the Eclipse Committer Handbook: -https://www.eclipse.org/projects/handbook/#resources-commit + ## Contact Contact the project developers via the project's "dev" list. -* https://accounts.eclipse.org/mailing-list/tractusx-dev \ No newline at end of file +* diff --git a/NOTICE.md b/NOTICE.md index d9fce018c..4223c64f3 100644 --- a/NOTICE.md +++ b/NOTICE.md @@ -2,7 +2,7 @@ This content is produced and maintained by the Eclipse Tractus-X project. -* Project home: https://projects.eclipse.org/projects/automotive.tractusx +* Project home: See the AUTHORS file(s) distributed with this work for additional information regarding authorship. @@ -20,18 +20,16 @@ source code repository logs. This program and the accompanying materials are made available under the terms of the Apache License, Version 2.0 which is available at -https://www.apache.org/licenses/LICENSE-2.0 +. SPDX-License-Identifier: Apache-2.0 ## Source Code -The project maintains the following source code repositories -in the GitHub organization https://github.com/eclipse-tractusx: - -* https://github.com/eclipse-tractusx/ -* https://github.com/eclipse-tractusx/ +The project maintains the following source code repositories +in the GitHub organization : +* ## Third-party Content @@ -46,4 +44,4 @@ may have restrictions on the import, possession, and use, and/or re-export to another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import, possession, or use, and re-export of encryption software, to see if this is -permitted. \ No newline at end of file +permitted. diff --git a/README.md b/README.md index 4bb5016de..0d9ef46e8 100644 --- a/README.md +++ b/README.md @@ -19,18 +19,17 @@

Container images and deployments of the Eclipse Dataspace Components open source project.
- Explore the docs » + Explore the docs »

View Eclipse Dataspace Components · - Releases + Releases · Report Bug / Request Feature

-
Table of Contents @@ -60,26 +59,26 @@ The project provides pre-built control- and data-plane [docker](https://www.dock ## Inventory The eclipse data space connector is split up into Control-Plane and Data-Plane, whereas the Control-Plane functions as administration layer -and has responsibility of resource management, contract negotiation and administer data transfer. +and has responsibility of resource management, contract negotiation and administer data transfer. The Data-Plane does the heavy lifting of transferring and receiving data streams. Depending on your environment there are different derivatives of the control-plane prepared: * [edc-controlplane-memory](edc-controlplane/edc-controlplane-memory) with dependency onto - * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) + * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [edc-controlplane-postgresql](edc-controlplane/edc-controlplane-postgresql) with dependency onto - * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) - * [PostgreSQL 8.2 or newer](https://www.postgresql.org/) + * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) + * [PostgreSQL 8.2 or newer](https://www.postgresql.org/) * [edc-controlplane-postgresql-hashicorp-vault](edc-controlplane/edc-controlplane-postgresql-hashicorp-vault) with dependency onto - * [Hashicorp Vault](https://www.vaultproject.io/) - * [PostgreSQL 8.2 or newer](https://www.postgresql.org/) + * [Hashicorp Vault](https://www.vaultproject.io/) + * [PostgreSQL 8.2 or newer](https://www.postgresql.org/) Derivatives of the Data-Plane can be found here * [edc-dataplane-azure-vault](edc-dataplane/edc-dataplane-azure-vault) with dependency onto - * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) + * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [edc-dataplane-hashicorp-vault](edc-dataplane/edc-dataplane-hashicorp-vault) with dependency onto - * [Hashicorp Vault](https://www.vaultproject.io/) + * [Hashicorp Vault](https://www.vaultproject.io/)

(back to top)

@@ -87,10 +86,10 @@ Derivatives of the Data-Plane can be found here

(back to top)

- ### Build Build Product-EDC together with its Container Images + ```shell ./gradlew dockerize ``` @@ -99,17 +98,17 @@ Build Product-EDC together with its Container Images ## License -Distributed under the Apache 2.0 License. See [LICENSE](https://github.com/catenax-ng/product-edc/blob/main/LICENSE) for more information. +Distributed under the Apache 2.0 License. See [LICENSE](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) for more information.

(back to top)

-[contributors-shield]: https://img.shields.io/github/contributors/catenax-ng/product-edc.svg?style=for-the-badge -[contributors-url]: https://github.com/catenax-ng/product-edc/graphs/contributors -[stars-shield]: https://img.shields.io/github/stars/catenax-ng/product-edc.svg?style=for-the-badge -[stars-url]: https://github.com/catenax-ng/product-edc/stargazers -[license-shield]: https://img.shields.io/github/license/catenax-ng/product-edc.svg?style=for-the-badge -[license-url]: https://github.com/catenax-ng/product-edc/blob/main/LICENSE -[release-shield]: https://img.shields.io/github/v/release/catenax-ng/product-edc.svg?style=for-the-badge -[release-url]: https://github.com/catenax-ng/product-edc/releases +[contributors-shield]: https://img.shields.io/github/contributors/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge +[contributors-url]: https://github.com/eclipse-tractusx/tractusx-edc/graphs/contributors +[stars-shield]: https://img.shields.io/github/stars/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge +[stars-url]: https://github.com/eclipse-tractusx/tractusx-edc/stargazers +[license-shield]: https://img.shields.io/github/license/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge +[license-url]: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE +[release-shield]: https://img.shields.io/github/v/release/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge +[release-url]: https://github.com/eclipse-tractusx/tractusx-edc/releases diff --git a/SECURITY.md b/SECURITY.md index 7d8fced73..eec5ca437 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,5 +2,4 @@ ## Reporting a Vulnerability -Please report a found vulnerability here: -[https://www.eclipse.org/security/](https://www.eclipse.org/security/) \ No newline at end of file +Please report a found vulnerability here: \ No newline at end of file diff --git a/charts/README.md b/charts/README.md index 1f453a962..adbaac6af 100644 --- a/charts/README.md +++ b/charts/README.md @@ -1,10 +1,12 @@ -# Chart Linting +# Helm Charts + +## Chart Linting Chart linting is performed using [helm's CT tool](https://github.com/helm/chart-testing). -Configuration files for [CT](../../ct.yaml), [Yamale](../../chart_schema.yaml) and [Yamllint](../../lintconf.yaml) have been provided. +Configuration files for [CT](../ct.yaml), [Yamale](../chart_schema.yaml) and [Yamllint](../lintconf.yaml) have been provided. -# Generate Chart Readme's +## Generate Chart Readme's To generate chart README.md files from its respective values.yaml file we use the [helm-docs tool](https://github.com/norwoodj/helm-docs): @@ -12,6 +14,6 @@ To generate chart README.md files from its respective values.yaml file we use th docker run --rm --volume "$(pwd):/helm-docs" -u $(id -u) jnorwood/helm-docs:v1.10.0 ``` -# Confidential EDC Settings +## Confidential EDC Settings -Some EDC settings should better not be part of the actual deployment (like credentials to the database or the vault). Therefore, it is possible to deploy a secret with these confidential settings beforehand, and make it known to the deployment by setting the secret name in the `envSecretName` field of the deployment. \ No newline at end of file +Some EDC settings should better not be part of the actual deployment (like credentials to the database or the vault). Therefore, it is possible to deploy a secret with these confidential settings beforehand, and make it known to the deployment by setting the secret name in the `envSecretName` field of the deployment. 
diff --git a/charts/edc-controlplane/Chart.yaml b/charts/edc-controlplane/Chart.yaml index 09c6201cc..e0ec00697 100644 --- a/charts/edc-controlplane/Chart.yaml +++ b/charts/edc-controlplane/Chart.yaml @@ -25,7 +25,7 @@ apiVersion: v2 name: edc-controlplane description: >- EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with responsibility of resource management and govern contracts and data transfers -home: https://github.com/eclipse-tractusx/tractusx-edc +home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-controlplane type: application appVersion: "0.3.0" version: 0.3.0 diff --git a/charts/edc-controlplane/README.md b/charts/edc-controlplane/README.md index 71238a6ac..34b49b4e9 100644 --- a/charts/edc-controlplane/README.md +++ b/charts/edc-controlplane/README.md @@ -6,9 +6,10 @@ EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with responsibility of resource management and govern contracts and data transfers -**Homepage:** +- **Homepage:** ## TL;DR + ```shell $ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev $ helm install my-release tractusx-edc/edc-controlplane --version 0.3.0 @@ -43,7 +44,7 @@ $ helm install my-release tractusx-edc/edc-controlplane --version 0.3.0 | envSecretName | string | `nil` | [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from | | fullnameOverride | string | `""` | Overrides the releases full name | | image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| image.repository | string | `"ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault"` | Which derivate of the edc control-plane to use. One of: [ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql, ghcr.io/catenax-ng/product-edc/edc-controlplane-memory] | +| image.repository | string | `"ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault"` | Which derivate of the edc control-plane to use. One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-memory] | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | | imagePullSecret.dockerconfigjson | string | `""` | Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. | | imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | diff --git a/charts/edc-controlplane/README.md.gotmpl b/charts/edc-controlplane/README.md.gotmpl index 1e026d9b4..022804eea 100644 --- a/charts/edc-controlplane/README.md.gotmpl +++ b/charts/edc-controlplane/README.md.gotmpl @@ -9,6 +9,7 @@ {{ template "chart.homepageLine" . 
}} ## TL;DR + ```shell $ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev $ helm install my-release tractusx-edc/edc-controlplane --version {{ .Version }} diff --git a/charts/edc-dataplane/Chart.yaml b/charts/edc-dataplane/Chart.yaml index e6c5c00bf..001fe2d1b 100644 --- a/charts/edc-dataplane/Chart.yaml +++ b/charts/edc-dataplane/Chart.yaml @@ -25,7 +25,7 @@ apiVersion: v2 name: edc-dataplane description: >- EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility of transferring and receiving data streams -home: https://github.com/eclipse-tractusx/tractusx-edc +home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-dataplane type: application appVersion: "0.3.0" version: 0.3.0 diff --git a/charts/edc-dataplane/README.md b/charts/edc-dataplane/README.md index da5f4afd3..02a26f41d 100644 --- a/charts/edc-dataplane/README.md +++ b/charts/edc-dataplane/README.md @@ -6,9 +6,10 @@ EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility of transferring and receiving data streams -**Homepage:** +- **Homepage:** ## TL;DR + ```shell $ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev $ helm install my-release tractusx-edc/edc-dataplane --version 0.3.0 @@ -39,7 +40,7 @@ $ helm install my-release tractusx-edc/edc-dataplane --version 0.3.0 | envSecretName | string | `nil` | [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from | | fullnameOverride | string | `""` | Overrides the releases full name | | image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| image.repository | string | `"ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault"` | Which derivate of the edc data-plane to use. One of: [ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault, ghcr.io/catenax-ng/product-edc/edc-dataplane-azure-vault] | +| image.repository | string | `"ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault"` | Which derivate of the edc data-plane to use. One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-azure-vault] | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | | imagePullSecret.dockerconfigjson | string | `""` | Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. | | imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | diff --git a/charts/edc-dataplane/README.md.gotmpl b/charts/edc-dataplane/README.md.gotmpl index 3bed7d917..8411b344e 100644 --- a/charts/edc-dataplane/README.md.gotmpl +++ b/charts/edc-dataplane/README.md.gotmpl @@ -9,6 +9,7 @@ {{ template "chart.homepageLine" . 
}} ## TL;DR + ```shell $ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev $ helm install my-release tractusx-edc/edc-dataplane --version {{ .Version }} diff --git a/docs/README.md b/docs/README.md index ebcc9942c..096e41feb 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,15 +1,16 @@ -# Product EDC +# Tractus-X EDC -The Catena-X Product EDC Repository creates runnable applications out of EDC extensions from the [Eclipse DataSpace Connector](https://github.com/eclipse-edc/Connector) repository. +The Tractus-X EDC repository creates runnable applications out of EDC extensions from the [Eclipse DataSpace Connector](https://github.com/eclipse-edc/Connector) repository. + +When running a EDC connector from the Product EDC repository there are three setups to choose from. They only vary by using different extensions for -When running a EDC connector from the Product EDC repository there are three setups to choose from. They only vary by using different extensions for - Resolving of Connector-Identities - Persistence of the Control-Plane-State - Persistence of Secrets (Vault) ## Connector Setup -The four supported setups are. +The three supported setups are. - Setup 1: In Memory & Azure Vault - [Control Plane](../edc-controlplane/edc-controlplane-memory/README.md) @@ -18,13 +19,6 @@ The four supported setups are. - [Azure Key Vault Extension](https://github.com/eclipse-edc/Connector/tree/main/extensions/common/vault/azure-vault) - [Data Plane](../edc-dataplane/edc-dataplane-azure-vault/README.md) - [Azure Key Vault Extension](https://github.com/eclipse-edc/Connector/tree/main/extensions/common/vault/azure-vault) -- Setup 2: In Memory & HashiCorp Vault -- [Control Plane](../edc-controlplane/edc-controlplane-memory/README.md) - - [IDS DAPS Extensions](https://github.com/eclipse-edc/Connector/tree/main/extensions/common/iam/oauth2/daps) - - In Memory Persistence done by using no extension - - [HashiCorp Vault Extension](../edc-extensions/hashicorp-vault/README.md) -- [Data Plane](../edc-dataplane/edc-dataplane-azure-vault/README.md) - - [HashiCorp Vault Extension](../edc-extensions/hashicorp-vault/README.md) - Setup 2: PostgreSQL & Azure Vault - [Control Plane](../edc-controlplane/edc-controlplane-postgresql/README.md) - [IDS DAPS Extensions](https://github.com/eclipse-edc/Connector/tree/main/extensions/common/iam/oauth2/daps) @@ -42,24 +36,17 @@ The four supported setups are. 
## Recommended Documentation -**This Repository** +### This Repository - [Update EDC Version from 0.0.x - 0.1.x](migration/Version_0.0.x_0.1.x.md) - [Application: Control Plane](../edc-controlplane) - [Application: Data Plane](../edc-dataplane) - [Extension: Business Partner Numbers](../edc-extensions/business-partner-validation/README.md) -- [Example: Connector Configuration (Helm)](../edc-tests/src/main/resources/deployment/helm/all-in-one/README.md) - [Example: Local TXDC Setup](samples/Local%20TXDC%20Setup.md) - [Example: Data Transfer](samples/Transfer%20Data.md) -**Eclipse Dataspace Connector** +### Eclipse Dataspace Connector - [EDC Domain Model](https://github.com/eclipse-edc/Connector/blob/main/docs/developer/architecture/domain-model.md) - [EDC Open API Spec](https://github.com/eclipse-edc/Connector/blob/main/resources/openapi/openapi.yaml) - [HTTP Receiver Extension](https://github.com/eclipse-edc/Connector/tree/main/extensions/control-plane/http-receiver) - -**Catena-X** - -_Only accessible for Catena-X Members._ - -- [DAPS](https://confluence.catena-x.net/display/ARTI/Connector+Configuration) diff --git a/docs/development/Release.md b/docs/development/Release.md index 8628bddfa..ded1e4a8b 100644 --- a/docs/development/Release.md +++ b/docs/development/Release.md @@ -13,15 +13,15 @@ project's [GitHub page](https://github.com/eclipse/dash-licenses#get-it). ### 2. Generate DEPENDENCIES file -This call generates the dependencies file. This list is populated by deriving dependencies using the build tool (i.e., +The following call generates the dependencies file. This list is populated by deriving dependencies using the build tool (i.e., gradle), analysing them using an IP tool (i.e., Eclipse Dash Tool), and decorating the resulting report with additional information using a custom script. Execute the gradle task `allDependencies` for creating an integrated dependency report over all sub-modules of the project (including isolated modules). To process the dependencies of a specific module (e.g., an individual launcher) -execute the standard `dependencies` task: +execute the standard `dependencies` task. -- First, the dependencies of this module are calculated with gradle and passed to the Dash tool: +First, the dependencies of this module are calculated with gradle and passed to the Dash tool: ```shell gradle allDependencies | grep -Poh "(?<=\s)[\w.-]+:[\w.-]+:[^:\s]+" | sort | uniq | java -jar /path/org.eclipse.dash.licenses-0.0.1-SNAPSHOT.jar - -summary DEPENDENCIES @@ -34,10 +34,9 @@ _Note: on some machines (e.g. macOS) [the ack tool](https://beyondgrep.com/insta If a dependency is `restricted`, it is not approved by the Eclipse Foundation, yet. The Eclipse Bot is able to approve dependencies automatically, if the license can be resolved by ClearlyDefined. -1. (optional) Visit [https://clearlydefined.io/harvest](https://clearlydefined.io/harvest) and harvest the dependency +1. (optional) Visit and harvest the dependency from maven central. 2. Create the Eclipse IP Issues or ask an Eclipse Commiter to do this for you. 
[maven-shield]: https://img.shields.io/badge/Apache%20Maven-URL-blue - -[maven-url]: https://maven.apache.org \ No newline at end of file +[maven-url]: https://maven.apache.org diff --git a/docs/migration/Version_0.0.x_0.1.x.md b/docs/migration/Version_0.0.x_0.1.x.md index e6c4539d9..353db9368 100644 --- a/docs/migration/Version_0.0.x_0.1.x.md +++ b/docs/migration/Version_0.0.x_0.1.x.md @@ -6,7 +6,7 @@ This document contains a list of breaking changes that are introduced in version 1. PostgreSQL Database 1. Criteria in Policy & Contract Definitions Table - 2. Delete Contract Agreements + 2. Delete Contract Agreements 2. Data Management API 1. Policy Path 2. Policy Payload @@ -27,14 +27,9 @@ be done by the user itself. Criteria in Policies and Contract Definitions are serialized as JSON and put into the database. The Criteria schema changed and already existing database entries will cause _NullPointerExceptions_. - - -
- Example Exception - #### Example Exception -``` +```plain [2022-08-02 09:32:37] [SEVERE ] Could not handle multipart request: null org.eclipse.dataspaceconnector.spi.EdcException at org.eclipse.dataspaceconnector.transaction.local.LocalTransactionContext.execute(LocalTransactionContext.java:70) @@ -122,13 +117,7 @@ Caused by: java.lang.NullPointerException ... 69 more ``` -
- -
- - Solution 1: Update all Criteria manually - -#### Update all Criteria manually +#### Solution 1: Update all Criteria manually Root of this issue is that the operator, left- and right-operand Criteria field names changed. @@ -141,23 +130,17 @@ Root of this issue is that the operator, left- and right-operand Criteria field It is possible to resolve this issue by updating the content of the column, that contain JSON serialized constraints, from -``` +```json {"criteria":[{"left":"asset:prop:id","op":"=","right":"asset-1"}]} ``` to -``` +```json {"criteria":[{"operandLeft":"asset:prop:id","operator":"=","operandRight":"asset-1"}]} ``` -
- -
- - Solution 2: Delete all rows containing Constraints - -#### Delete all rows containing Criteria +#### Solution 2: Delete all rows containing Constraints Instead of updating each row in the database it's also possible to delete all Contract Definitions and Policies. Additionally it's necessary to delete all Negotiations, as they might reference existing Contract Definitions and/or @@ -166,7 +149,7 @@ Policies. Theoretically it's also necessary to delete Contract Agreements. As their deletion is already described in another section, we can skip them here. -**Required Queries** +##### Required Queries ```sql DELETE @@ -183,23 +166,18 @@ DELETE FROM edc_policydefinitins; ``` -
- ### 1.2 Delete Contract Agreements In the new version contract agreement rows contain a serialized policy at the time, the contract was concluded. With the EDC update all existing Contract Agreements must be deleted. -
- Required Query +#### Required Query ```sql DELETE FROM edc_contract_agreement; ``` -
- ## 2. Data Management API It might be necessary to update applications and scripts that use the Data Management API. This section covers the most @@ -210,26 +188,17 @@ important changes in endpoints and payloads. The Data Management API Path for Policies changes from `/policies` to `/policydefinitions`. -
- Example Call - #### Get All Policies ```bash curl -X GET "${DATA_MGMT_ENDPOINT}/data/policydefinitions" --header "X-Api-Key: " --header "Content-Type: application/json" ``` -
- ### 2.2 Policy Payload The Policy Payload now wraps the policy details in an additional policy object. -
- -Payload Comparison - -**New Payload** +#### New Payload ```json { @@ -242,7 +211,7 @@ The Policy Payload now wraps the policy details in an additional policy object. } ``` -**Old Payload** +#### Old Payload ```json { @@ -253,46 +222,36 @@ The Policy Payload now wraps the policy details in an additional policy object. } ``` -
- ### 2.3 Criteria in Payload of Contract Definitions and Policies The payload of a Policy or a Contract Definition may contain one or more Criteria. The format of these serialized Criteria changed. Please note that there is no input validation, that detects errors when the old Criteria format is used! -
+#### Old Criterion Format -Criterion Format Change - -**Old Criterion Format** -``` +```json { "left": "asset:prop:id", "op": "=", "right": "1" } ``` -**New Criterion Format** -``` +#### New Criterion Format + +```json { "operandLeft": "asset:prop:id", "operator": "=", "operandRight": "1" } ``` -**Example Call** +#### Example Call ```bash curl -X POST "${DATA_MGMT_ENDPOINT}/data/contractdefinitions" --header "X-Api-Key: " --header "Content-Type: application/json" --data "{ \"id\": \"1\", \"criteria\": [ { \"operandLeft\": \"asset:prop:id\", \"operator\": \"=\", \"operandRight\": \"1\" } ], \"accessPolicyId\": \"1\", \"contractPolicyId\": \"1\" }" ``` -
- ### 2.4 Data Address When using a Data Address of type `HttpData` please notice that the property `endpoint` changed to `baseUrl`. This property is mostly used when creating assets. +#### Old Asset format -
- -DataAddress Comparison - -**Old Asset format**: ```json { "asset": { @@ -307,7 +266,8 @@ property is mostly used when creating assets. } ``` -**New Asset format**: +#### New Asset format + ```json { "asset": { @@ -321,18 +281,13 @@ property is mostly used when creating assets. } } ``` -
-
- -Example Call +#### Example Call ```bash curl -X POST "$PLATO_DATAMGMT_URL/data/assets" --header "X-Api-Key: password" --header "Content-Type: application/json" --data "{ \"asset\": { \"properties\": { \"asset:prop:id\": \"1\", \"asset:prop:description\": \"Product EDC Demo Asset\" } }, \"dataAddress\": { \"properties\": { \"type\": \"HttpData\", \"baseUrl\": \"https://jsonplaceholder.typicode.com/todos/1\" } } }" -s -o /dev/null -w 'Response Code: %{http_code}\n' ``` -
- ## 3. Connector Configuration ### 3.1 Token Validation Endpoint Setting @@ -346,4 +301,4 @@ With this version a new feature was introduced which allows to have separate Dat transfer-flows (HttpProxy, S3, etc.). The Catena-X EDC team has additionally a new extension created which allows a simpler registration of additional dataplanes. Therefor some changes needs to be applied. Further documentation can be found in the extension folder: -[dataplane-selector-configuration](/edc-extensions/dataplane-selector-configuration/README.md) +[dataplane-selector-configuration](../../edc-extensions/dataplane-selector-configuration/README.md) diff --git a/docs/migration/Version_0.1.0_0.1.1.md b/docs/migration/Version_0.1.0_0.1.1.md index 5797593de..528dc8c37 100644 --- a/docs/migration/Version_0.1.0_0.1.1.md +++ b/docs/migration/Version_0.1.0_0.1.1.md @@ -16,7 +16,6 @@ Due to a change in the DAPS authentication mechanism this version cannot exchang 2. Connector Configuration 1. CX OAuth Extension - ## 1. Data Management API It might be necessary to update applications and scripts that use the Data Management API. This section covers the most @@ -26,11 +25,8 @@ important changes in endpoints and payloads. The id field of the PolicyDefinition was renamed from `uid` to `id`. -
- -Example +#### Old Call -Old Call ```json { "uid": "1", @@ -50,7 +46,8 @@ Old Call } ``` -New call +#### New call + ```json { "id": "1", @@ -70,22 +67,16 @@ New call } ``` -
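For completeness, a creation request that submits the renamed field could look like the sketch below. The `${DATA_MGMT_ENDPOINT}` variable, the `/policydefinitions` path and the API key header are assumptions following the Data Management API calls used elsewhere in these guides; `policy-definition.json` is assumed to contain the "New call" payload shown above.

```bash
# Sketch: create a PolicyDefinition using the renamed "id" field.
# policy-definition.json is assumed to hold the "New call" payload from above.
curl -X POST "${DATA_MGMT_ENDPOINT}/data/policydefinitions" \
  --header "X-Api-Key: <api-key>" \
  --header "Content-Type: application/json" \
  --data @policy-definition.json
```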
- ## 2. Connector Configuration + ### 2.1. CX OAuth Extension All connectors are now shipped with a new OAuth extension. This extension has an additional mandatory setting called `edc.ids.endpoint.audience`, which must be set to the IDS path. -[Documentation](/edc-extensions/cx-oauth2/README.md) +[Documentation](../../edc-extensions/cx-oauth2/README.md) +#### Example -
- -Example - -``` +```properties edc.ids.endpoint.audience=http://plato-edc-controlplane:8282/api/v1/ids/data ``` - -
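When the connector is configured through environment variables instead of a properties file (for example in container or Helm deployments), the same setting can be supplied in its upper-cased form. A sketch with the example value from above:

```bash
# Sketch: equivalent environment variable for edc.ids.endpoint.audience.
export EDC_IDS_ENDPOINT_AUDIENCE=http://plato-edc-controlplane:8282/api/v1/ids/data
```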
diff --git a/docs/release-notes/Version 0.1.0.md b/docs/release-notes/Version 0.1.0.md index 9cf96c304..4f872ff4e 100644 --- a/docs/release-notes/Version 0.1.0.md +++ b/docs/release-notes/Version 0.1.0.md @@ -1,8 +1,9 @@ # Release Notes Version 0.1.0 + 19.08.2022 > **BREAKING CHANGES** -> +> > When upgrading from version 0.0.x please consolidate the migration documentation before ([link](../migration/Version_0.0.x_0.1.x.md)). ## 0. Summary @@ -19,11 +20,10 @@ Upgraded the Eclipse Dataspace Connector Extensions to version 0.0.1-20220818-SNAPSHOT. Please be aware that this introduces some breaking changes. Code Repository -https://github.com/eclipse-dataspaceconnector/DataSpaceConnector + Snapshot Artifact Repository -https://oss.sonatype.org/#nexus-search;quick~org.eclipse.dataspaceconnector - + ## 2. New Extensions @@ -61,4 +61,4 @@ This section covers the most relevant bug fixes, included in this version. - Deletion of Policy becomes impossible when Contract Definition exists([issue](https://github.com/eclipse-dataspaceconnector/DataSpaceConnector/issues/1410)) -- DataAddress is passed unencrypted from DataProvider to DataConsumer ([issue](https://github.com/eclipse-dataspaceconnector/DataSpaceConnector/issues/1504)) \ No newline at end of file +- DataAddress is passed unencrypted from DataProvider to DataConsumer ([issue](https://github.com/eclipse-dataspaceconnector/DataSpaceConnector/issues/1504)) diff --git a/docs/release-notes/Version 0.1.1.md b/docs/release-notes/Version 0.1.1.md index a56d1f307..5138b8b4d 100644 --- a/docs/release-notes/Version 0.1.1.md +++ b/docs/release-notes/Version 0.1.1.md @@ -1,11 +1,11 @@ # Release Notes Version 0.1.1 -31.08.2022 +31.08.2022 > **BREAKING CHANGES** -> +> > Please consolidate the migration documentation ([link](../migration/Version_0.1.0_0.1.1.md)). - +> > **Important Notice** > > The **InMemoryControlPlane** image is broken. Please use another control plane instead. @@ -33,10 +33,9 @@ Using the open source OAuth Extension it is possible for a connector to re-use a [Documentation](../../edc-extensions/cx-oauth2/README.md) +#### New Audience Configuration -**New Audience Configuration** - -``` +```properties edc.ids.endpoint.audience=http://plato-edc-controlplane:8282/api/v1/ids/data ``` diff --git a/docs/release-notes/Version 0.1.2.md b/docs/release-notes/Version 0.1.2.md index 812e8a1d7..cef41cbd6 100644 --- a/docs/release-notes/Version 0.1.2.md +++ b/docs/release-notes/Version 0.1.2.md @@ -1,4 +1,5 @@ # Release Notes Version 0.1.2 + 30.09.2022 > This version introduced mostly bugfixes and thread mitigation by updating libraries. @@ -17,4 +18,4 @@ Introduce alpine image as base for all Product EDC Images (replaced distroless i - Contract negotiation not working when initiated with policy id ([issue](https://github.com/eclipse-dataspaceconnector/DataSpaceConnector/issues/1251)) -- Negotiation of Policies with extensible properties now works as expected \ No newline at end of file +- Negotiation of Policies with extensible properties now works as expected diff --git a/edc-controlplane/README.md b/edc-controlplane/README.md index 01017989c..3f59218d5 100644 --- a/edc-controlplane/README.md +++ b/edc-controlplane/README.md @@ -11,6 +11,7 @@ The only API that is protected by some kind of security mechanism is the Data Ma The key value must be configured in `edc.api.auth.key`. All requests to the Data Management API must have `X-Api-Key` header with the key value. 
Example: + ```bash curl -X GET --header "X-Api-Key: " ``` @@ -22,28 +23,31 @@ curl -X GET --header "X-Api-Key: " Please be aware that there are several confidential settings, that should not be part of the actual EDC configuration file. Some of these confidential settings are + - Vault credentials - Data Management API key - Database credentials As it is possible to configure EDC settings via environment variables, one way to do it would be via Kubernetes Secrets. For other deployment scenarios than Kubernetes equivalent measures should be taken. -# Known Control Plane Issues +## Known Control Plane Issues Please have a look at the open issues in the open source repository. The list below might not be maintained well and only contains the most important issues. -EDC Github Repository https://github.com/eclipse-edc/Connector/issues +EDC GitHub Repository --- **Please note** that some of these issues might already be fixed on the EDC main branch, but are not part of the specific -EDC commit the Product-EDC uses. +EDC commit the Tractus-X-EDC uses. --- -**Persistence** +### Persistence + - ContractDefinition-AssetSelector of InMemory Connector selects 50 Asset max.([issue](https://github.com/eclipse-edc/Connector/issues/1779)) -**Other** +### Other + - Non-IDS-Transformable-ContractDefinition causes connector to be unable to send out self-descriptions/catalogs([issue](https://github.com/eclipse-edc/Connector/issues/1265)) - **Workaround:** Delete non-transformable ContractDefinition or Policy. diff --git a/edc-controlplane/edc-controlplane-base/README.md b/edc-controlplane/edc-controlplane-base/README.md index 9fe217c80..269de27ca 100644 --- a/edc-controlplane/edc-controlplane-base/README.md +++ b/edc-controlplane/edc-controlplane-base/README.md @@ -1,6 +1,6 @@ # EDC Control-Plane Base Module -### Building +## Building ```shell ./gradlew edc-controlplane:edc-controlplane-base:build diff --git a/edc-controlplane/edc-controlplane-memory/README.md b/edc-controlplane/edc-controlplane-memory/README.md index 2eb2ce2e4..ca1f0bef7 100644 --- a/edc-controlplane/edc-controlplane-memory/README.md +++ b/edc-controlplane/edc-controlplane-memory/README.md @@ -1,52 +1,52 @@ # EDC Control-Plane backed by In-Memory Stores -### Building +## Building ```shell ./gradlew :edc-controlplane:edc-controlplane-memory:dockerize ``` -### Configuration (configuration.properties) +## Configuration (configuration.properties) Listed below are configuration keys needed to get the `edc-controlplane-memory` up and running. Details regarding each configuration property can be found at the [documentary section of the EDC](https://github.com/eclipse-edc/Connector/tree/main/docs). 
-| Key | Required | Example | Description | -|--- |--- |--- |--- | -| edc.api.auth.key | | password | default value: random UUID | -| web.http.default.port | X | 8080 | | -| web.http.default.path | X | /api | | -| web.http.data.port | X | 8181 | | -| web.http.data.path | X | /data | | -| web.http.validation.port | X | 8182 | | -| web.http.validation.path | X | /validation | | -| web.http.control.port | X | 9999 | | -| web.http.control.path | X | /api/controlplane/control | | -| web.http.ids.port | X | 8282 | | -| web.http.ids.path | X | /api/v1/ids | | -| edc.receiver.http.endpoint | X | http://backend-service | | -| edc.ids.title | | Eclipse Dataspace Connector | | -| edc.ids.description | | Eclipse Dataspace Connector | | -| edc.ids.id | | urn:connector:edc | | -| edc.ids.security.profile | | base | | -| edc.ids.endpoint | | http://localhost:8282/api/v1/ids | | -| edc.ids.maintainer | | http://localhost | | -| edc.ids.curator | | http://localhost | | -| edc.ids.catalog.id | | urn:catalog:default | | -| ids.webhook.address | | http://localhost:8282/api/v1/ids | | -| edc.hostname | | localhost | | -| edc.oauth.token.url | X | https://daps.catena-x.net | | -| edc.oauth.public.key.alias | X | key-to-daps-certificate-in-keyvault | | -| edc.oauth.private.key.alias | X | key-to-private-key-in-keyvault | | -| edc.oauth.client.id | X | daps-oauth-client-id | | -| edc.vault.clientid | X | 00000000-1111-2222-3333-444444444444 | | -| edc.vault.tenantid | X | 55555555-6666-7777-8888-999999999999 | | -| edc.vault.name | X | my-vault-name | | -| edc.vault.clientsecret | X | 34-chars-secret | | -| edc.transfer.proxy.endpoint | X | | | -| edc.transfer.proxy.token.signer.privatekey.alias | X | | | - -#### Example configuration.properties +| Key | Required | Example | Description | +|--------------------------------------------------|----------|--------------------------------------|----------------------------| +| edc.api.auth.key | | password | default value: random UUID | +| web.http.default.port | X | 8080 | | +| web.http.default.path | X | /api | | +| web.http.data.port | X | 8181 | | +| web.http.data.path | X | /data | | +| web.http.validation.port | X | 8182 | | +| web.http.validation.path | X | /validation | | +| web.http.control.port | X | 9999 | | +| web.http.control.path | X | /api/controlplane/control | | +| web.http.ids.port | X | 8282 | | +| web.http.ids.path | X | /api/v1/ids | | +| edc.receiver.http.endpoint | X | | | +| edc.ids.title | | Eclipse Dataspace Connector | | +| edc.ids.description | | Eclipse Dataspace Connector | | +| edc.ids.id | | urn:connector:edc | | +| edc.ids.security.profile | | base | | +| edc.ids.endpoint | | | | +| edc.ids.maintainer | | | | +| edc.ids.curator | | | | +| edc.ids.catalog.id | | urn:catalog:default | | +| ids.webhook.address | | | | +| edc.hostname | | localhost | | +| edc.oauth.token.url | X | | | +| edc.oauth.public.key.alias | X | key-to-daps-certificate-in-keyvault | | +| edc.oauth.private.key.alias | X | key-to-private-key-in-keyvault | | +| edc.oauth.client.id | X | daps-oauth-client-id | | +| edc.vault.clientid | X | 00000000-1111-2222-3333-444444444444 | | +| edc.vault.tenantid | X | 55555555-6666-7777-8888-999999999999 | | +| edc.vault.name | X | my-vault-name | | +| edc.vault.clientsecret | X | 34-chars-secret | | +| edc.transfer.proxy.endpoint | X | | | +| edc.transfer.proxy.token.signer.privatekey.alias | X | | | + +### Example configuration.properties JDK properties-style configuration of the EDC Control-Plane is expected to be mounted to 
`/app/configuration.properties` within the container. @@ -100,7 +100,8 @@ edc.transfer.proxy.token.signer.privatekey.alias=azure-vault-token-signer-privat EOF ``` -#### Example logging.properties +### Example logging.properties + ```shell # Create logging.properties export LOGGING_PROPERTIES_FILE=$(mktemp /tmp/logging.properties.XXXXXX) @@ -114,7 +115,8 @@ java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [ EOF ``` -#### Example opentelemetry.properties +### Example opentelemetry.properties + ```shell # Create opentelemetry.properties export OPENTELEMETRY_PROPERTIES_FILE=$(mktemp /tmp/opentelemetry.properties.XXXXXX) @@ -124,7 +126,7 @@ otel.javaagent.debug=false EOF ``` -### Running +## Running ```shell docker run \ @@ -133,4 +135,4 @@ docker run \ -v ${LOGGING_PROPERTIES_FILE:-/dev/null}:/app/logging.properties \ -v ${OPENTELEMETRY_PROPERTIES_FILE:-/dev/null}:/app/opentelemetry.properties \ -i edc-controlplane-memory:latest -``` \ No newline at end of file +``` diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/README.md b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/README.md index 0efd61884..636d8a8b8 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/README.md +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/README.md @@ -1,71 +1,71 @@ # EDC Control-Plane backed by [Postgresql](https://www.postgresql.org/) and [HashiCorp vault](https://www.vaultproject.io/docs) -### Building +## Building ```shell ./gardlew :edc-controlplane:edc-controlplane-postgresql-hashicorp-vault:dockerize ``` -### Configuration +## Configuration Listed below are configuration keys needed to get the `edc-controlplane-postgresql-hashicorp-vault` up and running. Details regarding each configuration property can be found at the [documentary section of the EDC](https://github.com/eclipse-edc/Connector/tree/main/docs). 
-| Key | Required | Example | Description | -|--- |--- |--- |--- | -| edc.api.auth.key | | password | default value: random UUID | -| web.http.default.port | X | 8080 | | -| web.http.default.path | X | /api | | -| web.http.data.port | X | 8181 | | -| web.http.data.path | X | /data | | -| web.http.validation.port | X | 8182 | | -| web.http.validation.path | X | /validation | | -| web.http.control.port | X | 9999 | | -| web.http.control.path | X | /api/controlplane/control | | -| web.http.ids.port | X | 8282 | | -| web.http.ids.path | X | /api/v1/ids | | -| edc.receiver.http.endpoint | X | http://backend-service | | -| edc.ids.title | | Eclipse Dataspace Connector | | -| edc.ids.description | | Eclipse Dataspace Connector | | -| edc.ids.id | | urn:connector:edc | | -| edc.ids.security.profile | | base | | -| edc.ids.endpoint | | http://localhost:8282/api/v1/ids | | -| edc.ids.maintainer | | http://localhost | | -| edc.ids.curator | | http://localhost | | -| edc.ids.catalog.id | | urn:catalog:default | | -| ids.webhook.address | | http://localhost:8282/api/v1/ids | | -| edc.hostname | | localhost | | -| edc.oauth.token.url | X | https://daps.catena-x.net | | -| edc.oauth.public.key.alias | X | key-to-daps-certificate-in-keyvault | | -| edc.oauth.private.key.alias | X | key-to-private-key-in-keyvault | | -| edc.oauth.client.id | X | daps-oauth-client-id | | -| edc.vault.hashicorp.url | X | http://vault | | -| edc.vault.hashicorp.token | X | 55555555-6666-7777-8888-999999999999 | | -| edc.vault.hashicorp.timeout.seconds | | 30 | | -| edc.datasource.asset.name | X | asset | | -| edc.datasource.asset.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_asset_db | | -| edc.datasource.asset.user | X | username | | -| edc.datasource.asset.password | X | password | | -| edc.datasource.contractdefinition.name | X | contractdefinition | | -| edc.datasource.contractdefinition.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_contractdefinition_db | | -| edc.datasource.contractdefinition.user | X | username | | -| edc.datasource.contractdefinition.password | X | password | | -| edc.datasource.contractnegotiation.name | X | contractnegotiation | | -| edc.datasource.contractnegotiation.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_contractnegotiation_db | | -| edc.datasource.contractnegotiation.user | X | username | | -| edc.datasource.contractnegotiation.password | X | password | | -| edc.datasource.policy.name | X | policy | | -| edc.datasource.policy.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_policy_db | | -| edc.datasource.policy.user | X | username | | -| edc.datasource.policy.password | X | password | | -| edc.datasource.transferprocess.name | X | transferprocess | | -| edc.datasource.transferprocess.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_transferprocess_db | | -| edc.datasource.transferprocess.user | X | username | | -| edc.datasource.transferprocess.password | X | password | | -| edc.transfer.proxy.endpoint | X | http://proxy | | -| edc.transfer.proxy.token.signer.privatekey.alias | X | | | - -#### Example configuration.properties +| Key | Required | Example | Description | +|--------------------------------------------------|----------|------------------------------------------------------------------------------|----------------------------| +| edc.api.auth.key | | password | default value: random UUID | +| web.http.default.port | X | 8080 | | +| web.http.default.path | X | /api | | +| web.http.data.port | X 
| 8181 | | +| web.http.data.path | X | /data | | +| web.http.validation.port | X | 8182 | | +| web.http.validation.path | X | /validation | | +| web.http.control.port | X | 9999 | | +| web.http.control.path | X | /api/controlplane/control | | +| web.http.ids.port | X | 8282 | | +| web.http.ids.path | X | /api/v1/ids | | +| edc.receiver.http.endpoint | X | | | +| edc.ids.title | | Eclipse Dataspace Connector | | +| edc.ids.description | | Eclipse Dataspace Connector | | +| edc.ids.id | | urn:connector:edc | | +| edc.ids.security.profile | | base | | +| edc.ids.endpoint | | | | +| edc.ids.maintainer | | | | +| edc.ids.curator | | | | +| edc.ids.catalog.id | | urn:catalog:default | | +| ids.webhook.address | | | | +| edc.hostname | | localhost | | +| edc.oauth.token.url | X | | | +| edc.oauth.public.key.alias | X | key-to-daps-certificate-in-keyvault | | +| edc.oauth.private.key.alias | X | key-to-private-key-in-keyvault | | +| edc.oauth.client.id | X | daps-oauth-client-id | | +| edc.vault.hashicorp.url | X | | | +| edc.vault.hashicorp.token | X | 55555555-6666-7777-8888-999999999999 | | +| edc.vault.hashicorp.timeout.seconds | | 30 | | +| edc.datasource.asset.name | X | asset | | +| edc.datasource.asset.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_asset_db | | +| edc.datasource.asset.user | X | username | | +| edc.datasource.asset.password | X | password | | +| edc.datasource.contractdefinition.name | X | contractdefinition | | +| edc.datasource.contractdefinition.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_contractdefinition_db | | +| edc.datasource.contractdefinition.user | X | username | | +| edc.datasource.contractdefinition.password | X | password | | +| edc.datasource.contractnegotiation.name | X | contractnegotiation | | +| edc.datasource.contractnegotiation.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_contractnegotiation_db | | +| edc.datasource.contractnegotiation.user | X | username | | +| edc.datasource.contractnegotiation.password | X | password | | +| edc.datasource.policy.name | X | policy | | +| edc.datasource.policy.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_policy_db | | +| edc.datasource.policy.user | X | username | | +| edc.datasource.policy.password | X | password | | +| edc.datasource.transferprocess.name | X | transferprocess | | +| edc.datasource.transferprocess.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_transferprocess_db | | +| edc.datasource.transferprocess.user | X | username | | +| edc.datasource.transferprocess.password | X | password | | +| edc.transfer.proxy.endpoint | X | | | +| edc.transfer.proxy.token.signer.privatekey.alias | X | | | + +### Example configuration.properties JDK properties-style configuration of the EDC Control-Plane is expected to be mounted to `/app/configuration.properties` within the container. 
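In Kubernetes-based deployments this file is typically supplied through a ConfigMap — or, for the confidential values mentioned earlier, a Secret — mounted at `/app/configuration.properties`. A minimal sketch; the resource name is a placeholder and `${CONFIGURATION_PROPERTIES_FILE}` refers to the properties file created in the example of this section:

```bash
# Sketch: provide configuration.properties to the pod via a ConfigMap.
# Credentials and API keys should go into a Kubernetes Secret instead.
kubectl create configmap edc-controlplane-config \
  --from-file=configuration.properties="${CONFIGURATION_PROPERTIES_FILE}"
```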
@@ -140,7 +140,8 @@ edc.datasource.transferprocess.password=pass EOF ``` -#### Example logging.properties +### Example logging.properties + ```shell # Create logging.properties export LOGGING_PROPERTIES_FILE=$(mktemp /tmp/logging.properties.XXXXXX) @@ -154,7 +155,8 @@ java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [ EOF ``` -#### Example opentelemetry.properties +### Example opentelemetry.properties + ```shell # Create opentelemetry.properties export OPENTELEMETRY_PROPERTIES_FILE=$(mktemp /tmp/opentelemetry.properties.XXXXXX) @@ -164,7 +166,7 @@ otel.javaagent.debug=false EOF ``` -### Running +## Running ```shell docker run \ @@ -173,4 +175,4 @@ docker run \ -v ${LOGGING_PROPERTIES_FILE:-/dev/null}:/app/logging.properties \ -v ${OPENTELEMETRY_PROPERTIES_FILE:-/dev/null}:/app/opentelemetry.properties \ -i edc-controlplane-postgresql-hashicorp-vault:latest -``` \ No newline at end of file +``` diff --git a/edc-controlplane/edc-controlplane-postgresql/README.md b/edc-controlplane/edc-controlplane-postgresql/README.md index bb8730712..b9ec0afd0 100644 --- a/edc-controlplane/edc-controlplane-postgresql/README.md +++ b/edc-controlplane/edc-controlplane-postgresql/README.md @@ -1,72 +1,72 @@ # EDC Control-Plane backed by [Postgresql](https://www.postgresql.org/) -### Building +## Building ```shell ./gardlew :edc-controlplane:edc-controlplane-postgresql:dockerize ``` -### Configuration +## Configuration Listed below are configuration keys needed to get the `edc-controlplane-postgresql` up and running. Details regarding each configuration property can be found at the [documentary section of the EDC](https://github.com/eclipse-edc/Connector/tree/main/docs). -| Key | Required | Example | Description | -|--- |--- |--- |--- | -| edc.api.auth.key | | password | default value: random UUID | -| web.http.default.port | X | 8080 | | -| web.http.default.path | X | /api | | -| web.http.data.port | X | 8181 | | -| web.http.data.path | X | | | -| web.http.validation.port | X | 8182 | | -| web.http.validation.path | X | /validation | | -| web.http.control.port | X | 9999 | | -| web.http.control.path | X | /api/controlplane/control | | -| web.http.ids.port | X | 8282 | | -| web.http.ids.path | X | /api/v1/ids | | -| edc.receiver.http.endpoint | X | http://backend-service | | -| edc.ids.title | | Eclipse Dataspace Connector | | -| edc.ids.description | | Eclipse Dataspace Connector | | -| edc.ids.id | | urn:connector:edc | | -| edc.ids.security.profile | | base | | -| edc.ids.endpoint | | http://localhost:8282/api/v1/ids | | -| edc.ids.maintainer | | http://localhost | | -| edc.ids.curator | | http://localhost | | -| edc.ids.catalog.id | | urn:catalog:default | | -| ids.webhook.address | | http://localhost:8282/api/v1/ids | | -| edc.hostname | | localhost | | -| edc.oauth.token.url | X | https://daps.catena-x.net | | -| edc.oauth.public.key.alias | X | key-to-daps-certificate-in-keyvault | | -| edc.oauth.private.key.alias | X | key-to-private-key-in-keyvault | | -| edc.oauth.client.id | X | daps-oauth-client-id | | -| edc.vault.clientid | X | 00000000-1111-2222-3333-444444444444 | | -| edc.vault.tenantid | X | 55555555-6666-7777-8888-999999999999 | | -| edc.vault.name | X | my-vault-name | | -| edc.vault.clientsecret | X | 34-chars-secret | | -| edc.datasource.asset.name | X | asset | | -| edc.datasource.asset.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_asset_db | | -| edc.datasource.asset.user | X | username | | -| edc.datasource.asset.password | X | password | 
| -| edc.datasource.contractdefinition.name | X | contractdefinition | | -| edc.datasource.contractdefinition.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_contractdefinition_db | | -| edc.datasource.contractdefinition.user | X | username | | -| edc.datasource.contractdefinition.password | X | password | | -| edc.datasource.contractnegotiation.name | X | contractnegotiation | | -| edc.datasource.contractnegotiation.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_contractnegotiation_db | | -| edc.datasource.contractnegotiation.user | X | username | | -| edc.datasource.contractnegotiation.password | X | password | | -| edc.datasource.policy.name | X | policy | | -| edc.datasource.policy.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_policy_db | | -| edc.datasource.policy.user | X | username | | -| edc.datasource.policy.password | X | password | | -| edc.datasource.transferprocess.name | X | transferprocess | | -| edc.datasource.transferprocess.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_transferprocess_db | | -| edc.datasource.transferprocess.user | X | username | | -| edc.datasource.transferprocess.password | X | password | | -| edc.transfer.proxy.endpoint | X | | | -| edc.transfer.proxy.token.signer.privatekey.alias | X | | | - -#### Example configuration.properties +| Key | Required | Example | Description | +|--------------------------------------------------|----------|------------------------------------------------------------------------------|----------------------------| +| edc.api.auth.key | | password | default value: random UUID | +| web.http.default.port | X | 8080 | | +| web.http.default.path | X | /api | | +| web.http.data.port | X | 8181 | | +| web.http.data.path | X | | | +| web.http.validation.port | X | 8182 | | +| web.http.validation.path | X | /validation | | +| web.http.control.port | X | 9999 | | +| web.http.control.path | X | /api/controlplane/control | | +| web.http.ids.port | X | 8282 | | +| web.http.ids.path | X | /api/v1/ids | | +| edc.receiver.http.endpoint | X | | | +| edc.ids.title | | Eclipse Dataspace Connector | | +| edc.ids.description | | Eclipse Dataspace Connector | | +| edc.ids.id | | urn:connector:edc | | +| edc.ids.security.profile | | base | | +| edc.ids.endpoint | | | | +| edc.ids.maintainer | | | | +| edc.ids.curator | | | | +| edc.ids.catalog.id | | urn:catalog:default | | +| ids.webhook.address | | | | +| edc.hostname | | localhost | | +| edc.oauth.token.url | X | | | +| edc.oauth.public.key.alias | X | key-to-daps-certificate-in-keyvault | | +| edc.oauth.private.key.alias | X | key-to-private-key-in-keyvault | | +| edc.oauth.client.id | X | daps-oauth-client-id | | +| edc.vault.clientid | X | 00000000-1111-2222-3333-444444444444 | | +| edc.vault.tenantid | X | 55555555-6666-7777-8888-999999999999 | | +| edc.vault.name | X | my-vault-name | | +| edc.vault.clientsecret | X | 34-chars-secret | | +| edc.datasource.asset.name | X | asset | | +| edc.datasource.asset.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_asset_db | | +| edc.datasource.asset.user | X | username | | +| edc.datasource.asset.password | X | password | | +| edc.datasource.contractdefinition.name | X | contractdefinition | | +| edc.datasource.contractdefinition.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_contractdefinition_db | | +| edc.datasource.contractdefinition.user | X | username | | +| edc.datasource.contractdefinition.password | X | password | | +| 
edc.datasource.contractnegotiation.name | X | contractnegotiation | | +| edc.datasource.contractnegotiation.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_contractnegotiation_db | | +| edc.datasource.contractnegotiation.user | X | username | | +| edc.datasource.contractnegotiation.password | X | password | | +| edc.datasource.policy.name | X | policy | | +| edc.datasource.policy.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_policy_db | | +| edc.datasource.policy.user | X | username | | +| edc.datasource.policy.password | X | password | | +| edc.datasource.transferprocess.name | X | transferprocess | | +| edc.datasource.transferprocess.url | X | jdbc:postgresql://postgres.svc.cluster.local:5432/edc_transferprocess_db | | +| edc.datasource.transferprocess.user | X | username | | +| edc.datasource.transferprocess.password | X | password | | +| edc.transfer.proxy.endpoint | X | | | +| edc.transfer.proxy.token.signer.privatekey.alias | X | | | + +### Example configuration.properties JDK properties-style configuration of the EDC Control-Plane is expected to be mounted to `/app/configuration.properties` within the container. @@ -143,7 +143,8 @@ edc.datasource.transferprocess.password=pass EOF ``` -#### Example logging.properties +### Example logging.properties + ```shell # Create logging.properties export LOGGING_PROPERTIES_FILE=$(mktemp /tmp/logging.properties.XXXXXX) @@ -157,7 +158,8 @@ java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [ EOF ``` -#### Example opentelemetry.properties +### Example opentelemetry.properties + ```shell # Create opentelemetry.properties export OPENTELEMETRY_PROPERTIES_FILE=$(mktemp /tmp/opentelemetry.properties.XXXXXX) @@ -167,7 +169,7 @@ otel.javaagent.debug=false EOF ``` -### Running +## Running ```shell docker run \ @@ -176,4 +178,4 @@ docker run \ -v ${LOGGING_PROPERTIES_FILE:-/dev/null}:/app/logging.properties \ -v ${OPENTELEMETRY_PROPERTIES_FILE:-/dev/null}:/app/opentelemetry.properties \ -i edc-controlplane-postgresql:latest -``` \ No newline at end of file +``` diff --git a/edc-dataplane/README.md b/edc-dataplane/README.md index 2deeec0d6..9ca28b38d 100644 --- a/edc-dataplane/README.md +++ b/edc-dataplane/README.md @@ -11,5 +11,6 @@ Please be aware that there are several confidential settings, that should not be As it is possible to configure EDC settings via environment variables, one way to do it would be via Kubernetes Secrets. For other deployment scenarios than Kubernetes equivalent measures should be taken. -# Known Data Plane Issues +## Known Data Plane Issues + Please have a look at the open issues in the open source repository: [EDC Github Repository](https://github.com/eclipse-edc/Connector/issues) diff --git a/edc-dataplane/edc-dataplane-azure-vault/README.md b/edc-dataplane/edc-dataplane-azure-vault/README.md index b133fee26..564aabde6 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/README.md +++ b/edc-dataplane/edc-dataplane-azure-vault/README.md @@ -1,34 +1,34 @@ # EDC Data-Plane with [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -### Building +## Building ```shell ./gardlew :edc-dataplane:edc-dataplane-azure-vault:dockerize ``` -### Configuration +## Configuration Listed below are configuration keys needed to get the `edc-dataplane-azure-vault` up and running. Details regarding each configuration property can be found at the [documentary section of the EDC](https://github.com/eclipse-edc/Connector/tree/main/docs). 
-| Key | Required | Example | Description | -|--- |--- |--- |--- | -| web.http.default.port | X | 8080 | | -| web.http.default.path | X | /api | | -| web.http.public.port | X | 8181 | | -| web.http.public.path | X | | | -| web.http.control.port | X | 9999 | | -| web.http.control.path | X | /api/controlplane/control | | -| edc.receiver.http.endpoint | X | http://backend-service | | -| edc.hostname | | localhost | | -| edc.oauth.client.id | X | daps-oauth-client-id | | -| edc.vault.clientid | X | 00000000-1111-2222-3333-444444444444 | | -| edc.vault.tenantid | X | 55555555-6666-7777-8888-999999999999 | | -| edc.vault.name | X | my-vault-name | | -| edc.vault.clientsecret | X | 34-chars-secret | | -| edc.dataplane.token.validation.endpoint | X | http://controlplane:8182/validation/token | | - -#### Example configuration.properties +| Key | Required | Example | Description | +|-----------------------------------------|----------|---------------------------------------------|-------------| +| web.http.default.port | X | 8080 | | +| web.http.default.path | X | /api | | +| web.http.public.port | X | 8181 | | +| web.http.public.path | X | | | +| web.http.control.port | X | 9999 | | +| web.http.control.path | X | /api/controlplane/control | | +| edc.receiver.http.endpoint | X | | | +| edc.hostname | | localhost | | +| edc.oauth.client.id | X | daps-oauth-client-id | | +| edc.vault.clientid | X | 00000000-1111-2222-3333-444444444444 | | +| edc.vault.tenantid | X | 55555555-6666-7777-8888-999999999999 | | +| edc.vault.name | X | my-vault-name | | +| edc.vault.clientsecret | X | 34-chars-secret | | +| edc.dataplane.token.validation.endpoint | X | | | + +### Example configuration.properties JDK properties-style configuration of the EDC Control-Plane is expected to be mounted to `/app/configuration.properties` within the container. @@ -58,7 +58,8 @@ edc.vault.clientsecret=34-chars-secret EOF ``` -#### Example logging.properties +### Example logging.properties + ```shell # Create logging.properties export LOGGING_PROPERTIES_FILE=$(mktemp /tmp/logging.properties.XXXXXX) @@ -72,7 +73,8 @@ java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [ EOF ``` -#### Example opentelemetry.properties +### Example opentelemetry.properties + ```shell # Create opentelemetry.properties export OPENTELEMETRY_PROPERTIES_FILE=$(mktemp /tmp/opentelemetry.properties.XXXXXX) @@ -82,7 +84,7 @@ otel.javaagent.debug=false EOF ``` -### Running +## Running ```shell docker run \ diff --git a/edc-dataplane/edc-dataplane-base/README.md b/edc-dataplane/edc-dataplane-base/README.md index ee8ac9961..89ec91506 100644 --- a/edc-dataplane/edc-dataplane-base/README.md +++ b/edc-dataplane/edc-dataplane-base/README.md @@ -1,6 +1,6 @@ # EDC Data-Plane Base Module -### Building +## Building ```shell ./gardlew :edc-dataplane:edc-dataplane-base:build diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/README.md b/edc-dataplane/edc-dataplane-hashicorp-vault/README.md index f43382ee1..9930c13a8 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/README.md +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/README.md @@ -1,33 +1,33 @@ # EDC Data-Plane [Hashicorp Vault](https://www.vaultproject.io/) -### Building +## Building ```shell ./gardlew :edc-dataplane:edc-dataplane-hashicorp-vault:dockerize ``` -### Configuration +## Configuration Listed below are configuration keys needed to get the `edc-dataplane-hashicorp-vault` up and running. 
Details regarding each configuration property can be found at the [documentary section of the EDC](https://github.com/eclipse-edc/Connector/tree/main/docs). -| Key | Required | Example | Description | -|--- |--- |--- |--- | -| web.http.default.port | X | 8080 | | -| web.http.default.path | X | /api | | -| web.http.public.port | X | 8181 | | -| web.http.public.path | X | | | -| web.http.control.port | X | 9999 | | -| web.http.control.path | X | /api/controlplane/control | | -| edc.receiver.http.endpoint | X | http://backend-service | | -| edc.hostname | | localhost | | -| edc.oauth.client.id | X | daps-oauth-client-id | | -| edc.vault.hashicorp.url | X | http://vault | | -| edc.vault.hashicorp.token | X | 55555555-6666-7777-8888-999999999999 | | -| edc.vault.hashicorp.timeout.seconds | | 30 | | -| edc.dataplane.token.validation.endpoint | X | http://controlplane:8182/validation/token | | - -#### Example configuration.properties +| Key | Required | Example | Description | +|-----------------------------------------|----------|---------------------------------------------|-------------| +| web.http.default.port | X | 8080 | | +| web.http.default.path | X | /api | | +| web.http.public.port | X | 8181 | | +| web.http.public.path | X | | | +| web.http.control.port | X | 9999 | | +| web.http.control.path | X | /api/controlplane/control | | +| edc.receiver.http.endpoint | X | | | +| edc.hostname | | localhost | | +| edc.oauth.client.id | X | daps-oauth-client-id | | +| edc.vault.hashicorp.url | X | | | +| edc.vault.hashicorp.token | X | 55555555-6666-7777-8888-999999999999 | | +| edc.vault.hashicorp.timeout.seconds | | 30 | | +| edc.dataplane.token.validation.endpoint | X | | | + +### Example configuration.properties JDK properties-style configuration of the EDC Control-Plane is expected to be mounted to `/app/configuration.properties` within the container. @@ -56,7 +56,8 @@ edc.vault.hashicorp.timeout.seconds=30 EOF ``` -#### Example logging.properties +### Example logging.properties + ```shell # Create logging.properties export LOGGING_PROPERTIES_FILE=$(mktemp /tmp/logging.properties.XXXXXX) @@ -70,7 +71,8 @@ java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [ EOF ``` -#### Example opentelemetry.properties +### Example opentelemetry.properties + ```shell # Create opentelemetry.properties export OPENTELEMETRY_PROPERTIES_FILE=$(mktemp /tmp/opentelemetry.properties.XXXXXX) @@ -80,7 +82,7 @@ otel.javaagent.debug=false EOF ``` -### Running +## Running ```shell docker run \ @@ -89,4 +91,4 @@ docker run \ -v ${LOGGING_PROPERTIES_FILE:-/dev/null}:/app/logging.properties \ -v ${OPENTELEMETRY_PROPERTIES_FILE:-/dev/null}:/app/opentelemetry.properties \ -i edc-dataplane-hashicorp-vault:latest -``` \ No newline at end of file +``` diff --git a/edc-extensions/business-partner-validation/README.md b/edc-extensions/business-partner-validation/README.md index d37041560..79a0d7fc3 100644 --- a/edc-extensions/business-partner-validation/README.md +++ b/edc-extensions/business-partner-validation/README.md @@ -30,11 +30,13 @@ must contain the Business Partner Number. ## Single BusinessPartnerNumber example The most simple BPN policy would allow the usage of certain data to a single Business Partner. An example `Policy` is -shown below. -In this example the `edctype` properties are added, so that this policy may even be sent to the Management API. +shown below. In this example the `edctype` properties are added, so that this policy may even be sent to the Management API. 
```json { + "uid": "", + "prohibitions": [], + "obligations": [], "permissions": [ { "edctype": "dataspaceconnector:permission", @@ -64,6 +66,7 @@ In this example the `edctype` properties are added, so that this policy may even To define multiple BPN and allow multiple participants to use the data the `orconstraint` should be used. It will permit the constraints contained to be evaluated using the `OR` operator. + ```json { "permissions": [ @@ -113,13 +116,12 @@ It will permit the constraints contained to be evaluated using the `OR` operator } ``` -# Important: EDC Policies are input sensitive +## Important: EDC Policies are input sensitive Please be aware that the EDC ignores all Rules and Constraint it does not understand. This could cause your constrained policies to be public. ---- +### Example 1 for accidentially public -**Example 1 for accidentially public:** ```json { "uid": "1", @@ -152,9 +154,7 @@ Please be aware that the EDC ignores all Rules and Constraint it does not unders This policy is public available, even though the constraint is described correct. The reason is, that this extension only registeres the Policy.Action `USE` within the EDC. Any other Action Type will have the EDC ignore the corresponding permission, hence interpret the polics as public policy. ---- - -**Example 2 for accidentally public:** +### Example 2 for accidentially public ```json { diff --git a/edc-extensions/cx-oauth2/README.md b/edc-extensions/cx-oauth2/README.md index 0da6f1ced..479c783c7 100644 --- a/edc-extensions/cx-oauth2/README.md +++ b/edc-extensions/cx-oauth2/README.md @@ -12,17 +12,17 @@ The reason IDS did this is to prevent the IDS DAPS to know, which connectors tal ## Configuration -| Key | Description | Mandatory | Default | -|:----|:----|----|----| -| edc.oauth.token.url | Token URL of the DAPS | X | | -| edc.oauth.public.key.alias | Vault alias of the public key | X | | -| edc.oauth.client.id | DAPS client id of the connector | X | | -| edc.oauth.private.key.alias | Vault lias of the private key | X | | -| edc.oauth.token.expiration.seconds | | | 5 minutes | -| edc.oauth.validation.nbf.leeway | DAPS token request leeway | | 10 seconds | -| edc.oauth.provider.jwks.refresh | Time between refresh of the DAPS json web key set | | 5 minutes | -| edc.ids.endpoint.audience | The audience the connector requests from the DAPS. Should be the IDS URL of the connector, e.g. `http://plato-edc-controlplane:8282/api/v1/ids/data` | X | | -| edc.ids.validation.referringconnector | Adds checks to the DAPS token. 
Validation that the `referringConnector` equals the `issuerConnector` and the `securityProfile` of the token is equal to the profile of the IDS message | | false | +| Key | Description | Mandatory | Default | +|:--------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------|------------| +| edc.oauth.token.url | Token URL of the DAPS | X | | +| edc.oauth.public.key.alias | Vault alias of the public key | X | | +| edc.oauth.client.id | DAPS client id of the connector | X | | +| edc.oauth.private.key.alias | Vault lias of the private key | X | | +| edc.oauth.token.expiration.seconds | | | 5 minutes | +| edc.oauth.validation.nbf.leeway | DAPS token request leeway | | 10 seconds | +| edc.oauth.provider.jwks.refresh | Time between refresh of the DAPS json web key set | | 5 minutes | +| edc.ids.endpoint.audience | The audience the connector requests from the DAPS. Should be the IDS URL of the connector, e.g. `http://plato-edc-controlplane:8282/api/v1/ids/data` | X | | +| edc.ids.validation.referringconnector | Adds checks to the DAPS token. Validation that the `referringConnector` equals the `issuerConnector` and the `securityProfile` of the token is equal to the profile of the IDS message | | false | ## Audience Validation @@ -30,4 +30,4 @@ Instead of the `idsc:IDS_CONNECTORS_ALL` the connector requests a specific audie When a connector receives a message, it will checks the token audience is equal to the configured value in `edc.ids.endpoint.audience`. -![sequence diagram](./diagrams/sequence.png) \ No newline at end of file +![sequence diagram](./diagrams/sequence.png) diff --git a/edc-extensions/data-encryption/README.md b/edc-extensions/data-encryption/README.md index 60e01245f..586dad775 100644 --- a/edc-extensions/data-encryption/README.md +++ b/edc-extensions/data-encryption/README.md @@ -2,7 +2,7 @@ The Eclipse Dataspace Connector encrypts sensitive information inside a token it sends to other applications (from possibly other companies). This extension implements the encryption of this data and should be used with secure keys and algorithms at all times. -## Algorithm Configuration +## Algorithm Configuration | Key | Description | Mandatory | Default | |:--------------------------------------------|:-----------------------------------------------------------------------------------------------------------------|-----------|------------------| @@ -17,6 +17,7 @@ The Advanced Encryption Standard (AES) is the default encryption algorithm. For When using AES-GCM the key length must be ether 128-, 196- or 256bit. Keys must be stored stored Base64 encoded in the Vault, separated by a comma. It's possible to generate Keys using OpenSSL + ```bash # 128 Bit openssl rand -base64 16 @@ -30,13 +31,12 @@ openssl rand -base64 32 #### AES Configuration -| Key | Description | Mandatory | Default | -|:--------------------------------------------|:-----------------------------------------------------------------------------------------------------------------|-----------|------------------| -| edc.data.encryption.keys.alias | Symmetric Keys stored in the Vault under the configured alias. | X | | -| edc.data.encryption.caching.enabled | Enable caching to request only keys from the vault after the cache expires. | | false | -| edc.data.encryption.caching.seconds | Duration in seconds until the cache expires. 
| | 3600 | - +| Key | Description | Mandatory | Default | +|:------------------------------------|:----------------------------------------------------------------------------|-----------|---------| +| edc.data.encryption.keys.alias | Symmetric Keys stored in the Vault under the configured alias. | X | | +| edc.data.encryption.caching.enabled | Enable caching to request only keys from the vault after the cache expires. | | false | +| edc.data.encryption.caching.seconds | Duration in seconds until the cache expires. | | 3600 | ### 2. NONE -This strategy does apply no encryption at all and should only be used for debugging purposes. Using NONE encryption may leak sensitive data to other connectors! \ No newline at end of file +This strategy does apply no encryption at all and should only be used for debugging purposes. Using NONE encryption may leak sensitive data to other connectors! diff --git a/edc-extensions/dataplane-selector-configuration/README.md b/edc-extensions/dataplane-selector-configuration/README.md index 7a65b8f48..d5f922732 100644 --- a/edc-extensions/dataplane-selector-configuration/README.md +++ b/edc-extensions/dataplane-selector-configuration/README.md @@ -7,16 +7,17 @@ plane will look for an instance with matching capabilities to transfer data. Per data plane instance the following settings must be configured. As `` any unique string is valid. -| Key | Description | Mandatory | Example | -|:--------------------------------------------------------|:--------------------------------------------------|-----------|-------------------------------------------------------------------| -| edc.dataplane.selector.````.url | URL to connect to the Data Plane Instance. | X | http://plato-edc-dataplane:9999/api/dataplane/control | -| edc.dataplane.selector.````.sourcetypes | Source Types in a comma separated List. | X | HttpData | -| edc.dataplane.selector.````.destinationtypes | Destination Types in a comma separated List. | X | HttpProxy | +| Key | Description | Mandatory | Example | +|:------------------------------------------------------------|:--------------------------------------------------|-----------|------------------------------------------------------------------| +| edc.dataplane.selector.````.url | URL to connect to the Data Plane Instance. | X | | +| edc.dataplane.selector.````.sourcetypes | Source Types in a comma separated List. | X | HttpData | +| edc.dataplane.selector.````.destinationtypes | Destination Types in a comma separated List. | X | HttpProxy | | edc.dataplane.selector.````.properties | Additional properties of the Data Plane Instance. | (X) | { "publicApiUrl": "http://plato-edc-dataplane:8185/api/public" } | The property `publicApiUrl` is mandatory for Data Plane Instances with destination type `HttpProxy`. -**Helm Example Configuration using environment variables** +### Helm Example Configuration using environment variables + ```yaml EDC_DATAPLANE_SELECTOR_PLATOPLANE_URL: http://plato-edc-dataplane:9999/api/dataplane/control EDC_DATAPLANE_SELECTOR_PLATOPLANE_SOURCETYPES : HttpData diff --git a/edc-extensions/hashicorp-vault/README.md b/edc-extensions/hashicorp-vault/README.md index 7f49a4662..c3964605b 100644 --- a/edc-extensions/hashicorp-vault/README.md +++ b/edc-extensions/hashicorp-vault/README.md @@ -2,7 +2,7 @@ --- -**Please note:**
+**Please note:** Using the HashiCorp vault it is possible to define multiple data entries per secret. Other vaults might allow only one entry per secret (e.g. Azure Key Vault). @@ -25,23 +25,25 @@ creating secrets the EDC should consume. ## Health Check -The HashiCorp Vault Extension is able to run health checks. A health check is successful when the vault is _initialized_, _active_ and _unsealed_. Successful health checks are logged with level _FINE_. Unsuccessful health checks will be logged +The HashiCorp Vault Extension is able to run health checks. A health check is successful when the vault is _initialized_, _active_ and _unsealed_. Successful health checks are logged with level _FINE_. Unsuccessful health checks will be logged with level _WARNING_. --- -**Health Checks in Catena-X** + +### Health Checks in Catena-X If your project uses the Catena-X HashiCorp Vault please set `edc.vault.hashicorp.health.check.standby.ok` to _true_. Otherwise the health check would fail if the Vault is in standby. -```bash - # Logs of successful check with standby vault - [2022-08-01 14:48:37] [FINE ] HashiCorp Vault HealthCheck successful. HashicorpVaultHealthResponsePayload(isInitialized=true, isSealed=false, isStandby=true, isPerformanceStandby=false, replicationPerformanceMode=disabled,replicationDrMode=disabled, serverTimeUtc=1659365317, version=1.9.2, clusterName=vault-cluster-4b193c26, clusterId=83fabd45-685d-7f8d-9495-18fab6f50d5e) + +```plain +# Logs of successful check with standby vault +[2022-08-01 14:48:37] [FINE ] HashiCorp Vault HealthCheck successful. HashicorpVaultHealthResponsePayload(isInitialized=true, isSealed=false, isStandby=true, isPerformanceStandby=false, replicationPerformanceMode=disabled,replicationDrMode=disabled, serverTimeUtc=1659365317, version=1.9.2, clusterName=vault-cluster-4b193c26, clusterId=83fabd45-685d-7f8d-9495-18fab6f50d5e) ``` --- ## Example: Create & Configure DAPS Key -1. Insert DAPS Key into HashiCorp Vault +### Insert DAPS Key into HashiCorp Vault ```bash cat << EOF | /bin/vault kv put secret/my-daps-key content=- @@ -76,10 +78,10 @@ cat << EOF | /bin/vault kv put secret/my-daps-key content=- EOF ``` -2. Configure Key in the EDC +### Configure Key in the EDC ```bash - EDC_OAUTH_PRIVATE_KEY_ALIAS: my-daps-key +EDC_OAUTH_PRIVATE_KEY_ALIAS: my-daps-key ``` or @@ -90,9 +92,7 @@ edc.oauth.private.key.alias=my-daps-key ## Example: Catena-X Argo CD Vault Configuration - -``` - +```properties ######### # Vault # ######### @@ -109,5 +109,4 @@ edc.vault.hashicorp.health.check.standby.ok=true # from UI: secret stored in https://vault.demo.catena-x.net/ui/vault/secrets//show/my-daps-key edc.oauth.private.key.alias=my-daps-key - -``` \ No newline at end of file +``` diff --git a/edc-extensions/postgresql-migration/README.md b/edc-extensions/postgresql-migration/README.md index d96c2af5e..73f94eb56 100644 --- a/edc-extensions/postgresql-migration/README.md +++ b/edc-extensions/postgresql-migration/README.md @@ -1,6 +1,6 @@ # Postgresql SQL Migration Extension -This extension applies SQL migrations to +This extension applies SQL migrations to * the asset-index * the contract-definition store diff --git a/edc-tests/cucumber/README.md b/edc-tests/cucumber/README.md index a9424a5b0..e8c1a8ab1 100644 --- a/edc-tests/cucumber/README.md +++ b/edc-tests/cucumber/README.md @@ -6,7 +6,7 @@ THIS MODULE IS DEPRECATED AND WILL NOT BE MAINTAINED ANYMORE. 
./gradlew :edc-tests:test -Dcucumber=true ``` -# Test locally using Act Tool +## Test locally using Act Tool > "Think globally, [`act`](https://github.com/nektos/act) locally" @@ -14,5 +14,6 @@ THIS MODULE IS DEPRECATED AND WILL NOT BE MAINTAINED ANYMORE. act -j business-test ``` -# Run and debug Business-Tests local within IDE +## Run and debug Business-Tests local within IDE + Please refer to [run-local documentation in docs](../docs/development/Run-business-tests-local.md) diff --git a/edc-tests/cucumber/src/main/resources/deployment/helm/omejdn/README.md b/edc-tests/cucumber/src/main/resources/deployment/helm/omejdn/README.md index 2fe8128db..f85a94889 100644 --- a/edc-tests/cucumber/src/main/resources/deployment/helm/omejdn/README.md +++ b/edc-tests/cucumber/src/main/resources/deployment/helm/omejdn/README.md @@ -7,6 +7,7 @@ Two Eclipse Dataspace Connectors need to be registered at the same DAPS instance New connectors are configured in the omejdn _values.yaml_. In each Eclipse Dataspace Connector configure the following properties to use the DAPS. + ```properties edc.oauth.client.id= @@ -17,4 +18,4 @@ In each Eclipse Dataspace Connector configure the following properties to use th edc.oauth.public.key.alias= edc.oauth.provider.audience=idsc:IDS_CONNECTORS_ALL -``` \ No newline at end of file +``` From 36b25f3fe035f5eaab6fa2aface5e5ead553de84 Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Tue, 21 Mar 2023 22:17:27 +0100 Subject: [PATCH 04/92] Lint new changes from develop branch --- CHANGELOG.md | 5 ++ SECURITY.md | 2 +- charts/edc-controlplane/README.md | 4 +- charts/edc-controlplane/README.md.gotmpl | 4 +- charts/edc-dataplane/README.md | 4 +- charts/edc-dataplane/README.md.gotmpl | 4 +- charts/tractusx-connector/README.md | 5 +- charts/tractusx-connector/README.md.gotmpl | 5 +- docs/development/Release.md | 4 +- docs/development/Run-business-tests-local.md | 32 +++++-- docs/development/coding-principles.md | 14 +-- .../2023-02-09-release-process/README.md | 10 +-- .../2023-02-27_testing/README.md | 8 +- .../2023-03-02_gradle_build/README.md | 2 +- docs/development/postman/README.md | 16 ++-- docs/development/scripts/daps_token/README.md | 27 +++--- docs/migration/Version_0.1.2_0.1.3.md | 13 +-- docs/migration/Version_0.1.x_0.3.x.md | 4 +- docs/release-notes/Version 0.1.3.md | 24 +++--- docs/release-notes/Version 0.1.5.md | 2 +- docs/samples/Local TXDC Setup.md | 2 +- docs/samples/Transfer Data.md | 43 +++++----- docs/samples/data-plane-http-oauth2.md | 2 +- .../control-plane-adapter/README.md | 86 +++++++++---------- .../observability-api-customization/README.md | 4 +- .../provision-additional-headers/README.md | 1 + .../helm/supporting-infrastructure/README.md | 10 +-- pr_etiquette.md | 20 ++--- styleguide.md | 3 +- 29 files changed, 195 insertions(+), 165 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3960f0c9c..443e9f410 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added ### Changed + - Support unauthenticated access to the ObservabilityAPI (#126) ### Fixed @@ -20,6 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). 
### Added + - Add contract id to data source http call (#732) - Support also support releases in ci pipeline - Introduce typed object for oauth2 provisioning @@ -40,6 +42,7 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). - Feature: Sftp Provisioner and Client (#554) ### Changed + - Support horizontal edc scaling in cp adapter extension (#678) - Use upstream jackson version (#741) - Replace provision-oauth2 with data-plane-http-oauth2 @@ -61,12 +64,14 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). - update description of supporting infrastructure deployment (#616) ### Fixed + - bugfix: Fix slow AES encryption (#746) - Fix typo in tractusx-connector values.yaml comment - Fix not working docu link in README.md - Fix typo in control-plane adapter README ### Dependency updates + - Bump EDC to 20220220 (#767) - Bump alpine (#749) - Bump alpine (#750) diff --git a/SECURITY.md b/SECURITY.md index eec5ca437..41745e204 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,4 +2,4 @@ ## Reporting a Vulnerability -Please report a found vulnerability here: \ No newline at end of file +Please report a found vulnerability here: diff --git a/charts/edc-controlplane/README.md b/charts/edc-controlplane/README.md index 34b49b4e9..2e2a0cf68 100644 --- a/charts/edc-controlplane/README.md +++ b/charts/edc-controlplane/README.md @@ -11,8 +11,8 @@ EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with res ## TL;DR ```shell -$ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -$ helm install my-release tractusx-edc/edc-controlplane --version 0.3.0 +helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev +helm install my-release tractusx-edc/edc-controlplane --version 0.3.0 ``` ## Values diff --git a/charts/edc-controlplane/README.md.gotmpl b/charts/edc-controlplane/README.md.gotmpl index 022804eea..aa70ec6fc 100644 --- a/charts/edc-controlplane/README.md.gotmpl +++ b/charts/edc-controlplane/README.md.gotmpl @@ -11,8 +11,8 @@ ## TL;DR ```shell -$ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -$ helm install my-release tractusx-edc/edc-controlplane --version {{ .Version }} +helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev +helm install my-release tractusx-edc/edc-controlplane --version {{ .Version }} ``` {{ template "chart.maintainersSection" . 
}} diff --git a/charts/edc-dataplane/README.md b/charts/edc-dataplane/README.md index 02a26f41d..934ff72c1 100644 --- a/charts/edc-dataplane/README.md +++ b/charts/edc-dataplane/README.md @@ -11,8 +11,8 @@ EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility o ## TL;DR ```shell -$ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -$ helm install my-release tractusx-edc/edc-dataplane --version 0.3.0 +helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev +helm install my-release tractusx-edc/edc-dataplane --version 0.3.0 ``` ## Values diff --git a/charts/edc-dataplane/README.md.gotmpl b/charts/edc-dataplane/README.md.gotmpl index 8411b344e..c94d26d50 100644 --- a/charts/edc-dataplane/README.md.gotmpl +++ b/charts/edc-dataplane/README.md.gotmpl @@ -11,8 +11,8 @@ ## TL;DR ```shell -$ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -$ helm install my-release tractusx-edc/edc-dataplane --version {{ .Version }} +helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev +helm install my-release tractusx-edc/edc-dataplane --version {{ .Version }} ``` {{ template "chart.maintainersSection" . }} diff --git a/charts/tractusx-connector/README.md b/charts/tractusx-connector/README.md index 0624381bf..ccd0cae09 100644 --- a/charts/tractusx-connector/README.md +++ b/charts/tractusx-connector/README.md @@ -5,9 +5,10 @@ A Helm chart for Tractus-X Eclipse Data Space Connector ## TL;DR + ```shell -$ helm repo add catenax-ng-product-edc https://catenax-ng.github.io/product-edc -$ helm install tractusx-connector catenax-ng-product-edc/tractusx-connector --version 0.3.0 +helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev +helm install my-release tractusx-edc/tractusx-connector --version 0.3.0 ``` ## Values diff --git a/charts/tractusx-connector/README.md.gotmpl b/charts/tractusx-connector/README.md.gotmpl index 47ef15755..b1671f5a2 100644 --- a/charts/tractusx-connector/README.md.gotmpl +++ b/charts/tractusx-connector/README.md.gotmpl @@ -9,9 +9,10 @@ {{ template "chart.homepageLine" . }} ## TL;DR + ```shell -$ helm repo add catenax-ng-product-edc https://catenax-ng.github.io/product-edc -$ helm install tractusx-connector catenax-ng-product-edc/tractusx-connector --version {{ .Version }} +helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev +helm install my-release tractusx-edc/tractusx-connector --version {{ .Version }} ``` {{ template "chart.maintainersSection" . }} diff --git a/docs/development/Release.md b/docs/development/Release.md index ded1e4a8b..3992c0a1d 100644 --- a/docs/development/Release.md +++ b/docs/development/Release.md @@ -9,7 +9,7 @@ ### 1. Setup Eclipse Dash License Tool locally For instructions on how to download the Eclipse Dash Tool executable, refer to the -project's [GitHub page](https://github.com/eclipse/dash-licenses#get-it). +project's [GitHub page](https://github.com/eclipse/dash-licenses#get-it). ### 2. Generate DEPENDENCIES file @@ -27,7 +27,7 @@ First, the dependencies of this module are calculated with gradle and passed to gradle allDependencies | grep -Poh "(?<=\s)[\w.-]+:[\w.-]+:[^:\s]+" | sort | uniq | java -jar /path/org.eclipse.dash.licenses-0.0.1-SNAPSHOT.jar - -summary DEPENDENCIES ``` -_Note: on some machines (e.g. macOS) [the ack tool](https://beyondgrep.com/install/) should be used instead of `grep`._ +_Note: on some machines (e.g. 
macOS) [the ack tool](https://beyondgrep.com/install/) should be used instead of `grep`._ ### 3. Resolve restricted Dependencies diff --git a/docs/development/Run-business-tests-local.md b/docs/development/Run-business-tests-local.md index 8a87ace24..9d66f7bbc 100644 --- a/docs/development/Run-business-tests-local.md +++ b/docs/development/Run-business-tests-local.md @@ -1,27 +1,33 @@ # Run and debug Business-Tests local within IDE -**Prerequisites:** + +Prerequisites: + - You need a local kubernetes cluster to install the services (Docker Desktop is recommended). - You need kubectl and helm command line tools installed. -### 1. Build all modules with maven and produce docker images +## 1. Build all modules with maven and produce docker images ```shell ./gradlew dockerize ``` -### 2. Install the all-in-one supporting infrastructure environment (Daps, Vault, PostgreSql, Minio, Backend-Service) +## 2. Install the all-in-one supporting infrastructure environment (Daps, Vault, PostgreSql, Minio, Backend-Service) + ```shel helm install infrastructure edc-tests/src/main/resources/deployment/helm/supporting-infrastructure -n business-tests --create-namespace ``` To access the PostgreSql databases you could use following kubectl port forwardings: + ```shell kubectl port-forward plato-postgresql-0 -n business-tests 5555:5432 kubectl port-forward sokrates-postgresql-0 -n business-tests 6666:5432 ``` + Please use the same ports later for your environment variables. -### 3. Install Plato as provider EDC +## 3. Install Plato as provider EDC + ```shell helm install plato charts/tractusx-connector -n business-tests --create-namespace \ --set fullnameOverride=plato \ @@ -56,7 +62,8 @@ helm install plato charts/tractusx-connector -n business-tests --create-namespac --wait-for-jobs --timeout=120s ``` -### 4. Install Socrates as consumer EDC +## 4. Install Socrates as consumer EDC + ```shell helm install sokrates charts/tractusx-connector -n business-tests --create-namespace \ --set fullnameOverride=sokrates \ @@ -91,9 +98,11 @@ helm install sokrates charts/tractusx-connector -n business-tests --create-names --wait-for-jobs --timeout=120s ``` -### 5. Set environment variables and run configuration in IDE +## 5. Set environment variables and run configuration in IDE + You can create a run configuration in IntelliJ like bellow screenshot and copy/paste the whole set of environments variables if you use ";" after each line. -![](run-config.png) + +![Example run config](run-config.png) ```shell PLATO_BACKEND_SERVICE_BACKEND_API_URL=http://localhost:; @@ -122,18 +131,23 @@ EDC_AWS_ENDPOINT_OVERRIDE=http://localhost:32000 The services are using NodePort to expose the endpoints therefore the ports are not fix and needs to be determined after each deployment. To determine the current ports you can use the following kubectl command: + ```shell kubectl get svc -n business-tests -o go-template='{{range .items}}{{ $save := . }}{{range.spec.ports}}{{if .nodePort}}{{$save.metadata.namespace}}{{"/"}}{{$save.metadata.name}}{{" - "}}{{.name}}{{": "}}{{.nodePort}}{{"("}}{{.port}}{{")"}}{{"\n"}}{{end}}{{end}}{{end}}' ``` + This will return all NodePorts which are available in business-tests namespace where you can pick the ports to use in your environment variables. Now you are able to run it in IDE either as normal "Run" mode or in "Debug" mode where you can debug the business-tests by setting debugging points. -### 6. Update your components +## 6. 
Update your components + Once everything is installed you just need to update your services when you have a new image. + ```shell helm upgrade plato charts/tractusx-connector --recreate-pods helm upgrade sokrates charts/tractusx-connector --recreate-pods ``` -### 7. Tips +## 7. Tips + If you use the kubernetes within Docker Desktop you have direct access to the images which you have created with Docker Desktop they are using the same docker daemon. So you don't need to transfer it in your k8s cluster. diff --git a/docs/development/coding-principles.md b/docs/development/coding-principles.md index f45c11b19..624186c46 100644 --- a/docs/development/coding-principles.md +++ b/docs/development/coding-principles.md @@ -73,15 +73,15 @@ - inheriting from an object that fulfills any of the above. In this case use derived builders as well. 2. Although serializability is not the reason we use the builder pattern, it is a strong indication that a builder should be used. -2. Builders should be named just `Builder` and be static nested classes. -3. Create a `public static Builder newInstance(){...}` method to instantiate the builder -4. Builders have non-public constructors -5. Use single-field builders: a `Builder` instantiates the object it builds in its constructor, and sets the properties +3. Builders should be named just `Builder` and be static nested classes. +4. Create a `public static Builder newInstance(){...}` method to instantiate the builder +5. Builders have non-public constructors +6. Use single-field builders: a `Builder` instantiates the object it builds in its constructor, and sets the properties in its builder methods. The `build()` method then only performs verification (optional) and returns the instance. -6. Use `private` constructors for the objects that the builder builds. -7. If there is a builder for an object, use it to deserialize an object, i.e. put Jackson annotations such +7. Use `private` constructors for the objects that the builder builds. +8. If there is a builder for an object, use it to deserialize an object, i.e. put Jackson annotations such as `JsonCreator` and `@JsonBuilder` on builders. -8. Note that the motivation behind use of builders is not for immutability (although that may be good in certain +9. Note that the motivation behind use of builders is not for immutability (although that may be good in certain circumstances). Rather, it is to make code less error-prone and simpler given the lack of named arguments and optional parameters in Java. diff --git a/docs/development/decision-records/2023-02-09-release-process/README.md b/docs/development/decision-records/2023-02-09-release-process/README.md index 0dffcd341..c67521eb6 100644 --- a/docs/development/decision-records/2023-02-09-release-process/README.md +++ b/docs/development/decision-records/2023-02-09-release-process/README.md @@ -21,7 +21,7 @@ from breaking changes, such as Java SPIs, APIs and changes in service contracts. Up until now, the only way out was cherry-picking, which is extremely cumbersome and error-prone, and requires a parallel build pipeline to publish the cherry-picked artifacts of EDC (and potentially others). With the approach -presented here, cherry-picking is still an option, but there are easier alternatives to it. +presented here, cherry-picking is still an option, but there are easier alternatives to it. Every release version published by tractusx-edc must be reproducible at any time. 
@@ -47,12 +47,12 @@ created on March 27th 2023, the most recent nightly would be `0.0.1-20230326`. _Updating Gradle files or Maven POMs, creating branches and tags in Git should be automated through GitHub Actions as part of the release process. For reference_: -- Modifying and committing files: https://github.com/orgs/community/discussions/26842#discussioncomment-3253612 -- Creating branches: https://github.com/marketplace/actions/create-branch +- Modifying and committing files: +- Creating branches: - Creating tags using GitHub's - API: https://github.com/eclipse-edc/Connector/blob/b24a5cacbc9fcabdfd8020d779399b3e56856661/.github/workflows/release-edc.yml#L21 ( + API: ( example) -- Create GitHub Release: https://github.com/eclipse-edc/Connector/blob/b24a5cacbc9fcabdfd8020d779399b3e56856661/.github/workflows/release-edc.yml#L56 (example) +- Create GitHub Release: (example) Once a release is created, the EDC upstream version must not change anymore, unless there is good reason to do so, for example, a defect, that needs to be fixed upstream. At that point a decision can also be made to employ a cherry-pick model, in case the diff --git a/docs/development/decision-records/2023-02-27_testing/README.md b/docs/development/decision-records/2023-02-27_testing/README.md index fa4b803e1..c4619eb8b 100644 --- a/docs/development/decision-records/2023-02-27_testing/README.md +++ b/docs/development/decision-records/2023-02-27_testing/README.md @@ -13,7 +13,7 @@ Henceforth, testing shall be done in accordance with the herein outlined rules a ## Rationale -Past experiences with product-edc's testing setup has shown that it is time- and resource-consuming, which also makes it unreliable at times. +Past experiences with product-edc's testing setup has shown that it is time- and resource-consuming, which also makes it unreliable at times. Furthermore, a finer-grained test classification such as the one outlined in this document is currently neither present nor documented. ### Definitions and distinction @@ -51,7 +51,7 @@ EDC provides a way to launch (multiple) embedded connector runtimes from within External systems such as databases or identity providers should be setup "out-of-band" of the test, using a script or the CI pipeline's declarative syntax (e.g. GitHub Actions' `services` feature). If possible, we should employ external systems in a self-contained way, e.g. using docker containers, because that increases portability and decreases the potential for conflict, e.g. in always-on databases. -### DO: +### DO - use integration tests sparingly and only when unit tests are not practical - deploy the external system as service directly in the workflow or @@ -63,7 +63,7 @@ External systems such as databases or identity providers should be setup "out-of system does not get destroyed after the test. - use the class annotations provided by EDC to categorize and configure test execution -### DO NOT: +### DO NOT - try to cover everything with integration tests. It's typically a code smell if there are no corresponding unit tests for an integration test. @@ -78,7 +78,7 @@ External systems such as databases or identity providers should be setup "out-of This section explains _at which point in time_ we should execute which test. This is intended to minimize the impact on overall test execution time on CI, while still maintaining sufficient coverage. 
| Test type | When to run | Remarks | -| ---------------------- | ----------------------------------------------------------------------------------- | ------- | +|------------------------|-------------------------------------------------------------------------------------|---------| | Unit test | when running tests locally, without any parameters, on every commit on every branch | | | Integration test | on every commit on every branch | | | System/End-To-End test | on pull request branches except when marked as `draft` | | diff --git a/docs/development/decision-records/2023-03-02_gradle_build/README.md b/docs/development/decision-records/2023-03-02_gradle_build/README.md index 0f8e7b327..4012599c1 100644 --- a/docs/development/decision-records/2023-03-02_gradle_build/README.md +++ b/docs/development/decision-records/2023-03-02_gradle_build/README.md @@ -43,6 +43,6 @@ parallelization resulting in faster and more responsive builds. ## Further consideration -Planned improvements regarding the testing procedure (PR https://github.com/catenax-ng/product-edc/pull/781) will also greatly benefit from the EDC build tools such +Planned improvements regarding the testing procedure (PR ) will also greatly benefit from the EDC build tools such as JUnit tags and conditional evaluation of the tagged tests. Much of EDC's testing framework is based on Gradle and can be seamlessly integrated in product-edc. diff --git a/docs/development/postman/README.md b/docs/development/postman/README.md index a6f5005b9..f4a7d2b24 100644 --- a/docs/development/postman/README.md +++ b/docs/development/postman/README.md @@ -8,20 +8,22 @@ The Postman app can be used to send and receive EDC messages. -### Install/Download Postman -please visit https://www.postman.com/downloads/ +### Install/Download Postman -### Import Postman collection? -please visit https://learning.postman.com/docs/getting-started/importing-and-exporting-data/ +please visit + +### Import Postman collection + +please visit ## Collection -The postman collection contains the most common API calls. Please note hat the +The postman collection contains the most common API calls. Please note that the - Policy & Negotiation calls come in pairs for the different kinds of policies -- The 'Data' call only works when using the All-In-One Deployment of this repository +- the 'Data' call only works when using the All-In-One Deployment of this repository ![screenshot](./images/screenshot.png) [postman-shield]: https://img.shields.io/badge/Postman-URL-orange -[postman-url]: https://www.postman.com \ No newline at end of file +[postman-url]: https://www.postman.com diff --git a/docs/development/scripts/daps_token/README.md b/docs/development/scripts/daps_token/README.md index aaeb49253..cbc7475ff 100644 --- a/docs/development/scripts/daps_token/README.md +++ b/docs/development/scripts/daps_token/README.md @@ -6,17 +6,22 @@ Script to request an IDS token from the DAPS. 1. Copy your DAPS private key into `key.pem` 2. Edit in the script the following variables - - `token_url` - - `client_id` - - `resource` + - `token_url` + - `client_id` + - `resource` 3. Run script -```bash -./daps_auth_sh -``` + ```bash + ./daps_auth_sh + ``` -4. 
Take the `access_token` from the output in use it in IDS messages -Script output: -```json -{"access_token":"eyJ0eXAiOiJhdCtqd3QiLCJraWQiOiI3MDM2MzAwNzVkYTM2N2IxYmZiYjRjY2Q0N2M1Y2ViMGQ5ZjM1MmRmYWU2MzJkMzYxMGMxNzNmMTM1NDI0NmM5IiwiYWxnIjoiUlMyNTYifQ.eyJzY29wZSI6Imlkc2M6SURTX0NPTk5FQ1RPUl9BVFRSSUJVVEVTX0FMTCIsImF1ZCI6WyJodHRwczovL3Blbi10ZXN0LXBsYXRvLXR4ZGMuaW50LmRlbW8uY2F0ZW5hLXgubmV0L2FwaS92MS9pZHMvZGF0YSJdLCJpc3MiOiJodHRwOi8vaWRzLWRhcHM6NDU2Ny8iLCJzdWIiOiI5OTo4MzpBNzoxNzo4NjpGRjo5ODo5MzpDRTpBMDpERDpBMTpGMTozNjpGQTpGNjowRjo3NTowQToyMzprZXlpZDo5OTo4MzpBNzoxNzo4NjpGRjo5ODo5MzpDRTpBMDpERDpBMTpGMTozNjpGQTpGNjowRjo3NTowQToyMyIsIm5iZiI6MTY3ODMxMDE0OSwiaWF0IjoxNjc4MzEwMTQ5LCJqdGkiOiJkZmY5Y2FmOS05NDZiLTQ1YmMtOWY4My0yYmJkMDI4NTlmYWMiLCJleHAiOjE2NzgzMTM3NDksImNsaWVudF9pZCI6Ijk5OjgzOkE3OjE3Ojg2OkZGOjk4OjkzOkNFOkEwOkREOkExOkYxOjM2OkZBOkY2OjBGOjc1OjBBOjIzOmtleWlkOjk5OjgzOkE3OjE3Ojg2OkZGOjk4OjkzOkNFOkEwOkREOkExOkYxOjM2OkZBOkY2OjBGOjc1OjBBOjIzIiwicmVmZXJyaW5nQ29ubmVjdG9yIjoiaHR0cDovL3BsYXRvLWNvbnRyb2xwbGFuZS9CUE5QTEFUTyJ9.JQqt9gCpaG7rLztO5-pJa7HIybVjKog9v0CFXHoVJZgdxMc5nTKZnuwBVHC1PXuWrBiyPxPoNg0TsfRg9DqF8rFD5noarxOJ1S84BF7AUUi3phQzBF26lsmNmOW_gdNBC-8xw1WMo5hRHH56cB64_x4V8T4VwFlSYYrmA5ge_EiPCW_KWF9sNguXBKs8uTbLB3lvTELGTjmZI93tVR-vYuYzW2jxH1PJNW29KJRQcM0D1AiveMs3_ThRjheEvugyh9QIY1RwPXMgYQpSTvoumNuFFTnpR21ueWfSUtU-4Qu9suNTkcaFihvEObXVrhyMja-HjhQaC8i0XsAgY0tT1A","expires_in":3600,"token_type":"bearer","scope":"idsc:IDS_CONNECTOR_ATTRIBUTES_ALL"}% -``` +4. Take the `access_token` from the output in use it in IDS messages. The output of the script looks like this: + + ```json + { + "access_token": "eyJ0eXAiOiJhdCtqd3QiLCJraWQiOiI3MDM2MzAwNzVkYTM2N2IxYmZiYjRjY2Q0N2M1Y2ViMGQ5ZjM1MmRmYWU2MzJkMzYxMGMxNzNmMTM1NDI0NmM5IiwiYWxnIjoiUlMyNTYifQ.eyJzY29wZSI6Imlkc2M6SURTX0NPTk5FQ1RPUl9BVFRSSUJVVEVTX0FMTCIsImF1ZCI6WyJodHRwczovL3Blbi10ZXN0LXBsYXRvLXR4ZGMuaW50LmRlbW8uY2F0ZW5hLXgubmV0L2FwaS92MS9pZHMvZGF0YSJdLCJpc3MiOiJodHRwOi8vaWRzLWRhcHM6NDU2Ny8iLCJzdWIiOiI5OTo4MzpBNzoxNzo4NjpGRjo5ODo5MzpDRTpBMDpERDpBMTpGMTozNjpGQTpGNjowRjo3NTowQToyMzprZXlpZDo5OTo4MzpBNzoxNzo4NjpGRjo5ODo5MzpDRTpBMDpERDpBMTpGMTozNjpGQTpGNjowRjo3NTowQToyMyIsIm5iZiI6MTY3ODMxMDE0OSwiaWF0IjoxNjc4MzEwMTQ5LCJqdGkiOiJkZmY5Y2FmOS05NDZiLTQ1YmMtOWY4My0yYmJkMDI4NTlmYWMiLCJleHAiOjE2NzgzMTM3NDksImNsaWVudF9pZCI6Ijk5OjgzOkE3OjE3Ojg2OkZGOjk4OjkzOkNFOkEwOkREOkExOkYxOjM2OkZBOkY2OjBGOjc1OjBBOjIzOmtleWlkOjk5OjgzOkE3OjE3Ojg2OkZGOjk4OjkzOkNFOkEwOkREOkExOkYxOjM2OkZBOkY2OjBGOjc1OjBBOjIzIiwicmVmZXJyaW5nQ29ubmVjdG9yIjoiaHR0cDovL3BsYXRvLWNvbnRyb2xwbGFuZS9CUE5QTEFUTyJ9.JQqt9gCpaG7rLztO5-pJa7HIybVjKog9v0CFXHoVJZgdxMc5nTKZnuwBVHC1PXuWrBiyPxPoNg0TsfRg9DqF8rFD5noarxOJ1S84BF7AUUi3phQzBF26lsmNmOW_gdNBC-8xw1WMo5hRHH56cB64_x4V8T4VwFlSYYrmA5ge_EiPCW_KWF9sNguXBKs8uTbLB3lvTELGTjmZI93tVR-vYuYzW2jxH1PJNW29KJRQcM0D1AiveMs3_ThRjheEvugyh9QIY1RwPXMgYQpSTvoumNuFFTnpR21ueWfSUtU-4Qu9suNTkcaFihvEObXVrhyMja-HjhQaC8i0XsAgY0tT1A", + "expires_in": 3600, + "token_type": "bearer", + "scope": "idsc:IDS_CONNECTOR_ATTRIBUTES_ALL" + } + ``` diff --git a/docs/migration/Version_0.1.2_0.1.3.md b/docs/migration/Version_0.1.2_0.1.3.md index 55c9c4570..787b04bfe 100644 --- a/docs/migration/Version_0.1.2_0.1.3.md +++ b/docs/migration/Version_0.1.2_0.1.3.md @@ -6,15 +6,18 @@ This document contains a list of breaking changes that are introduced in version As the images now use the official OAuth2 Extension, the audience settings need to the updated. 
-**Add the following settings** +Add the following settings: + - EDC_OAUTH_PROVIDER_AUDIENCE - EDC_OAUTH_ENDPOINT_AUDIENCE -**Remove the following setting** +Remove the following setting: + - EDC_IDS_ENDPOINT_AUDIENCE -Example -``` +Example: + +```yaml EDC_OAUTH_PROVIDER_AUDIENCE: idsc:IDS_CONNECTORS_ALL EDC_OAUTH_ENDPOINT_AUDIENCE: http://plato-edc-controlplane:8282/api/v1/ids/data -``` \ No newline at end of file +``` diff --git a/docs/migration/Version_0.1.x_0.3.x.md b/docs/migration/Version_0.1.x_0.3.x.md index cbfd17e83..f35d3aa5e 100644 --- a/docs/migration/Version_0.1.x_0.3.x.md +++ b/docs/migration/Version_0.1.x_0.3.x.md @@ -6,7 +6,7 @@ ## Management API changes -details at the [official documentation on swaggerhub](https://app.swaggerhub.com/apis/eclipse-edc-bot/management-api/0.0.1-SNAPSHOT) +Details at the [official documentation on swaggerhub](https://app.swaggerhub.com/apis/eclipse-edc-bot/management-api/0.0.1-SNAPSHOT) - Management API for creating resources (assets, policydefinitions, contractdefinitions, ...) will return a body containing the id of the created resource - Added a `POST /request` for every management endpoint (assets, policydefinitions, ...) to query all the resources. The existent `GET /` have been deprecated @@ -27,7 +27,7 @@ details at the [official documentation on swaggerhub](https://app.swaggerhub.com - renamed `edc.receiver.http.endpoint` to `edc.receiver.http.dynamic.endpoint` - renamed `edc.oauth.public.key.alias` setting to `edc.oauth.certificate.alias` -## Other changes: +## Other changes - Supported `/public` data plane endpoint without trailing slash, that can be eventually removed from the configuration - packages name changed from `org.eclipse.dataspaceconnector` to `org.eclipse.edc` diff --git a/docs/release-notes/Version 0.1.3.md b/docs/release-notes/Version 0.1.3.md index 2ee32093f..c44d60cdb 100644 --- a/docs/release-notes/Version 0.1.3.md +++ b/docs/release-notes/Version 0.1.3.md @@ -10,23 +10,23 @@ - Business Partner Extension - HashiCorp Vault Extension - OAuth2 Extension -3. Bug Fixes +3. Bug Fixes - S3 Data Transfer -# 1. Container Images +## 1. Container Images -## 1.1 New Image: HashiCorp Vault & In Memory Store +### 1.1 New Image: HashiCorp Vault & In Memory Store The EDC now releases a fourth image with a combination of HashiCorp Vault and In Memory Store extensions. -# 2. Extensions +## 2. Extensions -## 2.1 Business Partner Extension +### 2.1 Business Partner Extension **Removed support for Constraint with multiple BPNs** The possibility to use multiple Business Partner Numbers inside of a single constraint has been removed. It looks like this was only possible due to a missing feature and may lead to unexpected side -effects (https://github.com/eclipse-dataspaceconnector/DataSpaceConnector/issues/2026) +effects () Hence, this kind of policy is no longer supported! @@ -65,18 +65,18 @@ Hence, this kind of policy is no longer supported! The BPN extension will now always decline BPN policies with 'IN' operators, when asked by the EDC to enforce it. -## 2.2 HashiCorp Vault Extension +### 2.2 HashiCorp Vault Extension It is now possible to arrange HashiCorp Vault secrets in sub-directories. For example by storing the DAPS secrets in their own `/daps` directory: -``` +```yaml EDC_OAUTH_PRIVATE_KEY_ALIAS: daps/my-plato-daps-key EDC_OAUTH_PUBLIC_KEY_ALIAS: daps/my-plato-daps-crt ``` -## 2.3 OAuth2 Extension +### 2.3 OAuth2 Extension The EDC Oauth2 Extension has now the possibility to add the audience to the claim. 
As the official OAuth2 Extension was added to the control plane again most of the functionality of the CX Oauth2 Extension was removed. @@ -84,8 +84,8 @@ added to the control plane again most of the functionality of the CX Oauth2 Exte > **Breaking Change** The official OAuth2 Extension uses different settings then the EDC OAuth Extension. Please > consolidate the [Migration Documentation](../migration/Version_0.1.2_0.1.3.md). -# 3. Bug Fixes +## 3. Bug Fixes -## 3.1 S3 Data Transfer +### 3.1 S3 Data Transfer -Version 0.1.2 had some issues with the S3 data transfer. This version fixes them. \ No newline at end of file +Version 0.1.2 had some issues with the S3 data transfer. This version fixes them. diff --git a/docs/release-notes/Version 0.1.5.md b/docs/release-notes/Version 0.1.5.md index 37bac446f..242be5494 100644 --- a/docs/release-notes/Version 0.1.5.md +++ b/docs/release-notes/Version 0.1.5.md @@ -22,4 +22,4 @@ catalog pagination. [GitHub issue](https://github.com/eclipse-edc/Connector/issu ### 2.2 Data Encryption Extension The encryption of the `EndpointDataReference` took up to 3 minutes unter certain circumstances. -This was fixed by using a not blocking algorithm and setting the Java CMD flag `java.security.egd` correctly. \ No newline at end of file +This was fixed by using a not blocking algorithm and setting the Java CMD flag `java.security.egd` correctly. diff --git a/docs/samples/Local TXDC Setup.md b/docs/samples/Local TXDC Setup.md index 8117dc05d..ad2a0f6bc 100644 --- a/docs/samples/Local TXDC Setup.md +++ b/docs/samples/Local TXDC Setup.md @@ -121,4 +121,4 @@ helm uninstall --namespace cx plato helm uninstall --namespace cx sokrates ``` -> To try out the local setup, have a look at the [Transfer Example Documentation](Transfer%20Data.md) \ No newline at end of file +> To try out the local setup, have a look at the [Transfer Example Documentation](Transfer%20Data.md) diff --git a/docs/samples/Transfer Data.md b/docs/samples/Transfer Data.md index f07f685f9..d68d87561 100644 --- a/docs/samples/Transfer Data.md +++ b/docs/samples/Transfer Data.md @@ -6,18 +6,18 @@ For this transfer connector **Bob** will act as data provider, and connector **A consumer. But the roles could be inverse as well. > Please note: Before running the examples the corresponding environment variables must be set. -> How such an environment can be setup locally is documented in [chapter 0](#0--optional--local-setup). +> How such an environment can be setup locally is documented in [chapter 1](#1--optional--local-setup). -**Contents** +## Table of Content -0. [(optional) Local Setup](#0--optional--local-setup) -1. [Setup Data Offer](#1-setup-data-offer) -2. [Request Contract Offers](#2-request-contract-offer-catalog) -3. [Negotiate Contract](#3-negotiate-contract) -4. [Transfer Data](#4-transfer-data) -5. [Verify Data Transfer](#5-verify-data-transfer) +1. [(optional) Local Setup](#1--optional--local-setup) +2. [Setup Data Offer](#2-setup-data-offer) +3. [Request Contract Offers](#3-request-contract-offer-catalog) +4. [Negotiate Contract](#4-negotiate-contract) +5. [Transfer Data](#5-transfer-data) +6. [Verify Data Transfer](#6-verify-data-transfer) -## 0. (optional) Local Setup +## 1. (optional) Local Setup To create a local setup with two connectors have a look at the [Local TXDC Setup Documentation](Local%20TXDC%20Setup.md). 
@@ -33,7 +33,7 @@ minkube service list Minikube will then print out something like this: -```shell +```plain |-------------|-----------------------|-----------------|---------------------------| | NAMESPACE | NAME | TARGET PORT | URL | |-------------|-----------------------|-----------------|---------------------------| @@ -83,7 +83,7 @@ kubectl describe service -n cx sokrates-controlplane Kubernetes will then print out something like this. -```shell +```plain Name: plato-controlplane Namespace: cx Labels: app.kubernetes.io/component=edc-controlplane @@ -138,6 +138,7 @@ required. Where to get the IP may vary depending on how Kubernetes is deployed. ### Set Environment Variables, used by this example Environment Variables, containing a URL, used by this example are + - BOB_DATAMGMT_URL - ALICE_DATAMGMT_URL - BOB_IDS_URL @@ -153,7 +154,7 @@ Let's assume we will use Sokrates as Bob, and Plato as Alice. **ALICE_BACKEND_URL** must the Node URL. In this local setup it would be `http://192.168.49.2:30193` -## 1. Setup Data Offer +## 2. Setup Data Offer Set up a data offer in **Bob**, so that **Alice** has something to consume. @@ -162,8 +163,6 @@ official open source documentation ([link](https://github.com/eclipse-edc/Connec ![Sequence 1](diagrams/transfer_sequence_1.png) -**Run** - The following commands will create an Asset, a Policy and a Contract Definition. For simplicity `https://jsonplaceholder.typicode.com/todos/1` is used as data source of the asset, but could be any other API, that is reachable from the Provider Data Plane. @@ -229,7 +228,7 @@ curl -X POST "${BOB_DATAMGMT_URL}/data/contractdefinitions" \ -s -o /dev/null -w 'Response Code: %{http_code}\n' ``` -## 2. Request Contract Offer Catalog +## 3. Request Contract Offer Catalog In this step Alice gets told to request contract offers from another connector (in this case Bob). Alice will then request the catalog over IDS messaging. @@ -239,7 +238,7 @@ connectors, that intent to send messages to each other, have the same DAPS insta ![Sequence 1](diagrams/transfer_sequence_2.png) -**Run** +Run: ```bash curl -G -X GET "${ALICE_DATAMGMT_URL}/data/catalog" \ @@ -249,7 +248,7 @@ curl -G -X GET "${ALICE_DATAMGMT_URL}/data/catalog" \ -s | jq ``` -## 3. Negotiate Contract +## 4. Negotiate Contract Initiate a contract negotiation for the asset (from step 1). Part of the negotiation payload is the contract offer (received in step 2). @@ -262,7 +261,7 @@ and checking whether the `contractAgreementId` is set. This might take a few sec ![Sequence 1](diagrams/transfer_sequence_3.png) -**Run** +Run: ```bash export NEGOTIATION_ID=$( \ @@ -300,14 +299,14 @@ curl -X GET "${ALICE_DATAMGMT_URL}/data/contractnegotiations/${NEGOTIATION_ID}" -s | jq ``` -## 4. Transfer Data +## 5. Transfer Data Initiate a data transfer using the contract agreement from the negotiation (from step 3). Then wait until the state of the transfer process is `COMPLETED`. ![Sequence 1](diagrams/transfer_sequence_4.png) -**Run** +Run: ```bash export CONTRACT_AGREEMENT_ID=$( \ @@ -342,7 +341,7 @@ curl -X GET "${ALICE_DATAMGMT_URL}/data/transferprocess/${TRANSFER_ID}" \ -s | jq ``` -## 5. Verify Data Transfer +## 6. Verify Data Transfer After the transfer is complete the Backend Application has downloaded the data. The Backend Application stores the data locally. In this demo the transfer can be verified by executing a simple `cat` call in the Pod. 
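As a sketch of what that check could look like — the namespace, workload name, and storage path below are illustrative assumptions only and are not defined by this patch — one might exec into the backend pod directly:

```shell
# Illustrative sketch: replace the namespace, workload name and path with the values of your deployment.
# Assumes the backend application stores each received transfer under its transfer-process id.
kubectl exec -n cx deployment/backend-application -- \
  cat "/opt/data/${TRANSFER_PROCESS_ID}"
```

The `curl` call against `${ALICE_BACKEND_URL}` shown in the following hunk performs the equivalent check over the backend application's HTTP API.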
@@ -355,7 +354,7 @@ curl -X GET "${ALICE_BACKEND_URL}/${TRANSFER_PROCESS_ID}" \ -s | jq ``` -# Delete All Data +## Delete All Data ```bash minikube kubectl -- delete pvc -n edc-all-in-one --all diff --git a/docs/samples/data-plane-http-oauth2.md b/docs/samples/data-plane-http-oauth2.md index f757b703a..63b99319f 100644 --- a/docs/samples/data-plane-http-oauth2.md +++ b/docs/samples/data-plane-http-oauth2.md @@ -4,4 +4,4 @@ The Data Plane HTTP OAuth2 extension permits the data-plane to fetch the data re with an OAuth2 authentication layer. For further documentation, please refer to the extension README: -https://github.com/eclipse-edc/Connector/tree/main/extensions/data-plane/data-plane-http-oauth2-core + diff --git a/edc-extensions/control-plane-adapter/README.md b/edc-extensions/control-plane-adapter/README.md index 7b6dfa31b..fe9d4787c 100644 --- a/edc-extensions/control-plane-adapter/README.md +++ b/edc-extensions/control-plane-adapter/README.md @@ -1,68 +1,69 @@ # Control Plane Adapter Extension -The goal of this extension is to simplify the process of retrieving data out of EDC. It returns "EndpointDataReference" object, hiding all the communication details for contract offers, contract negotiation process and retrieving DataReference from EDC control-plane. +The goal of this extension is to simplify the process of retrieving data out of EDC. It returns an `EndpointDataReference` object, hiding all the communication details for contract offers, contract negotiation process and retrieving `EndpointDataReference` from EDC controlplane. Additional requirements, that affects the architecture of the extension: + - can return data both in SYNC and ASYNC mode (currently only SYNC endpoint available) - can be persistent, so that process can be restored from the point where it was before application was stopped - scaling horizontally (when persistence is added to configuration) - can retry failed part of the process (no need to start the process from the beginning) -## Configuration: +## Configuration -| Key | Description | Mandatory | Default | -|:-------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|---------| -| edc.cp.adapter.default.message.retry.number | Number of retries of a message, in case of an error, within the internal process of retrieving DataReference | no | 3 | -| edc.cp.adapter.default.sync.request.timeout | Timeout for synchronous request (in seconds), after witch 'timeout' error will be returned to the requesting client | no | 20 | -| edc.cp.adapter.messagebus.inmemory.thread.number | Number of threads running within the in-memory implementation of MessageBus _ _ | no | 10 | -| edc.cp.adapter.reuse.contract.agreement | Turn on/off reusing of existing contract agreements for the specific asset. Once the contract is agreed, the second request for the same asset will reuse the agreement (if exists) pulled from the EDC. | no | true | -| edc.cp.adapter.cache.catalog.expire.after | Number of seconds, after witch prevoiusly requested catalog will not be reused, and will be removed from catalog cache | no | 300 | -| edc.cp.adapter.catalog.request.limit | Maximum number of items taken from Catalog within single request. 
Requests are repeated until all offers of the query are retrieved | no | 100 | +| Key | Description | Mandatory | Default | +|:---------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------|---------| +| `edc.cp.adapter.default.message.retry.number` | Number of retries of a message, in case of an error, within the internal process of retrieving DataReference | no | 3 | +| `edc.cp.adapter.default.sync.request.timeout` | Timeout for synchronous request (in seconds), after witch 'timeout' error will be returned to the requesting client | no | 20 | +| `edc.cp.adapter.messagebus.inmemory.thread.number` | Number of threads running within the in-memory implementation of MessageBus | no | 10 | +| `edc.cp.adapter.reuse.contract.agreement` | Turn on/off reusing of existing contract agreements for the specific asset. Once the contract is agreed, the second request for the same asset will reuse the agreement (if exists) pulled from the EDC. | no | true | +| `edc.cp.adapter.cache.catalog.expire.after` | Number of seconds, after witch previously requested catalog will not be reused, and will be removed from catalog cache | no | 300 | +| `edc.cp.adapter.catalog.request.limit` | Maximum number of items taken from Catalog within single request. Requests are repeated until all offers of the query are retrieved | no | 100 | By default, the extension works in "IN MEMORY" mode. This setup has some limitations: -+ It can work only within single EDC instance. If CP-adapter requests are handled by more than one EDC, data flow may be broken. -+ If the EDC instance is restarted, all running processes are lost. -To run CP-Adapter in "PERSISTENT" mode, You need to create a proper tables with [this](docs/schema.sql) script, and add the following configuration values to Your control-plane EDC properties file: +- It can work only within single EDC instance. If CP-adapter requests are handled by more than one EDC, data flow may be broken. +- If the EDC instance is restarted, all running processes are lost. -| Key | Description | -|-----------------------------------|-------------| -| edc.datasource.cpadapter.name | data source name | -| edc.datasource.cpadapter.url | data source url | -| edc.datasource.cpadapter.user | data source user | -| edc.datasource.cpadapter.password | data source password | +To run CP-Adapter in "PERSISTENT" mode, You need to create a proper tables with [this](docs/schema.sql) script, and add the following configuration values to your controlplane EDC properties file: +| Key | Description | +|-------------------------------------|----------------------| +| `edc.datasource.cpadapter.name` | data source name | +| `edc.datasource.cpadapter.url` | data source url | +| `edc.datasource.cpadapter.user` | data source user | +| `edc.datasource.cpadapter.password` | data source password | -## How to use it: -1. Client sends a GET request with two parameters: assetId and the url of the provider control-plane: +## How to use it - ``` +1. 
Client sends a GET request with two parameters: assetId and the url of the provider controlplane: + + ```plain /adapter/asset/sync/{assetId}?providerUrl={providerUrl} ``` The example ULR could be: - ``` + ```plain http://localhost:9193/api/v1/data/adapter/asset/sync/123?providerUrl=http://localhost:8182/api/v1/ids/data ``` - + Optional request parameters, that overwrite the settings for a single request: - | Name | Description | - |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--- | - | contractAgreementId | Defines the ID of existing contract agreement, that should be reused for retrieving the asset. If parameter is specified, but contract is not found, 404 error will be returned. | - | contractAgreementReuse | Similar to edc.cp.adapter.reuse.contract.agreement option allows to turn off reusing of existing contracts, but on a request level. Set the parameter value to 'false' and new contract agrement will be negotiated. | - | timeout | Similar to edc.cp.adapter.default.sync.request.timeout, defines the maximum time of the request. If data is not ready, time out error will be returned. | - - The controller is registered under the context alias of DataManagement API. The authentication depends on the DataManagement configuration. + | Name | Description | + |--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | `contractAgreementId` | Defines the ID of existing contract agreement, that should be reused for retrieving the asset. If parameter is specified, but contract is not found, 404 error will be returned. | + | `contractAgreementReuse` | Similar to `edc.cp.adapter.reuse.contract.agreement` option allows to turn off reusing of existing contracts, but on a request level. Set the parameter value to 'false' and new contract agrement will be negotiated. | + | `timeout` | Similar to `edc.cp.adapter.default.sync.request.timeout`, defines the maximum time of the request. If data is not ready, time out error will be returned. | + + The controller is registered under the context alias of the Management API. The authentication depends on the configuration of the Management API. To find out more please visit: - [api-configuration](../../edc/extensions/control-plane/api/data-management/api-configuration/README.md) + - [Management API Documentation](https://github.com/eclipse-edc/Connector/tree/main/extensions/control-plane/api/management-api) + - [Management API Configuration Extension](https://github.com/eclipse-edc/Connector/tree/main/extensions/common/api/management-api-configuration) - [data-management](../../edc/extensions/control-plane/api/data-management/README.md) +2. `EndpointDataReference` object is returned. Example of the `EndpointDataReference` response: - -2. EndpointDataReference object is returned. Example of the EndpointDataReference response: ```json { "id": "ee8b758a-4b02-4cca-bb37-d0256b4638e7", @@ -75,18 +76,15 @@ To run CP-Adapter in "PERSISTENT" mode, You need to create a proper tables with } ``` -3. Client, using the DataReference, retrieves the Asset through data-plane. - - Example of the data-plane GET request, to retrieve Asset, with DataReference information: - - ``` +3. 
Client, using the `EndpointDataReference`, retrieves the Asset through dataplane. + + Example of the dataplane GET request, to retrieve Asset, with `EndpointDataReference` information: + + ```plain url: http://consumer-dataplane:9192/publicsubmodel?provider-connector-url=... {endpoint} header: Authorization:eyJhbGciOiJSUzI1NiJ9.eyJkYWQiOi... {authKey:authCode} ``` -### Internal design of the extension: +### Internal design of the extension ![diagram](src/main/resources/control-plane-adapter.jpg) - - - diff --git a/edc-extensions/observability-api-customization/README.md b/edc-extensions/observability-api-customization/README.md index ba57a40e6..920d76afc 100644 --- a/edc-extensions/observability-api-customization/README.md +++ b/edc-extensions/observability-api-customization/README.md @@ -9,7 +9,7 @@ API gets registered, and whether insecure (= unauthenticated) access is allowed. If no additional configuration is done, the Observability API is registered into the `"management"` context of EDC. That means the following configuration values **must be present** -``` +```properties web.http.management.port= web.http.management.path=/some/api/path ``` @@ -30,4 +30,4 @@ If the `tractusx.api.observability.allow-insecure=true` is set, then the Observa into the `observability` context, which is unsecured. > Disclaimer: allowing unsecured access to APIs is dangerous and a potential security risk! Using authenticated access -> to all APIs is highly recommended. Never expose unsecured APIs to the public! \ No newline at end of file +> to all APIs is highly recommended. Never expose unsecured APIs to the public! diff --git a/edc-extensions/provision-additional-headers/README.md b/edc-extensions/provision-additional-headers/README.md index 3d68602fa..1883f370f 100644 --- a/edc-extensions/provision-additional-headers/README.md +++ b/edc-extensions/provision-additional-headers/README.md @@ -6,4 +6,5 @@ in order to retrieve the data that will be given to the consumer. This gives for example the provider backend service the possibility to audit the data requests. The following headers are added to the `HttpDataAddress`: + - `Edc-Contract-Agreement-Id`: the id of the contract agreement diff --git a/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/README.md b/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/README.md index 3aae7191a..89cadb1aa 100644 --- a/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/README.md +++ b/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/README.md @@ -68,19 +68,19 @@ Follow these steps to get a fully functional EDC demo environment out of the box Install on your machine: - Minikube - - Documentation https://minikube.sigs.k8s.io/docs/start/ + - Documentation - Helm - - Documentation https://helm.sh/docs/intro/install/ + - Documentation ## Start Demo Environment -**Update Helm Dependencies** +Update Helm Dependencies: ```bash helm dependency update ``` -**Install Demo Chart** +Install Demo Chart: ```bash helm install tx-infrastructure --namespace tx --create-namespace . @@ -88,7 +88,7 @@ helm install tx-infrastructure --namespace tx --create-namespace . 
## Stop Demo Environment -**Uninstall Demo Chart** +Uninstall Demo Chart: ```bash helm uninstall tx-infrastructure --namespace tx diff --git a/pr_etiquette.md b/pr_etiquette.md index 4b288e2d8..ce9ec73f8 100644 --- a/pr_etiquette.md +++ b/pr_etiquette.md @@ -33,10 +33,10 @@ Submitting pull requests in EDC should be done while adhering to a couple of sim to either accept the decision or withdraw your PR. - Be civil and objective. No foul language, insulting or otherwise abusive language will be tolerated. - The PR titles must follow [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/). - - The title must follow the format as `(): `. - `build`, `chore`, `ci`, `docs`, `feat`, `fix`, `perf`, `refactor`, `revert`, `style`, `test` are allowed for - the ``. - - The length must be kept under 80 characters. + - The title must follow the format as `(): `. + `build`, `chore`, `ci`, `docs`, `feat`, `fix`, `perf`, `refactor`, `revert`, `style`, `test` are allowed for + the ``. + - The length must be kept under 80 characters. ## As a reviewer @@ -48,12 +48,12 @@ Submitting pull requests in EDC should be done while adhering to a couple of sim - Don't argue basic principles (code style, architectural decisions, etc.) - Use the `suggestion` feature of GitHub for small/simple changes. - The following could serve you as a review checklist: - - no unnecessary dependencies in `build.gradle.kts` - - sensible unit tests, prefer unit tests over integration tests wherever possible (test runtime). Also check the - usage of test tags. - - code style - - simplicity and "uncluttered-ness" of the code - - overall focus of the PR + - no unnecessary dependencies in `build.gradle.kts` + - sensible unit tests, prefer unit tests over integration tests wherever possible (test runtime). Also check the + usage of test tags. + - code style + - simplicity and "uncluttered-ness" of the code + - overall focus of the PR - Don't just wave through any PR. Please take the time to look at them carefully. - Be civil and objective. No foul language, insulting or otherwise abusive language will be tolerated. The goal is to _encourage_ contributions. diff --git a/styleguide.md b/styleguide.md index 52835f811..e5183465b 100644 --- a/styleguide.md +++ b/styleguide.md @@ -51,7 +51,8 @@ If you absolutely want to make sure that no piece of ever-so-slightly misformatt advise you to use the [SaveActions plugin](https://plugins.jetbrains.com/plugin/7642-save-actions) for IntelliJ IDEA. It takes care that your code is always correctly formatted. 
Unfortunately SaveActions has no export feature, so please just copy this configuration: -![](resources/save_actions_scr.png) + +![SaveActions configuration](resources/save_actions_scr.png) ## [Optional] Generic `.editorConfig` From d032db8c33f3e8b60b54741f991b1754c66c0230 Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Tue, 21 Mar 2023 22:41:58 +0100 Subject: [PATCH 05/92] Replace appearance of product-edc with tractusx-edc --- .github/workflows/business-tests.yaml | 12 ++++++------ CHANGELOG.md | 2 +- README.md | 2 +- .../templates/deployment-controlplane.yaml | 6 +++--- .../templates/deployment-dataplane.yaml | 2 +- docs/README.md | 2 +- .../decision-records/2023-02-27_testing/README.md | 4 ++-- .../2023-03-02_gradle_build/README.md | 8 ++++---- docs/development/postman/collection.json | 2 +- docs/migration/Version_0.0.x_0.1.x.md | 4 ++-- docs/release-notes/Version 0.1.2.md | 4 ++-- docs/samples/Transfer Data.md | 2 +- .../deployment/helm/omejdn/templates/deployment.yaml | 2 +- 13 files changed, 26 insertions(+), 26 deletions(-) diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index f55d3d6ba..b50a801d6 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -84,7 +84,7 @@ jobs: - role: control-plane extraMounts: - hostPath: ${PWD} - containerPath: /srv/product-edc + containerPath: /srv/tractusx-edc - hostPath: ${MAVEN_REPOSITORY} containerPath: /srv/m2-repository - hostPath: /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs @@ -262,7 +262,7 @@ jobs: { "args": [ "-c", - "cd /product-edc && ./gradlew edc-tests:cucumber:test -Dcucumber=true" + "cd /tractusx-edc && ./gradlew edc-tests:cucumber:test -Dcucumber=true" ], "command": [ "/bin/sh" @@ -301,8 +301,8 @@ jobs: "name": "edc-tests-cucumber", "volumeMounts": [ { - "mountPath": "/product-edc", - "name": "product-edc" + "mountPath": "/tractusx-edc", + "name": "tractusx-edc" }, { "mountPath": "/root/.m2/repository", @@ -316,9 +316,9 @@ jobs: "volumes": [ { "hostPath": { - "path": "/srv/product-edc" + "path": "/srv/tractusx-edc" }, - "name": "product-edc" + "name": "tractusx-edc" }, { "hostPath": { diff --git a/CHANGELOG.md b/CHANGELOG.md index 443e9f410..4674ba285 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -232,7 +232,7 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). 
### Changed -- Release Workflow now publishes Product EDC Extensions as Maven Artifacts +- Release Workflow now publishes EDC Extensions as Maven Artifacts ### Fixed diff --git a/README.md b/README.md index 0d9ef46e8..6f3fc8232 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,7 @@ Derivatives of the Data-Plane can be found here ### Build -Build Product-EDC together with its Container Images +Build Tractus-X EDC together with its Container Images ```shell ./gradlew dockerize diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 2f73afb7f..6ba7dc40c 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -262,7 +262,7 @@ spec: ## DATA PLANE ## ################ - # see extension https://github.com/catenax-ng/product-edc/tree/develop/edc-extensions/dataplane-selector-configuration + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/dataplane-selector-configuration - name: "EDC_DATAPLANE_SELECTOR_DEFAULTPLANE_URL" value: {{ include "txdc.dataplane.url.control" . }}/transfer - name: "EDC_DATAPLANE_SELECTOR_DEFAULTPLANE_SOURCETYPES" @@ -291,7 +291,7 @@ spec: ########### {{- if .Values.vault.hashicorp.enabled }} - # see extension https://github.com/catenax-ng/product-edc/tree/develop/edc-extensions/hashicorp-vault + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/hashicorp-vault - name: "EDC_VAULT_HASHICORP_URL" value: {{ .Values.vault.hashicorp.url | required ".Values.vault.hashicorp.url is required" | quote }} - name: "EDC_VAULT_HASHICORP_TOKEN" @@ -326,7 +326,7 @@ spec: ## DATA ENCRYPTION ## ##################### - # see extension https://github.com/catenax-ng/product-edc/tree/develop/edc-extensions/data-encryption + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/data-encryption - name: "EDC_DATA_ENCRYPTION_KEYS_ALIAS" value: {{ .Values.vault.secretNames.transferProxyTokenEncryptionAesKey | quote }} - name: "EDC_DATA_ENCRYPTION_ALGORITHM" diff --git a/charts/tractusx-connector/templates/deployment-dataplane.yaml b/charts/tractusx-connector/templates/deployment-dataplane.yaml index cafb50909..7f48345e0 100644 --- a/charts/tractusx-connector/templates/deployment-dataplane.yaml +++ b/charts/tractusx-connector/templates/deployment-dataplane.yaml @@ -132,7 +132,7 @@ spec: ########### {{- if .Values.vault.hashicorp.enabled }} - # see extension https://github.com/catenax-ng/product-edc/tree/develop/edc-extensions/hashicorp-vault + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/hashicorp-vault - name: "EDC_VAULT_HASHICORP_URL" value: {{ .Values.vault.hashicorp.url | required ".Values.vault.hashicorp.url is required" | quote }} - name: "EDC_VAULT_HASHICORP_TOKEN" diff --git a/docs/README.md b/docs/README.md index 096e41feb..259c2560b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,7 +2,7 @@ The Tractus-X EDC repository creates runnable applications out of EDC extensions from the [Eclipse DataSpace Connector](https://github.com/eclipse-edc/Connector) repository. -When running a EDC connector from the Product EDC repository there are three setups to choose from. They only vary by using different extensions for +When running a EDC connector from the Tractus-X EDC repository there are three setups to choose from. 
They only vary by using different extensions for - Resolving of Connector-Identities - Persistence of the Control-Plane-State diff --git a/docs/development/decision-records/2023-02-27_testing/README.md b/docs/development/decision-records/2023-02-27_testing/README.md index c4619eb8b..45844203d 100644 --- a/docs/development/decision-records/2023-02-27_testing/README.md +++ b/docs/development/decision-records/2023-02-27_testing/README.md @@ -1,4 +1,4 @@ -# Testing concept for product-edc +# Testing concept for tractusx-edc ## Decision @@ -13,7 +13,7 @@ Henceforth, testing shall be done in accordance with the herein outlined rules a ## Rationale -Past experiences with product-edc's testing setup has shown that it is time- and resource-consuming, which also makes it unreliable at times. +Past experiences with tractusx-edc's testing setup has shown that it is time- and resource-consuming, which also makes it unreliable at times. Furthermore, a finer-grained test classification such as the one outlined in this document is currently neither present nor documented. ### Definitions and distinction diff --git a/docs/development/decision-records/2023-03-02_gradle_build/README.md b/docs/development/decision-records/2023-03-02_gradle_build/README.md index 4012599c1..9d4461cc6 100644 --- a/docs/development/decision-records/2023-03-02_gradle_build/README.md +++ b/docs/development/decision-records/2023-03-02_gradle_build/README.md @@ -2,7 +2,7 @@ ## Decision -Product-EDC will move to Gradle as its build system. This decision +Tractus-X EDC will move to Gradle as its build system. This decision record outlines the reasoning behind the decision as well as the migration path. ## Rationale @@ -11,7 +11,7 @@ The primary motivator for migrating to Gradle is the overarching goal, set by th open-source methodology in general, and to track the Eclipse Datasource Components project in particular. While in theory that could be achieved with any build tool, much of what is useful or even necessary to achieve that goal, such as publishing to OSSRH/Sonatype and - in further consequence - to MavenCentral, has already been implemented in the EDC -project. This reduces the implementation and maintenance surface of product-edc with regard to the build, documentation +project. This reduces the implementation and maintenance surface of tractusx-edc with regard to the build, documentation and testing, and hence increases the development velocity considerably. It is therefore a foregone conclusion to rely on technology that has already proven itself in the opensource community, @@ -43,6 +43,6 @@ parallelization resulting in faster and more responsive builds. ## Further consideration -Planned improvements regarding the testing procedure (PR ) will also greatly benefit from the EDC build tools such +Planned improvements regarding the testing procedure will also greatly benefit from the EDC build tools such as JUnit tags and conditional evaluation of the tagged tests. Much of EDC's testing framework is based on Gradle and can -be seamlessly integrated in product-edc. +be seamlessly integrated in tractusx-edc. 
diff --git a/docs/development/postman/collection.json b/docs/development/postman/collection.json index 5f44e6ee5..50d0c5ab7 100644 --- a/docs/development/postman/collection.json +++ b/docs/development/postman/collection.json @@ -63,7 +63,7 @@ "header": [], "body": { "mode": "raw", - "raw": "{\n \"asset\": {\n \"properties\": {\n \"asset:prop:id\": \"{{ASSET_ID}}\",\n \"asset:prop:description\": \"Product EDC Demo Asset\"\n }\n },\n \"dataAddress\": {\n \"properties\": {\n \"type\": \"HttpData\",\n \"baseUrl\": \"https://jsonplaceholder.typicode.com/todos/1\"\n }\n }\n}", + "raw": "{\n \"asset\": {\n \"properties\": {\n \"asset:prop:id\": \"{{ASSET_ID}}\",\n \"asset:prop:description\": \"Tractus-X EDC Demo Asset\"\n }\n },\n \"dataAddress\": {\n \"properties\": {\n \"type\": \"HttpData\",\n \"baseUrl\": \"https://jsonplaceholder.typicode.com/todos/1\"\n }\n }\n}", "options": { "raw": { "language": "json" diff --git a/docs/migration/Version_0.0.x_0.1.x.md b/docs/migration/Version_0.0.x_0.1.x.md index 353db9368..07ea746d9 100644 --- a/docs/migration/Version_0.0.x_0.1.x.md +++ b/docs/migration/Version_0.0.x_0.1.x.md @@ -18,7 +18,7 @@ This document contains a list of breaking changes that are introduced in version ## 1. PostgreSQL Database -The Product EDC [PostgreSQL Migration Extension](../../edc-extensions/postgresql-migration/README.md) is able to run +The Tractus-X EDC [PostgreSQL Migration Extension](../../edc-extensions/postgresql-migration/README.md) is able to run normal migrations. But the extension will never cause a data loss automatically, therefore part of this migration must be done by the user itself. @@ -285,7 +285,7 @@ property is mostly used when creating assets. #### Example Call ```bash -curl -X POST "$PLATO_DATAMGMT_URL/data/assets" --header "X-Api-Key: password" --header "Content-Type: application/json" --data "{ \"asset\": { \"properties\": { \"asset:prop:id\": \"1\", \"asset:prop:description\": \"Product EDC Demo Asset\" } }, \"dataAddress\": { \"properties\": { \"type\": \"HttpData\", \"baseUrl\": \"https://jsonplaceholder.typicode.com/todos/1\" } } }" -s -o /dev/null -w 'Response Code: %{http_code}\n' +curl -X POST "$PLATO_DATAMGMT_URL/data/assets" --header "X-Api-Key: password" --header "Content-Type: application/json" --data "{ \"asset\": { \"properties\": { \"asset:prop:id\": \"1\", \"asset:prop:description\": \"Tractus-X EDC Demo Asset\" } }, \"dataAddress\": { \"properties\": { \"type\": \"HttpData\", \"baseUrl\": \"https://jsonplaceholder.typicode.com/todos/1\" } } }" -s -o /dev/null -w 'Response Code: %{http_code}\n' ``` ## 3. Connector Configuration diff --git a/docs/release-notes/Version 0.1.2.md b/docs/release-notes/Version 0.1.2.md index cef41cbd6..0f4babacd 100644 --- a/docs/release-notes/Version 0.1.2.md +++ b/docs/release-notes/Version 0.1.2.md @@ -8,11 +8,11 @@ The Git submodule references commit `740c100ac162bc41b1968c232ad81f7d739aefa9` from the 23th of September 2022 (newer than **0.0.1-milestone-6**). -## 2. Product EDC +## 2. Tractus-X EDC ### 2.1 Alpine Image -Introduce alpine image as base for all Product EDC Images (replaced distroless image). +Introduce alpine image as base for all Tractus-X EDC Images (replaced distroless image). ## 3. 
Fixed Issues diff --git a/docs/samples/Transfer Data.md b/docs/samples/Transfer Data.md index d68d87561..855efa8a0 100644 --- a/docs/samples/Transfer Data.md +++ b/docs/samples/Transfer Data.md @@ -175,7 +175,7 @@ curl -X POST "${BOB_DATAMGMT_URL}/data/assets" \ "asset": { "properties": { "asset:prop:id": "1", - "asset:prop:description": "Product EDC Demo Asset" + "asset:prop:description": "Tractus-X EDC Demo Asset" } }, "dataAddress": { diff --git a/edc-tests/cucumber/src/main/resources/deployment/helm/omejdn/templates/deployment.yaml b/edc-tests/cucumber/src/main/resources/deployment/helm/omejdn/templates/deployment.yaml index 567f48a74..289476122 100644 --- a/edc-tests/cucumber/src/main/resources/deployment/helm/omejdn/templates/deployment.yaml +++ b/edc-tests/cucumber/src/main/resources/deployment/helm/omejdn/templates/deployment.yaml @@ -59,7 +59,7 @@ spec: cp /opt/config/scope_mapping.yml /etc/daps/scope_mapping.yml apk add --update openssl openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout /etc/keys/omejdn/omejdn.key \ - -subj "/C=DE/ST=Berlin/L=Berlin/O=Product-EDC-Test, Inc./OU=DE" + -subj "/C=DE/ST=Berlin/L=Berlin/O=TractusX-EDC-Test, Inc./OU=DE" volumeMounts: - mountPath: /etc/daps name: config-dir From aa952ba5c36db61563f555fcb72d7752400da3d7 Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Wed, 22 Mar 2023 11:14:13 +0100 Subject: [PATCH 06/92] Fix README.md and Transfer Data.md --- README.md | 86 +++++++++-------------------------- docs/samples/Transfer Data.md | 6 +-- 2 files changed, 24 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index 6f3fc8232..2ca3a9f92 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,23 @@ - +# Tractus-X EDC (Eclipse Dataspace Connector) - [![Contributors][contributors-shield]][contributors-url] [![Stargazers][stars-shield]][stars-url] [![Apache 2.0 License][license-shield]][license-url] [![Latest Release][release-shield]][release-url] - -
-
- - Logo - - -

Product Eclipse Dataspace Connector

-

Catena-X

- -

- Container images and deployments of the Eclipse Dataspace Components open source project. -
- Explore the docs » -
-
- View Eclipse Dataspace Components - · - Releases - · - Report Bug / Request Feature -

-
- - -
- Table of Contents -
    -
  1. - About The Project -
  2. -
  3. - Inventory -
  4. -
  5. - Getting Started - -
  6. -
  7. License
  8. -
-
+Container images and deployments of the Eclipse Dataspace Components for the Tractus-X project. + +Please also refer to: + +- [Our docs](https://github.com/eclipse-tractusx/tractusx-edc/tree/main/docs) +- [Our Releases](https://github.com/eclipse-tractusx/tractusx-edc/releases) +- [Eclipse Dataspace Components](https://github.com/eclipse-edc/Connector) +- [Report Bug / Request Feature](https://github.com/eclipse-tractusx/tractusx-edc/issues) ## About The Project The project provides pre-built control- and data-plane [docker](https://www.docker.com/) images and [helm](https://helm.sh/) charts of the [Eclipse DataSpaceConnector Project](https://github.com/eclipse-edc/Connector). -

(back to top)

- ## Inventory The eclipse data space connector is split up into Control-Plane and Data-Plane, whereas the Control-Plane functions as administration layer @@ -64,28 +26,24 @@ The Data-Plane does the heavy lifting of transferring and receiving data streams Depending on your environment there are different derivatives of the control-plane prepared: -* [edc-controlplane-memory](edc-controlplane/edc-controlplane-memory) with dependency onto - * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [edc-controlplane-postgresql](edc-controlplane/edc-controlplane-postgresql) with dependency onto - * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) - * [PostgreSQL 8.2 or newer](https://www.postgresql.org/) -* [edc-controlplane-postgresql-hashicorp-vault](edc-controlplane/edc-controlplane-postgresql-hashicorp-vault) with dependency onto - * [Hashicorp Vault](https://www.vaultproject.io/) - * [PostgreSQL 8.2 or newer](https://www.postgresql.org/) +- [edc-controlplane-memory](edc-controlplane/edc-controlplane-memory) with dependency onto + - [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) +- [edc-controlplane-postgresql](edc-controlplane/edc-controlplane-postgresql) with dependency onto + - [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) + - [PostgreSQL 8.2 or newer](https://www.postgresql.org/) +- [edc-controlplane-postgresql-hashicorp-vault](edc-controlplane/edc-controlplane-postgresql-hashicorp-vault) with dependency onto + - [Hashicorp Vault](https://www.vaultproject.io/) + -[PostgreSQL 8.2 or newer](https://www.postgresql.org/) Derivatives of the Data-Plane can be found here -* [edc-dataplane-azure-vault](edc-dataplane/edc-dataplane-azure-vault) with dependency onto - * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [edc-dataplane-hashicorp-vault](edc-dataplane/edc-dataplane-hashicorp-vault) with dependency onto - * [Hashicorp Vault](https://www.vaultproject.io/) - -

(back to top)

+- [edc-dataplane-azure-vault](edc-dataplane/edc-dataplane-azure-vault) with dependency onto + - [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) +- [edc-dataplane-hashicorp-vault](edc-dataplane/edc-dataplane-hashicorp-vault) with dependency onto + - [Hashicorp Vault](https://www.vaultproject.io/) ## Getting Started -

(back to top)

- ### Build Build Tractus-X EDC together with its Container Images @@ -100,8 +58,6 @@ Build Tractus-X EDC together with its Container Images Distributed under the Apache 2.0 License. See [LICENSE](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) for more information. -

(back to top)

- [contributors-shield]: https://img.shields.io/github/contributors/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge diff --git a/docs/samples/Transfer Data.md b/docs/samples/Transfer Data.md index 855efa8a0..4d467282a 100644 --- a/docs/samples/Transfer Data.md +++ b/docs/samples/Transfer Data.md @@ -6,18 +6,18 @@ For this transfer connector **Bob** will act as data provider, and connector **A consumer. But the roles could be inverse as well. > Please note: Before running the examples the corresponding environment variables must be set. -> How such an environment can be setup locally is documented in [chapter 1](#1--optional--local-setup). +> How such an environment can be setup locally is documented in [chapter 1](#1-optional---local-setup). ## Table of Content -1. [(optional) Local Setup](#1--optional--local-setup) +1. [Optional: Local Setup](#1-optional---local-setup) 2. [Setup Data Offer](#2-setup-data-offer) 3. [Request Contract Offers](#3-request-contract-offer-catalog) 4. [Negotiate Contract](#4-negotiate-contract) 5. [Transfer Data](#5-transfer-data) 6. [Verify Data Transfer](#6-verify-data-transfer) -## 1. (optional) Local Setup +## 1. Optional - Local Setup To create a local setup with two connectors have a look at the [Local TXDC Setup Documentation](Local%20TXDC%20Setup.md). From bc7a1aaf8e1d2a742f71c04e98bcdf409a274fc3 Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Wed, 22 Mar 2023 11:16:20 +0100 Subject: [PATCH 07/92] Fix Transfer Data.md --- docs/samples/Transfer Data.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/samples/Transfer Data.md b/docs/samples/Transfer Data.md index 4d467282a..97da48384 100644 --- a/docs/samples/Transfer Data.md +++ b/docs/samples/Transfer Data.md @@ -10,7 +10,7 @@ consumer. But the roles could be inverse as well. ## Table of Content -1. [Optional: Local Setup](#1-optional---local-setup) +1. [Optional - Local Setup](#1-optional---local-setup) 2. [Setup Data Offer](#2-setup-data-offer) 3. [Request Contract Offers](#3-request-contract-offer-catalog) 4. 
[Negotiate Contract](#4-negotiate-contract) From 9edbfc648a4567b3548471fe12a894fc2a4eb352 Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Thu, 23 Mar 2023 06:44:58 +0100 Subject: [PATCH 08/92] Regenerate helm chart README.md files --- charts/edc-controlplane/Chart.yaml | 2 +- charts/edc-controlplane/README.md | 8 ++++++-- charts/edc-dataplane/Chart.yaml | 2 +- charts/edc-dataplane/README.md | 8 ++++++-- charts/tractusx-connector/Chart.yaml | 26 ++++++++++++++++++++++++++ charts/tractusx-connector/README.md | 22 ++++++++++++++++------ charts/tractusx-connector/values.yaml | 4 ++-- 7 files changed, 58 insertions(+), 14 deletions(-) diff --git a/charts/edc-controlplane/Chart.yaml b/charts/edc-controlplane/Chart.yaml index e0ec00697..f80a6587d 100644 --- a/charts/edc-controlplane/Chart.yaml +++ b/charts/edc-controlplane/Chart.yaml @@ -32,4 +32,4 @@ version: 0.3.0 deprecated: true maintainers: [] sources: - - https://github.com/eclipse-tractusx/tractusx-edc + - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-controlplane diff --git a/charts/edc-controlplane/README.md b/charts/edc-controlplane/README.md index 2e2a0cf68..15e61cff0 100644 --- a/charts/edc-controlplane/README.md +++ b/charts/edc-controlplane/README.md @@ -6,7 +6,7 @@ EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with responsibility of resource management and govern contracts and data transfers -- **Homepage:** +**Homepage:** ## TL;DR @@ -15,6 +15,10 @@ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev helm install my-release tractusx-edc/edc-controlplane --version 0.3.0 ``` +## Source Code + +* + ## Values | Key | Type | Default | Description | @@ -44,7 +48,7 @@ helm install my-release tractusx-edc/edc-controlplane --version 0.3.0 | envSecretName | string | `nil` | [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from | | fullnameOverride | string | `""` | Overrides the releases full name | | image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| image.repository | string | `"ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault"` | Which derivate of the edc control-plane to use. One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-memory] | +| image.repository | string | `"ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault"` | Which derivate of the edc control-plane to use. One of: [ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql, ghcr.io/catenax-ng/product-edc/edc-controlplane-memory] | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | | imagePullSecret.dockerconfigjson | string | `""` | Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). 
Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. | | imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | diff --git a/charts/edc-dataplane/Chart.yaml b/charts/edc-dataplane/Chart.yaml index 001fe2d1b..9d051fb7d 100644 --- a/charts/edc-dataplane/Chart.yaml +++ b/charts/edc-dataplane/Chart.yaml @@ -32,4 +32,4 @@ version: 0.3.0 deprecated: true maintainers: [] sources: - - https://github.com/eclipse-tractusx/tractusx-edc + - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-dataplane diff --git a/charts/edc-dataplane/README.md b/charts/edc-dataplane/README.md index 934ff72c1..6776e76fa 100644 --- a/charts/edc-dataplane/README.md +++ b/charts/edc-dataplane/README.md @@ -6,7 +6,7 @@ EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility of transferring and receiving data streams -- **Homepage:** +**Homepage:** ## TL;DR @@ -15,6 +15,10 @@ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev helm install my-release tractusx-edc/edc-dataplane --version 0.3.0 ``` +## Source Code + +* + ## Values | Key | Type | Default | Description | @@ -40,7 +44,7 @@ helm install my-release tractusx-edc/edc-dataplane --version 0.3.0 | envSecretName | string | `nil` | [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from | | fullnameOverride | string | `""` | Overrides the releases full name | | image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| image.repository | string | `"ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault"` | Which derivate of the edc data-plane to use. One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-azure-vault] | +| image.repository | string | `"ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault"` | Which derivate of the edc data-plane to use. One of: [ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault, ghcr.io/catenax-ng/product-edc/edc-dataplane-azure-vault] | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | | imagePullSecret.dockerconfigjson | string | `""` | Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. 
| | imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | diff --git a/charts/tractusx-connector/Chart.yaml b/charts/tractusx-connector/Chart.yaml index 7057f1599..d611d8944 100644 --- a/charts/tractusx-connector/Chart.yaml +++ b/charts/tractusx-connector/Chart.yaml @@ -1,3 +1,25 @@ +# +# Copyright (c) 2023 ZF Friedrichshafen AG +# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + --- apiVersion: v2 name: tractusx-connector @@ -20,3 +42,7 @@ version: 0.3.0 # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. appVersion: "0.3.0" + +home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector +sources: + - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector \ No newline at end of file diff --git a/charts/tractusx-connector/README.md b/charts/tractusx-connector/README.md index ccd0cae09..5f160a7b2 100644 --- a/charts/tractusx-connector/README.md +++ b/charts/tractusx-connector/README.md @@ -4,6 +4,8 @@ A Helm chart for Tractus-X Eclipse Data Space Connector +**Homepage:** + ## TL;DR ```shell @@ -11,6 +13,10 @@ helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev helm install my-release tractusx-edc/tractusx-connector --version 0.3.0 ``` +## Source Code + +* + ## Values | Key | Type | Default | Description | @@ -25,7 +31,7 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.0 | controlplane.debug.enabled | bool | `false` | | | controlplane.debug.port | int | `1044` | | | controlplane.debug.suspendOnStart | bool | `false` | | -| controlplane.endpoints | object | `{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"metrics":{"path":"/metrics","port":8085},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane | +| controlplane.endpoints | object | `{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"metrics":{"path":"/metrics","port":9090},"observability":{"insecure":true,"path":"/observability","port":8085},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane | | controlplane.endpoints.control | object | `{"path":"/control","port":8083}` | control api, used for internal control calls. 
can be added to the internal ingress, but should probably not | | controlplane.endpoints.control.path | string | `"/control"` | path for incoming api calls | | controlplane.endpoints.control.port | int | `8083` | port for incoming api calls | @@ -39,9 +45,13 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.0 | controlplane.endpoints.ids | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing | | controlplane.endpoints.ids.path | string | `"/api/v1/ids"` | path for incoming api calls | | controlplane.endpoints.ids.port | int | `8084` | port for incoming api calls | -| controlplane.endpoints.metrics | object | `{"path":"/metrics","port":8085}` | metrics api, used for application metrics, must not be internet facing | +| controlplane.endpoints.metrics | object | `{"path":"/metrics","port":9090}` | metrics api, used for application metrics, must not be internet facing | | controlplane.endpoints.metrics.path | string | `"/metrics"` | path for incoming api calls | -| controlplane.endpoints.metrics.port | int | `8085` | port for incoming api calls | +| controlplane.endpoints.metrics.port | int | `9090` | port for incoming api calls | +| controlplane.endpoints.observability | object | `{"insecure":true,"path":"/observability","port":8085}` | observability api with unsecured access, must not be internet facing | +| controlplane.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. access without authentication | +| controlplane.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints | +| controlplane.endpoints.observability.port | int | `8085` | port for incoming API calls | | controlplane.endpoints.validation | object | `{"path":"/validation","port":8082}` | validation api, only used by the data plane and should not be added to any ingress | | controlplane.endpoints.validation.path | string | `"/validation"` | path for incoming api calls | | controlplane.endpoints.validation.port | int | `8082` | port for incoming api calls | @@ -85,7 +95,7 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.0 | controlplane.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | | controlplane.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | | controlplane.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| controlplane.logging | string | `".level=INFO\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | +| controlplane.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | | 
controlplane.nodeSelector | object | `{}` | | | controlplane.opentelemetry | string | `"otel.javaagent.enabled=false\notel.javaagent.debug=false"` | configuration of the [Open Telemetry Agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) to collect and expose metrics | | controlplane.podAnnotations | object | `{}` | additional annotations for the pod | @@ -137,7 +147,7 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.0 | dataplane.endpoints.default.path | string | `"/api"` | | | dataplane.endpoints.default.port | int | `8080` | | | dataplane.endpoints.metrics.path | string | `"/metrics"` | | -| dataplane.endpoints.metrics.port | int | `8084` | | +| dataplane.endpoints.metrics.port | int | `9090` | | | dataplane.endpoints.public.path | string | `"/api/public"` | | | dataplane.endpoints.public.port | int | `8081` | | | dataplane.endpoints.validation.path | string | `"/validation"` | | @@ -166,7 +176,7 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.0 | dataplane.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | | dataplane.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | | dataplane.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| dataplane.logging | string | `".level=INFO\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | +| dataplane.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | | dataplane.nodeSelector | object | `{}` | | | dataplane.opentelemetry | string | `"otel.javaagent.enabled=false\notel.javaagent.debug=false"` | configuration of the [Open Telemetry Agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) to collect and expose metrics | | dataplane.podAnnotations | object | `{}` | additional annotations for the pod | diff --git a/charts/tractusx-connector/values.yaml b/charts/tractusx-connector/values.yaml index b314004c3..6063f96e5 100644 --- a/charts/tractusx-connector/values.yaml +++ b/charts/tractusx-connector/values.yaml @@ -273,7 +273,7 @@ controlplane: # -- configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) logging: |- .level=INFO - org.eclipse.dataspaceconnector.level=ALL + org.eclipse.edc.level=ALL handlers=java.util.logging.ConsoleHandler java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter java.util.logging.ConsoleHandler.level=ALL @@ -470,7 +470,7 @@ dataplane: # -- configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) logging: |- .level=INFO - 
org.eclipse.dataspaceconnector.level=ALL + org.eclipse.edc.level=ALL handlers=java.util.logging.ConsoleHandler java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter java.util.logging.ConsoleHandler.level=ALL From f34c84f113dc4e7ec1736a7c394cd801efd9fb69 Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Thu, 23 Mar 2023 06:46:05 +0100 Subject: [PATCH 09/92] Remove left over html tags from root REAMDE.md --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 2ca3a9f92..566e42f5a 100644 --- a/README.md +++ b/README.md @@ -52,8 +52,6 @@ Build Tractus-X EDC together with its Container Images ./gradlew dockerize ``` -
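
> For reference, the logging keys touched in the chart change above (`controlplane.logging` and `dataplane.logging` of the `tractusx-connector` chart) hold a plain Java Util Logging properties string, so the renamed `org.eclipse.edc` logger namespace can also be supplied at install time. The snippet below is only an illustrative sketch added by the editor, not part of the patch: the `logging.properties` file name and the use of `--set-file` are assumptions, while the property contents and chart coordinates are taken from the diffs above.

```shell
# Sketch: feeding the updated JUL configuration (org.eclipse.edc namespace) into the chart.
# Assumes Helm 3 and the chart repo added as shown in the chart README TL;DR section.
cat > logging.properties <<'EOF'
.level=INFO
org.eclipse.edc.level=ALL
handlers=java.util.logging.ConsoleHandler
java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter
java.util.logging.ConsoleHandler.level=ALL
java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n
EOF

# --set-file passes the file content as the (multi-line) string value for each key.
helm upgrade --install my-release tractusx-edc/tractusx-connector --version 0.3.0 \
  --set-file controlplane.logging=logging.properties \
  --set-file dataplane.logging=logging.properties
```
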

(back to top)

- ## License Distributed under the Apache 2.0 License. See [LICENSE](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) for more information. From f9c7682f89adf509e1c50c31781207841e9a7bdf Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Thu, 23 Mar 2023 06:51:21 +0100 Subject: [PATCH 10/92] Add empty line at EOF --- charts/tractusx-connector/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/tractusx-connector/Chart.yaml b/charts/tractusx-connector/Chart.yaml index d611d8944..c86990c6a 100644 --- a/charts/tractusx-connector/Chart.yaml +++ b/charts/tractusx-connector/Chart.yaml @@ -45,4 +45,4 @@ appVersion: "0.3.0" home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector sources: - - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector \ No newline at end of file + - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector From 58205ba524374d9707e76d6e40e3fe8af2d955fd Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Thu, 23 Mar 2023 07:06:41 +0100 Subject: [PATCH 11/92] Update CODE_OF_CONDUCT.md --- CODE_OF_CONDUCT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 651d7656a..73b8aa525 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -43,4 +43,4 @@ Project committers or leaders who do not follow the Code of Conduct in good fait ## Attribution -This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org) , version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct.html](https://www.contributor-covenant.org/version/1/4/code-of-conduct/) +This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct.html](https://www.contributor-covenant.org/version/1/4/code-of-conduct/) From b2a015600e2116220b3bc641ce8927bf158ffb70 Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Thu, 23 Mar 2023 07:40:21 +0100 Subject: [PATCH 12/92] Retrigger ci From 2dbcaec038d1b68bc0d74c9b1cf7d889b18a7d79 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Mon, 27 Mar 2023 17:52:00 +0200 Subject: [PATCH 13/92] Release: fix version handling --- .github/workflows/build.yaml | 2 +- .github/workflows/draft-new-release.yaml | 2 +- .github/workflows/publish-new-release.yml | 6 +++--- build.gradle.kts | 7 ------- gradle.properties | 2 +- 5 files changed, 6 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 78b91b6f6..55e1860c6 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -254,7 +254,7 @@ jobs: # publish snapshots - name: Publish snapshot versions run: |- - echo "Publishing Version $(grep -e "defaultVersion" gradle.properties | cut -f2 -d"=") to Github Packages" + echo "Publishing Version $(grep -e "version" gradle.properties | cut -f2 -d"=") to Github Packages" ./gradlew publishAllPublicationsToGitHubPackagesRepository env: REPO: ${{ github.repository }} diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index 9c4e888c8..a7c618f45 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -43,7 +43,7 @@ jobs: name: Bump version in 
gradle.properties run: |- # replace the project's (default) version, could be overwritten later with the -Pversion=... flag - sed -i 's/defaultVersion=.*/defaultVersion=${{ github.event.inputs.version }}/g' gradle.properties + sed -i 's/version=.*/version=${{ github.event.inputs.version }}/g' gradle.properties env: GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index d2064264f..16192638b 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -72,7 +72,7 @@ jobs: - name: Publish release version run: | - echo "Publishing Version $(grep -e "defaultVersion" gradle.properties | cut -f2 -d"=") to Github Packages" + echo "Publishing Version $(grep -e "version" gradle.properties | cut -f2 -d"=") to Github Packages" ./gradlew publishAllPublicationsToGithubPackagesRepository env: REPO: ${{ github.repository }} @@ -200,8 +200,8 @@ jobs: VERSION="$RELEASE_VERSION_MAJOR.$RELEASE_VERSION_MINOR.$((RELEASE_VERSION_PATCH+1))-SNAPSHOT" SNAPSHOT_VERSION=$VERSION - # Persist the "defaultVersion" in the gradle.properties - sed -i 's/defaultVersion=.*/defaultVersion=${{ github.event.inputs.version }}/g' gradle.properties + # Persist the "version" in the gradle.properties + sed -i 's/version=.*/version=${{ github.event.inputs.version }}/g' gradle.properties # Commit and push to origin develop diff --git a/build.gradle.kts b/build.gradle.kts index 04e7ff7cc..ba0f247dd 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -17,15 +17,9 @@ val txScmConnection: String by project val txWebsiteUrl: String by project val txScmUrl: String by project val groupId: String by project -val defaultVersion: String by project val annotationProcessorVersion: String by project val metaModelVersion: String by project -var actualVersion: String = (project.findProperty("version") ?: defaultVersion) as String -if (actualVersion == "unspecified") { - actualVersion = defaultVersion -} - buildscript { repositories { mavenLocal() @@ -73,7 +67,6 @@ allprojects { configure { versions { // override default dependency versions here - projectVersion.set(actualVersion) metaModel.set(metaModelVersion) } diff --git a/gradle.properties b/gradle.properties index 1c45f2373..f126bae28 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,5 +1,5 @@ groupId=org.eclipse.tractusx.edc -defaultVersion=0.3.1-SNAPSHOT +version=0.3.1-SNAPSHOT javaVersion=11 # configure the build: From 44005001f5d9048f02ce2c6cdd511ff0dedf968c Mon Sep 17 00:00:00 2001 From: GitHub actions Date: Mon, 27 Mar 2023 16:00:34 +0000 Subject: [PATCH 14/92] Prepare release 0.3.1 --- CHANGELOG.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4674ba285..313f312c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.1] - 2023-03-27 + ### Added ### Changed @@ -250,7 +252,11 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). 
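
> To make the version-handling change above easier to follow, here is a condensed sketch of what the release workflows now do with `gradle.properties` after `defaultVersion` was replaced by `version`. The concrete values `0.3.1` and `0.3.2-SNAPSHOT` are examples standing in for the workflow inputs, and the publish step additionally needs the GitHub Packages credentials that the workflow sets in its environment.

```shell
# Illustrative sketch of the version handling after this change (editor's example values).

# 1. Draft-release workflow: pin the chosen release version in gradle.properties
sed -i 's/version=.*/version=0.3.1/g' gradle.properties

# 2. Publish workflow: read the version back and publish to GitHub Packages
echo "Publishing Version $(grep -e "version" gradle.properties | cut -f2 -d"=") to Github Packages"
./gradlew publishAllPublicationsToGithubPackagesRepository

# 3. Afterwards: move the develop branch to the next snapshot version
sed -i 's/version=.*/version=0.3.2-SNAPSHOT/g' gradle.properties
```
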
## [0.0.1] - 2022-05-13 -[Unreleased]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.1.6...HEAD +[Unreleased]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.3.1...HEAD + +[0.3.1]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.3.0...0.3.1 + +[0.3.0]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.2.0...0.3.0 [0.1.6]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.1.5...0.1.6 From aa789b26936c3d6b74646c1791203c71ef7d269a Mon Sep 17 00:00:00 2001 From: Stephan Bauer Date: Wed, 1 Mar 2023 14:16:07 +0100 Subject: [PATCH 15/92] Cherry-picked upstream commits (QGate stuff) in preparation for the 0.3.1 release --- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- charts/edc-controlplane/.helmignore | 4 + charts/edc-controlplane/LICENSE | 202 ++++++++++++++++++++++ charts/edc-controlplane/README.md | 2 +- charts/edc-controlplane/values.yaml | 4 +- charts/edc-dataplane/.helmignore | 4 + charts/edc-dataplane/LICENSE | 202 ++++++++++++++++++++++ charts/edc-dataplane/README.md | 2 +- charts/edc-dataplane/values.yaml | 4 +- misc/NOTICE.md.template | 21 +++ 10 files changed, 440 insertions(+), 7 deletions(-) create mode 100644 charts/edc-controlplane/LICENSE create mode 100644 charts/edc-dataplane/LICENSE create mode 100644 misc/NOTICE.md.template diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 62c89ee8c..85ebda849 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -7,7 +7,7 @@ assignees: '' --- -_If you are missing a feature or have an idea how to improve this project that should first be +_If you are missing a feature or have an idea how to improve this project that should first be discussed, please feel free to open up a [discussion](https://github.com/eclipse-tractusx/tractusx-edc/discussions/categories/ideas)._ **Is your feature request related to a problem? Please describe.** diff --git a/charts/edc-controlplane/.helmignore b/charts/edc-controlplane/.helmignore index 00ca644b2..148b31d6c 100644 --- a/charts/edc-controlplane/.helmignore +++ b/charts/edc-controlplane/.helmignore @@ -23,3 +23,7 @@ .vscode/ README.md.gotmpl + +# Accept only values.yaml +values?*.yaml +values?*.yml diff --git a/charts/edc-controlplane/LICENSE b/charts/edc-controlplane/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/charts/edc-controlplane/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/charts/edc-controlplane/README.md b/charts/edc-controlplane/README.md index 15e61cff0..17c6dd45d 100644 --- a/charts/edc-controlplane/README.md +++ b/charts/edc-controlplane/README.md @@ -48,7 +48,7 @@ helm install my-release tractusx-edc/edc-controlplane --version 0.3.0 | envSecretName | string | `nil` | [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from | | fullnameOverride | string | `""` | Overrides the releases full name | | image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| image.repository | string | `"ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault"` | Which derivate of the edc control-plane to use. One of: [ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql, ghcr.io/catenax-ng/product-edc/edc-controlplane-memory] | +| image.repository | string | `"ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault"` | Which derivate of the edc control-plane to use. One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-memory] | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | | imagePullSecret.dockerconfigjson | string | `""` | Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. | | imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | diff --git a/charts/edc-controlplane/values.yaml b/charts/edc-controlplane/values.yaml index f3443773f..b43d67a35 100644 --- a/charts/edc-controlplane/values.yaml +++ b/charts/edc-controlplane/values.yaml @@ -31,8 +31,8 @@ replicaCount: 1 image: # -- Which derivate of the edc control-plane to use. - # One of: [ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql, ghcr.io/catenax-ng/product-edc/edc-controlplane-memory] - repository: ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault + # One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-memory] + repository: ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault # -- [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use pullPolicy: IfNotPresent # -- Overrides the image tag whose default is the chart appVersion. 
diff --git a/charts/edc-dataplane/.helmignore b/charts/edc-dataplane/.helmignore index 00ca644b2..148b31d6c 100644 --- a/charts/edc-dataplane/.helmignore +++ b/charts/edc-dataplane/.helmignore @@ -23,3 +23,7 @@ .vscode/ README.md.gotmpl + +# Accept only values.yaml +values?*.yaml +values?*.yml diff --git a/charts/edc-dataplane/LICENSE b/charts/edc-dataplane/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/charts/edc-dataplane/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charts/edc-dataplane/README.md b/charts/edc-dataplane/README.md index 6776e76fa..76424d13d 100644 --- a/charts/edc-dataplane/README.md +++ b/charts/edc-dataplane/README.md @@ -44,7 +44,7 @@ helm install my-release tractusx-edc/edc-dataplane --version 0.3.0 | envSecretName | string | `nil` | [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from | | fullnameOverride | string | `""` | Overrides the releases full name | | image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| image.repository | string | `"ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault"` | Which derivate of the edc data-plane to use. One of: [ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault, ghcr.io/catenax-ng/product-edc/edc-dataplane-azure-vault] | +| image.repository | string | `"ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault"` | Which derivate of the edc data-plane to use. One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-azure-vault] | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | | imagePullSecret.dockerconfigjson | string | `""` | Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. | | imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | diff --git a/charts/edc-dataplane/values.yaml b/charts/edc-dataplane/values.yaml index 926032306..9a049cb1f 100644 --- a/charts/edc-dataplane/values.yaml +++ b/charts/edc-dataplane/values.yaml @@ -31,8 +31,8 @@ replicaCount: 1 image: # -- Which derivate of the edc data-plane to use. 
- # One of: [ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault, ghcr.io/catenax-ng/product-edc/edc-dataplane-azure-vault] - repository: ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault + # One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-azure-vault] + repository: ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault # -- [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use pullPolicy: IfNotPresent # -- Overrides the image tag whose default is the chart appVersion diff --git a/misc/NOTICE.md.template b/misc/NOTICE.md.template new file mode 100644 index 000000000..d49cb8eea --- /dev/null +++ b/misc/NOTICE.md.template @@ -0,0 +1,21 @@ +# Notices for Catena-X NG Product EDC + +## Copyright + +All content is the property of the respective authors or their employers. For more information regarding authorship of content, please consult the listed source code repository logs. + +## Declared Project Licenses + +This program and the accompanying materials are made available under the terms of the Apache License, Version 2.0 which is available at https://www.apache.org/licenses/LICENSE-2.0. + +SPDX-License-Identifier: Apache-2.0 + +## Source Code + +The project maintains the following source code repositoriy: + +* https://github.com/eclipse-tractusx/tractusx-edc + +## Third-party Content (Overarching All Modules) + +@{GENERATED_NOTICES} \ No newline at end of file From d29620186fac09b5505454dddb63aa1c4f80dc75 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Tue, 28 Mar 2023 08:16:25 +0200 Subject: [PATCH 16/92] fix: use snapshot version after publish workflow --- .github/workflows/publish-new-release.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index 16192638b..b688219fe 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -201,8 +201,7 @@ jobs: SNAPSHOT_VERSION=$VERSION # Persist the "version" in the gradle.properties - sed -i 's/version=.*/version=${{ github.event.inputs.version }}/g' gradle.properties - + sed -i "s/version=.*/version=$SNAPSHOT_VERSION/g" gradle.properties # Commit and push to origin develop git add gradle.properties From b46b310596b2b440a0db1ecc9b6d9d464db78732 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Fri, 24 Mar 2023 17:31:47 +0100 Subject: [PATCH 17/92] docs: add additional info for running business tests locally --- docs/development/Run-business-tests-local.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/docs/development/Run-business-tests-local.md b/docs/development/Run-business-tests-local.md index 9d66f7bbc..76520007d 100644 --- a/docs/development/Run-business-tests-local.md +++ b/docs/development/Run-business-tests-local.md @@ -14,7 +14,7 @@ Prerequisites: ## 2. 
Install the all-in-one supporting infrastructure environment (Daps, Vault, PostgreSql, Minio, Backend-Service) ```shel -helm install infrastructure edc-tests/src/main/resources/deployment/helm/supporting-infrastructure -n business-tests --create-namespace +helm install infrastructure edc-tests/src/main/resources/deployment/helm/supporting-infrastructure -n business-tests --dependency-update --create-namespace ``` To access the PostgreSql databases you could use following kubectl port forwardings: @@ -139,8 +139,15 @@ kubectl get svc -n business-tests -o go-template='{{range .items}}{{ $save := . This will return all NodePorts which are available in business-tests namespace where you can pick the ports to use in your environment variables. Now you are able to run it in IDE either as normal "Run" mode or in "Debug" mode where you can debug the business-tests by setting debugging points. -## 6. Update your components +Example of mapping to environment variables needed for the business tests: +```shell +business-tests/plato-controlplane - data: 30955(8081) -> PLATO_DATA_MANAGEMENT_URL=http://localhost:30955/data; +business-tests/sokrates-controlplane - data: 30538(8081) -> SOKRATES_DATA_MANAGEMENT_URL=http://localhost:30538/data; +business-tests/backend - backend: 30556(8081) -> SOKRATES_BACKEND_SERVICE_BACKEND_API_URL= http://localhost:30556 +``` + +### 6. Update your components Once everything is installed you just need to update your services when you have a new image. ```shell From b20ddaa3cab8cc6dca2c79dd75ba470ca6ff6b21 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Tue, 21 Mar 2023 16:22:19 +0100 Subject: [PATCH 18/92] feat(CI): add Markdown linter --- .github/workflows/markdown-lint.yaml | 43 ++++++++++++++++++++++++++++ .markdownlint.yaml | 26 +++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 .github/workflows/markdown-lint.yaml create mode 100644 .markdownlint.yaml diff --git a/.github/workflows/markdown-lint.yaml b/.github/workflows/markdown-lint.yaml new file mode 100644 index 000000000..34fdc55ea --- /dev/null +++ b/.github/workflows/markdown-lint.yaml @@ -0,0 +1,43 @@ +#******************************************************************************** +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +#*******************************************************************************/ + +name: "Lint Markdown" + +on: + push: + branches: + - main + - develop + pull_request: + branches: + - main + - develop + +jobs: + markdown-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install mardkdownlint + run: npm install -g markdownlint-cli2 + + - name: Run markdownlint + run: | + markdownlint-cli2-config .markdownlint.yaml "**/*.md" "#node_modules" diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 000000000..a5aa0776a --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,26 @@ +#******************************************************************************** +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 +#*******************************************************************************/ + + +"default": true +# Do not restrict line length: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#MD013 +"MD013": false +# Allow same content on headlines on siblings: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#MD024 +"MD024": + "siblings_only": true From c2c936ce9cf8bf83eb44c190122cddde9f58b187 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Tue, 28 Mar 2023 11:05:13 +0200 Subject: [PATCH 19/92] md lint fix --- .../decision-records/2023-03-23_remove_lombok/README.md | 4 ++-- edc-tests/cucumber/README.md | 2 +- edc-tests/e2e-tests/README.md | 2 +- edc-tests/runtime/README.md | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/development/decision-records/2023-03-23_remove_lombok/README.md b/docs/development/decision-records/2023-03-23_remove_lombok/README.md index 74938d373..b7014c8b8 100644 --- a/docs/development/decision-records/2023-03-23_remove_lombok/README.md +++ b/docs/development/decision-records/2023-03-23_remove_lombok/README.md @@ -10,7 +10,7 @@ Lombok uses byte-code modification to achieve its goal. That is dangerous for a First and foremost, to achieve its goal, it relies on internal APIs of the JVM, which are not intended for public consumption, thus they can and will get removed, refactored or made otherwise unavailable. This has been discussed at -length in the [project's GitHub page](https://github.com/projectlombok/lombok/issues/2681). +length in the [project's GitHub page](https://github.com/projectlombok/lombok/issues/2681). This is especially problematic for an OSS project such as TractusX. Second, many of the features that are currently used by TractusX-EDC are experimental (e.g. `@UtilityClass`) and are @@ -34,4 +34,4 @@ should not build those obstructions into the code base. ## Further consideration -We can even expect a slightly faster build, because "delomboking" will become unnecessary. 
\ No newline at end of file +We can even expect a slightly faster build, because "delomboking" will become unnecessary. diff --git a/edc-tests/cucumber/README.md b/edc-tests/cucumber/README.md index e8c1a8ab1..db14876f7 100644 --- a/edc-tests/cucumber/README.md +++ b/edc-tests/cucumber/README.md @@ -1,6 +1,6 @@ # Invoke Business-Tests via Maven -THIS MODULE IS DEPRECATED AND WILL NOT BE MAINTAINED ANYMORE. +THIS MODULE IS DEPRECATED AND WILL NOT BE MAINTAINED ANYMORE. ```shell ./gradlew :edc-tests:test -Dcucumber=true diff --git a/edc-tests/e2e-tests/README.md b/edc-tests/e2e-tests/README.md index cdde986b0..f204147c3 100644 --- a/edc-tests/e2e-tests/README.md +++ b/edc-tests/e2e-tests/README.md @@ -1,3 +1,3 @@ # E2E-Tests -This module contains JUnit tests that spin up multiple runtimes in one JVM. \ No newline at end of file +This module contains JUnit tests that spin up multiple runtimes in one JVM. diff --git a/edc-tests/runtime/README.md b/edc-tests/runtime/README.md index 703963687..2f9593a75 100644 --- a/edc-tests/runtime/README.md +++ b/edc-tests/runtime/README.md @@ -1,3 +1,3 @@ # In-Memory Runtime for Testing Purposes -This module provides a very small, purely in-mem runtime to execute tests against. Not intended for anything other than testing! \ No newline at end of file +This module provides a very small, purely in-mem runtime to execute tests against. Not intended for anything other than testing! From b358e785a2b47c089190138248d1892943940793 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Tue, 28 Mar 2023 11:41:27 +0200 Subject: [PATCH 20/92] pr remarks --- .github/workflows/markdown-lint.yaml | 43 ---------------------------- .github/workflows/verify.yaml | 12 ++++++++ .markdownlint.yaml | 2 -- 3 files changed, 12 insertions(+), 45 deletions(-) delete mode 100644 .github/workflows/markdown-lint.yaml diff --git a/.github/workflows/markdown-lint.yaml b/.github/workflows/markdown-lint.yaml deleted file mode 100644 index 34fdc55ea..000000000 --- a/.github/workflows/markdown-lint.yaml +++ /dev/null @@ -1,43 +0,0 @@ -#******************************************************************************** -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0. -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 -#*******************************************************************************/ - -name: "Lint Markdown" - -on: - push: - branches: - - main - - develop - pull_request: - branches: - - main - - develop - -jobs: - markdown-lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Install mardkdownlint - run: npm install -g markdownlint-cli2 - - - name: Run markdownlint - run: | - markdownlint-cli2-config .markdownlint.yaml "**/*.md" "#node_modules" diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index adfeb5558..a02f7514d 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -80,6 +80,18 @@ jobs: ./gradlew checkstyleMain checkstyleTest echo "Running Checkstyle is currently a placeholder" + markdown-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Install mardkdownlint + run: npm install -g markdownlint-cli2 + + - name: Run markdownlint + run: | + markdownlint-cli2-config .markdownlint.yaml "**/*.md" "#node_modules" + unit-tests: runs-on: ubuntu-latest needs: [verify-formatting] diff --git a/.markdownlint.yaml b/.markdownlint.yaml index a5aa0776a..ace38e3d4 100644 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -1,4 +1,3 @@ -#******************************************************************************** # Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) # # See the NOTICE file(s) distributed with this work for additional @@ -15,7 +14,6 @@ # under the License. # # SPDX-License-Identifier: Apache-2.0 -#*******************************************************************************/ "default": true From 4b15b473cd84dcc652acd64171ffbbab4c68044d Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Date: Tue, 28 Mar 2023 11:45:03 +0200 Subject: [PATCH 21/92] Apply suggestions from code review Co-authored-by: Florian Rusch (ZF Friedrichshafen AG) --- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- misc/NOTICE.md.template | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 85ebda849..62c89ee8c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -7,7 +7,7 @@ assignees: '' --- -_If you are missing a feature or have an idea how to improve this project that should first be +_If you are missing a feature or have an idea how to improve this project that should first be discussed, please feel free to open up a [discussion](https://github.com/eclipse-tractusx/tractusx-edc/discussions/categories/ideas)._ **Is your feature request related to a problem? 
Please describe.** diff --git a/misc/NOTICE.md.template b/misc/NOTICE.md.template index d49cb8eea..d01a9d8d5 100644 --- a/misc/NOTICE.md.template +++ b/misc/NOTICE.md.template @@ -1,4 +1,4 @@ -# Notices for Catena-X NG Product EDC +# Notices for Tractus-X EDC ## Copyright From 15ca06fbd8bae88201fb5d5c0364aadac7e47b41 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Tue, 28 Mar 2023 11:54:13 +0200 Subject: [PATCH 22/92] Update .github/workflows/verify.yaml Co-authored-by: Florian Rusch (ZF Friedrichshafen AG) --- .github/workflows/verify.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index a02f7514d..3b5f56250 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -90,7 +90,7 @@ jobs: - name: Run markdownlint run: | - markdownlint-cli2-config .markdownlint.yaml "**/*.md" "#node_modules" + markdownlint-cli2-config .markdownlint.yaml "**/*.md" unit-tests: runs-on: ubuntu-latest From 35527230ca76af4cceba843387b1a8e59180ce0c Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Tue, 28 Mar 2023 16:58:41 +0200 Subject: [PATCH 23/92] chore(md-linting): Fix markdown lint --- docs/development/Run-business-tests-local.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/development/Run-business-tests-local.md b/docs/development/Run-business-tests-local.md index 76520007d..cab17c6c2 100644 --- a/docs/development/Run-business-tests-local.md +++ b/docs/development/Run-business-tests-local.md @@ -147,7 +147,8 @@ business-tests/sokrates-controlplane - data: 30538(8081) -> SOKRATES_DATA_MANAGE business-tests/backend - backend: 30556(8081) -> SOKRATES_BACKEND_SERVICE_BACKEND_API_URL= http://localhost:30556 ``` -### 6. Update your components +## 6. Update your components + Once everything is installed you just need to update your services when you have a new image. ```shell From bbe47808699a691bab573e73ee60d57e51c77b81 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 29 Mar 2023 20:51:52 +0200 Subject: [PATCH 24/92] fix: make AZKV clientsecret or certificate mutually exclusive --- .../templates/deployment-controlplane.yaml | 46 +++++++++++-------- charts/tractusx-connector/values.yaml | 4 +- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 6ba7dc40c..9f8bd9e3e 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -1,24 +1,24 @@ # -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 -# + # Copyright (c) 2023 ZF Friedrichshafen AG + # Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH + # Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + # Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License, Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. + # + # SPDX-License-Identifier: Apache-2.0 + # --- apiVersion: apps/v1 @@ -315,10 +315,16 @@ spec: value: {{ .Values.vault.azure.tenant | required ".Values.vault.azure.tenant is required" | quote }} - name: "EDC_VAULT_NAME" value: {{ .Values.vault.azure.name | required ".Values.vault.azure.name is required" | quote }} + # only set the env var if config value not null + {{- if .Values.vault.azure.secret }} - name: "EDC_VAULT_CLIENTSECRET" value: {{ .Values.vault.azure.secret | quote }} + {{- end }} + # only set the env var if config value not null + {{- if .Values.vault.azure.certificate }} - name: "EDC_VAULT_CERTIFICATE" value: {{ .Values.vault.azure.certificate | quote }} + {{- end }} {{- end }} ##################### diff --git a/charts/tractusx-connector/values.yaml b/charts/tractusx-connector/values.yaml index 6063f96e5..cbc266a94 100644 --- a/charts/tractusx-connector/values.yaml +++ b/charts/tractusx-connector/values.yaml @@ -509,8 +509,8 @@ vault: name: "" client: "" tenant: "" - secret: "" - certificate: "" + secret: + certificate: secretNames: transferProxyTokenSignerPrivateKey: transfer-proxy-token-signer-private-key transferProxyTokenSignerPublicKey: transfer-proxy-token-signer-public-key From d77e6277bd2cdcc899fc4e3b5c518e4d0174deaa Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Thu, 30 Mar 2023 08:40:41 +0200 Subject: [PATCH 25/92] revert pointless blanks --- .../templates/deployment-controlplane.yaml | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 9f8bd9e3e..88320d681 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -1,24 +1,24 @@ # - # Copyright (c) 2023 ZF Friedrichshafen AG - # Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH - # Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - # Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. 
- # - # This program and the accompanying materials are made available under the - # terms of the Apache License, Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - # License for the specific language governing permissions and limitations - # under the License. - # - # SPDX-License-Identifier: Apache-2.0 - # +# Copyright (c) 2023 ZF Friedrichshafen AG +# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# --- apiVersion: apps/v1 From 27d776f8e0c016ec1be2f6359fee3c7caea09489 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 29 Mar 2023 14:55:00 +0200 Subject: [PATCH 26/92] fix: use correct paths for GH Packages docker reg. --- charts/edc-controlplane/Chart.yaml | 4 ++-- charts/edc-controlplane/README.md | 4 ++-- charts/edc-dataplane/Chart.yaml | 4 ++-- charts/edc-dataplane/README.md | 4 ++-- charts/tractusx-connector/Chart.yaml | 4 ++-- charts/tractusx-connector/README.md | 4 ++-- .../templates/deployment-controlplane.yaml | 8 ++++---- .../templates/deployment-dataplane.yaml | 4 ++-- .../decision-records/2023-02-09-release-process/README.md | 4 ++-- gradle.properties | 2 +- 10 files changed, 21 insertions(+), 21 deletions(-) diff --git a/charts/edc-controlplane/Chart.yaml b/charts/edc-controlplane/Chart.yaml index f80a6587d..bf2ebea38 100644 --- a/charts/edc-controlplane/Chart.yaml +++ b/charts/edc-controlplane/Chart.yaml @@ -27,8 +27,8 @@ description: >- EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with responsibility of resource management and govern contracts and data transfers home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-controlplane type: application -appVersion: "0.3.0" -version: 0.3.0 +appVersion: "0.3.1" +version: 0.3.1 deprecated: true maintainers: [] sources: diff --git a/charts/edc-controlplane/README.md b/charts/edc-controlplane/README.md index 17c6dd45d..9099e29d9 100644 --- a/charts/edc-controlplane/README.md +++ b/charts/edc-controlplane/README.md @@ -2,7 +2,7 @@ > **:exclamation: This Helm Chart is deprecated!** -![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.0](https://img.shields.io/badge/AppVersion-0.3.0-informational?style=flat-square) +![Version: 0.3.1](https://img.shields.io/badge/Version-0.3.1-informational?style=flat-square) ![Type: 
application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.1](https://img.shields.io/badge/AppVersion-0.3.1-informational?style=flat-square) EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with responsibility of resource management and govern contracts and data transfers @@ -12,7 +12,7 @@ EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with res ```shell helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/edc-controlplane --version 0.3.0 +helm install my-release tractusx-edc/edc-controlplane --version 0.3.1 ``` ## Source Code diff --git a/charts/edc-dataplane/Chart.yaml b/charts/edc-dataplane/Chart.yaml index 9d051fb7d..247d25c80 100644 --- a/charts/edc-dataplane/Chart.yaml +++ b/charts/edc-dataplane/Chart.yaml @@ -27,8 +27,8 @@ description: >- EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility of transferring and receiving data streams home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-dataplane type: application -appVersion: "0.3.0" -version: 0.3.0 +appVersion: "0.3.1" +version: 0.3.1 deprecated: true maintainers: [] sources: diff --git a/charts/edc-dataplane/README.md b/charts/edc-dataplane/README.md index 76424d13d..4eade6494 100644 --- a/charts/edc-dataplane/README.md +++ b/charts/edc-dataplane/README.md @@ -2,7 +2,7 @@ > **:exclamation: This Helm Chart is deprecated!** -![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.0](https://img.shields.io/badge/AppVersion-0.3.0-informational?style=flat-square) +![Version: 0.3.1](https://img.shields.io/badge/Version-0.3.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.1](https://img.shields.io/badge/AppVersion-0.3.1-informational?style=flat-square) EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility of transferring and receiving data streams @@ -12,7 +12,7 @@ EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility o ```shell helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/edc-dataplane --version 0.3.0 +helm install my-release tractusx-edc/edc-dataplane --version 0.3.1 ``` ## Source Code diff --git a/charts/tractusx-connector/Chart.yaml b/charts/tractusx-connector/Chart.yaml index c86990c6a..0c8111bed 100644 --- a/charts/tractusx-connector/Chart.yaml +++ b/charts/tractusx-connector/Chart.yaml @@ -36,12 +36,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.0 +version: 0.3.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "0.3.0" +appVersion: "0.3.1" home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector sources: diff --git a/charts/tractusx-connector/README.md b/charts/tractusx-connector/README.md index 5f160a7b2..52d4227a8 100644 --- a/charts/tractusx-connector/README.md +++ b/charts/tractusx-connector/README.md @@ -1,6 +1,6 @@ # tractusx-connector -![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.0](https://img.shields.io/badge/AppVersion-0.3.0-informational?style=flat-square) +![Version: 0.3.1](https://img.shields.io/badge/Version-0.3.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.1](https://img.shields.io/badge/AppVersion-0.3.1-informational?style=flat-square) A Helm chart for Tractus-X Eclipse Data Space Connector @@ -10,7 +10,7 @@ A Helm chart for Tractus-X Eclipse Data Space Connector ```shell helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/tractusx-connector --version 0.3.0 +helm install my-release tractusx-edc/tractusx-connector --version 0.3.1 ``` ## Source Code diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 6ba7dc40c..67a3ab251 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -62,13 +62,13 @@ spec: {{- if .Values.controlplane.image.repository }} image: "{{ .Values.controlplane.image.repository }}:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if and .Values.postgresql.enabled .Values.vault.hashicorp.enabled }} - image: "ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-controlplane-postgresql-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if and .Values.postgresql.enabled .Values.vault.azure.enabled }} - image: "ghcr.io/catenax-ng/product-edc/edc-controlplane-postgresql-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-controlplane-postgresql:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if .Values.vault.hashicorp.enabled }} - image: "ghcr.io/catenax-ng/product-edc/edc-controlplane-memory-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-controlplane-memory-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if .Values.vault.azure.enabled }} - image: "ghcr.io/catenax-ng/product-edc/edc-controlplane-memory:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-controlplane-memory:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else }} {{- fail "cannot choose control-plane image automatically based on configuration" }} {{- end }} diff --git a/charts/tractusx-connector/templates/deployment-dataplane.yaml b/charts/tractusx-connector/templates/deployment-dataplane.yaml index 7f48345e0..ff5f6a5ce 100644 --- 
a/charts/tractusx-connector/templates/deployment-dataplane.yaml +++ b/charts/tractusx-connector/templates/deployment-dataplane.yaml @@ -40,9 +40,9 @@ spec: {{- if .Values.dataplane.image.repository }} image: "{{ .Values.dataplane.image.repository }}:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" {{- else if and .Values.vault.hashicorp }} - image: "ghcr.io/catenax-ng/product-edc/edc-dataplane-hashicorp-vault:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" + image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-dataplane-hashicorp-vault:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" {{- else if .Values.vault.azure }} - image: "ghcr.io/catenax-ng/product-edc/edc-dataplane-azure-vault:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" + image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-dataplane-azure-vault:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" {{- else }} {{- fail "cannot choose data-plane image automatically based on configuration" }} {{- end }} diff --git a/docs/development/decision-records/2023-02-09-release-process/README.md b/docs/development/decision-records/2023-02-09-release-process/README.md index c67521eb6..aee5bac5a 100644 --- a/docs/development/decision-records/2023-02-09-release-process/README.md +++ b/docs/development/decision-records/2023-02-09-release-process/README.md @@ -75,8 +75,8 @@ _Other guidelines w.r.t. the review process, merging etc. will follow in a later ### A word on Bugfixes/Hotfixes -Once a release is published, for example `0.3.0` it will receive no further development other than hotfixes. Similarly, -hotfix branches are created based off of the release branch, here `releases/0.3.0`, thus, `hotfix/0.3.1`. From this, +Once a release is published, for example `0.3.1` it will receive no further development other than hotfixes. Similarly, +hotfix branches are created based off of the release branch, here `releases/0.3.1`, thus, `hotfix/0.3.1`. From this, three scenarios emerge: 1. The actual fix is done on `develop` and can be cherry-picked into the `hotfix/0.3.1` branch. 
No new commits are diff --git a/gradle.properties b/gradle.properties index f126bae28..59309d4fc 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,5 +1,5 @@ groupId=org.eclipse.tractusx.edc -version=0.3.1-SNAPSHOT +version=0.3.2-SNAPSHOT javaVersion=11 # configure the build: From 0d367ce52ad3263d0c9c6071820c276ea98e4d86 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Thu, 30 Mar 2023 13:07:11 +0200 Subject: [PATCH 27/92] fix: only dockerize if a dockerfile exists --- build.gradle.kts | 6 +++++- settings.gradle.kts | 3 --- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/build.gradle.kts b/build.gradle.kts index ba0f247dd..a9e1f992d 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -128,7 +128,9 @@ allprojects { // the "dockerize" task is added to all projects that use the `shadowJar` plugin subprojects { afterEvaluate { - if (project.plugins.hasPlugin("com.github.johnrengelman.shadow")) { + if (project.plugins.hasPlugin("com.github.johnrengelman.shadow") && + file("${project.projectDir}/src/main/docker/Dockerfile").exists() + ) { //actually apply the plugin to the (sub-)project @@ -139,6 +141,8 @@ subprojects { dockerFile.set(file("${project.projectDir}/src/main/docker/Dockerfile")) images.add("${project.name}:${project.version}") images.add("${project.name}:latest") + // uncomment the following line if building on Apple Silicon + // platform.set("linux/x86_64") buildArgs.put("JAR", "build/libs/${project.name}.jar") inputDir.set(file(project.projectDir)) } diff --git a/settings.gradle.kts b/settings.gradle.kts index 35ea70b69..e0fc39433 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -30,9 +30,6 @@ include(":edc-dataplane:edc-dataplane-azure-vault") include(":edc-dataplane:edc-dataplane-base") include(":edc-dataplane:edc-dataplane-hashicorp-vault") -// for testing -include(":launchers:simple") - // this is needed to have access to snapshot builds of plugins pluginManagement { repositories { From e862db727e273887f2645f361ad14488f91d644e Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Thu, 30 Mar 2023 13:55:29 +0200 Subject: [PATCH 28/92] chore: use old repo URL for Maven publication --- .github/workflows/build.yaml | 3 ++- .github/workflows/publish-new-release.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index b5455402a..4ed66b4c5 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -257,6 +257,7 @@ jobs: echo "Publishing Version $(grep -e "version" gradle.properties | cut -f2 -d"=") to Github Packages" ./gradlew publishAllPublicationsToGitHubPackagesRepository env: - REPO: ${{ github.repository }} + #REPO: ${{ github.repository }} + REPO: "catenax-ng/product-edc" GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index d8970d897..fe84b08c3 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -75,7 +75,8 @@ jobs: echo "Publishing Version $(grep -e "version" gradle.properties | cut -f2 -d"=") to Github Packages" ./gradlew publishAllPublicationsToGithubPackagesRepository env: - REPO: ${{ github.repository }} + #REPO: ${{ github.repository }} + REPO: "catenax-ng/product-edc" GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} From 1d61eddccc754f3c4009355dbbca80958b81283e Mon Sep 
17 00:00:00 2001 From: Paul Latzelsperger Date: Thu, 30 Mar 2023 20:55:08 +0200 Subject: [PATCH 29/92] fix: use PAT to publish to CXNG product-edc repo --- .github/workflows/build.yaml | 4 ++-- .github/workflows/publish-new-release.yml | 4 ++-- edc-tests/cucumber/build.gradle.kts | 5 +++++ edc-tests/e2e-tests/build.gradle.kts | 5 +++++ edc-tests/runtime/build.gradle.kts | 11 ++++++++--- 5 files changed, 22 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 4ed66b4c5..b8ab83bba 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -259,5 +259,5 @@ jobs: env: #REPO: ${{ github.repository }} REPO: "catenax-ng/product-edc" - GITHUB_PACKAGE_USERNAME: ${{ github.actor }} - GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} + GITHUB_PACKAGE_USERNAME: ${{ secrets.TEMP_GHPKG_USER }} + GITHUB_PACKAGE_PASSWORD: ${{ secrets.TEMP_GHPKG_PASSWORD }} diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index fe84b08c3..56148b82a 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -77,8 +77,8 @@ jobs: env: #REPO: ${{ github.repository }} REPO: "catenax-ng/product-edc" - GITHUB_PACKAGE_USERNAME: ${{ github.actor }} - GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} + GITHUB_PACKAGE_USERNAME: ${{ secrets.TEMP_GHPKG_USER }} + GITHUB_PACKAGE_PASSWORD: ${{ secrets.TEMP_GHPKG_PASSWORD }} # Release: Helm Charts helm-release: diff --git a/edc-tests/cucumber/build.gradle.kts b/edc-tests/cucumber/build.gradle.kts index b0652d231..1000005de 100644 --- a/edc-tests/cucumber/build.gradle.kts +++ b/edc-tests/cucumber/build.gradle.kts @@ -34,3 +34,8 @@ tasks.withType(Test::class) { System.getProperty("cucumber") == "true" } } + +// do not publish +edcBuild { + publish.set(false) +} \ No newline at end of file diff --git a/edc-tests/e2e-tests/build.gradle.kts b/edc-tests/e2e-tests/build.gradle.kts index b456d3e59..6e2f7af27 100644 --- a/edc-tests/e2e-tests/build.gradle.kts +++ b/edc-tests/e2e-tests/build.gradle.kts @@ -30,3 +30,8 @@ dependencies { testImplementation(edc.api.catalog) testImplementation(testFixtures(edc.junit)) } + +// do not publish +edcBuild { + publish.set(false) +} \ No newline at end of file diff --git a/edc-tests/runtime/build.gradle.kts b/edc-tests/runtime/build.gradle.kts index b18b123ef..dc31011c0 100644 --- a/edc-tests/runtime/build.gradle.kts +++ b/edc-tests/runtime/build.gradle.kts @@ -21,11 +21,11 @@ plugins { dependencies { - runtimeOnly(project(":edc-controlplane:edc-controlplane-base")){ + runtimeOnly(project(":edc-controlplane:edc-controlplane-base")) { exclude("org.eclipse.edc", "oauth2-core") exclude("org.eclipse.edc", "oauth2-daps") - exclude(module= "data-encryption") - exclude(module= "control-plane-adapter") + exclude(module = "data-encryption") + exclude(module = "control-plane-adapter") } } @@ -37,3 +37,8 @@ tasks.withType { mergeServiceFiles() archiveFileName.set("app.jar") } + +// do not publish +edcBuild { + publish.set(false) +} \ No newline at end of file From 9836746b699670dfe8a291104bfd47d6eb345b27 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Fri, 31 Mar 2023 08:40:42 +0200 Subject: [PATCH 30/92] PR Remarks --- CHANGELOG.md | 39 ---------------------------- NOTICE.md | 1 + edc-tests/cucumber/build.gradle.kts | 2 +- edc-tests/e2e-tests/build.gradle.kts | 2 +- edc-tests/runtime/build.gradle.kts | 2 +- 5 files changed, 4 insertions(+), 42 deletions(-) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index d3d528a32..9be6e460b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,25 +49,6 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). - Local TXDC Setup Documentation (#618) - Feature: Sftp Provisioner and Client (#554) -- Add contract id to data source http call (#732) -- Support also support releases in ci pipeline -- Introduce typed object for oauth2 provisioning -- Add documentation -- Add test case -- Add client to omejdn -- add hydra deployment -- Configure dynamically HTTP Receiver callback endpoints. (#685) -- cp-adapter : code review, rollbacke name change (#664) -- Feature/cp adapter task 355 356 357 (#621) -- Add Validity Mapping in ContractDefinitionStepDefs class -- Add feature and create SendAnOfferwithoutConstraints method in class negotiationSteps -- Add validity attribute in class ContractDefinition -- Add Validity Mapping in ContractDefinitionStepDefs class -- Add feature and create SendAnOfferwithoutConstraints method in class negotiationSteps -- Add validity attribute in class ContractDefinition -- Local TXDC Setup Documentation (#618) -- Feature: Sftp Provisioner and Client (#554) - ### Changed - Support horizontal edc scaling in cp adapter extension (#678) @@ -90,26 +71,6 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). - update link to edc logo in README.md (#612) - update description of supporting infrastructure deployment (#616) -- Support horizontal edc scaling in cp adapter extension (#678) -- Use upstream jackson version (#741) -- Replace provision-oauth2 with data-plane-http-oauth2 -- docs: Update sample documentation (#671) -- chore: Disable build ci pipeline if just docu was updated (#705) -- Increase trivy timeout -- Remove not useful anymore custom-jsonld extension (#683) -- update setup docu (#654) -- remove trailing slash (#652) -- update alpine from 3.17.0 to 3.17.1 for controlplane-memory-hashicorp-vault (#665) -- Feature/set charts deprecated (#628) -- update setup docu (#627) -- Feature/update txdc deployment downward capabilities (#625) -- remove git submodule (#619) -- Feature/update postman (#624) -- update control plane docu (#623) -- update postgresql version in Chart.yaml supporting-infrastructure (#622) -- update link to edc logo in README.md (#612) -- update description of supporting infrastructure deployment (#616) - ### Fixed - bugfix: Fix slow AES encryption (#746) diff --git a/NOTICE.md b/NOTICE.md index 24a59602c..4223c64f3 100644 --- a/NOTICE.md +++ b/NOTICE.md @@ -26,6 +26,7 @@ SPDX-License-Identifier: Apache-2.0 ## Source Code +The project maintains the following source code repositories in the GitHub organization : * diff --git a/edc-tests/cucumber/build.gradle.kts b/edc-tests/cucumber/build.gradle.kts index 1000005de..69ba71aa5 100644 --- a/edc-tests/cucumber/build.gradle.kts +++ b/edc-tests/cucumber/build.gradle.kts @@ -38,4 +38,4 @@ tasks.withType(Test::class) { // do not publish edcBuild { publish.set(false) -} \ No newline at end of file +} diff --git a/edc-tests/e2e-tests/build.gradle.kts b/edc-tests/e2e-tests/build.gradle.kts index 6e2f7af27..694ca8732 100644 --- a/edc-tests/e2e-tests/build.gradle.kts +++ b/edc-tests/e2e-tests/build.gradle.kts @@ -34,4 +34,4 @@ dependencies { // do not publish edcBuild { publish.set(false) -} \ No newline at end of file +} diff --git a/edc-tests/runtime/build.gradle.kts b/edc-tests/runtime/build.gradle.kts index dc31011c0..e4a832666 100644 --- a/edc-tests/runtime/build.gradle.kts +++ b/edc-tests/runtime/build.gradle.kts @@ 
-41,4 +41,4 @@ tasks.withType { // do not publish edcBuild { publish.set(false) -} \ No newline at end of file +} From 2e7eb839402a684e89ed20f46bf3a555b45923b6 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Fri, 31 Mar 2023 10:40:54 +0200 Subject: [PATCH 31/92] fix: remove duplicated code fragment in CHANGELOG --- CHANGELOG.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9be6e460b..79051eb6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,11 +78,6 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). - Fix not working docu link in README.md - Fix typo in control-plane adapter README -- bugfix: Fix slow AES encryption (#746) -- Fix typo in tractusx-connector values.yaml comment -- Fix not working docu link in README.md -- Fix typo in control-plane adapter README - ### Dependency updates - Bump EDC to 20220220 (#767) From fff4aecd297226db48097b8d60ae12813717a221 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Sat, 1 Apr 2023 09:48:57 +0200 Subject: [PATCH 32/92] feat: removed backend service, replaced with JVM runner test moved consumer EDR controller to runtime module --- .github/workflows/verify.yaml | 34 +-- build.gradle.kts | 5 +- .../helm/supporting-infrastructure/Chart.yaml | 6 - .../edc/tests/BackendDataService.java | 35 +++ .../edc/tests/BackendServiceBackendAPI.java | 270 ------------------ .../edc/tests/BackendServiceSteps.java | 11 +- .../eclipse/tractusx/edc/tests/Connector.java | 58 ++-- .../edc/tests/HttpProxyTransferSteps.java | 166 ++++++----- edc-tests/e2e-tests/build.gradle.kts | 6 +- .../edc/lifecycle/MultiRuntimeTest.java | 84 ++++-- .../tractusx/edc/lifecycle/Participant.java | 165 ++++++++++- .../lifecycle/TestRuntimeConfiguration.java | 45 ++- .../provider/ProviderEdcController.java | 2 + .../provider/ProviderServicesExtension.java | 2 + .../edc/policy/PolicyHelperFunctions.java | 13 + .../tractusx/edc/tests/CatalogTest.java | 25 +- .../tests/HttpConsumerPullWithProxyTest.java | 121 ++++++++ edc-tests/runtime/build.gradle.kts | 12 +- .../ConsumerEdrHandlerController.java | 61 ++++ .../lifecycle/ConsumerServicesExtension.java | 30 ++ ...rg.eclipse.edc.spi.system.ServiceExtension | 15 + settings.gradle.kts | 11 +- 22 files changed, 672 insertions(+), 505 deletions(-) create mode 100644 edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendDataService.java delete mode 100644 edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendServiceBackendAPI.java create mode 100644 edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/provider/ProviderEdcController.java create mode 100644 edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/provider/ProviderServicesExtension.java create mode 100644 edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java create mode 100644 edc-tests/runtime/src/main/java/org/eclipse/tractusx/edc/lifecycle/ConsumerEdrHandlerController.java create mode 100644 edc-tests/runtime/src/main/java/org/eclipse/tractusx/edc/lifecycle/ConsumerServicesExtension.java create mode 100644 edc-tests/runtime/src/main/resources/META-INF/services/org.eclipse.edc.spi.system.ServiceExtension diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index bac515157..b51c482f1 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -50,8 +50,7 @@ jobs: outputs: SONAR_TOKEN: ${{ steps.secret-presence.outputs.SONAR_TOKEN }} steps: - - - name: 
Check whether secrets exist + - name: Check whether secrets exist id: secret-presence run: | [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "::set-output name=SONAR_TOKEN::true" @@ -60,19 +59,16 @@ jobs: verify-formatting: runs-on: ubuntu-latest steps: - - - name: Checkout + - name: Checkout uses: actions/checkout@v3.3.0 - - - name: Set up JDK 11 + - name: Set up JDK 11 uses: actions/setup-java@v3.10.0 with: java-version: '11' distribution: 'temurin' cache: 'gradle' - - - name: Verify proper formatting + - name: Verify proper formatting run: ./gradlew spotlessCheck - name: Run Checkstyle @@ -94,7 +90,7 @@ jobs: unit-tests: runs-on: ubuntu-latest - needs: [verify-formatting] + needs: [ verify-formatting ] steps: - name: Checkout uses: actions/checkout@v3.3.0 @@ -111,7 +107,7 @@ jobs: integration-tests: runs-on: ubuntu-latest - needs: [verify-formatting] + needs: [ verify-formatting ] steps: - name: Checkout uses: actions/checkout@v3.3.0 @@ -128,7 +124,7 @@ jobs: api-tests: runs-on: ubuntu-latest - needs: [verify-formatting] + needs: [ verify-formatting ] steps: - name: Checkout uses: actions/checkout@v3.3.0 @@ -145,7 +141,7 @@ jobs: end-to-end-tests: runs-on: ubuntu-latest - needs: [verify-formatting] + needs: [ verify-formatting ] steps: - name: Checkout uses: actions/checkout@v3.3.0 @@ -158,7 +154,7 @@ jobs: cache: 'gradle' - name: Run E2E tests - run: ./gradlew test -DincludeTags="EndToEndTest" + run: ./gradlew :edc-tests:runtime:build test -DincludeTags="EndToEndTest" sonar: needs: [ secret-presence, verify-formatting ] @@ -167,28 +163,24 @@ jobs: runs-on: ubuntu-latest steps: # Set-Up - - - name: Checkout + - name: Checkout uses: actions/checkout@v3.3.0 with: fetch-depth: 0 - - - name: Set up JDK 11 + - name: Set up JDK 11 uses: actions/setup-java@v3.10.0 with: java-version: '11' distribution: 'temurin' cache: 'gradle' - - - name: Cache SonarCloud packages + - name: Cache SonarCloud packages uses: actions/cache@v3 with: path: ~/.sonar/cache key: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar # Analyse - - - name: Build with Maven and analyze with Sonar + - name: Build with Maven and analyze with Sonar env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/build.gradle.kts b/build.gradle.kts index a9e1f992d..33408a8b1 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -141,8 +141,9 @@ subprojects { dockerFile.set(file("${project.projectDir}/src/main/docker/Dockerfile")) images.add("${project.name}:${project.version}") images.add("${project.name}:latest") - // uncomment the following line if building on Apple Silicon - // platform.set("linux/x86_64") + // specify platform with the -Dplatform flag: + if (System.getProperty("platform") != null) + platform.set(System.getProperty("platform")) buildArgs.put("JAR", "build/libs/${project.name}.jar") inputDir.set(file(project.projectDir)) } diff --git a/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/Chart.yaml b/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/Chart.yaml index d3248326b..7d69beb1d 100644 --- a/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/Chart.yaml +++ b/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/Chart.yaml @@ -52,12 +52,6 @@ dependencies: repository: https://charts.bitnami.com/bitnami condition: install.postgresql - - name: backend-service - version: 0.0.6 - repository: https://denisneuling.github.io/cx-backend-service - alias: 
backend - condition: install.backendservice - # MinIo - name: minio alias: minio diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendDataService.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendDataService.java new file mode 100644 index 000000000..21beba150 --- /dev/null +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendDataService.java @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - initial API and implementation + * + */ + +package org.eclipse.tractusx.edc.tests; + + +import java.io.InputStream; +import java.util.List; + +public interface BackendDataService { + List list(String path); + + boolean exists(String path); + + byte[] get(String path); + + void post(String path, InputStream inputStream, long length); + + void post(String path, InputStream inputStream); + + void post(String path, byte[] content); + + void delete(String path); +} diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendServiceBackendAPI.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendServiceBackendAPI.java deleted file mode 100644 index 6b2a5ee2e..000000000 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendServiceBackendAPI.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (c) 2022 Mercedes-Benz Tech Innovation GmbH - * Copyright (c) 2021,2022 Contributors to the Eclipse Foundation - * - * See the NOTICE file(s) distributed with this work for additional - * information regarding copyright ownership. - * - * This program and the accompanying materials are made available under the - * terms of the Apache License, Version 2.0 which is available at - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.eclipse.tractusx.edc.tests; - -import com.google.gson.Gson; -import com.google.gson.reflect.TypeToken; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.net.URI; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import lombok.AccessLevel; -import lombok.NoArgsConstructor; -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.SneakyThrows; -import lombok.extern.slf4j.Slf4j; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.HttpStatus; -import org.apache.http.StatusLine; -import org.apache.http.client.HttpClient; -import org.apache.http.client.HttpResponseException; -import org.apache.http.client.ResponseHandler; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.entity.BasicHttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.AbstractResponseHandler; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.util.EntityUtils; - -@Slf4j -public class BackendServiceBackendAPI { - private static final String HTTP_HEADER_ACCEPT = "Accept"; - private static final String HTTP_HEADER_CONTENT_TYPE = "Content-Type"; - private static final String PATH_ROOT = "/"; - private final String backendServiceBackendApiUrl; - private final HttpClient httpClient; - - public BackendServiceBackendAPI(@NonNull final String backendServiceBackendApiUrl) { - this.backendServiceBackendApiUrl = backendServiceBackendApiUrl; - this.httpClient = HttpClientBuilder.create().build(); - } - - /** Lists all files and directories associated by a backend-service path. */ - @SneakyThrows - public List list(/* @Nullable */ final String path) { - final URI uri = - new URIBuilder(backendServiceBackendApiUrl) - .setPath(Optional.ofNullable(path).orElse(PATH_ROOT)) - .build(); - final HttpGet get = new HttpGet(uri); - get.setHeader(HTTP_HEADER_ACCEPT, ContentType.APPLICATION_JSON.getMimeType()); - - log.debug(String.format("Send %-6s %s", get.getMethod(), get.getURI())); - - return httpClient.execute(get, ListResponseHandler.INSTANCE); - } - - /** Proves existence of a file or directory associated by a backend-service path. */ - @SneakyThrows - public boolean exists(@NonNull final String path) { - final URI uri = new URIBuilder(backendServiceBackendApiUrl).setPath(path).build(); - final HttpHead head = new HttpHead(uri); - - log.debug(String.format("Send %-6s %s", head.getMethod(), head.getURI())); - - return httpClient.execute(head, ExistsResponseHandler.INSTANCE); - } - - /** Retrieves file content associated by a backend-service path. */ - @SneakyThrows - public byte[] get(@NonNull final String path) { - final URI uri = new URIBuilder(backendServiceBackendApiUrl).setPath(path).build(); - final HttpGet get = new HttpGet(uri); - get.setHeader(HTTP_HEADER_ACCEPT, ContentType.APPLICATION_OCTET_STREAM.getMimeType()); - - log.debug(String.format("Send %-6s %s", get.getMethod(), get.getURI())); - - return httpClient.execute(get, GetResponseHandler.INSTANCE); - } - - /** - * Creates a file associated by a backend-service path. 
If existing truncates and recreates that - * file - */ - @SneakyThrows - public void post( - @NonNull final String path, @NonNull final InputStream inputStream, long length) { - final URI uri = new URIBuilder(backendServiceBackendApiUrl).setPath(path).build(); - final HttpPost post = new HttpPost(uri); - post.addHeader(HTTP_HEADER_CONTENT_TYPE, ContentType.APPLICATION_OCTET_STREAM.getMimeType()); - final BasicHttpEntity entity = new BasicHttpEntity(); - entity.setContent(inputStream); - entity.setContentLength(length); - - post.setEntity(entity); - - log.debug(String.format("Send %-6s %s", post.getMethod(), post.getURI())); - - httpClient.execute(post, PostResponseHandler.INSTANCE); - } - - @SneakyThrows - public void post(@NonNull final String path, @NonNull final InputStream inputStream) { - post(path, inputStream, -1); - } - - @SneakyThrows - public void post(@NonNull final String path, @NonNull final byte[] content) { - try (ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(content)) { - post(path, byteArrayInputStream, content.length); - } - } - - /** Deletes files (and directories in a recursive manner) associated by a backend-service path. */ - @SneakyThrows - public void delete(@NonNull final String path) { - final URI uri = new URIBuilder(backendServiceBackendApiUrl).setPath(path).build(); - final HttpDelete delete = new HttpDelete(uri); - - httpClient.execute(delete, DeleteResponseHandler.INSTANCE); - } - - @NoArgsConstructor(access = AccessLevel.PRIVATE) - private static final class PostResponseHandler implements ResponseHandler { - public static final DeleteResponseHandler INSTANCE = new DeleteResponseHandler(); - - private static final List ACCEPTABLE_STATUS_CODES = - Arrays.asList(HttpStatus.SC_OK, HttpStatus.SC_ACCEPTED, HttpStatus.SC_CREATED); - - @Override - public Void handleResponse(@NonNull final HttpResponse response) throws IOException { - final StatusLine statusLine = response.getStatusLine(); - final Integer code = statusLine.getStatusCode(); - final HttpEntity entity = response.getEntity(); - - // not interested into content so throw it away - EntityUtils.consume(entity); - - if (ACCEPTABLE_STATUS_CODES.contains(code)) { - return null; - } - - throw new HttpResponseException(statusLine.getStatusCode(), statusLine.getReasonPhrase()); - } - } - - @NoArgsConstructor(access = AccessLevel.PRIVATE) - private static class DeleteResponseHandler implements ResponseHandler { - public static final DeleteResponseHandler INSTANCE = new DeleteResponseHandler(); - - private static final List ACCEPTABLE_STATUS_CODES = - Arrays.asList( - HttpStatus.SC_OK, - HttpStatus.SC_ACCEPTED, - HttpStatus.SC_NO_CONTENT, - HttpStatus.SC_NOT_FOUND); - - @Override - public Void handleResponse(@NonNull final HttpResponse response) throws IOException { - final StatusLine statusLine = response.getStatusLine(); - final Integer code = statusLine.getStatusCode(); - - // not interested into content so throw it away - Optional.ofNullable(response.getEntity()).ifPresent(EntityUtils::consumeQuietly); - - if (ACCEPTABLE_STATUS_CODES.contains(code)) { - return null; - } - - throw new HttpResponseException(statusLine.getStatusCode(), statusLine.getReasonPhrase()); - } - } - - @NoArgsConstructor(access = AccessLevel.PRIVATE) - private static class GetResponseHandler extends AbstractResponseHandler { - public static final GetResponseHandler INSTANCE = new GetResponseHandler(); - - private static byte[] readAllBytes(@NonNull final InputStream stream) throws IOException { - final 
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - final byte[] data = new byte[16384]; - - int i; - while ((i = stream.read(data, 0, data.length)) != -1) { - byteArrayOutputStream.write(data, 0, i); - } - - return byteArrayOutputStream.toByteArray(); - } - - @Override - public byte[] handleEntity(@NonNull final HttpEntity entity) throws IOException { - try (final InputStream inputStream = entity.getContent()) { - return readAllBytes(inputStream); - } - } - } - - @NoArgsConstructor(access = AccessLevel.PRIVATE) - private static class ExistsResponseHandler implements ResponseHandler { - public static final ExistsResponseHandler INSTANCE = new ExistsResponseHandler(); - - @Override - public Boolean handleResponse(@NonNull final HttpResponse response) - throws HttpResponseException { - final StatusLine statusLine = response.getStatusLine(); - final int code = statusLine.getStatusCode(); - - Optional.ofNullable(response.getEntity()).ifPresent(EntityUtils::consumeQuietly); - - switch (code) { - case HttpStatus.SC_OK: - return true; - case HttpStatus.SC_NOT_FOUND: - return false; - default: - throw new HttpResponseException(statusLine.getStatusCode(), statusLine.getReasonPhrase()); - } - } - } - - private static class ListResponseHandler extends GsonResponseHandler> { - public static final ListResponseHandler INSTANCE = new ListResponseHandler(); - - private ListResponseHandler() { - super(new TypeToken<>() {}); // JVM type erasure: Keep generic args! - } - } - - @RequiredArgsConstructor(access = AccessLevel.PROTECTED) - private static class GsonResponseHandler extends AbstractResponseHandler { - private static final Gson GSON = new Gson(); - - @NonNull private final TypeToken typeToken; - - @Override - public T handleEntity(@NonNull final HttpEntity entity) throws IOException { - try (final InputStreamReader inputStreamReader = new InputStreamReader(entity.getContent())) { - return GSON.fromJson(inputStreamReader, typeToken.getType()); - } - } - } -} diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendServiceSteps.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendServiceSteps.java index 05960ddf7..fa1d2467a 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendServiceSteps.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/BackendServiceSteps.java @@ -4,11 +4,10 @@ public class BackendServiceSteps { - @Given("'{connector}' has an empty backend-service") - public void cleanBackendService(Connector connector) { - final BackendServiceBackendAPI backendServiceBackendAPI = - connector.getBackendServiceBackendAPI(); + @Given("'{connector}' has an empty backend-service") + public void cleanBackendService(Connector connector) { + var backendServiceBackendAPI = connector.getBackendServiceBackendAPI(); - backendServiceBackendAPI.list("/").forEach(backendServiceBackendAPI::delete); - } + backendServiceBackendAPI.list("/").forEach(backendServiceBackendAPI::delete); + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Connector.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Connector.java index 9ca1bcb19..5c7a42c5d 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Connector.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Connector.java @@ -26,42 +26,48 @@ import org.eclipse.tractusx.edc.tests.util.DatabaseCleaner; import 
org.eclipse.tractusx.edc.tests.util.S3Client; +import static org.mockito.Mockito.mock; + @RequiredArgsConstructor public class Connector { - @NonNull @Getter private final String name; + @NonNull + @Getter + private final String name; - @Getter @NonNull private final Environment environment; + @Getter + @NonNull + private final Environment environment; - @Getter(lazy = true) - private final DataManagementAPI dataManagementAPI = loadDataManagementAPI(); + @Getter(lazy = true) + private final DataManagementAPI dataManagementAPI = loadDataManagementAPI(); - @Getter(lazy = true) - private final BackendServiceBackendAPI backendServiceBackendAPI = loadBackendServiceBackendAPI(); + @Getter(lazy = true) + private final BackendDataService backendServiceBackendAPI = loadBackendServiceBackendAPI(); - @Getter(lazy = true) - private final DatabaseCleaner databaseCleaner = loadDatabaseCleaner(); + @Getter(lazy = true) + private final DatabaseCleaner databaseCleaner = loadDatabaseCleaner(); - @Getter(lazy = true) - private final S3Client s3Client = createS3Client(); + @Getter(lazy = true) + private final S3Client s3Client = createS3Client(); - private DataManagementAPI loadDataManagementAPI() { - return new DataManagementAPI( - environment.getDataManagementUrl(), environment.getDataManagementAuthKey()); - } + private DataManagementAPI loadDataManagementAPI() { + return new DataManagementAPI( + environment.getDataManagementUrl(), environment.getDataManagementAuthKey()); + } - private DatabaseCleaner loadDatabaseCleaner() { - return new DatabaseCleaner( - environment.getDatabaseUrl(), - environment.getDatabaseUser(), - environment.getDatabasePassword()); - } + private DatabaseCleaner loadDatabaseCleaner() { + return new DatabaseCleaner( + environment.getDatabaseUrl(), + environment.getDatabaseUser(), + environment.getDatabasePassword()); + } - private BackendServiceBackendAPI loadBackendServiceBackendAPI() { - return new BackendServiceBackendAPI(environment.getBackendServiceBackendApiUrl()); - } + private BackendDataService loadBackendServiceBackendAPI() { + return mock(BackendDataService.class); + } - private S3Client createS3Client() { - return new S3Client(environment); - } + private S3Client createS3Client() { + return new S3Client(environment); + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java index c0ae99a48..6f34f5eb0 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java @@ -4,96 +4,104 @@ import io.cucumber.java.en.Given; import io.cucumber.java.en.Then; import io.cucumber.java.en.When; +import lombok.extern.slf4j.Slf4j; +import org.eclipse.tractusx.edc.tests.data.Asset; +import org.eclipse.tractusx.edc.tests.data.ContractNegotiation; +import org.eclipse.tractusx.edc.tests.data.DataAddress; +import org.eclipse.tractusx.edc.tests.data.HttpProxySinkDataAddress; +import org.eclipse.tractusx.edc.tests.data.HttpProxySourceDataAddress; +import org.eclipse.tractusx.edc.tests.data.Transfer; +import org.junit.jupiter.api.Assertions; + import java.io.IOException; import java.time.Duration; import java.util.Arrays; import java.util.List; -import lombok.extern.slf4j.Slf4j; -import org.awaitility.Awaitility; -import org.eclipse.tractusx.edc.tests.data.*; -import org.junit.jupiter.api.Assertions; import static 
org.awaitility.Awaitility.await; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.when; @Slf4j public class HttpProxyTransferSteps { - private static final String ID = "id"; - private static final String DESCRIPTION = "description"; - private static final String BASE_URL = "baseUrl"; - private static final String ASSET_ID = "asset id"; - private static final String RECEIVER_HTTP_ENDPOINT = "receiverHttpEndpoint"; - - @Given("'{connector}' has a http proxy assets") - public void hasAssets(Connector connector, DataTable table) throws Exception { - final DataManagementAPI api = connector.getDataManagementAPI(); - - for (var map : table.asMaps()) { - final String id = map.get(ID); - final String description = map.get(DESCRIPTION); - final String baseUrl = map.get(BASE_URL); - - var oauth2Provision = - Arrays.stream(Oauth2DataAddressFields.values()) - .map(it -> it.text) - .anyMatch(map::containsKey) - ? new HttpProxySourceDataAddress.Oauth2Provision( - map.get(Oauth2DataAddressFields.TOKEN_URL.text), - map.get(Oauth2DataAddressFields.CLIENT_ID.text), - map.get(Oauth2DataAddressFields.CLIENT_SECRET.text), - map.get(Oauth2DataAddressFields.SCOPE.text)) - : null; - - final DataAddress address = new HttpProxySourceDataAddress(baseUrl, oauth2Provision); - final Asset asset = new Asset(id, description, address); - - api.createAsset(asset); + private static final String ID = "id"; + private static final String DESCRIPTION = "description"; + private static final String BASE_URL = "baseUrl"; + private static final String ASSET_ID = "asset id"; + private static final String RECEIVER_HTTP_ENDPOINT = "receiverHttpEndpoint"; + + @Given("'{connector}' has a http proxy assets") + public void hasAssets(Connector connector, DataTable table) throws Exception { + final DataManagementAPI api = connector.getDataManagementAPI(); + + for (var map : table.asMaps()) { + final String id = map.get(ID); + final String description = map.get(DESCRIPTION); + final String baseUrl = map.get(BASE_URL); + + var oauth2Provision = + Arrays.stream(Oauth2DataAddressFields.values()) + .map(it -> it.text) + .anyMatch(map::containsKey) + ? 
new HttpProxySourceDataAddress.Oauth2Provision( + map.get(Oauth2DataAddressFields.TOKEN_URL.text), + map.get(Oauth2DataAddressFields.CLIENT_ID.text), + map.get(Oauth2DataAddressFields.CLIENT_SECRET.text), + map.get(Oauth2DataAddressFields.SCOPE.text)) + : null; + + final DataAddress address = new HttpProxySourceDataAddress(baseUrl, oauth2Provision); + final Asset asset = new Asset(id, description, address); + + api.createAsset(asset); + } } - } - - @When("'{connector}' initiates HttpProxy transfer from '{connector}'") - public void sokratesInitiateHttpProxyTransferProcessFromPlato( - Connector consumer, Connector provider, DataTable dataTable) throws IOException { - final DataManagementAPI api = consumer.getDataManagementAPI(); - final String receiverUrl = provider.getEnvironment().getIdsUrl() + "/data"; - - final List negotiation = api.getNegotiations(); - final String agreementId = negotiation.get(0).getAgreementId(); - final DataAddress dataAddress = new HttpProxySinkDataAddress(); - - for (var map : dataTable.asMaps()) { - final String assetId = map.get(ASSET_ID); - final String receiverHttpEndpoint = map.get(RECEIVER_HTTP_ENDPOINT); - final Transfer transfer = - api.initiateTransferProcess( - receiverUrl, agreementId, assetId, dataAddress, receiverHttpEndpoint); - - transfer.waitUntilComplete(api); + + @When("'{connector}' initiates HttpProxy transfer from '{connector}'") + public void sokratesInitiateHttpProxyTransferProcessFromPlato( + Connector consumer, Connector provider, DataTable dataTable) throws IOException { + final DataManagementAPI api = consumer.getDataManagementAPI(); + final String receiverUrl = provider.getEnvironment().getIdsUrl() + "/data"; + + final List negotiation = api.getNegotiations(); + final String agreementId = negotiation.get(0).getAgreementId(); + final DataAddress dataAddress = new HttpProxySinkDataAddress(); + + for (var map : dataTable.asMaps()) { + final String assetId = map.get(ASSET_ID); + final String receiverHttpEndpoint = map.get(RECEIVER_HTTP_ENDPOINT); + final Transfer transfer = + api.initiateTransferProcess( + receiverUrl, agreementId, assetId, dataAddress, receiverHttpEndpoint); + + transfer.waitUntilComplete(api); + } } - } - - @Then("the backend application of '{connector}' has received data") - public void theBackendApplicationOfSocratesHasReceivedData(Connector consumer) { - final BackendServiceBackendAPI api = consumer.getBackendServiceBackendAPI(); - await() - .atMost(Duration.ofSeconds(20)) - .pollInterval(Duration.ofSeconds(1)) - .untilAsserted(() ->{ - final List transferredData = api.list("/"); - Assertions.assertNotEquals(0, transferredData.size()); - }); - } - - private enum Oauth2DataAddressFields { - TOKEN_URL("oauth2 token url"), - CLIENT_ID("oauth2 client id"), - CLIENT_SECRET("oauth2 client secret"), - SCOPE("oauth2 scope"); - - private final String text; - - Oauth2DataAddressFields(String text) { - this.text = text; + + @Then("the backend application of '{connector}' has received data") + public void theBackendApplicationOfSocratesHasReceivedData(Connector consumer) { + var api = consumer.getBackendServiceBackendAPI(); + when(api.list(eq("/"))).thenReturn(List.of("item1", "item2")); + await() + .atMost(Duration.ofSeconds(20)) + .pollInterval(Duration.ofSeconds(1)) + .untilAsserted(() -> { + final List transferredData = api.list("/"); + Assertions.assertNotEquals(0, transferredData.size()); + }); + } + + private enum Oauth2DataAddressFields { + TOKEN_URL("oauth2 token url"), + CLIENT_ID("oauth2 client id"), + 
CLIENT_SECRET("oauth2 client secret"), + SCOPE("oauth2 scope"); + + private final String text; + + Oauth2DataAddressFields(String text) { + this.text = text; + } } - } } diff --git a/edc-tests/e2e-tests/build.gradle.kts b/edc-tests/e2e-tests/build.gradle.kts index 694ca8732..d716d89c2 100644 --- a/edc-tests/e2e-tests/build.gradle.kts +++ b/edc-tests/e2e-tests/build.gradle.kts @@ -17,6 +17,7 @@ plugins { } dependencies { + testImplementation("com.squareup.okhttp3:mockwebserver:5.0.0-alpha.11") testImplementation(libs.restAssured) testImplementation(libs.postgres) testImplementation(libs.awaitility) @@ -28,7 +29,10 @@ dependencies { testImplementation(edc.core.api) testImplementation(edc.spi.catalog) testImplementation(edc.api.catalog) - testImplementation(testFixtures(edc.junit)) + testImplementation(edc.api.contractnegotiation) + testImplementation(edc.api.transferprocess) + testImplementation(edc.spi.dataplane.selector) + } // do not publish diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java index e007a824d..a28a2610d 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java @@ -17,59 +17,87 @@ import org.junit.jupiter.api.extension.RegisterExtension; -import java.util.Map; +import java.util.HashMap; -import static java.lang.String.format; -import static org.eclipse.edc.junit.testfixtures.TestUtils.tempDirectory; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.IDS_PATH; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.PLATO_CONNECTOR_PATH; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.PLATO_CONNECTOR_PORT; +import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.PLATO_DATAPLANE_CONTROL_PORT; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.PLATO_IDS_API; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.PLATO_IDS_API_PORT; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.PLATO_MANAGEMENT_PATH; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.PLATO_MANAGEMENT_PORT; -import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_ASSET_FILE; +import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.PLATO_PUBLIC_API_PORT; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_CONNECTOR_PATH; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_CONNECTOR_PORT; +import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_DATAPLANE_CONTROL_PORT; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_IDS_API; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_IDS_API_PORT; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_MANAGEMENT_PATH; import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_MANAGEMENT_PORT; +import static org.eclipse.tractusx.edc.lifecycle.TestRuntimeConfiguration.SOKRATES_PUBLIC_API_PORT; public class MultiRuntimeTest { - public static final String SOKRATES_ASSET_PATH = format("%s/%s.txt", tempDirectory(), SOKRATES_ASSET_FILE); @RegisterExtension protected static 
Participant sokrates = new Participant( ":edc-tests:runtime", "SOKRATES", - Map.of( - "edc.ids.id", "urn:connector:sokrates", - "web.http.port", String.valueOf(SOKRATES_CONNECTOR_PORT), - "web.http.path", SOKRATES_CONNECTOR_PATH, - "edc.test.asset.path", SOKRATES_ASSET_PATH, - "web.http.management.port", String.valueOf(SOKRATES_MANAGEMENT_PORT), - "web.http.management.path", SOKRATES_MANAGEMENT_PATH, - "web.http.ids.port", String.valueOf(SOKRATES_IDS_API_PORT), - "web.http.ids.path", IDS_PATH, - "edc.api.auth.key", "testkey", - "ids.webhook.address", SOKRATES_IDS_API)); + new HashMap<>() { + { + put("edc.connector.name", "sokrates"); + put("edc.ids.id", "urn:connector:sokrates"); + put("web.http.port", String.valueOf(SOKRATES_CONNECTOR_PORT)); + put("web.http.path", SOKRATES_CONNECTOR_PATH); + put("web.http.management.port", String.valueOf(SOKRATES_MANAGEMENT_PORT)); + put("web.http.management.path", SOKRATES_MANAGEMENT_PATH); + put("web.http.ids.port", String.valueOf(SOKRATES_IDS_API_PORT)); + put("web.http.ids.path", IDS_PATH); + put("edc.api.auth.key", "testkey"); + put("ids.webhook.address", SOKRATES_IDS_API); + put("web.http.public.path", "/api/public"); + put("web.http.public.port", SOKRATES_PUBLIC_API_PORT); + + // embedded dataplane config + put("web.http.control.path", "/api/dataplane/control"); + put("web.http.control.port", SOKRATES_DATAPLANE_CONTROL_PORT); + put("edc.dataplane.token.validation.endpoint", "http://localhost:" + SOKRATES_DATAPLANE_CONTROL_PORT + "/api/dataplane/control/token"); + put("edc.dataplane.selector.httpplane.url", "http://localhost:" + SOKRATES_DATAPLANE_CONTROL_PORT + "/api/dataplane/control"); + put("edc.dataplane.selector.httpplane.sourcetypes", "HttpData"); + put("edc.dataplane.selector.httpplane.destinationtypes", "HttpProxy"); + put("edc.dataplane.selector.httpplane.properties", "{\"publicApiUrl\":\"http://localhost:" + SOKRATES_PUBLIC_API_PORT + "/api/public\"}"); + put("edc.receiver.http.dynamic.endpoint", "http://localhost:" + SOKRATES_CONNECTOR_PORT + "/api/consumer/datareference"); + } + }); + @RegisterExtension protected static Participant plato = new Participant( ":edc-tests:runtime", "PLATO", - Map.of( - "edc.ids.id", "urn:connector:plato", - "web.http.default.port", String.valueOf(PLATO_CONNECTOR_PORT), - "web.http.default.path", PLATO_CONNECTOR_PATH, - "web.http.management.port", String.valueOf(PLATO_MANAGEMENT_PORT), - "web.http.management.path", PLATO_MANAGEMENT_PATH, - "web.http.ids.port", String.valueOf(PLATO_IDS_API_PORT), - "web.http.ids.path", IDS_PATH, - "edc.api.auth.key", "testkey", - "ids.webhook.address", PLATO_IDS_API)); - - + new HashMap<>() { + { + put("edc.connector.name", "plato"); + put("edc.ids.id", "urn:connector:plato"); + put("web.http.default.port", String.valueOf(PLATO_CONNECTOR_PORT)); + put("web.http.default.path", PLATO_CONNECTOR_PATH); + put("web.http.management.port", String.valueOf(PLATO_MANAGEMENT_PORT)); + put("web.http.management.path", PLATO_MANAGEMENT_PATH); + put("web.http.ids.port", String.valueOf(PLATO_IDS_API_PORT)); + put("web.http.ids.path", IDS_PATH); + put("edc.api.auth.key", "testkey"); + put("ids.webhook.address", PLATO_IDS_API); + put("web.http.public.port", PLATO_PUBLIC_API_PORT); + put("web.http.public.path", "/api/public"); + // embedded dataplane config + put("web.http.control.path", "/api/dataplane/control"); + put("web.http.control.port", PLATO_DATAPLANE_CONTROL_PORT); + put("edc.dataplane.token.validation.endpoint", "http://localhost:" + PLATO_DATAPLANE_CONTROL_PORT + 
"/api/dataplane/control/token"); + put("edc.dataplane.selector.httpplane.url", "http://localhost:" + PLATO_DATAPLANE_CONTROL_PORT + "/api/dataplane/control"); + put("edc.dataplane.selector.httpplane.sourcetypes", "HttpData"); + put("edc.dataplane.selector.httpplane.destinationtypes", "HttpProxy"); + put("edc.dataplane.selector.httpplane.properties", "{\"publicApiUrl\":\"http://localhost:" + PLATO_PUBLIC_API_PORT + "/api/public\"}"); + } + }); } diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java index 7f17696ba..dd43d4f90 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java @@ -15,9 +15,15 @@ package org.eclipse.tractusx.edc.lifecycle; import io.restassured.specification.RequestSpecification; +import org.eclipse.edc.api.model.IdResponseDto; import org.eclipse.edc.api.query.QuerySpecDto; import org.eclipse.edc.catalog.spi.Catalog; import org.eclipse.edc.connector.api.management.catalog.model.CatalogRequestDto; +import org.eclipse.edc.connector.api.management.contractnegotiation.model.ContractNegotiationDto; +import org.eclipse.edc.connector.api.management.contractnegotiation.model.ContractOfferDescription; +import org.eclipse.edc.connector.api.management.contractnegotiation.model.NegotiationInitiateRequestDto; +import org.eclipse.edc.connector.api.management.transferprocess.model.TransferProcessDto; +import org.eclipse.edc.connector.api.management.transferprocess.model.TransferRequestDto; import org.eclipse.edc.connector.policy.spi.PolicyDefinition; import org.eclipse.edc.junit.extensions.EdcRuntimeExtension; import org.eclipse.edc.spi.asset.AssetSelectorExpression; @@ -26,20 +32,29 @@ import org.eclipse.edc.spi.system.ServiceExtensionContext; import org.eclipse.edc.spi.system.injection.InjectionContainer; import org.eclipse.edc.spi.types.TypeManager; +import org.eclipse.edc.spi.types.domain.DataAddress; +import org.eclipse.edc.spi.types.domain.HttpDataAddress; +import org.eclipse.edc.spi.types.domain.edr.EndpointDataReference; import org.eclipse.tractusx.edc.token.MockDapsService; import org.junit.jupiter.api.extension.AfterAllCallback; import org.junit.jupiter.api.extension.BeforeAllCallback; import org.junit.jupiter.api.extension.ExtensionContext; import java.net.URI; +import java.time.Duration; +import java.time.temporal.ChronoUnit; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import static io.restassured.RestAssured.given; import static io.restassured.http.ContentType.JSON; import static java.lang.String.format; import static org.assertj.core.api.Assertions.assertThat; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; public class Participant extends EdcRuntimeExtension implements BeforeAllCallback, AfterAllCallback { @@ -48,8 +63,9 @@ public class Participant extends EdcRuntimeExtension implements BeforeAllCallbac private final String idsEndpoint; private final TypeManager typeManager = new TypeManager(); private final String idsId; - private DataWiper wiper; private final String bpn; + private final String backend; + private DataWiper wiper; public Participant(String moduleName, String runtimeName, Map properties) { super(moduleName, 
runtimeName, properties); @@ -58,23 +74,29 @@ public Participant(String moduleName, String runtimeName, Map pr this.apiKey = properties.get("edc.api.auth.key"); this.idsId = properties.get("edc.ids.id"); this.bpn = runtimeName + "-BPN"; + this.backend = properties.get("edc.receiver.http.dynamic.endpoint"); this.registerServiceMock(IdentityService.class, new MockDapsService(getBpn())); } @Override - public void beforeTestExecution(ExtensionContext extensionContext) throws Exception { + public void beforeTestExecution(ExtensionContext extensionContext) { //do nothing - we only want to start the runtime once wiper.clearPersistence(); } @Override - public void afterTestExecution(ExtensionContext context) throws Exception { + public void afterTestExecution(ExtensionContext context) { } @Override - protected void bootExtensions(ServiceExtensionContext context, List> serviceExtensions) { - super.bootExtensions(context, serviceExtensions); - wiper = new DataWiper(context); + public void beforeAll(ExtensionContext context) throws Exception { + //only run this once + super.beforeTestExecution(context); + } + + @Override + public void afterAll(ExtensionContext context) throws Exception { + super.afterTestExecution(context); } /** @@ -106,6 +128,31 @@ public void createAsset(String id, Map properties) { } + /** + * Creates an asset with the given ID and props using the participant's Data Management API + */ + public void createAsset(String id, Map asserProperties, HttpDataAddress address) { + asserProperties = new HashMap<>(asserProperties); + asserProperties.put("asset:prop:id", id); + asserProperties.put("asset:prop:description", "test description"); + + var asset = Map.of( + "asset", Map.of( + "id", id, + "properties", asserProperties + ), + "dataAddress", address + ); + + baseRequest() + .body(asset) + .when() + .post("/assets") + .then() + .statusCode(200) + .contentType(JSON); + } + /** * Creates a {@link org.eclipse.edc.connector.contract.spi.types.offer.ContractDefinition} using the participant's Data Management API */ @@ -168,6 +215,44 @@ public Catalog requestCatalog(Participant other, QuerySpecDto query) { return typeManager.readValue(body, Catalog.class); } + public String negotiateContract(Participant other, String assetId) { + var catalog = requestCatalog(other); + assertThat(catalog.getContractOffers()).withFailMessage("Catalog received from " + other.idsId + " was empty!").isNotEmpty(); + var response = baseRequest() + .when() + .body(NegotiationInitiateRequestDto.Builder.newInstance() + .connectorAddress(other.idsEndpoint + "/data") + .connectorId(getBpn()) + .offer(catalog.getContractOffers().stream().filter(o -> o.getAsset().getId().equals(assetId)) + .findFirst().map(co -> ContractOfferDescription.Builder.newInstance() + .assetId(assetId) + .offerId(co.getId()) + .policy(co.getPolicy()) + .validity(ChronoUnit.SECONDS.between(co.getContractStart(), co.getContractEnd().plus(Duration.ofMillis(500)))) // the plus 1 is required due to https://github.com/eclipse-edc/Connector/issues/2650 + .build()) + .orElseThrow((() -> new RuntimeException("A contract for assetId " + assetId + " could not be negotiated")))) + .build() + ) + .post("/contractnegotiations") + .then(); + + var body = response.extract().body().asString(); + assertThat(response.extract().statusCode()).withFailMessage(body).isBetween(200, 299); + + return typeManager.readValue(body, IdResponseDto.class).getId(); + } + + public ContractNegotiationDto getNegotiation(String negotiationId) { + var response = baseRequest() + 
.when() + .get("/contractnegotiations/" + negotiationId) + .then(); + + var body = response.extract().body().asString(); + assertThat(response.extract().statusCode()).withFailMessage(body).isBetween(200, 299); + return typeManager.readValue(body, ContractNegotiationDto.class); + } + /** * Returns this participant's IDS ID */ @@ -182,15 +267,71 @@ public String getBpn() { return bpn; } - @Override - public void beforeAll(ExtensionContext context) throws Exception { - //only run this once - super.beforeTestExecution(context); + public String requestTransfer(String contractId, String assetId, Participant other, DataAddress destination, String dataRequestId) { + var response = baseRequest() + .when() + .body(TransferRequestDto.Builder.newInstance() + .assetId(assetId) + .id(dataRequestId) + .connectorAddress(other.idsEndpoint + "/data") + .managedResources(false) + .contractId(contractId) + .connectorId(bpn) + .protocol("ids-multipart") + .dataDestination(destination) + .build()) + .post("/transferprocess") + .then(); + + var body = response.extract().body().asString(); + assertThat(response.extract().statusCode()).withFailMessage(body).isBetween(200, 299); + + return typeManager.readValue(body, IdResponseDto.class).getId(); + } + + public TransferProcessDto getTransferProcess(String transferProcessId) { + var json = baseRequest() + .when() + .get("/transferprocess/" + transferProcessId) + .then() + .statusCode(allOf(greaterThanOrEqualTo(200), lessThan(300))) + .extract().body().asString(); + + return typeManager.readValue(json, TransferProcessDto.class); + + } + + public EndpointDataReference getDataReference(String dataRequestId) { + var dataReference = new AtomicReference(); + + var result = given() + .when() + .get(backend + "/{id}", dataRequestId) + .then() + .statusCode(200) + .extract() + .body() + .as(EndpointDataReference.class); + dataReference.set(result); + + return dataReference.get(); + } + + public String pullData(EndpointDataReference edr, Map queryParams) { + var response = given() + .baseUri(edr.getEndpoint()) + .header(edr.getAuthKey(), edr.getAuthCode()) + .queryParams(queryParams) + .when() + .get(); + assertThat(response.statusCode()).isBetween(200, 300); + return response.body().asString(); } @Override - public void afterAll(ExtensionContext context) throws Exception { - super.afterTestExecution(context); + protected void bootExtensions(ServiceExtensionContext context, List> serviceExtensions) { + super.bootExtensions(context, serviceExtensions); + wiper = new DataWiper(context); } private RequestSpecification baseRequest() { diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/TestRuntimeConfiguration.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/TestRuntimeConfiguration.java index 145ae476f..f865d39d9 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/TestRuntimeConfiguration.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/TestRuntimeConfiguration.java @@ -14,39 +14,30 @@ package org.eclipse.tractusx.edc.lifecycle; -import java.util.concurrent.TimeUnit; - import static org.eclipse.edc.junit.testfixtures.TestUtils.getFreePort; -public class TestRuntimeConfiguration { - - - public static final String IDS_PATH = "/api/v1/ids"; - - - public static final int PLATO_CONNECTOR_PORT = getFreePort(); - public static final int PLATO_MANAGEMENT_PORT = getFreePort(); - public static final String PLATO_CONNECTOR_PATH = "/api"; - public static final String 
PLATO_MANAGEMENT_PATH = "/api/v1/management"; - public static final String CONSUMER_CONNECTOR_MANAGEMENT_URL = "http://localhost:" + PLATO_MANAGEMENT_PORT + PLATO_MANAGEMENT_PATH; - public static final int PLATO_IDS_API_PORT = getFreePort(); - public static final String PLATO_IDS_API = "http://localhost:" + PLATO_IDS_API_PORT; - - public static final int SOKRATES_CONNECTOR_PORT = getFreePort(); - public static final int SOKRATES_MANAGEMENT_PORT = getFreePort(); - public static final String SOKRATES_CONNECTOR_PATH = "/api"; - public static final String SOKRATES_MANAGEMENT_PATH = "/api/v1/management"; - public static final int SOKRATES_IDS_API_PORT = getFreePort(); - public static final String SOKRATES_IDS_API = "http://localhost:" + SOKRATES_IDS_API_PORT; +class TestRuntimeConfiguration { - public static final String PROVIDER_IDS_API_DATA = "http://localhost:" + SOKRATES_IDS_API_PORT + IDS_PATH + "/data"; - public static final String PROVIDER_ASSET_ID = "test-document"; - public static final long CONTRACT_VALIDITY = TimeUnit.HOURS.toSeconds(1); + static final String IDS_PATH = "/api/v1/ids"; + static final int PLATO_CONNECTOR_PORT = getFreePort(); + static final int PLATO_MANAGEMENT_PORT = getFreePort(); + static final String PLATO_CONNECTOR_PATH = "/api"; + static final String PLATO_MANAGEMENT_PATH = "/api/v1/management"; + static final int PLATO_IDS_API_PORT = getFreePort(); + static final String PLATO_IDS_API = "http://localhost:" + PLATO_IDS_API_PORT; - public static final String SOKRATES_ASSET_FILE = "text-document.txt"; + static final int SOKRATES_CONNECTOR_PORT = getFreePort(); + static final int SOKRATES_MANAGEMENT_PORT = getFreePort(); + static final String SOKRATES_CONNECTOR_PATH = "/api"; + static final String SOKRATES_MANAGEMENT_PATH = "/api/v1/management"; + static final int SOKRATES_IDS_API_PORT = getFreePort(); + static final String SOKRATES_IDS_API = "http://localhost:" + SOKRATES_IDS_API_PORT; - public static final String PROVIDER_CONNECTOR_MANAGEMENT_URL = "http://localhost:" + SOKRATES_MANAGEMENT_PORT + SOKRATES_MANAGEMENT_PATH; + static final String SOKRATES_PUBLIC_API_PORT = String.valueOf(getFreePort()); + static final String PLATO_PUBLIC_API_PORT = String.valueOf(getFreePort()); + static final String PLATO_DATAPLANE_CONTROL_PORT = String.valueOf(getFreePort()); + static final String SOKRATES_DATAPLANE_CONTROL_PORT = String.valueOf(getFreePort()); } diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/provider/ProviderEdcController.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/provider/ProviderEdcController.java new file mode 100644 index 000000000..8ea80e745 --- /dev/null +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/provider/ProviderEdcController.java @@ -0,0 +1,2 @@ +package org.eclipse.tractusx.edc.lifecycle.provider;public class ProviderEdcController { +} diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/provider/ProviderServicesExtension.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/provider/ProviderServicesExtension.java new file mode 100644 index 000000000..526ff6872 --- /dev/null +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/provider/ProviderServicesExtension.java @@ -0,0 +1,2 @@ +package org.eclipse.tractusx.edc.lifecycle.provider;public class ProviderServicesExtension { +} diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/policy/PolicyHelperFunctions.java 
b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/policy/PolicyHelperFunctions.java index b255aa078..56d92afbe 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/policy/PolicyHelperFunctions.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/policy/PolicyHelperFunctions.java @@ -24,6 +24,7 @@ import org.eclipse.edc.policy.model.OrConstraint; import org.eclipse.edc.policy.model.Permission; import org.eclipse.edc.policy.model.Policy; +import org.eclipse.edc.policy.model.PolicyType; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -53,4 +54,16 @@ public static PolicyDefinition businessPartnerNumberPolicy(String id, String... .build()) .build()).build()).build(); } + + public static PolicyDefinition noConstraintPolicy(String id) { + return PolicyDefinition.Builder.newInstance() + .id(id) + .policy(Policy.Builder.newInstance() + .permission(Permission.Builder.newInstance() + .action(Action.Builder.newInstance().type("USE").build()) + .build()) + .type(PolicyType.SET) + .build()) + .build(); + } } diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/CatalogTest.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/CatalogTest.java index b7812ae4a..ec8045f5b 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/CatalogTest.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/CatalogTest.java @@ -16,14 +16,8 @@ import org.eclipse.edc.api.query.QuerySpecDto; -import org.eclipse.edc.connector.policy.spi.PolicyDefinition; import org.eclipse.edc.junit.annotations.EndToEndTest; -import org.eclipse.edc.policy.model.Action; -import org.eclipse.edc.policy.model.Permission; -import org.eclipse.edc.policy.model.Policy; -import org.eclipse.edc.policy.model.PolicyType; import org.eclipse.tractusx.edc.lifecycle.MultiRuntimeTest; -import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; @@ -32,16 +26,11 @@ import static java.util.stream.IntStream.range; import static org.assertj.core.api.Assertions.assertThat; import static org.eclipse.tractusx.edc.policy.PolicyHelperFunctions.businessPartnerNumberPolicy; +import static org.eclipse.tractusx.edc.policy.PolicyHelperFunctions.noConstraintPolicy; @EndToEndTest public class CatalogTest extends MultiRuntimeTest { - - @BeforeAll - static void setup() { - - } - @Test void requestCatalog_fulfillsPolicy_shouldReturnOffer() { // arrange @@ -133,16 +122,6 @@ void requestCatalog_of1000Assets_shouldContainAll() { } - private PolicyDefinition noConstraintPolicy(String id) { - return PolicyDefinition.Builder.newInstance() - .id(id) - .policy(Policy.Builder.newInstance() - .permission(Permission.Builder.newInstance() - .action(Action.Builder.newInstance().type("USE").build()) - .build()) - .type(PolicyType.SET) - .build()) - .build(); - } + } diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java new file mode 100644 index 000000000..78f3d2013 --- /dev/null +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at 
+ * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - initial API and implementation + * + */ + +package org.eclipse.tractusx.edc.tests; + +import okhttp3.mockwebserver.MockResponse; +import okhttp3.mockwebserver.MockWebServer; +import org.eclipse.edc.connector.api.management.transferprocess.model.TransferProcessDto; +import org.eclipse.edc.connector.contract.spi.types.negotiation.ContractNegotiationStates; +import org.eclipse.edc.connector.transfer.spi.types.TransferProcessStates; +import org.eclipse.edc.junit.annotations.EndToEndTest; +import org.eclipse.edc.spi.types.domain.DataAddress; +import org.eclipse.edc.spi.types.domain.HttpDataAddress; +import org.eclipse.edc.spi.types.domain.edr.EndpointDataReference; +import org.eclipse.tractusx.edc.lifecycle.MultiRuntimeTest; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.time.Duration; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; + +import static java.time.Duration.ofSeconds; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; +import static org.awaitility.pollinterval.FibonacciPollInterval.fibonacci; +import static org.eclipse.edc.connector.transfer.dataplane.spi.TransferDataPlaneConstants.HTTP_PROXY; +import static org.eclipse.tractusx.edc.policy.PolicyHelperFunctions.noConstraintPolicy; + +@EndToEndTest +public class HttpConsumerPullWithProxyTest extends MultiRuntimeTest { + private static final Duration ASYNC_TIMEOUT = ofSeconds(45); + private static final Duration ASYNC_POLL_INTERVAL = ofSeconds(1); + private final long ONE_WEEK = 60 * 60 * 24 * 7; + MockWebServer server = new MockWebServer(); + + @Test + void transferData_privateBackend() throws IOException, InterruptedException { + var assetId = "api-asset-1"; + var url = server.url("/mock/api"); + server.start(); + + var authCodeHeaderName = "test-authkey"; + var authCode = "test-authcode"; + plato.createAsset(assetId, Map.of(), HttpDataAddress.Builder.newInstance() + .contentType("application/json") + .baseUrl(url.toString()) + .authKey(authCodeHeaderName) + .authCode(authCode) + .build()); + plato.createPolicy(noConstraintPolicy("policy-1")); + plato.createPolicy(noConstraintPolicy("policy-2")); + plato.createContractDefinition(assetId, "def-1", "policy-1", "policy-2", ONE_WEEK); + var negotiationId = sokrates.negotiateContract(plato, assetId); + + // forward declarations of our actual values + var transferProcessId = new AtomicReference(); + var dataRequestId = UUID.randomUUID().toString(); + var contractAgreementId = new AtomicReference(); + var edr = new AtomicReference(); + + + // wait for the successful contract negotiation + await().pollInterval(ASYNC_POLL_INTERVAL) + .atMost(ASYNC_TIMEOUT) + .untilAsserted(() -> { + var negotiation = sokrates.getNegotiation(negotiationId); + assertThat(negotiation.getState()).isEqualTo(ContractNegotiationStates.CONFIRMED.toString()); + contractAgreementId.set(negotiation.getContractAgreementId()); + assertThat(contractAgreementId).isNotNull(); + transferProcessId.set(sokrates.requestTransfer(contractAgreementId.get(), assetId, plato, DataAddress.Builder.newInstance() + .type(HTTP_PROXY) + .build(), dataRequestId)); + assertThat(transferProcessId).isNotNull(); + }); + + // wait until transfer process completes + await().pollInterval(fibonacci()) 
+ .atMost(ASYNC_TIMEOUT) + .untilAsserted(() -> { + var tp = sokrates.getTransferProcess(transferProcessId.get()); + assertThat(tp).isNotNull() + .extracting(TransferProcessDto::getState).isEqualTo(TransferProcessStates.COMPLETED.toString()); + }); + + // wait until EDC is available on the consumer side + server.enqueue(new MockResponse().setBody("test response").setResponseCode(200)); + await().pollInterval(fibonacci()) + .atMost(ASYNC_TIMEOUT) + .untilAsserted(() -> { + edr.set(sokrates.getDataReference(dataRequestId)); + assertThat(edr).isNotNull(); + }); + + // pull data out of provider's backend service: + // Cons-DP -> Prov-DP -> Prov-backend + assertThat(sokrates.pullData(edr.get(), Map.of())).isEqualTo("test response"); + var rq = server.takeRequest(); + assertThat(rq.getHeader(authCodeHeaderName)).isEqualTo(authCode); + assertThat(rq.getHeader("Edc-Contract-Agreement-Id")).isEqualTo(contractAgreementId.get()); + assertThat(rq.getMethod()).isEqualToIgnoringCase("GET"); + } + + @AfterEach + void teardown() throws IOException { + server.shutdown(); + } +} diff --git a/edc-tests/runtime/build.gradle.kts b/edc-tests/runtime/build.gradle.kts index e4a832666..6f8a370af 100644 --- a/edc-tests/runtime/build.gradle.kts +++ b/edc-tests/runtime/build.gradle.kts @@ -21,12 +21,22 @@ plugins { dependencies { - runtimeOnly(project(":edc-controlplane:edc-controlplane-base")) { + // use basic (all in-mem) control plane + implementation(project(":edc-controlplane:edc-controlplane-base")) { exclude("org.eclipse.edc", "oauth2-core") exclude("org.eclipse.edc", "oauth2-daps") exclude(module = "data-encryption") exclude(module = "control-plane-adapter") } + + // use basic (all in-mem) data plane + runtimeOnly(project(":edc-dataplane:edc-dataplane-base")) { + exclude("org.eclipse.edc", "api-observability") + } + + implementation(edc.core.controlplane) + // for the controller + implementation(libs.jakarta.rsApi) } application { diff --git a/edc-tests/runtime/src/main/java/org/eclipse/tractusx/edc/lifecycle/ConsumerEdrHandlerController.java b/edc-tests/runtime/src/main/java/org/eclipse/tractusx/edc/lifecycle/ConsumerEdrHandlerController.java new file mode 100644 index 000000000..5ca08a922 --- /dev/null +++ b/edc-tests/runtime/src/main/java/org/eclipse/tractusx/edc/lifecycle/ConsumerEdrHandlerController.java @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - initial API and implementation + * + */ + +package org.eclipse.tractusx.edc.lifecycle; + +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.core.MediaType; +import org.eclipse.edc.spi.monitor.Monitor; +import org.eclipse.edc.spi.types.domain.edr.EndpointDataReference; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +@Path("/consumer") +public class ConsumerEdrHandlerController { + + private final Monitor monitor; + private Map dataReference; + + public ConsumerEdrHandlerController(Monitor monitor) { + this.monitor = monitor; + dataReference = new HashMap<>(); + } + + @Path("/datareference") + @POST + @Consumes({ 
MediaType.APPLICATION_JSON }) + public void pushDataReference(EndpointDataReference edr) { + monitor.debug("Received new endpoint data reference with url " + edr.getEndpoint()); + dataReference.put(edr.getId(), edr); + } + + @Path("/datareference/{id}") + @GET + @Produces({ MediaType.APPLICATION_JSON }) + public EndpointDataReference getDataReference(@PathParam("id") String id) { + return Optional.ofNullable(dataReference.get(id)).orElseGet(() -> + { + monitor.warning("No EndpointDataReference found with id " + id); + return null; + }); + } + +} diff --git a/edc-tests/runtime/src/main/java/org/eclipse/tractusx/edc/lifecycle/ConsumerServicesExtension.java b/edc-tests/runtime/src/main/java/org/eclipse/tractusx/edc/lifecycle/ConsumerServicesExtension.java new file mode 100644 index 000000000..f46ef3e4e --- /dev/null +++ b/edc-tests/runtime/src/main/java/org/eclipse/tractusx/edc/lifecycle/ConsumerServicesExtension.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - initial API and implementation + * + */ + +package org.eclipse.tractusx.edc.lifecycle; + +import org.eclipse.edc.runtime.metamodel.annotation.Inject; +import org.eclipse.edc.spi.system.ServiceExtension; +import org.eclipse.edc.spi.system.ServiceExtensionContext; +import org.eclipse.edc.web.spi.WebService; + +public class ConsumerServicesExtension implements ServiceExtension { + @Inject + private WebService webService; + + @Override + public void initialize(ServiceExtensionContext context) { + webService.registerResource("default", new ConsumerEdrHandlerController(context.getMonitor())); + } +} diff --git a/edc-tests/runtime/src/main/resources/META-INF/services/org.eclipse.edc.spi.system.ServiceExtension b/edc-tests/runtime/src/main/resources/META-INF/services/org.eclipse.edc.spi.system.ServiceExtension new file mode 100644 index 000000000..619665085 --- /dev/null +++ b/edc-tests/runtime/src/main/resources/META-INF/services/org.eclipse.edc.spi.system.ServiceExtension @@ -0,0 +1,15 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# +# Contributors: +# Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - initial API and implementation +# +# + +org.eclipse.tractusx.edc.lifecycle.ConsumerServicesExtension diff --git a/settings.gradle.kts b/settings.gradle.kts index e0fc39433..22ac0d73a 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -52,9 +52,9 @@ dependencyResolutionManagement { versionCatalogs { create("libs") { from("org.eclipse.edc:edc-versions:0.0.1-20230220-SNAPSHOT") - library("testcontainers-junit", "org.testcontainers","junit-jupiter").version("1.17.6") - library("apache-sshd-core", "org.apache.sshd","sshd-core").version("2.9.2") - library("apache-sshd-sftp", "org.apache.sshd","sshd-sftp").version("2.9.2") + library("testcontainers-junit", "org.testcontainers", "junit-jupiter").version("1.17.6") + library("apache-sshd-core", "org.apache.sshd", "sshd-core").version("2.9.2") + 
library("apache-sshd-sftp", "org.apache.sshd", "sshd-sftp").version("2.9.2") } // create version catalog for all EDC modules create("edc") { @@ -86,6 +86,9 @@ dependencyResolutionManagement { library("api-management", "org.eclipse.edc", "management-api").versionRef("edc") library("api-catalog", "org.eclipse.edc", "catalog-api").versionRef("edc") library("api-observability", "org.eclipse.edc", "api-observability").versionRef("edc") + library("api-contractnegotiation", "org.eclipse.edc", "contract-negotiation-api").versionRef("edc") + library("api-dataplane", "org.eclipse.edc", "data-plane-api").versionRef("edc") + library("api-transferprocess", "org.eclipse.edc", "transfer-process-api").versionRef("edc") library("ext-http", "org.eclipse.edc", "http").versionRef("edc") library("spi-ids", "org.eclipse.edc", "ids-spi").versionRef("edc") library("ids", "org.eclipse.edc", "ids").versionRef("edc") @@ -140,6 +143,8 @@ dependencyResolutionManagement { "transfer-pull-http-dynamic-receiver" ).versionRef("edc") + library("transfer.receiver", "org.eclipse.edc", "transfer-pull-http-receiver").versionRef("edc") + bundle( "connector", listOf("boot", "core-connector", "core-jersey", "core-controlplane", "api-observability") From d957581c552a4788db5f708559072da137f74d44 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Mon, 3 Apr 2023 10:32:58 +0200 Subject: [PATCH 33/92] docs: create decision record about renaming git branches --- .../2023-04-03_renaming_branches/README.md | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 docs/development/decision-records/2023-04-03_renaming_branches/README.md diff --git a/docs/development/decision-records/2023-04-03_renaming_branches/README.md b/docs/development/decision-records/2023-04-03_renaming_branches/README.md new file mode 100644 index 000000000..dcb80865c --- /dev/null +++ b/docs/development/decision-records/2023-04-03_renaming_branches/README.md @@ -0,0 +1,61 @@ +# Renaming Git branches to comply with TractusX standards + +## Decision + +TractusX-EDC will rename its Git branching structure to comply with TractusX release guidelines, and to be able to +leverage +GitHub convenience features, while continuing to use the Gitflow branching model. + +## Rationale + +The TractusX organization has established +a [release guideline](https://eclipse-tractusx.github.io/docs/release/trg-2/trg-2-1/) which mandates that all projects' +default branch be called `main`. + +### Selecting default branches + +In GitHub, the default branch has a couple of important features attached to it: + +- cloning or forking the repository will automatically check out the default branch +- when creating pull requests the default branch is targeted by default +- [automatic issue linking and closing](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) + only works with the default branch + +### The problem with GitFlow + +The GitFlow branching model suggests that the day-to-day work be done on a branch called `develop`, while the `main` +branch stores the version history and only receives (merge) commits after a version releases. + +This would call for `develop` being the GitHub default branch, which is forbidden by the aforementioned release +guideline. + +## Approach + +In order to comply with the TractusX release guideline, to make use of the GitHub features _and_ also use GitFlow, we +propose renaming a couple of branches. 
While GitFlow _suggests_ branch names, it does not _require_ them, and most +tools allow for customizing them anyway. Thus, from an abstract perspective, the following changes are necessary: + +- `main` becomes our work/development branch. All pull requests target `main`. +- `develop` gets deleted +- a new branch `releases` is introduced, which tracks the release history and receives post-release merge commits. + +Technically this will involve force-pushing, which is a potentially destructive operation. Therefore the following +section outlines the exact sequence of steps. Note that "upstream" refers to `eclipse-tractusx/tractusx-edc`, while " +fork" refers to `catenax-ng/tx-tractusx-edc`. + +- create a new branch `upstream/releases` +- create a new branch `fork/releases`, set it to track `upstream/releases` +- push the contents of `fork/main` -> `upstream/releases` +- synchronize `upstream/develop` with `fork/develop` +- force-push the contents of `develop` -> `upstream/main` (do **not** update the tracking branch!) +- synchronize `upstream/main` -> `fork/main`. +- delete/archive `upstream/develop` and `fork/develop` + +_Note that most of this will likely need to be done manually, since GitHub does not allow for advanced Git operations +like force-pushing. Write access to `upstream` is required!_ + +## Further notes + +The new `releases` branch (note the plural) will serve the same purpose that `main` did up until now, which is to track +all releases (via merge commits and tags) in chronological order. We will continue to have separate `release/x.y.z` +branches for every release. \ No newline at end of file From 8ddae2dac2007b955de8fa1dc6eb1ac7eac8e539 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Mon, 3 Apr 2023 11:26:20 +0200 Subject: [PATCH 34/92] removed obsolete HTTP test --- .../edc/tests/HttpProxyTransferSteps.java | 16 ++++++---------- .../features/HttpProxyDataTransfer.feature | 18 ------------------ 2 files changed, 6 insertions(+), 28 deletions(-) diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java index 6f34f5eb0..24f68e3a1 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java @@ -6,11 +6,9 @@ import io.cucumber.java.en.When; import lombok.extern.slf4j.Slf4j; import org.eclipse.tractusx.edc.tests.data.Asset; -import org.eclipse.tractusx.edc.tests.data.ContractNegotiation; import org.eclipse.tractusx.edc.tests.data.DataAddress; import org.eclipse.tractusx.edc.tests.data.HttpProxySinkDataAddress; import org.eclipse.tractusx.edc.tests.data.HttpProxySourceDataAddress; -import org.eclipse.tractusx.edc.tests.data.Transfer; import org.junit.jupiter.api.Assertions; import java.io.IOException; @@ -61,19 +59,17 @@ public void hasAssets(Connector connector, DataTable table) throws Exception { @When("'{connector}' initiates HttpProxy transfer from '{connector}'") public void sokratesInitiateHttpProxyTransferProcessFromPlato( Connector consumer, Connector provider, DataTable dataTable) throws IOException { - final DataManagementAPI api = consumer.getDataManagementAPI(); - final String receiverUrl = provider.getEnvironment().getIdsUrl() + "/data"; + var api = consumer.getDataManagementAPI(); + var receiverUrl = provider.getEnvironment().getIdsUrl() + "/data"; - final List<ContractNegotiation> negotiation =
api.getNegotiations(); - final String agreementId = negotiation.get(0).getAgreementId(); - final DataAddress dataAddress = new HttpProxySinkDataAddress(); + var negotiation = api.getNegotiations(); + var agreementId = negotiation.get(0).getAgreementId(); + var dataAddress = new HttpProxySinkDataAddress(); for (var map : dataTable.asMaps()) { final String assetId = map.get(ASSET_ID); final String receiverHttpEndpoint = map.get(RECEIVER_HTTP_ENDPOINT); - final Transfer transfer = - api.initiateTransferProcess( - receiverUrl, agreementId, assetId, dataAddress, receiverHttpEndpoint); + var transfer = api.initiateTransferProcess(receiverUrl, agreementId, assetId, dataAddress, receiverHttpEndpoint); transfer.waitUntilComplete(api); } diff --git a/edc-tests/cucumber/src/test/resources/org/eclipse/tractusx/edc/tests/features/HttpProxyDataTransfer.feature b/edc-tests/cucumber/src/test/resources/org/eclipse/tractusx/edc/tests/features/HttpProxyDataTransfer.feature index d318ad745..b04970ec7 100644 --- a/edc-tests/cucumber/src/test/resources/org/eclipse/tractusx/edc/tests/features/HttpProxyDataTransfer.feature +++ b/edc-tests/cucumber/src/test/resources/org/eclipse/tractusx/edc/tests/features/HttpProxyDataTransfer.feature @@ -24,24 +24,6 @@ Feature: HttpProxy Data Transfer Given 'Plato' has an empty database Given 'Sokrates' has an empty database - Scenario: Connector transfers data via HttpProxy - Given 'Plato' has a http proxy assets - | id | description | baseUrl | - | asset-1 | http proxy transfer asset | http://localhost:8081/api/check/liveness | - And 'Plato' has the following policies - | id | action | - | policy-1 | USE | - And 'Plato' has the following contract definitions - | id | access policy | contract policy | asset | - | contract-definition-1 | policy-1 | policy-1 | asset-1 | - When 'Sokrates' negotiates the contract successfully with 'Plato' - | contract offer id | asset id | policy id | - | contract-definition-1 | asset-1 | policy-1 | - And 'Sokrates' initiates HttpProxy transfer from 'Plato' - | asset id | receiverHttpEndpoint | - | asset-1 | http://backend:8080 | - Then the backend application of 'Sokrates' has received data - Scenario: Connector transfers data via HttpProxy, data on provider side requires oauth2 authentication Given 'Plato' has a http proxy assets | id | description | baseUrl | oauth2 token url | oauth2 client id | oauth2 client secret | oauth2 scope | From 97c6e4bd1d4265d89e32e18d5ba4c9321a6696b9 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Mon, 3 Apr 2023 18:02:39 +0200 Subject: [PATCH 35/92] feat(charts): removes edc-controlplane and edc-dataplane charts --- charts/edc-controlplane/.helmignore | 29 -- charts/edc-controlplane/Chart.yaml | 35 -- charts/edc-controlplane/LICENSE | 202 ---------- charts/edc-controlplane/README.md | 106 ----- charts/edc-controlplane/README.md.gotmpl | 26 -- charts/edc-controlplane/templates/NOTES.txt | 74 ---- .../edc-controlplane/templates/_helpers.tpl | 72 ---- .../templates/configmap-env.yaml | 32 -- .../edc-controlplane/templates/configmap.yaml | 49 --- .../templates/deployment.yaml | 154 ------- charts/edc-controlplane/templates/hpa.yaml | 52 --- .../templates/imagepullsecret.yaml | 35 -- .../edc-controlplane/templates/ingress.yaml | 100 ----- .../edc-controlplane/templates/service.yaml | 59 --- .../templates/serviceaccount.yaml | 36 -- charts/edc-controlplane/values.yaml | 379 ------------------ charts/edc-dataplane/.helmignore | 29 -- charts/edc-dataplane/Chart.yaml | 35 -- charts/edc-dataplane/LICENSE | 202 ---------- 
charts/edc-dataplane/README.md | 90 ----- charts/edc-dataplane/README.md.gotmpl | 26 -- charts/edc-dataplane/templates/NOTES.txt | 64 --- charts/edc-dataplane/templates/_helpers.tpl | 72 ---- .../templates/configmap-env.yaml | 32 -- charts/edc-dataplane/templates/configmap.yaml | 45 --- .../edc-dataplane/templates/deployment.yaml | 142 ------- charts/edc-dataplane/templates/hpa.yaml | 52 --- .../templates/imagepullsecret.yaml | 35 -- charts/edc-dataplane/templates/ingress.yaml | 100 ----- charts/edc-dataplane/templates/service.yaml | 51 --- .../templates/serviceaccount.yaml | 36 -- charts/edc-dataplane/values.yaml | 331 --------------- 32 files changed, 2782 deletions(-) delete mode 100644 charts/edc-controlplane/.helmignore delete mode 100644 charts/edc-controlplane/Chart.yaml delete mode 100644 charts/edc-controlplane/LICENSE delete mode 100644 charts/edc-controlplane/README.md delete mode 100644 charts/edc-controlplane/README.md.gotmpl delete mode 100644 charts/edc-controlplane/templates/NOTES.txt delete mode 100644 charts/edc-controlplane/templates/_helpers.tpl delete mode 100644 charts/edc-controlplane/templates/configmap-env.yaml delete mode 100644 charts/edc-controlplane/templates/configmap.yaml delete mode 100644 charts/edc-controlplane/templates/deployment.yaml delete mode 100644 charts/edc-controlplane/templates/hpa.yaml delete mode 100644 charts/edc-controlplane/templates/imagepullsecret.yaml delete mode 100644 charts/edc-controlplane/templates/ingress.yaml delete mode 100644 charts/edc-controlplane/templates/service.yaml delete mode 100644 charts/edc-controlplane/templates/serviceaccount.yaml delete mode 100644 charts/edc-controlplane/values.yaml delete mode 100644 charts/edc-dataplane/.helmignore delete mode 100644 charts/edc-dataplane/Chart.yaml delete mode 100644 charts/edc-dataplane/LICENSE delete mode 100644 charts/edc-dataplane/README.md delete mode 100644 charts/edc-dataplane/README.md.gotmpl delete mode 100644 charts/edc-dataplane/templates/NOTES.txt delete mode 100644 charts/edc-dataplane/templates/_helpers.tpl delete mode 100644 charts/edc-dataplane/templates/configmap-env.yaml delete mode 100644 charts/edc-dataplane/templates/configmap.yaml delete mode 100644 charts/edc-dataplane/templates/deployment.yaml delete mode 100644 charts/edc-dataplane/templates/hpa.yaml delete mode 100644 charts/edc-dataplane/templates/imagepullsecret.yaml delete mode 100644 charts/edc-dataplane/templates/ingress.yaml delete mode 100644 charts/edc-dataplane/templates/service.yaml delete mode 100644 charts/edc-dataplane/templates/serviceaccount.yaml delete mode 100644 charts/edc-dataplane/values.yaml diff --git a/charts/edc-controlplane/.helmignore b/charts/edc-controlplane/.helmignore deleted file mode 100644 index 148b31d6c..000000000 --- a/charts/edc-controlplane/.helmignore +++ /dev/null @@ -1,29 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ - -README.md.gotmpl - -# Accept only values.yaml -values?*.yaml -values?*.yml diff --git a/charts/edc-controlplane/Chart.yaml b/charts/edc-controlplane/Chart.yaml deleted file mode 100644 index ffd77bd4d..000000000 --- a/charts/edc-controlplane/Chart.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: v2 -name: edc-controlplane -description: >- - EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with responsibility of resource management and govern contracts and data transfers -home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-controlplane -type: application -appVersion: "0.3.2" -version: 0.3.2 -deprecated: true -maintainers: [] -sources: - - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-controlplane diff --git a/charts/edc-controlplane/LICENSE b/charts/edc-controlplane/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/charts/edc-controlplane/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/charts/edc-controlplane/README.md b/charts/edc-controlplane/README.md deleted file mode 100644 index 9984db480..000000000 --- a/charts/edc-controlplane/README.md +++ /dev/null @@ -1,106 +0,0 @@ -# edc-controlplane - -> **:exclamation: This Helm Chart is deprecated!** - -![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.2](https://img.shields.io/badge/AppVersion-0.3.2-informational?style=flat-square) - -EDC Control-Plane - The Eclipse DataSpaceConnector administration layer with responsibility of resource management and govern contracts and data transfers - -**Homepage:** - -## TL;DR - -```shell -helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/edc-controlplane --version 0.3.2 -``` - -## Source Code - -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | [Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) constrains which nodes the Pod can be scheduled on based on node labels. | -| automountServiceAccountToken | bool | `false` | Whether to [automount kubernetes API credentials](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server) into the pod | -| autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | -| autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | -| autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | -| autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | -| autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | -| configuration.properties | string | `"# edc.api.auth.key=\n# edc.atomikos.checkpoint.interval=\n# edc.atomikos.directory=\n# edc.atomikos.logging=\n# edc.atomikos.threaded2pc=\n# edc.atomikos.timeout=\n# edc.aws.access.key=\n# edc.aws.provision.retry.retries.max=\n# edc.aws.provision.role.duration.session.max=\n# edc.aws.secret.access.key=\n# edc.blobstore.endpoint=\n# edc.dataplane.token.validation.endpoint=\n# edc.core.retry.backoff.max=\n# edc.core.retry.backoff.min=\n# edc.core.retry.retries.max=\n# edc.core.system.health.check.liveness-period=\n# edc.core.system.health.check.readiness-period=\n# edc.core.system.health.check.startup-period=\n# edc.core.system.health.check.threadpool-size=\n# edc.dataplane.queue.capacity=\n# edc.dataplane.wait=\n# edc.dataplane.workers=\n# edc.datasource.asset.name=\"default\"\n# edc.datasource.contractdefinition.name=\"default\"\n# edc.datasource.contractnegotiation.name=\"default\"\n# edc.datasource.policy.name=\"default\"\n# edc.datasource.transferprocess.name=\"default\"\n# edc.datasource.default.pool.maxIdleConnections=\n# edc.datasource.default.pool.maxTotalConnections=\n# edc.datasource.default.pool.minIdleConnections=\n# edc.datasource.default.pool.testConnectionOnBorrow=\n# edc.datasource.default.pool.testConnectionOnCreate=\n# 
edc.datasource.default.pool.testConnectionOnReturn=\n# edc.datasource.default.pool.testConnectionWhileIdle=\n# edc.datasource.default.pool.testQuery=\n# edc.datasource.default.url=\n# edc.datasource.default.user=\n# edc.datasource.default.password=\n# edc.dpf.selector.url=\n# edc.events.topic.endpoint=\n# edc.events.topic.name=\n# edc.fs.config=\n# edc.hostname=\n# edc.identity.did.url=\n# edc.ids.catalog.id=\n# edc.ids.curator=\n# edc.ids.description=\n# edc.ids.endpoint=\n# edc.ids.id=\n# edc.ids.maintainer=\n# edc.ids.security.profile=\n# edc.ids.title=\n# edc.ids.validation.referringconnector=\n# edc.ion.crawler.did-type=\n# edc.ion.crawler.interval-minutes=\n# edc.ion.crawler.ion.url=\n# edc.metrics.enabled=\n# edc.metrics.executor.enabled=\n# edc.metrics.jersey.enabled=\n# edc.metrics.jetty.enabled=\n# edc.metrics.okhttp.enabled=\n# edc.metrics.system.enabled=\n# edc.negotiation.consumer.state-machine.batch-size=\n# edc.negotiation.provider.state-machine.batch-size=\n# edc.oauth.client.id=\n# edc.oauth.private.key.alias=\n# edc.oauth.provider.audience=\n# edc.oauth.provider.jwks.refresh=\n# edc.oauth.provider.jwks.url=\n# edc.oauth.public.key.alias=\n# edc.oauth.token.url=\n# edc.oauth.validation.nbf.leeway=\n# edc.receiver.http.auth-code=\n# edc.receiver.http.auth-key=\n# edc.receiver.http.endpoint=\n# edc.transfer.proxy.endpoint=\n# edc.transfer.proxy.token.validity.seconds=\n# edc.transfer.proxy.token.signer.privatekey.alias=\n# edc.transfer.functions.check.endpoint=\n# edc.transfer.functions.enabled.protocols=\n# edc.transfer.functions.transfer.endpoint=\n# edc.transfer-process-store.database.name=\n# edc.transfer.state-machine.batch-size=\n# edc.vault=\n# edc.vault.certificate=\n# edc.vault.clientid=\n# edc.vault.clientsecret=\n# edc.vault.name=\n# edc.vault.tenantid=\n# edc.vault.hashicorp.url=\n# edc.vault.hashicorp.token=\n# edc.vault.hashicorp.timeout.seconds=\n# edc.webdid.doh.url=\n# edc.web.rest.cors.enabled=\n# edc.web.rest.cors.headers=\n# edc.web.rest.cors.methods=\n# edc.web.rest.cors.origins=\n# ids.webhook.address="` | EDC configuration.properties configuring aspects of the [eclipse-dataspaceconnector](https://github.com/eclipse-edc/Connector) | -| customLabels | object | `{}` | Additional custom Labels to add | -| edc.endpoints.control.path | string | `"/api/controlplane/control"` | The path mapping the "control" api is going to be exposed at | -| edc.endpoints.control.port | string | `"9999"` | The network port, which the "control" api is going to be exposed by the container, pod and service | -| edc.endpoints.data.path | string | `"/data"` | The path mapping the "data" management api is going to be exposed at | -| edc.endpoints.data.port | string | `"8181"` | The network port, which the "data" management api is going to be exposed by the container, pod and service | -| edc.endpoints.default.path | string | `"/api"` | The path mapping the "default" api is going to be exposed at | -| edc.endpoints.default.port | string | `"8080"` | The network port, which the "default" api is going to be exposed by the container, pod and service | -| edc.endpoints.ids.path | string | `"/api/v1/ids"` | The path mapping the "ids" multipart api is going to be exposed at | -| edc.endpoints.ids.port | string | `"8282"` | The network port, which the "ids" multipart api is going to be exposed by the container, pod and service | -| edc.endpoints.metrics.path | string | `"/metrics"` | The path mapping the prometheus metrics are going to be exposed at | -| edc.endpoints.metrics.port | string 
| `"9090"` | The network port, which the prometheus metrics are going to be exposed by the container, pod and service | -| edc.endpoints.validation.path | string | `"/validation"` | The path mapping the "validation" api is going to be exposed at | -| edc.endpoints.validation.port | string | `"8182"` | The network port, which the "validation" api is going to be exposed by the container, pod and service | -| env | object | `{}` | Container environment variables e.g. for configuring [JAVA_TOOL_OPTIONS](https://docs.oracle.com/javase/8/docs/technotes/guides/troubleshoot/envvars002.html) Ex.: JAVA_TOOL_OPTIONS: > -Dhttp.proxyHost=proxy -Dhttp.proxyPort=80 -Dhttp.nonProxyHosts="localhost|127.*|[::1]" -Dhttps.proxyHost=proxy -Dhttps.proxyPort=443 | -| envSecretName | string | `nil` | [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from | -| fullnameOverride | string | `""` | Overrides the releases full name | -| image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| image.repository | string | `"ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault"` | Which derivate of the edc control-plane to use. One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-memory] | -| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | -| imagePullSecret.dockerconfigjson | string | `""` | Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. 
| -| imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | -| ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | -| ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| ingresses[0].enabled | bool | `true` | | -| ingresses[0].endpoints | list | `["ids"]` | EDC endpoints exposed by this ingress resource | -| ingresses[0].hostname | string | `"edc-controlplane.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | -| ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | -| ingresses[1].annotations | object | `{}` | Additional ingress annotations to add | -| ingresses[1].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| ingresses[1].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| ingresses[1].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| ingresses[1].enabled | bool | `false` | | -| ingresses[1].endpoints | list | `["data","control"]` | EDC endpoints exposed by this ingress resource | -| ingresses[1].hostname | string | `"edc-controlplane.intranet"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| ingresses[1].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | -| ingresses[1].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| ingresses[1].tls.secretName | string | `""` | If present overwrites the default secret name | -| livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| logging.properties | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | EDC logging.properties configuring the [java.util.logging subsystem](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html#a1.8) | -| nameOverride | string | `""` | Overrides the charts name | -| nodeSelector | object | `{}` | [Node-Selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) to constrain the Pod to nodes with specific 
labels. | -| opentelemetry.properties | string | `"otel.javaagent.enabled=true\notel.javaagent.debug=false"` | opentelemetry.properties configuring the [opentelemetry agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) | -| podAnnotations | object | `{}` | [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) added to deployed [pods](https://kubernetes.io/docs/concepts/workloads/pods/) | -| podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | -| podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | -| podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | -| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | -| readinessProbe.enabled | bool | `true` | Whether to enable kubernetes readiness-probes | -| replicaCount | int | `1` | Specifies how many replicas of a deployed pod shall be created during the deployment Note: If horizontal pod autoscaling is enabled this setting has no effect | -| resources | object | `{}` | [Resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) applied to the deployed pod | -| securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID | -| securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | -| securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | -| securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | -| securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | -| securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | -| service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | -| serviceAccount.annotations | object | `{}` | [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to add to the service account | -| serviceAccount.create | bool | `true` | Specifies whether a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) should be created per release | -| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the release's fullname template | -| startupProbe.enabled | bool | `true` | Whether to enable kubernetes startup-probes | -| startupProbe.failureThreshold | int | `12` | Minimum consecutive failures for the probe to be considered failed after having succeeded | -| startupProbe.initialDelaySeconds | int | `10` | Number of seconds after the container has started before liveness probes are initiated. | -| tolerations | list | `[]` | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) are applied to Pods to schedule onto nodes with matching taints. 
| -| volumeMounts | list | `[]` | Additional volumeMounts to the controlplane main container | -| volumes | list | `[]` | Additional volumes to the controlplane pod | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0) diff --git a/charts/edc-controlplane/README.md.gotmpl b/charts/edc-controlplane/README.md.gotmpl deleted file mode 100644 index aa70ec6fc..000000000 --- a/charts/edc-controlplane/README.md.gotmpl +++ /dev/null @@ -1,26 +0,0 @@ -{{ template "chart.header" . }} - -{{ template "chart.deprecationWarning" . }} - -{{ template "chart.badgesSection" . }} - -{{ template "chart.description" . }} - -{{ template "chart.homepageLine" . }} - -## TL;DR - -```shell -helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/edc-controlplane --version {{ .Version }} -``` - -{{ template "chart.maintainersSection" . }} - -{{ template "chart.sourcesSection" . }} - -{{ template "chart.requirementsSection" . }} - -{{ template "chart.valuesSection" . }} - -{{ template "helm-docs.versionFooter" . }} diff --git a/charts/edc-controlplane/templates/NOTES.txt b/charts/edc-controlplane/templates/NOTES.txt deleted file mode 100644 index 6758c6bdf..000000000 --- a/charts/edc-controlplane/templates/NOTES.txt +++ /dev/null @@ -1,74 +0,0 @@ - -CHART NAME: {{ .Chart.Name }} -CHART VERSION: {{ .Chart.Version }} -APP VERSION: {{ .Chart.AppVersion }} - -Logs can be accessed by running this command: - - kubectl logs --tail 100 -f \ - --namespace {{ .Release.Namespace }} \ - -l "app.kubernetes.io/name={{ include "edc-controlplane.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" - -{{- if .Values.ingresses }} - -Following ingress URLS are available: - {{- $edcEndpoints := .Values.edc.endpoints }} - {{- range .Values.ingresses }} - {{- if .enabled }} - {{- $ingressEdcEndpoints := .endpoints }} - {{- $hostname := .hostname }} - {{- $tls := .tls }} - {{- range $name, $mapping := $edcEndpoints }} - {{- if (has $name $ingressEdcEndpoints) }} - Visit http{{ if $tls }}s{{ end }}://{{ $hostname }}{{ $mapping.path }} to access the {{ $name }} api - {{- end }} - {{- end }} - {{- end }} - {{- end }} - -{{- else if contains "NodePort" .Values.service.type }} -Get the application URLs by running these commands: - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - - export NODE_PORT_DEFAULT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "edc-controlplane.fullname" . }}}") - export NODE_PORT_DATA=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "edc-controlplane.fullname" . }}}") - export NODE_PORT_VALIDATION=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[2].nodePort}" services {{ include "edc-controlplane.fullname" . }}}") - export NODE_PORT_CONTROL=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "edc-controlplane.fullname" . }}}") - export NODE_PORT_IDS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[4].nodePort}" services {{ include "edc-controlplane.fullname" . }}}") - export NODE_PORT_METRICS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[5].nodePort}" services {{ include "edc-controlplane.fullname" . 
}}}") - - echo "Visit http://$NODE_IP:$NODE_PORT_DEFAULT to access the default api" - echo "Visit http://$NODE_IP:$NODE_PORT_DATA to access the data management api" - echo "Visit http://$NODE_IP:$NODE_PORT_VALIDATION to access the data transfer validation api" - echo "Visit http://$NODE_IP:$NODE_PORT_CONTROL to access the control api" - echo "Visit http://$NODE_IP:$NODE_PORT_IDS to access the IDS api" - echo "Visit http://$NODE_IP:$NODE_PORT_METRICS to access the metrics api" - -{{- else if contains "ClusterIP" .Values.service.type }} -Get the application URL by running these commands: - - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "edc-controlplane.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - - export CONTAINER_PORT_DEFAULT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - export CONTAINER_PORT_DATA=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[1].containerPort}") - export CONTAINER_PORT_VALIDATION=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[2].containerPort}") - export CONTAINER_PORT_CONTROL=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[3].containerPort}") - export CONTAINER_PORT_IDS=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[4].containerPort}") - export CONTAINER_PORT_METRICS=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[5].containerPort}") - - echo "Visit http://127.0.0.1:8080 to access the default api" - echo "Visit http://127.0.0.1:8182 to access the data management api" - echo "Visit http://127.0.0.1:8182 to access the data transfer validation api" - echo "Visit http://127.0.0.1:9999 to access the control api" - echo "Visit http://127.0.0.1:8282 to access the IDS api" - echo "Visit http://127.0.0.1:9090 to access the metrics api" - - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME \ - 8080:$CONTAINER_PORT_DEFAULT \ - 8182:$CONTAINER_PORT_DATA \ - 8182:$CONTAINER_PORT_VALIDATION \ - 9999:$CONTAINER_PORT_CONTROL \ - 8282:$CONTAINER_PORT_IDS \ - 9090:$CONTAINER_PORT_METRICS - -{{- end }} diff --git a/charts/edc-controlplane/templates/_helpers.tpl b/charts/edc-controlplane/templates/_helpers.tpl deleted file mode 100644 index 272a0f27d..000000000 --- a/charts/edc-controlplane/templates/_helpers.tpl +++ /dev/null @@ -1,72 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "edc-controlplane.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "edc-controlplane.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "edc-controlplane.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "edc-controlplane.labels" -}} -helm.sh/chart: {{ include "edc-controlplane.chart" . }} -{{ include "edc-controlplane.selectorLabels" . }} -{{ include "edc-controlplane.customLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "edc-controlplane.selectorLabels" -}} -app.kubernetes.io/name: {{ include "edc-controlplane.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Custom labels -*/}} -{{- define "edc-controlplane.customLabels" -}} -{{- with .Values.customLabels }} -{{ toYaml . }} -{{- end }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "edc-controlplane.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "edc-controlplane.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/charts/edc-controlplane/templates/configmap-env.yaml b/charts/edc-controlplane/templates/configmap-env.yaml deleted file mode 100644 index d33071a58..000000000 --- a/charts/edc-controlplane/templates/configmap-env.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "edc-controlplane.fullname" . }}-env - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-controlplane.labels" . 
| nindent 4 }} -data: - {{- toYaml .Values.env | nindent 2 }} diff --git a/charts/edc-controlplane/templates/configmap.yaml b/charts/edc-controlplane/templates/configmap.yaml deleted file mode 100644 index 863ac5e83..000000000 --- a/charts/edc-controlplane/templates/configmap.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "edc-controlplane.fullname" . }}-configmap - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-controlplane.labels" . | nindent 4 }} -data: - configuration.properties: |- - web.http.default.port={{ .Values.edc.endpoints.default.port }} - web.http.default.path={{ .Values.edc.endpoints.default.path }} - web.http.data.port={{ .Values.edc.endpoints.data.port }} - web.http.data.path={{ .Values.edc.endpoints.data.path }} - web.http.validation.port={{ .Values.edc.endpoints.validation.port }} - web.http.validation.path={{ .Values.edc.endpoints.validation.path }} - web.http.control.port={{ .Values.edc.endpoints.control.port }} - web.http.control.path={{ .Values.edc.endpoints.control.path }} - web.http.ids.port={{ .Values.edc.endpoints.ids.port }} - web.http.ids.path={{ .Values.edc.endpoints.ids.path }} - {{- .Values.configuration.properties | nindent 4 }} - - opentelemetry.properties: |- - {{- .Values.opentelemetry.properties | nindent 4 }} - - logging.properties: |- - {{- .Values.logging.properties | nindent 4 }} diff --git a/charts/edc-controlplane/templates/deployment.yaml b/charts/edc-controlplane/templates/deployment.yaml deleted file mode 100644 index 4fd762d0b..000000000 --- a/charts/edc-controlplane/templates/deployment.yaml +++ /dev/null @@ -1,154 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "edc-controlplane.fullname" . }} - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-controlplane.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "edc-controlplane.selectorLabels" . | nindent 6 }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/env-config: {{ include (print $.Template.BasePath "/configmap-env.yaml") . | sha256sum }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "edc-controlplane.selectorLabels" . | nindent 8 }} - spec: - {{- if .Values.imagePullSecret.dockerconfigjson }} - imagePullSecrets: - - name: {{ include "edc-controlplane.fullname" . }}-imagepullsecret - {{- else }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- end }} - serviceAccountName: {{ include "edc-controlplane.serviceAccountName" . }} - automountServiceAccountToken: {{ if .Values.automountServiceAccountToken }}true{{ else }}false{{ end }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: default - containerPort: {{ .Values.edc.endpoints.default.port }} - protocol: TCP - - name: data - containerPort: {{ .Values.edc.endpoints.data.port }} - protocol: TCP - - name: validation - containerPort: {{ .Values.edc.endpoints.validation.port }} - protocol: TCP - - name: control - containerPort: {{ .Values.edc.endpoints.control.port }} - protocol: TCP - - name: ids - containerPort: {{ .Values.edc.endpoints.ids.port }} - protocol: TCP - - name: metrics - containerPort: {{ .Values.edc.endpoints.metrics.port }} - protocol: TCP - {{- if .Values.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: {{ .Values.edc.endpoints.default.path }}/check/liveness - port: default - {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: - httpGet: - path: {{ .Values.edc.endpoints.default.path }}/check/readiness - port: default - {{- end }} - {{- if .Values.startupProbe.enabled }} - startupProbe: - httpGet: - path: {{ .Values.edc.endpoints.default.path }}/check/startup - port: default - failureThreshold: {{ .Values.startupProbe.failureThreshold }} - initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} - {{- end }} - envFrom: - - configMapRef: - name: {{ include "edc-controlplane.fullname" . }}-env - {{- if .Values.envSecretName }} - - secretRef: - name: {{ .Values.envSecretName | quote }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - - name: configuration - mountPath: /app/configuration.properties - subPath: configuration.properties - - name: configuration - mountPath: /app/opentelemetry.properties - subPath: opentelemetry.properties - - name: configuration - mountPath: /app/logging.properties - subPath: logging.properties - {{- with .Values.volumeMounts }} - {{- toYaml . 
| nindent 12 }} - {{- end }} - volumes: - - name: configuration - configMap: - name: {{ include "edc-controlplane.fullname" . }}-configmap - items: - - key: configuration.properties - path: configuration.properties - - key: opentelemetry.properties - path: opentelemetry.properties - - key: logging.properties - path: logging.properties - {{- with .Values.volumes }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/charts/edc-controlplane/templates/hpa.yaml b/charts/edc-controlplane/templates/hpa.yaml deleted file mode 100644 index bc75d097a..000000000 --- a/charts/edc-controlplane/templates/hpa.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - -{{- if .Values.autoscaling.enabled }} ---- -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "edc-controlplane.fullname" . }} - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-controlplane.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "edc-controlplane.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/charts/edc-controlplane/templates/imagepullsecret.yaml b/charts/edc-controlplane/templates/imagepullsecret.yaml deleted file mode 100644 index 6b6e29ace..000000000 --- a/charts/edc-controlplane/templates/imagepullsecret.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - -{{- if .Values.imagePullSecret.dockerconfigjson }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "edc-controlplane.fullname" . }}-imagepullsecret - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-controlplane.labels" . | nindent 4 }} -data: - .dockerconfigjson: {{ .Values.imagePullSecret.dockerconfigjson }} -type: kubernetes.io/dockerconfigjson -{{- end }} diff --git a/charts/edc-controlplane/templates/ingress.yaml b/charts/edc-controlplane/templates/ingress.yaml deleted file mode 100644 index cb58b5ac9..000000000 --- a/charts/edc-controlplane/templates/ingress.yaml +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG - # Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH - # Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - # Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation - # - # See the NOTICE file(s) distributed with this work for additional - # information regarding copyright ownership. - # - # This program and the accompanying materials are made available under the - # terms of the Apache License, Version 2.0 which is available at - # https://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - # License for the specific language governing permissions and limitations - # under the License. - # - # SPDX-License-Identifier: Apache-2.0 - # - -{{- $fullName := include "edc-controlplane.fullname" . }} -{{- $labels := include "edc-controlplane.labels" . 
| nindent 4 }} -{{- $gitVersion := .Capabilities.KubeVersion.GitVersion }} -{{- $edcEndpoints := .Values.edc.endpoints }} -{{- $namespace := .Release.Namespace }} -{{- range .Values.ingresses }} -{{- if and .enabled .endpoints }} -{{- $ingressName := printf "%s-%s" $fullName .hostname }} ---- -{{- if semverCompare ">=1.19-0" $gitVersion }} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" $gitVersion }} -apiVersion: networking.k8s.io/v1beta1 -{{- else }} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $ingressName }} - namespace: {{ $namespace | default "default" | quote }} - labels: - {{- $labels | nindent 2 }} - annotations: - {{- if and .className (not (semverCompare ">=1.18-0" $gitVersion)) }} - {{- if not (hasKey .annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .annotations "kubernetes.io/ingress.class" .className}} - {{- end }} - {{- end }} - {{- if .certManager }} - {{- if .certManager.issuer }} - {{- $_ := set .annotations "cert-manager.io/issuer" .certManager.issuer}} - {{- end }} - {{- if .certManager.clusterIssuer }} - {{- $_ := set .annotations "cert-manager.io/cluster-issuer" .certManager.clusterIssuer}} - {{- end }} - {{- end }} - {{- with .annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .className (semverCompare ">=1.18-0" $gitVersion) }} - ingressClassName: {{ .className }} - {{- end }} - {{- if .hostname }} - {{- if .tls.enabled }} - tls: - - hosts: - - {{ .hostname }} - {{- if .tls.secretName }} - secretName: {{ .tls.secretName }} - {{- else }} - secretName: {{ $ingressName }}-tls - {{- end }} - {{- end }} - rules: - - host: {{ .hostname }} - http: - paths: - {{- $ingressEdcEndpoints := .endpoints }} - {{- range $name, $mapping := $edcEndpoints }} - {{- if (has $name $ingressEdcEndpoints) }} - - path: {{ $mapping.path }} - pathType: Prefix - backend: - {{- if semverCompare ">=1.19-0" $gitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $mapping.port }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $mapping.port }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} -{{- end }}{{- /* end: if .enabled */}} -{{- end }}{{- /* end: range .Values.ingresses */}} diff --git a/charts/edc-controlplane/templates/service.yaml b/charts/edc-controlplane/templates/service.yaml deleted file mode 100644 index 18bc8bd55..000000000 --- a/charts/edc-controlplane/templates/service.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ include "edc-controlplane.fullname" . 
}} - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-controlplane.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.edc.endpoints.default.port }} - targetPort: default - protocol: TCP - name: default - - port: {{ .Values.edc.endpoints.control.port }} - targetPort: control - protocol: TCP - name: control - - port: {{ .Values.edc.endpoints.data.port }} - targetPort: data - protocol: TCP - name: data - - port: {{ .Values.edc.endpoints.validation.port }} - targetPort: validation - protocol: TCP - name: validation - - port: {{ .Values.edc.endpoints.ids.port }} - targetPort: ids - protocol: TCP - name: ids - - port: {{ .Values.edc.endpoints.metrics.port }} - targetPort: metrics - protocol: TCP - name: metrics - selector: - {{- include "edc-controlplane.selectorLabels" . | nindent 4 }} diff --git a/charts/edc-controlplane/templates/serviceaccount.yaml b/charts/edc-controlplane/templates/serviceaccount.yaml deleted file mode 100644 index 1f9d5045b..000000000 --- a/charts/edc-controlplane/templates/serviceaccount.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - -{{- if .Values.serviceAccount.create -}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "edc-controlplane.serviceAccountName" . }} - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-controlplane.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/edc-controlplane/values.yaml b/charts/edc-controlplane/values.yaml deleted file mode 100644 index b43d67a35..000000000 --- a/charts/edc-controlplane/values.yaml +++ /dev/null @@ -1,379 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -# Default values for edc-controlplane. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# -- Specifies how many replicas of a deployed pod shall be created during the deployment -# Note: If horizontal pod autoscaling is enabled this setting has no effect -replicaCount: 1 - -image: - # -- Which derivate of the edc control-plane to use. - # One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql, ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-memory] - repository: ghcr.io/eclipse-tractusx/tractusx-edc/edc-controlplane-postgresql-hashicorp-vault - # -- [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use - pullPolicy: IfNotPresent - # -- Overrides the image tag whose default is the chart appVersion. - tag: "" - -imagePullSecret: - # -- Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) - # Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). - # Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. - dockerconfigjson: "" - -# -- Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) -imagePullSecrets: [] - -# -- Overrides the charts name -nameOverride: "" - -# -- Overrides the releases full name -fullnameOverride: "" - -# -- Additional custom Labels to add -customLabels: {} - -serviceAccount: - # -- Specifies whether a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) should be created per release - create: true - # -- [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to add to the service account - annotations: {} - # -- The name of the service account to use. 
If not set and create is true, a name is generated using the release's fullname template - name: "" - -# -- Whether to [automount kubernetes API credentials](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server) into the pod -automountServiceAccountToken: false - -# -- [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) added to deployed [pods](https://kubernetes.io/docs/concepts/workloads/pods/) -podAnnotations: {} - -# The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment -podSecurityContext: - seccompProfile: - # -- Restrict a Container's Syscalls with seccomp - type: RuntimeDefault - # -- Runs all processes within a pod with a special uid - runAsUser: 10001 - # -- Processes within a pod will belong to this gid - runAsGroup: 10001 - # -- The owner for volumes and any files created within volumes will belong to this gid - fsGroup: 10001 - -# The [container security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) defines privilege and access control settings for a Container within a pod -securityContext: - capabilities: - # -- Specifies which capabilities to drop to reduce syscall attack surface - drop: - - ALL - # -- Specifies which capabilities to add to issue specialized syscalls - add: [] - # -- Whether the root filesystem is mounted in read-only mode - readOnlyRootFilesystem: true - # -- Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID - allowPrivilegeEscalation: false - # -- Requires the container to run without root privileges - runAsNonRoot: true - # -- The container's process will run with the specified uid - runAsUser: 10001 - -livenessProbe: - # -- Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) - enabled: true - -readinessProbe: - # -- Whether to enable kubernetes readiness-probes - enabled: true - -startupProbe: - # -- Whether to enable kubernetes startup-probes - enabled: true - # -- Minimum consecutive failures for the probe to be considered failed after having succeeded - failureThreshold: 12 - # -- Number of seconds after the container has started before liveness probes are initiated.
- initialDelaySeconds: 10 - -# -- Additional volumeMounts to the controlplane main container -volumeMounts: [] - -# -- Additional volumes to the controlplane pod -volumes: [] - -## EDC endpoints exposed by the control-plane -edc: - endpoints: - ## Default api exposing health checks etc - default: - # -- The network port, which the "default" api is going to be exposed by the container, pod and service - port: "8080" - # -- The path mapping the "default" api is going to be exposed at - path: /api - ## Data management API - data: - # -- The network port, which the "data" management api is going to be exposed by the container, pod and service - port: "8181" - # -- The path mapping the "data" management api is going to be exposed at - path: /data - ## Validation API - validation: - # -- The network port, which the "validation" api is going to be exposed by the container, pod and service - port: "8182" - # -- The path mapping the "validation" api is going to be exposed at - path: /validation - ## Control API - control: - # -- The network port, which the "control" api is going to be exposed by the container, pod and service - port: "9999" - # -- The path mapping the "control" api is going to be exposed at - path: /api/controlplane/control - ## IDS endpoints - ids: - # -- The network port, which the "ids" multipart api is going to be exposed by the container, pod and service - port: "8282" - # -- The path mapping the "ids" multipart api is going to be exposed at - path: /api/v1/ids - ## Prometheus endpoint - metrics: - # -- The network port, which the prometheus metrics are going to be exposed by the container, pod and service - port: "9090" - # -- The path mapping the prometheus metrics are going to be exposed at - path: /metrics - -service: - # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. - type: ClusterIP - -## Ingress declaration to expose the network service. 
-ingresses: - ## Public / Internet facing Ingress - - enabled: true - # -- The hostname to be used to precisely map incoming traffic onto the underlying network service - hostname: "edc-controlplane.local" - # -- Additional ingress annotations to add - annotations: {} - # -- EDC endpoints exposed by this ingress resource - endpoints: - - ids - # -- Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use - className: "" - # -- TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource - tls: - # -- Enables TLS on the ingress resource - enabled: false - # -- If present overwrites the default secret name - secretName: "" - ## Adds [cert-manager](https://cert-manager.io/docs/) annotations to the ingress resource - certManager: - # -- If preset enables certificate generation via cert-manager namespace scoped issuer - issuer: "" - # -- If preset enables certificate generation via cert-manager cluster-wide issuer - clusterIssuer: "" - - ## Private / Intranet facing Ingress - - enabled: false - # -- The hostname to be used to precisely map incoming traffic onto the underlying network service - hostname: "edc-controlplane.intranet" - # -- Additional ingress annotations to add - annotations: {} - # -- EDC endpoints exposed by this ingress resource - endpoints: - - data - - control - # -- Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use - className: "" - # -- TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource - tls: - # -- Enables TLS on the ingress resource - enabled: false - # -- If present overwrites the default secret name - secretName: "" - ## Adds [cert-manager](https://cert-manager.io/docs/) annotations to the ingress resource - certManager: - # -- If preset enables certificate generation via cert-manager namespace scoped issuer - issuer: "" - # -- If preset enables certificate generation via cert-manager cluster-wide issuer - clusterIssuer: "" - -# -- [Resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) applied to the deployed pod -resources: - {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -autoscaling: - # -- Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) - enabled: false - # -- Minimal replicas if resource consumption falls below resource thresholds - minReplicas: 1 - # -- Maximum replicas if resource consumption exceeds resource thresholds - maxReplicas: 100 - # -- targetAverageUtilization of cpu provided to a pod - targetCPUUtilizationPercentage: 80 - # -- targetAverageUtilization of memory provided to a pod - targetMemoryUtilizationPercentage: 80 - -# -- [Node-Selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) to constrain the Pod to nodes with specific labels.
-nodeSelector: {} - -# -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) are applied to Pods to schedule onto nodes with matching taints. -tolerations: [] - -# -- [Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) constrains which nodes the Pod can be scheduled on based on node labels. -affinity: {} - -# -- Container environment variables e.g. for configuring [JAVA_TOOL_OPTIONS](https://docs.oracle.com/javase/8/docs/technotes/guides/troubleshoot/envvars002.html) -# Ex.: -# JAVA_TOOL_OPTIONS: > -# -Dhttp.proxyHost=proxy -Dhttp.proxyPort=80 -Dhttp.nonProxyHosts="localhost|127.*|[::1]" -Dhttps.proxyHost=proxy -Dhttps.proxyPort=443 -env: {} - -# -- [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from -envSecretName: - -logging: - # -- EDC logging.properties configuring the [java.util.logging subsystem](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html#a1.8) - properties: |- - .level=INFO - org.eclipse.edc.level=ALL - handlers=java.util.logging.ConsoleHandler - java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter - java.util.logging.ConsoleHandler.level=ALL - java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n - -opentelemetry: - # -- opentelemetry.properties configuring the [opentelemetry agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) - properties: |- - otel.javaagent.enabled=true - otel.javaagent.debug=false - -configuration: - # -- EDC configuration.properties configuring aspects of the [eclipse-dataspaceconnector](https://github.com/eclipse-edc/Connector) - properties: |- - # edc.api.auth.key= - # edc.atomikos.checkpoint.interval= - # edc.atomikos.directory= - # edc.atomikos.logging= - # edc.atomikos.threaded2pc= - # edc.atomikos.timeout= - # edc.aws.access.key= - # edc.aws.provision.retry.retries.max= - # edc.aws.provision.role.duration.session.max= - # edc.aws.secret.access.key= - # edc.blobstore.endpoint= - # edc.dataplane.token.validation.endpoint= - # edc.core.retry.backoff.max= - # edc.core.retry.backoff.min= - # edc.core.retry.retries.max= - # edc.core.system.health.check.liveness-period= - # edc.core.system.health.check.readiness-period= - # edc.core.system.health.check.startup-period= - # edc.core.system.health.check.threadpool-size= - # edc.dataplane.queue.capacity= - # edc.dataplane.wait= - # edc.dataplane.workers= - # edc.datasource.asset.name="default" - # edc.datasource.contractdefinition.name="default" - # edc.datasource.contractnegotiation.name="default" - # edc.datasource.policy.name="default" - # edc.datasource.transferprocess.name="default" - # edc.datasource.default.pool.maxIdleConnections= - # edc.datasource.default.pool.maxTotalConnections= - # edc.datasource.default.pool.minIdleConnections= - # edc.datasource.default.pool.testConnectionOnBorrow= - # edc.datasource.default.pool.testConnectionOnCreate= - # edc.datasource.default.pool.testConnectionOnReturn= - # edc.datasource.default.pool.testConnectionWhileIdle= - # edc.datasource.default.pool.testQuery= - # edc.datasource.default.url= - # edc.datasource.default.user= - # edc.datasource.default.password= - # edc.dpf.selector.url= - # edc.events.topic.endpoint= - # edc.events.topic.name= - # edc.fs.config= - # edc.hostname= - # edc.identity.did.url= - # edc.ids.catalog.id= - # edc.ids.curator= - # 
edc.ids.description= - # edc.ids.endpoint= - # edc.ids.id= - # edc.ids.maintainer= - # edc.ids.security.profile= - # edc.ids.title= - # edc.ids.validation.referringconnector= - # edc.ion.crawler.did-type= - # edc.ion.crawler.interval-minutes= - # edc.ion.crawler.ion.url= - # edc.metrics.enabled= - # edc.metrics.executor.enabled= - # edc.metrics.jersey.enabled= - # edc.metrics.jetty.enabled= - # edc.metrics.okhttp.enabled= - # edc.metrics.system.enabled= - # edc.negotiation.consumer.state-machine.batch-size= - # edc.negotiation.provider.state-machine.batch-size= - # edc.oauth.client.id= - # edc.oauth.private.key.alias= - # edc.oauth.provider.audience= - # edc.oauth.provider.jwks.refresh= - # edc.oauth.provider.jwks.url= - # edc.oauth.public.key.alias= - # edc.oauth.token.url= - # edc.oauth.validation.nbf.leeway= - # edc.receiver.http.auth-code= - # edc.receiver.http.auth-key= - # edc.receiver.http.endpoint= - # edc.transfer.proxy.endpoint= - # edc.transfer.proxy.token.validity.seconds= - # edc.transfer.proxy.token.signer.privatekey.alias= - # edc.transfer.functions.check.endpoint= - # edc.transfer.functions.enabled.protocols= - # edc.transfer.functions.transfer.endpoint= - # edc.transfer-process-store.database.name= - # edc.transfer.state-machine.batch-size= - # edc.vault= - # edc.vault.certificate= - # edc.vault.clientid= - # edc.vault.clientsecret= - # edc.vault.name= - # edc.vault.tenantid= - # edc.vault.hashicorp.url= - # edc.vault.hashicorp.token= - # edc.vault.hashicorp.timeout.seconds= - # edc.webdid.doh.url= - # edc.web.rest.cors.enabled= - # edc.web.rest.cors.headers= - # edc.web.rest.cors.methods= - # edc.web.rest.cors.origins= - # ids.webhook.address= diff --git a/charts/edc-dataplane/.helmignore b/charts/edc-dataplane/.helmignore deleted file mode 100644 index 148b31d6c..000000000 --- a/charts/edc-dataplane/.helmignore +++ /dev/null @@ -1,29 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ - -README.md.gotmpl - -# Accept only values.yaml -values?*.yaml -values?*.yml diff --git a/charts/edc-dataplane/Chart.yaml b/charts/edc-dataplane/Chart.yaml deleted file mode 100644 index 96d5598fa..000000000 --- a/charts/edc-dataplane/Chart.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: v2 -name: edc-dataplane -description: >- - EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility of transferring and receiving data streams -home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-dataplane -type: application -appVersion: "0.3.2" -version: 0.3.2 -deprecated: true -maintainers: [] -sources: - - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/edc-dataplane diff --git a/charts/edc-dataplane/LICENSE b/charts/edc-dataplane/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/charts/edc-dataplane/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/charts/edc-dataplane/README.md b/charts/edc-dataplane/README.md deleted file mode 100644 index 6085ccbbc..000000000 --- a/charts/edc-dataplane/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# edc-dataplane - -> **:exclamation: This Helm Chart is deprecated!** - -![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.2](https://img.shields.io/badge/AppVersion-0.3.2-informational?style=flat-square) - -EDC Data-Plane - The Eclipse DataSpaceConnector data layer with responsibility of transferring and receiving data streams - -**Homepage:** - -## TL;DR - -```shell -helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/edc-dataplane --version 0.3.2 -``` - -## Source Code - -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | [Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) constrains which nodes the Pod can be scheduled on based on node labels. 
| -| automountServiceAccountToken | bool | `false` | Whether to [automount kubernetes API credentials](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server) into the pod | -| autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | -| autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | -| autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | -| autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | -| autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | -| configuration.properties | string | `"# edc.atomikos.checkpoint.interval=\n# edc.atomikos.directory=\n# edc.atomikos.logging=\n# edc.atomikos.threaded2pc=\n# edc.atomikos.timeout=\n# edc.aws.access.key=\n# edc.aws.provision.retry.retries.max=\n# edc.aws.provision.role.duration.session.max=\n# edc.aws.secret.access.key=\n# edc.blobstore.endpoint=\n# edc.dataplane.token.validation.endpoint=\n# edc.core.retry.backoff.max=\n# edc.core.retry.backoff.min=\n# edc.core.retry.retries.max=\n# edc.core.system.health.check.liveness-period=\n# edc.core.system.health.check.readiness-period=\n# edc.core.system.health.check.startup-period=\n# edc.core.system.health.check.threadpool-size=\n# edc.dataplane.queue.capacity=\n# edc.dataplane.wait=\n# edc.dataplane.workers=\n# edc.datasource.asset.name=\"default\"\n# edc.datasource.contractdefinition.name=\"default\"\n# edc.datasource.contractnegotiation.name=\"default\"\n# edc.datasource.policy.name=\"default\"\n# edc.datasource.transferprocess.name=\"default\"\n# edc.datasource.default.pool.maxIdleConnections=\n# edc.datasource.default.pool.maxTotalConnections=\n# edc.datasource.default.pool.minIdleConnections=\n# edc.datasource.default.pool.testConnectionOnBorrow=\n# edc.datasource.default.pool.testConnectionOnCreate=\n# edc.datasource.default.pool.testConnectionOnReturn=\n# edc.datasource.default.pool.testConnectionWhileIdle=\n# edc.datasource.default.pool.testQuery=\n# edc.datasource.default.url=\n# edc.datasource.default.user=\n# edc.datasource.default.password=\n# edc.dpf.selector.url=\n# edc.events.topic.endpoint=\n# edc.events.topic.name=\n# edc.fs.config=\n# edc.hostname=\n# edc.identity.did.url=\n# edc.ids.catalog.id=\n# edc.ids.curator=\n# edc.ids.description=\n# edc.ids.endpoint=\n# edc.ids.endpoint.audience=\n# edc.ids.id=\n# edc.ids.maintainer=\n# edc.ids.security.profile=\n# edc.ids.title=\n# edc.ids.validation.referringconnector=\n# edc.ion.crawler.did-type=\n# edc.ion.crawler.interval-minutes=\n# edc.ion.crawler.ion.url=\n# edc.metrics.enabled=\n# edc.metrics.executor.enabled=\n# edc.metrics.jersey.enabled=\n# edc.metrics.jetty.enabled=\n# edc.metrics.okhttp.enabled=\n# edc.metrics.system.enabled=\n# edc.negotiation.consumer.state-machine.batch-size=\n# edc.negotiation.provider.state-machine.batch-size=\n# edc.oauth.client.id=\n# edc.oauth.private.key.alias=\n# edc.oauth.provider.jwks.refresh=\n# edc.oauth.provider.jwks.url=\n# edc.oauth.public.key.alias=\n# edc.oauth.token.url=\n# edc.oauth.validation.nbf.leeway=\n# edc.receiver.http.auth-code=\n# edc.receiver.http.auth-key=\n# 
edc.receiver.http.endpoint=\n# edc.transfer.functions.check.endpoint=\n# edc.transfer.functions.enabled.protocols=\n# edc.transfer.functions.transfer.endpoint=\n# edc.transfer-process-store.database.name=\n# edc.transfer.state-machine.batch-size=\n# edc.vault=\n# edc.vault.certificate=\n# edc.vault.clientid=\n# edc.vault.clientsecret=\n# edc.vault.name=\n# edc.vault.tenantid=\n# edc.vault.hashicorp.url=\n# edc.vault.hashicorp.token=\n# edc.vault.hashicorp.timeout.seconds=\n# edc.webdid.doh.url=\n# edc.web.rest.cors.enabled=\n# edc.web.rest.cors.headers=\n# edc.web.rest.cors.methods=\n# edc.web.rest.cors.origins="` | EDC configuration.properties configuring aspects of the [eclipse-dataspaceconnector](https://github.com/eclipse-edc/Connector) | -| customLabels | object | `{}` | Additional custom Labels to add | -| edc.endpoints.control.path | string | `"/api/dataplane/control"` | The path mapping the "control" api is going to be exposed by | -| edc.endpoints.control.port | string | `"9999"` | The network port, which the "control" api is going to be exposed by the container, pod and service | -| edc.endpoints.default.path | string | `"/api"` | The path mapping the "default" api is going to be exposed by | -| edc.endpoints.default.port | string | `"8080"` | The network port, which the "default" api is going to be exposed by the container, pod and service | -| edc.endpoints.metrics.path | string | `"/metrics"` | The path mapping the prometheus metrics are going to be exposed at | -| edc.endpoints.metrics.port | string | `"9090"` | The network port, which the prometheus metrics are going to be exposed by the container, pod and service | -| edc.endpoints.public.path | string | `"/api/public"` | The path mapping the "public" api is going to be exposed by | -| edc.endpoints.public.port | string | `"8185"` | The network port, which the "public" api is going to be exposed by the container, pod and service | -| env | object | `{}` | Container environment variables e.g. for configuring [JAVA_TOOL_OPTIONS](https://docs.oracle.com/javase/8/docs/technotes/guides/troubleshoot/envvars002.html) Ex.: JAVA_TOOL_OPTIONS: > -Dhttp.proxyHost=proxy -Dhttp.proxyPort=80 -Dhttp.nonProxyHosts="localhost|127.*|[::1]" -Dhttps.proxyHost=proxy -Dhttps.proxyPort=443 | -| envSecretName | string | `nil` | [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from | -| fullnameOverride | string | `""` | Overrides the releases full name | -| image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| image.repository | string | `"ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault"` | Which derivate of the edc data-plane to use. One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-azure-vault] | -| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | -| imagePullSecret.dockerconfigjson | string | `""` | Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). 
Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. | -| imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | -| ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | -| ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| ingresses[0].enabled | bool | `true` | | -| ingresses[0].endpoints | list | `["public"]` | EDC endpoints exposed by this ingress resource | -| ingresses[0].hostname | string | `"edc-dataplane.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | -| ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | -| livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| logging.properties | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | EDC logging.properties configuring the [java.util.logging subsystem](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html#a1.8) | -| nameOverride | string | `""` | Overrides the charts name | -| nodeSelector | object | `{}` | [Node-Selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) to constrain the Pod to nodes with specific labels. 
| -| opentelemetry.properties | string | `"otel.javaagent.enabled=true\notel.javaagent.debug=false"` | opentelemetry.properties configuring the [opentelemetry agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) | -| podAnnotations | object | `{}` | [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) added to deployed [pods](https://kubernetes.io/docs/concepts/workloads/pods/) | -| podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | -| podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | -| podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | -| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | -| readinessProbe.enabled | bool | `true` | Whether to enable kubernetes readiness-probes | -| replicaCount | int | `1` | Specifies how many replicas of a deployed pod shall be created during the deployment Note: If horizontal pod autoscaling is enabled this setting has no effect | -| resources | object | `{}` | [Resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) applied to the deployed pod | -| securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID | -| securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | -| securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | -| securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | -| securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | -| securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | -| service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | -| serviceAccount.annotations | object | `{}` | [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to add to the service account | -| serviceAccount.create | bool | `true` | Specifies whether a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) should be created per release | -| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the release's fullname template | -| startupProbe.enabled | bool | `true` | Whether to enable kubernetes startup-probes | -| startupProbe.failureThreshold | int | `12` | Minimum consecutive failures for the probe to be considered failed after having succeeded | -| startupProbe.initialDelaySeconds | int | `10` | Number of seconds after the container has started before liveness probes are initiated. | -| tolerations | list | `[]` | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) are applied to Pods to schedule onto nodes with matching taints. 
| - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0) diff --git a/charts/edc-dataplane/README.md.gotmpl b/charts/edc-dataplane/README.md.gotmpl deleted file mode 100644 index c94d26d50..000000000 --- a/charts/edc-dataplane/README.md.gotmpl +++ /dev/null @@ -1,26 +0,0 @@ -{{ template "chart.header" . }} - -{{ template "chart.deprecationWarning" . }} - -{{ template "chart.badgesSection" . }} - -{{ template "chart.description" . }} - -{{ template "chart.homepageLine" . }} - -## TL;DR - -```shell -helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/edc-dataplane --version {{ .Version }} -``` - -{{ template "chart.maintainersSection" . }} - -{{ template "chart.sourcesSection" . }} - -{{ template "chart.requirementsSection" . }} - -{{ template "chart.valuesSection" . }} - -{{ template "helm-docs.versionFooter" . }} diff --git a/charts/edc-dataplane/templates/NOTES.txt b/charts/edc-dataplane/templates/NOTES.txt deleted file mode 100644 index 454b250eb..000000000 --- a/charts/edc-dataplane/templates/NOTES.txt +++ /dev/null @@ -1,64 +0,0 @@ - -CHART NAME: {{ .Chart.Name }} -CHART VERSION: {{ .Chart.Version }} -APP VERSION: {{ .Chart.AppVersion }} - -Logs can be accessed by running this command: - - kubectl logs --tail 100 -f \ - --namespace {{ .Release.Namespace }} \ - -l "app.kubernetes.io/name={{ include "edc-dataplane.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" - -{{- if .Values.ingresses }} - -Following ingress URLS are available: - {{- $edcEndpoints := .Values.edc.endpoints }} - {{- range .Values.ingresses }} - {{- if .enabled }} - {{- $ingressEdcEndpoints := .endpoints }} - {{- $hostname := .hostname }} - {{- $tls := .tls }} - {{- range $name, $mapping := $edcEndpoints }} - {{- if (has $name $ingressEdcEndpoints) }} - Visit http{{ if $tls }}s{{ end }}://{{ $hostname }}{{ $mapping.path }} to access the {{ $name }} api - {{- end }} - {{- end }} - {{- end }} - {{- end }} - -{{- else if contains "NodePort" .Values.service.type }} -Get the application URLs by running these commands: - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - - export NODE_PORT_DEFAULT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "edc-dataplane.fullname" . }}}") - export NODE_PORT_PUBLIC=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "edc-dataplane.fullname" . }}}") - export NODE_PORT_CONTROL=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[2].nodePort}" services {{ include "edc-dataplane.fullname" . }}}") - export NODE_PORT_METRICS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ include "edc-dataplane.fullname" . 
}}}") - - echo "Visit http://$NODE_IP:$NODE_PORT_DEFAULT to access the default api" - echo "Visit http://$NODE_IP:$NODE_PORT_PUBLIC to access the public data transfer api" - echo "Visit http://$NODE_IP:$NODE_PORT_CONTROL to access the control api" - echo "Visit http://$NODE_IP:$NODE_PORT_METRICS to access the metrics api" - -{{- else if contains "ClusterIP" .Values.service.type }} -Get the application URL by running these commands: - - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "edc-dataplane.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - - export CONTAINER_PORT_DEFAULT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - export CONTAINER_PORT_PUBLIC=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[1].containerPort}") - export CONTAINER_PORT_CONTROL=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[2].containerPort}") - export CONTAINER_PORT_METRICS=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[3].containerPort}") - - echo "Visit http://127.0.0.1:8080 to access the default api" - echo "Visit http://127.0.0.1:8185 to access the public data transfer api" - echo "Visit http://127.0.0.1:9999 to access the control api" - echo "Visit http://127.0.0.1:9090 to access the metrics api" - - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME \ - 8080:$CONTAINER_PORT_DEFAULT \ - 8185:$CONTAINER_PORT_PUBLIC \ - 9999:$CONTAINER_PORT_CONTROL \ - 9090:$CONTAINER_PORT_METRICS - -{{- end }} diff --git a/charts/edc-dataplane/templates/_helpers.tpl b/charts/edc-dataplane/templates/_helpers.tpl deleted file mode 100644 index 3615298cd..000000000 --- a/charts/edc-dataplane/templates/_helpers.tpl +++ /dev/null @@ -1,72 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "edc-dataplane.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "edc-dataplane.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "edc-dataplane.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "edc-dataplane.labels" -}} -helm.sh/chart: {{ include "edc-dataplane.chart" . }} -{{ include "edc-dataplane.selectorLabels" . }} -{{ include "edc-dataplane.customLabels" . 
}} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "edc-dataplane.selectorLabels" -}} -app.kubernetes.io/name: {{ include "edc-dataplane.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Custom labels -*/}} -{{- define "edc-dataplane.customLabels" -}} -{{- with .Values.customLabels }} -{{ toYaml . }} -{{- end }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "edc-dataplane.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "edc-dataplane.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/charts/edc-dataplane/templates/configmap-env.yaml b/charts/edc-dataplane/templates/configmap-env.yaml deleted file mode 100644 index 0e021734a..000000000 --- a/charts/edc-dataplane/templates/configmap-env.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "edc-dataplane.fullname" . }}-env - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-dataplane.labels" . | nindent 4 }} -data: - {{- toYaml .Values.env | nindent 2 }} diff --git a/charts/edc-dataplane/templates/configmap.yaml b/charts/edc-dataplane/templates/configmap.yaml deleted file mode 100644 index c7daa322f..000000000 --- a/charts/edc-dataplane/templates/configmap.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "edc-dataplane.fullname" . }}-configmap - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-dataplane.labels" . | nindent 4 }} -data: - configuration.properties: |- - web.http.default.port={{ .Values.edc.endpoints.default.port }} - web.http.default.path={{ .Values.edc.endpoints.default.path }} - web.http.public.port={{ .Values.edc.endpoints.public.port }} - web.http.public.path={{ .Values.edc.endpoints.public.path }} - web.http.control.port={{ .Values.edc.endpoints.control.port }} - web.http.control.path={{ .Values.edc.endpoints.control.path }} - {{- .Values.configuration.properties | nindent 4 }} - - opentelemetry.properties: |- - {{- .Values.opentelemetry.properties | nindent 4 }} - - logging.properties: |- - {{- .Values.logging.properties | nindent 4 }} diff --git a/charts/edc-dataplane/templates/deployment.yaml b/charts/edc-dataplane/templates/deployment.yaml deleted file mode 100644 index 474b04650..000000000 --- a/charts/edc-dataplane/templates/deployment.yaml +++ /dev/null @@ -1,142 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "edc-dataplane.fullname" . }} - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-dataplane.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "edc-dataplane.selectorLabels" . | nindent 6 }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/env-config: {{ include (print $.Template.BasePath "/configmap-env.yaml") . | sha256sum }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "edc-dataplane.selectorLabels" . | nindent 8 }} - spec: - {{- if .Values.imagePullSecret.dockerconfigjson }} - imagePullSecrets: - - name: {{ include "edc-dataplane.fullname" . }}-imagepullsecret - {{- else }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- end }} - serviceAccountName: {{ include "edc-dataplane.serviceAccountName" . 
}} - automountServiceAccountToken: {{ if .Values.automountServiceAccountToken }}true{{ else }}false{{ end }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: default - containerPort: {{ .Values.edc.endpoints.default.port }} - protocol: TCP - - name: public - containerPort: {{ .Values.edc.endpoints.public.port }} - protocol: TCP - - name: control - containerPort: {{ .Values.edc.endpoints.control.port }} - protocol: TCP - - name: metrics - containerPort: {{ .Values.edc.endpoints.metrics.port }} - protocol: TCP - {{- if .Values.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: {{ .Values.edc.endpoints.default.path }}/check/liveness - port: default - {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: - httpGet: - path: {{ .Values.edc.endpoints.default.path }}/check/readiness - port: default - {{- end }} - {{- if .Values.startupProbe.enabled }} - startupProbe: - httpGet: - path: {{ .Values.edc.endpoints.default.path }}/check/startup - port: default - failureThreshold: {{ .Values.startupProbe.failureThreshold }} - initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} - {{- end }} - envFrom: - - configMapRef: - name: {{ include "edc-dataplane.fullname" . }}-env - {{- if .Values.envSecretName }} - - secretRef: - name: {{ .Values.envSecretName | quote }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - - name: configuration - mountPath: /app/configuration.properties - subPath: configuration.properties - - name: configuration - mountPath: /app/opentelemetry.properties - subPath: opentelemetry.properties - - name: configuration - mountPath: /app/logging.properties - subPath: logging.properties - volumes: - - name: configuration - configMap: - name: {{ include "edc-dataplane.fullname" . }}-configmap - items: - - key: configuration.properties - path: configuration.properties - - key: opentelemetry.properties - path: opentelemetry.properties - - key: logging.properties - path: logging.properties - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/charts/edc-dataplane/templates/hpa.yaml b/charts/edc-dataplane/templates/hpa.yaml deleted file mode 100644 index 037934aeb..000000000 --- a/charts/edc-dataplane/templates/hpa.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - -{{- if .Values.autoscaling.enabled }} ---- -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "edc-dataplane.fullname" . }} - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-dataplane.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "edc-dataplane.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/charts/edc-dataplane/templates/imagepullsecret.yaml b/charts/edc-dataplane/templates/imagepullsecret.yaml deleted file mode 100644 index 11961674b..000000000 --- a/charts/edc-dataplane/templates/imagepullsecret.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - -{{- if .Values.imagePullSecret.dockerconfigjson }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "edc-dataplane.fullname" . }}-imagepullsecret - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-dataplane.labels" . 
| nindent 4 }} -data: - .dockerconfigjson: {{ .Values.imagePullSecret.dockerconfigjson }} -type: kubernetes.io/dockerconfigjson -{{- end }} diff --git a/charts/edc-dataplane/templates/ingress.yaml b/charts/edc-dataplane/templates/ingress.yaml deleted file mode 100644 index 716ac3d1f..000000000 --- a/charts/edc-dataplane/templates/ingress.yaml +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - -{{- $fullName := include "edc-dataplane.fullname" . }} -{{- $labels := include "edc-dataplane.labels" . | nindent 4 }} -{{- $gitVersion := .Capabilities.KubeVersion.GitVersion }} -{{- $edcEndpoints := .Values.edc.endpoints }} -{{- $namespace := .Release.Namespace }} -{{- range .Values.ingresses }} -{{- if and .enabled .endpoints }} -{{- $ingressName := printf "%s-%s" $fullName .hostname }} ---- -{{- if semverCompare ">=1.19-0" $gitVersion }} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" $gitVersion }} -apiVersion: networking.k8s.io/v1beta1 -{{- else }} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $ingressName }} - namespace: {{ $namespace | default "default" | quote }} - labels: - {{- $labels | nindent 2 }} - annotations: - {{- if and .className (not (semverCompare ">=1.18-0" $gitVersion)) }} - {{- if not (hasKey .annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .annotations "kubernetes.io/ingress.class" .className}} - {{- end }} - {{- end }} - {{- if .certManager }} - {{- if .certManager.issuer }} - {{- $_ := set .annotations "cert-manager.io/issuer" .certManager.issuer}} - {{- end }} - {{- if .certManager.clusterIssuer }} - {{- $_ := set .annotations "cert-manager.io/cluster-issuer" .certManager.clusterIssuer}} - {{- end }} - {{- end }} - {{- with .annotations }} - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - {{- if and .className (semverCompare ">=1.18-0" $gitVersion) }} - ingressClassName: {{ .className }} - {{- end }} - {{- if .hostname }} - {{- if .tls.enabled }} - tls: - - hosts: - - {{ .hostname }} - {{- if .tls.secretName }} - secretName: {{ .tls.secretName }} - {{- else }} - secretName: {{ $ingressName }}-tls - {{- end }} - {{- end }} - rules: - - host: {{ .hostname }} - http: - paths: - {{- $ingressEdcEndpoints := .endpoints }} - {{- range $name, $mapping := $edcEndpoints }} - {{- if (has $name $ingressEdcEndpoints) }} - - path: {{ $mapping.path }} - pathType: Prefix - backend: - {{- if semverCompare ">=1.19-0" $gitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $mapping.port }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $mapping.port }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} -{{- end }}{{- /* end: if .enabled */}} -{{- end }}{{- /* end: range .Values.ingresses */}} diff --git a/charts/edc-dataplane/templates/service.yaml b/charts/edc-dataplane/templates/service.yaml deleted file mode 100644 index e4d081776..000000000 --- a/charts/edc-dataplane/templates/service.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ include "edc-dataplane.fullname" . }} - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-dataplane.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.edc.endpoints.default.port }} - targetPort: default - protocol: TCP - name: default - - port: {{ .Values.edc.endpoints.control.port }} - targetPort: control - protocol: TCP - name: control - - port: {{ .Values.edc.endpoints.public.port }} - targetPort: public - protocol: TCP - name: public - - port: {{ .Values.edc.endpoints.metrics.port }} - targetPort: metrics - protocol: TCP - name: metrics - selector: - {{- include "edc-dataplane.selectorLabels" . | nindent 4 }} diff --git a/charts/edc-dataplane/templates/serviceaccount.yaml b/charts/edc-dataplane/templates/serviceaccount.yaml deleted file mode 100644 index 39a44d35e..000000000 --- a/charts/edc-dataplane/templates/serviceaccount.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. 
-# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - -{{- if .Values.serviceAccount.create -}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "edc-dataplane.serviceAccountName" . }} - namespace: {{ .Release.Namespace | default "default" | quote }} - labels: - {{- include "edc-dataplane.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/edc-dataplane/values.yaml b/charts/edc-dataplane/values.yaml deleted file mode 100644 index 9a049cb1f..000000000 --- a/charts/edc-dataplane/values.yaml +++ /dev/null @@ -1,331 +0,0 @@ -# -# Copyright (c) 2023 ZF Friedrichshafen AG -# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH -# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) -# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation -# -# See the NOTICE file(s) distributed with this work for additional -# information regarding copyright ownership. -# -# This program and the accompanying materials are made available under the -# terms of the Apache License, Version 2.0 which is available at -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -# Default values for edc-dataplane. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# -- Specifies how many replicas of a deployed pod shall be created during the deployment -# Note: If horizontal pod autoscaling is enabled this setting has no effect -replicaCount: 1 - -image: - # -- Which derivate of the edc data-plane to use. - # One of: [ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault, ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-azure-vault] - repository: ghcr.io/eclipse-tractusx/tractusx-edc/edc-dataplane-hashicorp-vault - # -- [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use - pullPolicy: IfNotPresent - # -- Overrides the image tag whose default is the chart appVersion - tag: "" - -imagePullSecret: - # -- Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) - # Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). - # Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. 
- dockerconfigjson: "" - -# -- Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) -imagePullSecrets: [] - -# -- Overrides the charts name -nameOverride: "" - -# -- Overrides the releases full name -fullnameOverride: "" - -# -- Additional custom Labels to add -customLabels: {} - -serviceAccount: - # -- Specifies whether a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) should be created per release - create: true - # -- [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to add to the service account - annotations: {} - # -- The name of the service account to use. If not set and create is true, a name is generated using the release's fullname template - name: "" - -# -- Whether to [automount kubernetes API credentials](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server) into the pod -automountServiceAccountToken: false - -# -- [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) added to deployed [pods](https://kubernetes.io/docs/concepts/workloads/pods/) -podAnnotations: {} - -# The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment -podSecurityContext: - seccompProfile: - # -- Restrict a Container's Syscalls with seccomp - type: RuntimeDefault - # -- Runs all processes within a pod with a special uid - runAsUser: 10001 - # -- Processes within a pod will belong to this guid - runAsGroup: 10001 - # -- The owner for volumes and any files created within volumes will belong to this guid - fsGroup: 10001 - -# The [container security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) defines privilege and access control settings for a Container within a pod -securityContext: - capabilities: - # -- Specifies which capabilities to drop to reduce syscall attack surface - drop: - - ALL - # -- Specifies which capabilities to add to issue specialized syscalls - add: [] - # -- Whether the root filesystem is mounted in read-only mode - readOnlyRootFilesystem: true - # -- Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID - allowPrivilegeEscalation: false - # -- Requires the container to run without root privileges - runAsNonRoot: true - # -- The container's process will run with the specified uid - runAsUser: 10001 - -livenessProbe: - # -- Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) - enabled: true - -readinessProbe: - # -- Whether to enable kubernetes readiness-probes - enabled: true - -startupProbe: - # -- Whether to enable kubernetes startup-probes - enabled: true - # -- Minimum consecutive failures for the probe to be considered failed after having succeeded - failureThreshold: 12 - # -- Number of seconds after the container has started before liveness probes are initiated. 
- initialDelaySeconds: 10 - -## EDC endpoints exposed by the data-plane -edc: - endpoints: - ## Default api exposing health checks etc - default: - # -- The network port, which the "default" api is going to be exposed by the container, pod and service - port: "8080" - # -- The path mapping the "default" api is going to be exposed by - path: /api - ## Public endpoint for data transfer - public: - # -- The network port, which the "public" api is going to be exposed by the container, pod and service - port: "8185" - # -- The path mapping the "public" api is going to be exposed by - path: /api/public - ## Control API - control: - # -- The network port, which the "control" api is going to be exposed by the container, pod and service - port: "9999" - # -- The path mapping the "control" api is going to be exposed by - path: /api/dataplane/control - ## Prometheus endpoint - metrics: - # -- The network port, which the prometheus metrics are going to be exposed by the container, pod and service - port: "9090" - # -- The path mapping the prometheus metrics are going to be exposed at - path: /metrics - -service: - # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. - type: ClusterIP - -## Ingress declaration to expose the network service. -ingresses: - ## Public / Internet facing Ingress - - enabled: true - # -- The hostname to be used to precisely map incoming traffic onto the underlying network service - hostname: "edc-dataplane.local" - # -- Additional ingress annotations to add - annotations: {} - # -- EDC endpoints exposed by this ingress resource - endpoints: - - public - # -- Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use - className: "" - # -- TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource - tls: - # -- Enables TLS on the ingress resource - enabled: false - # -- If present overwrites the default secret name - secretName: "" - ## Adds [cert-manager](https://cert-manager.io/docs/) annotations to the ingress resource - certManager: - # -- If preset enables certificate generation via cert-manager namespace scoped issuer - issuer: "" - # -- If preset enables certificate generation via cert-manager cluster-wide issuer - clusterIssuer: "" - -# -- [Resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) applied to the deployed pod -resources: - {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -autoscaling: - # -- Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) - enabled: false - # -- Minimal replicas if resource consumption falls below resource threshholds - minReplicas: 1 - # -- Maximum replicas if resource consumption exceeds resource threshholds - maxReplicas: 100 - # -- targetAverageUtilization of cpu provided to a pod - targetCPUUtilizationPercentage: 80 - # -- targetAverageUtilization of memory provided to a pod - targetMemoryUtilizationPercentage: 80 - -# -- [Node-Selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) to constrain the Pod to nodes with specific labels. -nodeSelector: {} - -# -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) are applied to Pods to schedule onto nodes with matching taints. -tolerations: [] - -# -- [Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) constrains which nodes the Pod can be scheduled on based on node labels. -affinity: {} - -# -- Container environment variables e.g. for configuring [JAVA_TOOL_OPTIONS](https://docs.oracle.com/javase/8/docs/technotes/guides/troubleshoot/envvars002.html) -# Ex.: -# JAVA_TOOL_OPTIONS: > -# -Dhttp.proxyHost=proxy -Dhttp.proxyPort=80 -Dhttp.nonProxyHosts="localhost|127.*|[::1]" -Dhttps.proxyHost=proxy -Dhttps.proxyPort=443 -env: {} - -# -- [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) name to load environment variables from -envSecretName: - -logging: - # -- EDC logging.properties configuring the [java.util.logging subsystem](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html#a1.8) - properties: |- - .level=INFO - org.eclipse.edc.level=ALL - handlers=java.util.logging.ConsoleHandler - java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter - java.util.logging.ConsoleHandler.level=ALL - java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n - -opentelemetry: - # -- opentelemetry.properties configuring the [opentelemetry agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) - properties: |- - otel.javaagent.enabled=true - otel.javaagent.debug=false - -configuration: - # -- EDC configuration.properties configuring aspects of the [eclipse-dataspaceconnector](https://github.com/eclipse-edc/Connector) - properties: |- - # edc.atomikos.checkpoint.interval= - # edc.atomikos.directory= - # edc.atomikos.logging= - # edc.atomikos.threaded2pc= - # edc.atomikos.timeout= - # edc.aws.access.key= - # edc.aws.provision.retry.retries.max= - # edc.aws.provision.role.duration.session.max= - # edc.aws.secret.access.key= - # edc.blobstore.endpoint= - # edc.dataplane.token.validation.endpoint= - # edc.core.retry.backoff.max= - # edc.core.retry.backoff.min= - # edc.core.retry.retries.max= - # edc.core.system.health.check.liveness-period= - # edc.core.system.health.check.readiness-period= - # edc.core.system.health.check.startup-period= - # edc.core.system.health.check.threadpool-size= - # edc.dataplane.queue.capacity= - # edc.dataplane.wait= - # edc.dataplane.workers= - # edc.datasource.asset.name="default" - # edc.datasource.contractdefinition.name="default" - # 
edc.datasource.contractnegotiation.name="default" - # edc.datasource.policy.name="default" - # edc.datasource.transferprocess.name="default" - # edc.datasource.default.pool.maxIdleConnections= - # edc.datasource.default.pool.maxTotalConnections= - # edc.datasource.default.pool.minIdleConnections= - # edc.datasource.default.pool.testConnectionOnBorrow= - # edc.datasource.default.pool.testConnectionOnCreate= - # edc.datasource.default.pool.testConnectionOnReturn= - # edc.datasource.default.pool.testConnectionWhileIdle= - # edc.datasource.default.pool.testQuery= - # edc.datasource.default.url= - # edc.datasource.default.user= - # edc.datasource.default.password= - # edc.dpf.selector.url= - # edc.events.topic.endpoint= - # edc.events.topic.name= - # edc.fs.config= - # edc.hostname= - # edc.identity.did.url= - # edc.ids.catalog.id= - # edc.ids.curator= - # edc.ids.description= - # edc.ids.endpoint= - # edc.ids.endpoint.audience= - # edc.ids.id= - # edc.ids.maintainer= - # edc.ids.security.profile= - # edc.ids.title= - # edc.ids.validation.referringconnector= - # edc.ion.crawler.did-type= - # edc.ion.crawler.interval-minutes= - # edc.ion.crawler.ion.url= - # edc.metrics.enabled= - # edc.metrics.executor.enabled= - # edc.metrics.jersey.enabled= - # edc.metrics.jetty.enabled= - # edc.metrics.okhttp.enabled= - # edc.metrics.system.enabled= - # edc.negotiation.consumer.state-machine.batch-size= - # edc.negotiation.provider.state-machine.batch-size= - # edc.oauth.client.id= - # edc.oauth.private.key.alias= - # edc.oauth.provider.jwks.refresh= - # edc.oauth.provider.jwks.url= - # edc.oauth.public.key.alias= - # edc.oauth.token.url= - # edc.oauth.validation.nbf.leeway= - # edc.receiver.http.auth-code= - # edc.receiver.http.auth-key= - # edc.receiver.http.endpoint= - # edc.transfer.functions.check.endpoint= - # edc.transfer.functions.enabled.protocols= - # edc.transfer.functions.transfer.endpoint= - # edc.transfer-process-store.database.name= - # edc.transfer.state-machine.batch-size= - # edc.vault= - # edc.vault.certificate= - # edc.vault.clientid= - # edc.vault.clientsecret= - # edc.vault.name= - # edc.vault.tenantid= - # edc.vault.hashicorp.url= - # edc.vault.hashicorp.token= - # edc.vault.hashicorp.timeout.seconds= - # edc.webdid.doh.url= - # edc.web.rest.cors.enabled= - # edc.web.rest.cors.headers= - # edc.web.rest.cors.methods= - # edc.web.rest.cors.origins= From d916e9e8d7682d265a9e95cecd4e6c507243904b Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Date: Tue, 4 Apr 2023 09:51:50 +0200 Subject: [PATCH 36/92] Update docs/development/decision-records/2023-04-03_renaming_branches/README.md Co-authored-by: Florian Rusch (ZF Friedrichshafen AG) --- .../decision-records/2023-04-03_renaming_branches/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/development/decision-records/2023-04-03_renaming_branches/README.md b/docs/development/decision-records/2023-04-03_renaming_branches/README.md index dcb80865c..5fc2fe4e5 100644 --- a/docs/development/decision-records/2023-04-03_renaming_branches/README.md +++ b/docs/development/decision-records/2023-04-03_renaming_branches/README.md @@ -48,7 +48,7 @@ fork" refers to `catenax-ng/tx-tractusx-edc`. - push the contents of `fork/main` -> `upstream/releases` - synchronize `upstream/develop` with `fork/develop` - force-push the contents of `develop` -> `upstream/main` (do **not** update the tracking branch!) -- synchronize `upstream/main` -> `fork/main`. 
+- synchronize `upstream/main` -> `fork/main` - delete/archive `upstream/develop` and `fork/develop` _Note that most of this will likely need to be done manually, since GitHub does not allow for advanced Git operations From 70a1ac4a30455b3c274ac42853e78dd1ac029d12 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Date: Tue, 4 Apr 2023 09:51:57 +0200 Subject: [PATCH 37/92] Update docs/development/decision-records/2023-04-03_renaming_branches/README.md Co-authored-by: Florian Rusch (ZF Friedrichshafen AG) --- .../decision-records/2023-04-03_renaming_branches/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/development/decision-records/2023-04-03_renaming_branches/README.md b/docs/development/decision-records/2023-04-03_renaming_branches/README.md index 5fc2fe4e5..7bc3abd64 100644 --- a/docs/development/decision-records/2023-04-03_renaming_branches/README.md +++ b/docs/development/decision-records/2023-04-03_renaming_branches/README.md @@ -44,7 +44,7 @@ section outlines the exact sequence of steps. Note that "upstream" refers to `ec fork" refers to `catenax-ng/tx-tractusx-edc`. - create a new branch `upstream/releases` -- create a new branch `fork/releaes`, set it to track `upstream/releases` +- create a new branch `fork/releases`, set it to track `upstream/releases` - push the contents of `fork/main` -> `upstream/releases` - synchronize `upstream/develop` with `fork/develop` - force-push the contents of `develop` -> `upstream/main` (do **not** update the tracking branch!) From 01d6cf92be88554d6ac20c34026ba08d4f772136 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Tue, 4 Apr 2023 16:04:56 +0200 Subject: [PATCH 38/92] feat(dataEncryption): removes lombok from data-encryption module --- .../algorithms/aes/AesAlgorithm.java | 132 +++++++++-------- .../aes/AesInitializationVectorIterator.java | 63 ++++---- .../algorithms/aes/ByteCounter.java | 102 +++++++------ .../data/CryptoDataFactoryImpl.java | 131 ++++++++++------- .../AesDataEncrypterConfiguration.java | 28 +++- .../encrypter/AesDataEncrypterImpl.java | 137 +++++++++--------- .../encrypter/DataEncrypterFactory.java | 74 +++++----- .../encryption/key/CryptoKeyFactoryImpl.java | 22 ++- .../encryption/provider/AesKeyProvider.java | 73 +++++----- .../provider/CachingKeyProvider.java | 105 ++++++++------ .../algorithms/aes/AesAlgorithmTest.java | 126 ++++++++-------- .../AesInitializationVectorIteratorTest.java | 80 +++++----- .../DataEncrypterAesComponentTest.java | 125 ++++++++-------- 13 files changed, 649 insertions(+), 549 deletions(-) diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithm.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithm.java index 69c54a173..6f463fc82 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithm.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithm.java @@ -20,93 +20,91 @@ */ package org.eclipse.tractusx.edc.data.encryption.algorithms.aes; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.eclipse.tractusx.edc.data.encryption.algorithms.CryptoAlgorithm; +import org.eclipse.tractusx.edc.data.encryption.data.CryptoDataFactory; +import org.eclipse.tractusx.edc.data.encryption.data.DecryptedData; +import 
org.eclipse.tractusx.edc.data.encryption.data.EncryptedData; +import org.eclipse.tractusx.edc.data.encryption.key.AesKey; +import org.eclipse.tractusx.edc.data.encryption.util.ArrayUtil; + import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.security.SecureRandom; +import java.util.Objects; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.spec.GCMParameterSpec; import javax.crypto.spec.SecretKeySpec; -import lombok.NonNull; -import lombok.SneakyThrows; -import org.bouncycastle.jce.provider.BouncyCastleProvider; -import org.eclipse.tractusx.edc.data.encryption.algorithms.CryptoAlgorithm; -import org.eclipse.tractusx.edc.data.encryption.data.CryptoDataFactory; -import org.eclipse.tractusx.edc.data.encryption.data.DecryptedData; -import org.eclipse.tractusx.edc.data.encryption.data.EncryptedData; -import org.eclipse.tractusx.edc.data.encryption.key.AesKey; -import org.eclipse.tractusx.edc.data.encryption.util.ArrayUtil; -import org.jetbrains.annotations.NotNull; public class AesAlgorithm implements CryptoAlgorithm { - private static final String AES_GCM = "AES/GCM/NoPadding"; - private static final String AES = "AES"; - private static final Object MONITOR = new Object(); + private static final String AES_GCM = "AES/GCM/NoPadding"; + private static final String AES = "AES"; + private static final Object MONITOR = new Object(); + + private final SecureRandom secureRandom; - private final SecureRandom secureRandom; + private final CryptoDataFactory cryptoDataFactory; + private AesInitializationVectorIterator initializationVectorIterator; - @NonNull private final CryptoDataFactory cryptoDataFactory; - private AesInitializationVectorIterator initializationVectorIterator; + public AesAlgorithm(CryptoDataFactory cryptoDataFactory) { + this.cryptoDataFactory = Objects.requireNonNull(cryptoDataFactory); - @SneakyThrows - public AesAlgorithm(@NotNull CryptoDataFactory cryptoDataFactory) { - this.cryptoDataFactory = cryptoDataFactory; + // We use new SecureRandom() and not SecureRandom.getInstanceStrong(), as the second one + // would use a blocking algorithm, which leads to an increased encryption time of up to 3 + // minutes. Since we have already used /dev/urandom, which only provides pseudo-randomness and + // is also non-blocking, switching to a non-blocking algorithm should not matter here either. + this.secureRandom = new SecureRandom(); + this.initializationVectorIterator = new AesInitializationVectorIterator(this.secureRandom); + } - // We use new SecureRandom() and not SecureRandom.getInstanceStrong(), as the second one - // would use a blocking algorithm, which leads to an increased encryption time of up to 3 - // minutes. Since we have already used /dev/urandom, which only provides pseudo-randomness and - // is also non-blocking, switching to a non-blocking algorithm should not matter here either. 
- this.secureRandom = new SecureRandom(); - this.initializationVectorIterator = new AesInitializationVectorIterator(this.secureRandom); - } + @Override + public synchronized EncryptedData encrypt(DecryptedData data, AesKey key) + throws IllegalBlockSizeException, BadPaddingException, InvalidKeyException, + NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException { - @Override - public synchronized EncryptedData encrypt(DecryptedData data, AesKey key) - throws IllegalBlockSizeException, BadPaddingException, InvalidKeyException, - NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException { + final byte[] initializationVector; + synchronized (MONITOR) { + if (!initializationVectorIterator.hasNext()) { + initializationVectorIterator = new AesInitializationVectorIterator(this.secureRandom); + } - final byte[] initializationVector; - synchronized (MONITOR) { - if (!initializationVectorIterator.hasNext()) { - initializationVectorIterator = new AesInitializationVectorIterator(this.secureRandom); - } + initializationVector = initializationVectorIterator.next(); + } - initializationVector = initializationVectorIterator.next(); + Cipher cipher = Cipher.getInstance(AES_GCM, new BouncyCastleProvider()); + final SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), AES); + final GCMParameterSpec gcmParameterSpec = + new GCMParameterSpec(16 * 8 /* =128 */, initializationVector); + cipher.init(Cipher.ENCRYPT_MODE, keySpec, gcmParameterSpec); + byte[] encrypted = cipher.doFinal(data.getBytes()); + byte[] encryptedWithVector = ArrayUtil.concat(initializationVector, encrypted); + + return cryptoDataFactory.encryptedFromBytes(encryptedWithVector); } - Cipher cipher = Cipher.getInstance(AES_GCM, new BouncyCastleProvider()); - final SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), AES); - final GCMParameterSpec gcmParameterSpec = - new GCMParameterSpec(16 * 8 /* =128 */, initializationVector); - cipher.init(Cipher.ENCRYPT_MODE, keySpec, gcmParameterSpec); - byte[] encrypted = cipher.doFinal(data.getBytes()); - byte[] encryptedWithVector = ArrayUtil.concat(initializationVector, encrypted); - - return cryptoDataFactory.encryptedFromBytes(encryptedWithVector); - } - - @Override - public DecryptedData decrypt(EncryptedData data, AesKey key) - throws IllegalBlockSizeException, BadPaddingException, InvalidKeyException, - NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException { - byte[] encryptedWithVector = data.getBytes(); - byte[] initializationVector = ArrayUtil.subArray(encryptedWithVector, 0, 16); - byte[] encrypted = ArrayUtil.subArray(encryptedWithVector, 16, encryptedWithVector.length - 16); - - Cipher cipher = Cipher.getInstance(AES_GCM, new BouncyCastleProvider()); - final SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), AES); - final GCMParameterSpec gcmParameterSpec = - new GCMParameterSpec(16 * 8 /* =128 */, initializationVector); - cipher.init(Cipher.DECRYPT_MODE, keySpec, gcmParameterSpec); - byte[] decryptedData = cipher.doFinal(encrypted); - return cryptoDataFactory.decryptedFromBytes(decryptedData); - } - - public String getAlgorithm() { - return this.secureRandom.getAlgorithm(); - } + @Override + public DecryptedData decrypt(EncryptedData data, AesKey key) + throws IllegalBlockSizeException, BadPaddingException, InvalidKeyException, + NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException { + byte[] encryptedWithVector = data.getBytes(); + byte[] 
initializationVector = ArrayUtil.subArray(encryptedWithVector, 0, 16); + byte[] encrypted = ArrayUtil.subArray(encryptedWithVector, 16, encryptedWithVector.length - 16); + + Cipher cipher = Cipher.getInstance(AES_GCM, new BouncyCastleProvider()); + final SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), AES); + final GCMParameterSpec gcmParameterSpec = + new GCMParameterSpec(16 * 8 /* =128 */, initializationVector); + cipher.init(Cipher.DECRYPT_MODE, keySpec, gcmParameterSpec); + byte[] decryptedData = cipher.doFinal(encrypted); + return cryptoDataFactory.decryptedFromBytes(decryptedData); + } + + public String getAlgorithm() { + return this.secureRandom.getAlgorithm(); + } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesInitializationVectorIterator.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesInitializationVectorIterator.java index cd0a6b1ec..73d02c3d5 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesInitializationVectorIterator.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesInitializationVectorIterator.java @@ -20,51 +20,50 @@ */ package org.eclipse.tractusx.edc.data.encryption.algorithms.aes; +import org.eclipse.tractusx.edc.data.encryption.util.ArrayUtil; + import java.security.SecureRandom; import java.util.Iterator; import java.util.NoSuchElementException; -import lombok.SneakyThrows; -import org.eclipse.tractusx.edc.data.encryption.util.ArrayUtil; public class AesInitializationVectorIterator implements Iterator { - public static final int RANDOM_SIZE = 12; - public static final int COUNTER_SIZE = 4; - - private final ByteCounter counter; + public static final int RANDOM_SIZE = 12; + public static final int COUNTER_SIZE = 4; - private SecureRandom secureRandom; + private final ByteCounter counter; - public AesInitializationVectorIterator(SecureRandom secureRandom) { - this.counter = new ByteCounter(COUNTER_SIZE); - this.secureRandom = secureRandom; - } + private SecureRandom secureRandom; - public AesInitializationVectorIterator(ByteCounter byteCounter) { - this.counter = byteCounter; - } + public AesInitializationVectorIterator(SecureRandom secureRandom) { + this.counter = new ByteCounter(COUNTER_SIZE); + this.secureRandom = secureRandom; + } - @Override - public boolean hasNext() { - return !counter.isMaxed(); - } + public AesInitializationVectorIterator(ByteCounter byteCounter) { + this.counter = byteCounter; + } - @Override - public byte[] next() { - if (counter.isMaxed()) { - throw new NoSuchElementException(getClass().getSimpleName() + " has no more elements"); + @Override + public boolean hasNext() { + return !counter.isMaxed(); } - byte[] random = getNextRandom(); - counter.increment(); + @Override + public byte[] next() { + if (counter.isMaxed()) { + throw new NoSuchElementException(getClass().getSimpleName() + " has no more elements"); + } - return ArrayUtil.concat(random, counter.getBytes()); - } + byte[] random = getNextRandom(); + counter.increment(); - @SneakyThrows - public byte[] getNextRandom() { - byte[] newVector = new byte[RANDOM_SIZE]; - secureRandom.nextBytes(newVector); - return newVector; - } + return ArrayUtil.concat(random, counter.getBytes()); + } + + public byte[] getNextRandom() { + byte[] newVector = new byte[RANDOM_SIZE]; + secureRandom.nextBytes(newVector); + return newVector; 
+ } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/ByteCounter.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/ByteCounter.java index 55eec8184..e7874c158 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/ByteCounter.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/ByteCounter.java @@ -19,63 +19,69 @@ */ package org.eclipse.tractusx.edc.data.encryption.algorithms.aes; -/** Big Endian Byte Counter */ +/** + * Big Endian Byte Counter + */ public class ByteCounter { - private final byte[] counter; - - /** - * Constructs a new ByteCounter with the given number of bytes. E.g. a ByteCounter with 4 bytes - * will have a counter value of [0, 0, 0, 0]. - * - * @param size number of bytes used by the counter - */ - public ByteCounter(int size) { - this.counter = new byte[size]; - } + private final byte[] counter; - /** - * Constructs a new ByteCounter with the given counter value. Counter cannot grow bigger than the - * size of the array. - * - * @param counter initial counter value - */ - public ByteCounter(byte[] counter) { - this.counter = counter; - } + /** + * Constructs a new ByteCounter with the given number of bytes. E.g. a ByteCounter with 4 bytes + * will have a counter value of [0, 0, 0, 0]. + * + * @param size number of bytes used by the counter + */ + public ByteCounter(int size) { + this.counter = new byte[size]; + } - /** Returns the counter value as a byte array. */ - public byte[] getBytes() { - return counter; - } + /** + * Constructs a new ByteCounter with the given counter value. Counter cannot grow bigger than the + * size of the array. + * + * @param counter initial counter value + */ + public ByteCounter(byte[] counter) { + this.counter = counter; + } - /** Returns true if counter is maxed */ - public boolean isMaxed() { - for (byte b : counter) { - if (b != (byte) 0xff) return false; + /** + * Returns the counter value as a byte array. + */ + public byte[] getBytes() { + return counter; } - return true; - } - /** - * Increments the counter by one. - * - * @throws IllegalStateException if the counter is already maxed - */ - public void increment() { - incrementByte(counter.length - 1); - } + /** + * Returns true if counter is maxed + */ + public boolean isMaxed() { + for (byte b : counter) { + if (b != (byte) 0xff) return false; + } + return true; + } - private void incrementByte(int index) { - if (isMaxed()) { - throw new IllegalStateException("Counter is already maxed"); + /** + * Increments the counter by one. 
+ * + * @throws IllegalStateException if the counter is already maxed + */ + public void increment() { + incrementByte(counter.length - 1); } - if (counter[index] == (byte) 0xff) { - incrementByte(index - 1); - counter[index] = (byte) 0x00; - } else { - counter[index]++; + private void incrementByte(int index) { + if (isMaxed()) { + throw new IllegalStateException("Counter is already maxed"); + } + + if (counter[index] == (byte) 0xff) { + incrementByte(index - 1); + counter[index] = (byte) 0x00; + } else { + counter[index]++; + } } - } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/data/CryptoDataFactoryImpl.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/data/CryptoDataFactoryImpl.java index b23966170..a01331275 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/data/CryptoDataFactoryImpl.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/data/CryptoDataFactoryImpl.java @@ -19,58 +19,89 @@ */ package org.eclipse.tractusx.edc.data.encryption.data; -import lombok.Value; import org.bouncycastle.util.encoders.Base64; public class CryptoDataFactoryImpl implements CryptoDataFactory { - public DecryptedData decryptedFromText(String text) { - final byte[] bytes = text.getBytes(); - final String base64 = Base64.toBase64String(bytes); - return new DecryptedDataImpl(bytes, base64, text); - } - - public DecryptedData decryptedFromBase64(String base64) { - final byte[] bytes = Base64.decode(base64); - final String text = new String(bytes); - return new DecryptedDataImpl(bytes, base64, text); - } - - public DecryptedData decryptedFromBytes(byte[] bytes) { - final String base64 = Base64.toBase64String(bytes); - final String text = new String(bytes); - return new DecryptedDataImpl(bytes, base64, text); - } - - public EncryptedData encryptedFromText(String text) { - final byte[] bytes = text.getBytes(); - final String base64 = Base64.toBase64String(bytes); - return new EncryptedDataImpl(bytes, base64, text); - } - - public EncryptedData encryptedFromBase64(String base64) { - final byte[] bytes = Base64.decode(base64); - final String text = new String(bytes); - return new EncryptedDataImpl(bytes, base64, text); - } - - public EncryptedData encryptedFromBytes(byte[] bytes) { - final String base64 = Base64.toBase64String(bytes); - final String text = new String(bytes); - return new EncryptedDataImpl(bytes, base64, text); - } - - @Value - private static class DecryptedDataImpl implements DecryptedData { - byte[] bytes; - String base64; - String text; - } - - @Value - private static class EncryptedDataImpl implements EncryptedData { - byte[] bytes; - String base64; - String text; - } + public DecryptedData decryptedFromText(String text) { + final byte[] bytes = text.getBytes(); + final String base64 = Base64.toBase64String(bytes); + return new DecryptedDataImpl(bytes, base64, text); + } + + public DecryptedData decryptedFromBase64(String base64) { + final byte[] bytes = Base64.decode(base64); + final String text = new String(bytes); + return new DecryptedDataImpl(bytes, base64, text); + } + + public DecryptedData decryptedFromBytes(byte[] bytes) { + final String base64 = Base64.toBase64String(bytes); + final String text = new String(bytes); + return new DecryptedDataImpl(bytes, base64, text); + } + + public EncryptedData encryptedFromText(String text) { + final byte[] bytes = text.getBytes(); + final String base64 = 
Base64.toBase64String(bytes); + return new EncryptedDataImpl(bytes, base64, text); + } + + public EncryptedData encryptedFromBase64(String base64) { + final byte[] bytes = Base64.decode(base64); + final String text = new String(bytes); + return new EncryptedDataImpl(bytes, base64, text); + } + + public EncryptedData encryptedFromBytes(byte[] bytes) { + final String base64 = Base64.toBase64String(bytes); + final String text = new String(bytes); + return new EncryptedDataImpl(bytes, base64, text); + } + + + private static class DecryptedDataImpl implements DecryptedData { + private final byte[] bytes; + private final String base64; + private final String text; + + private DecryptedDataImpl(byte[] bytes, String base64, String text) { + this.bytes = bytes; + this.base64 = base64; + this.text = text; + } + + @Override + public byte[] getBytes() { + return bytes; + } + + @Override + public String getBase64() { + return base64; + } + } + + + private static class EncryptedDataImpl implements EncryptedData { + private final byte[] bytes; + private final String base64; + private final String text; + + private EncryptedDataImpl(byte[] bytes, String base64, String text) { + this.bytes = bytes; + this.base64 = base64; + this.text = text; + } + + @Override + public byte[] getBytes() { + return bytes; + } + + @Override + public String getBase64() { + return base64; + } + } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/AesDataEncrypterConfiguration.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/AesDataEncrypterConfiguration.java index 725828acc..0723306e4 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/AesDataEncrypterConfiguration.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/AesDataEncrypterConfiguration.java @@ -21,12 +21,28 @@ package org.eclipse.tractusx.edc.data.encryption.encrypter; import java.time.Duration; -import lombok.NonNull; -import lombok.Value; -@Value + public class AesDataEncrypterConfiguration { - @NonNull String keySetAlias; - boolean cachingEnabled; - @NonNull Duration cachingDuration; + private final String keySetAlias; + private final boolean cachingEnabled; + private final Duration cachingDuration; + + public AesDataEncrypterConfiguration(String keySetAlias, boolean cachingEnabled, Duration cachingDuration) { + this.keySetAlias = keySetAlias; + this.cachingEnabled = cachingEnabled; + this.cachingDuration = cachingDuration; + } + + public Duration getCachingDuration() { + return cachingDuration; + } + + public boolean isCachingEnabled() { + return cachingEnabled; + } + + public String getKeySetAlias() { + return keySetAlias; + } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/AesDataEncrypterImpl.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/AesDataEncrypterImpl.java index 160f57df0..d8b4add87 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/AesDataEncrypterImpl.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/AesDataEncrypterImpl.java @@ -20,15 +20,6 @@ package org.eclipse.tractusx.edc.data.encryption.encrypter; -import java.security.InvalidAlgorithmParameterException; -import 
java.security.InvalidKeyException; -import java.security.NoSuchAlgorithmException; -import java.util.Optional; -import javax.crypto.AEADBadTagException; -import javax.crypto.BadPaddingException; -import javax.crypto.IllegalBlockSizeException; -import javax.crypto.NoSuchPaddingException; -import lombok.RequiredArgsConstructor; import org.eclipse.edc.connector.transfer.dataplane.spi.security.DataEncrypter; import org.eclipse.edc.spi.EdcException; import org.eclipse.edc.spi.monitor.Monitor; @@ -40,69 +31,85 @@ import org.eclipse.tractusx.edc.data.encryption.key.AesKey; import org.eclipse.tractusx.edc.data.encryption.provider.KeyProvider; -@RequiredArgsConstructor +import java.security.InvalidAlgorithmParameterException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.util.Optional; +import javax.crypto.AEADBadTagException; +import javax.crypto.BadPaddingException; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; + public class AesDataEncrypterImpl implements DataEncrypter { - private final CryptoAlgorithm encryptionStrategy; - private final Monitor monitor; - private final KeyProvider keyProvider; - private final CryptoAlgorithm algorithm; - private final CryptoDataFactory cryptoDataFactory; + private final CryptoAlgorithm encryptionStrategy; + private final Monitor monitor; + private final KeyProvider keyProvider; + private final CryptoAlgorithm algorithm; + private final CryptoDataFactory cryptoDataFactory; - @Override - public String encrypt(String value) { - DecryptedData decryptedData = cryptoDataFactory.decryptedFromText(value); - AesKey key = keyProvider.getEncryptionKey(); + public AesDataEncrypterImpl(CryptoAlgorithm encryptionStrategy, Monitor monitor, KeyProvider keyProvider, CryptoAlgorithm algorithm, CryptoDataFactory cryptoDataFactory) { + this.encryptionStrategy = encryptionStrategy; + this.monitor = monitor; + this.keyProvider = keyProvider; + this.algorithm = algorithm; + this.cryptoDataFactory = cryptoDataFactory; + } - try { - EncryptedData encryptedData = algorithm.encrypt(decryptedData, key); - return encryptedData.getBase64(); - } catch (IllegalBlockSizeException - | BadPaddingException - | InvalidKeyException - | InvalidAlgorithmParameterException - | NoSuchPaddingException - | NoSuchAlgorithmException e) { - throw new EdcException(e); + @Override + public String encrypt(String value) { + DecryptedData decryptedData = cryptoDataFactory.decryptedFromText(value); + AesKey key = keyProvider.getEncryptionKey(); + + try { + EncryptedData encryptedData = algorithm.encrypt(decryptedData, key); + return encryptedData.getBase64(); + } catch (IllegalBlockSizeException + | BadPaddingException + | InvalidKeyException + | InvalidAlgorithmParameterException + | NoSuchPaddingException + | NoSuchAlgorithmException e) { + throw new EdcException(e); + } } - } - @Override - public String decrypt(String value) { - EncryptedData encryptedData = cryptoDataFactory.encryptedFromBase64(value); + @Override + public String decrypt(String value) { + EncryptedData encryptedData = cryptoDataFactory.encryptedFromBase64(value); - return keyProvider - .getDecryptionKeySet() - .map(key -> decrypt(encryptedData, key)) - .filter(Optional::isPresent) - .map(Optional::get) - .map(DecryptedData::getBytes) - .map(String::new) - .findFirst() - .orElseThrow( - () -> - new EdcException( - DataEncryptionExtension.EXTENSION_NAME - + ": Failed to decrypt data. 
This can happen if the key set is empty, contains invalid keys, the decryption key rotated out of the key set or because the data was encrypted by a different algorithm.")); - } + return keyProvider + .getDecryptionKeySet() + .map(key -> decrypt(encryptedData, key)) + .filter(Optional::isPresent) + .map(Optional::get) + .map(DecryptedData::getBytes) + .map(String::new) + .findFirst() + .orElseThrow( + () -> + new EdcException( + DataEncryptionExtension.EXTENSION_NAME + + ": Failed to decrypt data. This can happen if the key set is empty, contains invalid keys, the decryption key rotated out of the key set or because the data was encrypted by a different algorithm.")); + } - private Optional decrypt(EncryptedData data, AesKey key) { - try { - return Optional.of(encryptionStrategy.decrypt(data, key)); - } catch (AEADBadTagException e) { // thrown when wrong key is used for decryption - return Optional.empty(); - } catch (IllegalBlockSizeException - | BadPaddingException - | InvalidKeyException - | NoSuchPaddingException - | NoSuchAlgorithmException - | InvalidAlgorithmParameterException e) { - monitor.warning( - String.format( - DataEncryptionExtension.EXTENSION_NAME - + ": Exception decrypting data using key from rotating key set. %s", - e.getMessage())); - throw new EdcException(e); + private Optional decrypt(EncryptedData data, AesKey key) { + try { + return Optional.of(encryptionStrategy.decrypt(data, key)); + } catch (AEADBadTagException e) { // thrown when wrong key is used for decryption + return Optional.empty(); + } catch (IllegalBlockSizeException + | BadPaddingException + | InvalidKeyException + | NoSuchPaddingException + | NoSuchAlgorithmException + | InvalidAlgorithmParameterException e) { + monitor.warning( + String.format( + DataEncryptionExtension.EXTENSION_NAME + + ": Exception decrypting data using key from rotating key set. 
%s", + e.getMessage())); + throw new EdcException(e); + } } - } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/DataEncrypterFactory.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/DataEncrypterFactory.java index 916ab245f..c40e20b08 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/DataEncrypterFactory.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/encrypter/DataEncrypterFactory.java @@ -21,9 +21,6 @@ package org.eclipse.tractusx.edc.data.encryption.encrypter; -import static java.lang.String.format; - -import lombok.RequiredArgsConstructor; import org.eclipse.edc.connector.transfer.dataplane.spi.security.DataEncrypter; import org.eclipse.edc.spi.monitor.Monitor; import org.eclipse.edc.spi.security.Vault; @@ -36,45 +33,52 @@ import org.eclipse.tractusx.edc.data.encryption.provider.CachingKeyProvider; import org.eclipse.tractusx.edc.data.encryption.provider.KeyProvider; -@RequiredArgsConstructor -public class DataEncrypterFactory { +import static java.lang.String.format; - public static final String AES_ALGORITHM = "AES"; - public static final String NONE = "NONE"; +public class DataEncrypterFactory { - private final Vault vault; - private final Monitor monitor; - private final CryptoKeyFactory keyFactory; + public static final String AES_ALGORITHM = "AES"; + public static final String NONE = "NONE"; - public DataEncrypter createNoneEncrypter() { - return new DataEncrypter() { - @Override - public String encrypt(String data) { - return data; - } + private final Vault vault; + private final Monitor monitor; + private final CryptoKeyFactory keyFactory; - @Override - public String decrypt(String data) { - return data; - } - }; - } + public DataEncrypterFactory(Vault vault, Monitor monitor, CryptoKeyFactory keyFactory) { + this.vault = vault; + this.monitor = monitor; + this.keyFactory = keyFactory; + } - public DataEncrypter createAesEncrypter(AesDataEncrypterConfiguration configuration) { - KeyProvider keyProvider = - new AesKeyProvider(vault, configuration.getKeySetAlias(), keyFactory); + public DataEncrypter createNoneEncrypter() { + return new DataEncrypter() { + @Override + public String encrypt(String data) { + return data; + } - if (configuration.isCachingEnabled()) { - keyProvider = new CachingKeyProvider<>(keyProvider, configuration.getCachingDuration()); + @Override + public String decrypt(String data) { + return data; + } + }; } + + public DataEncrypter createAesEncrypter(AesDataEncrypterConfiguration configuration) { + KeyProvider keyProvider = + new AesKeyProvider(vault, configuration.getKeySetAlias(), keyFactory); - final CryptoDataFactory cryptoDataFactory = new CryptoDataFactoryImpl(); - final AesAlgorithm algorithm = new AesAlgorithm(cryptoDataFactory); + if (configuration.isCachingEnabled()) { + keyProvider = new CachingKeyProvider<>(keyProvider, configuration.getCachingDuration()); + } - monitor.debug( - format( - "AES algorithm was initialised with SecureRandom algorithm '%s'", - algorithm.getAlgorithm())); - return new AesDataEncrypterImpl(algorithm, monitor, keyProvider, algorithm, cryptoDataFactory); - } + final CryptoDataFactory cryptoDataFactory = new CryptoDataFactoryImpl(); + final AesAlgorithm algorithm = new AesAlgorithm(cryptoDataFactory); + + monitor.debug( + format( + "AES algorithm was initialised with SecureRandom algorithm '%s'", + 
algorithm.getAlgorithm())); + return new AesDataEncrypterImpl(algorithm, monitor, keyProvider, algorithm, cryptoDataFactory); + } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/key/CryptoKeyFactoryImpl.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/key/CryptoKeyFactoryImpl.java index f3fa102a4..7a5b0fc15 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/key/CryptoKeyFactoryImpl.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/key/CryptoKeyFactoryImpl.java @@ -19,7 +19,6 @@ */ package org.eclipse.tractusx.edc.data.encryption.key; -import lombok.Value; import org.bouncycastle.util.encoders.Base64; public class CryptoKeyFactoryImpl implements CryptoKeyFactory { @@ -37,9 +36,24 @@ public AesKey fromBytes(byte[] key) { return new AesKeyImpl(key, Base64.toBase64String(key)); } - @Value + private static class AesKeyImpl implements AesKey { - byte[] bytes; - String base64; + private final byte[] bytes; + private final String base64; + + private AesKeyImpl(byte[] bytes, String base64) { + this.bytes = bytes; + this.base64 = base64; + } + + @Override + public byte[] getBytes() { + return bytes; + } + + @Override + public String getBase64() { + return base64; + } } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/provider/AesKeyProvider.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/provider/AesKeyProvider.java index 82b8eccdd..e740a6f43 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/provider/AesKeyProvider.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/provider/AesKeyProvider.java @@ -19,51 +19,56 @@ */ package org.eclipse.tractusx.edc.data.encryption.provider; -import java.util.Arrays; -import java.util.function.Predicate; -import java.util.stream.Stream; -import lombok.RequiredArgsConstructor; import org.bouncycastle.util.encoders.Base64; import org.eclipse.edc.spi.security.Vault; import org.eclipse.tractusx.edc.data.encryption.DataEncryptionExtension; import org.eclipse.tractusx.edc.data.encryption.key.AesKey; import org.eclipse.tractusx.edc.data.encryption.key.CryptoKeyFactory; -@RequiredArgsConstructor +import java.util.Arrays; +import java.util.function.Predicate; +import java.util.stream.Stream; + public class AesKeyProvider implements KeyProvider { - private static final String KEY_SEPARATOR = ","; + private static final String KEY_SEPARATOR = ","; + + private final Vault vault; + private final String vaultKeyAlias; + private final CryptoKeyFactory cryptoKeyFactory; - private final Vault vault; - private final String vaultKeyAlias; - private final CryptoKeyFactory cryptoKeyFactory; + public AesKeyProvider(Vault vault, String vaultKeyAlias, CryptoKeyFactory cryptoKeyFactory) { + this.vault = vault; + this.vaultKeyAlias = vaultKeyAlias; + this.cryptoKeyFactory = cryptoKeyFactory; + } - @Override - public Stream getDecryptionKeySet() { - return getKeysStream(); - } + @Override + public AesKey getEncryptionKey() { + return getKeysStream() + .findFirst() + .orElseThrow( + () -> + new RuntimeException( + DataEncryptionExtension.EXTENSION_NAME + + ": Vault must contain at least one key.")); + } - @Override - public AesKey getEncryptionKey() { - return getKeysStream() - .findFirst() - .orElseThrow( - () -> - 
new RuntimeException( - DataEncryptionExtension.EXTENSION_NAME - + ": Vault must contain at least one key.")); - } + @Override + public Stream getDecryptionKeySet() { + return getKeysStream(); + } - private Stream getKeysStream() { - return Arrays.stream(getKeys().split(KEY_SEPARATOR)) - .map(String::trim) - .filter(Predicate.not(String::isEmpty)) - .map(Base64::decode) - .map(cryptoKeyFactory::fromBytes); - } + private Stream getKeysStream() { + return Arrays.stream(getKeys().split(KEY_SEPARATOR)) + .map(String::trim) + .filter(Predicate.not(String::isEmpty)) + .map(Base64::decode) + .map(cryptoKeyFactory::fromBytes); + } - private String getKeys() { - String keys = vault.resolveSecret(vaultKeyAlias); - return keys == null ? "" : keys; - } + private String getKeys() { + String keys = vault.resolveSecret(vaultKeyAlias); + return keys == null ? "" : keys; + } } diff --git a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/provider/CachingKeyProvider.java b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/provider/CachingKeyProvider.java index b4b490918..4819b6386 100644 --- a/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/provider/CachingKeyProvider.java +++ b/edc-extensions/data-encryption/src/main/java/org/eclipse/tractusx/edc/data/encryption/provider/CachingKeyProvider.java @@ -20,60 +20,73 @@ package org.eclipse.tractusx.edc.data.encryption.provider; +import org.eclipse.tractusx.edc.data.encryption.key.CryptoKey; + import java.time.Clock; import java.time.Duration; import java.time.Instant; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; -import lombok.NonNull; -import lombok.Value; -import org.eclipse.tractusx.edc.data.encryption.key.CryptoKey; public class CachingKeyProvider implements KeyProvider { - @NonNull private final KeyProvider decoratedProvider; - @NonNull private final Clock clock; - @NonNull private final Duration cacheExpiration; - - private CachedKeys cachedKeys; - - public CachingKeyProvider(KeyProvider keyProvider, Duration cacheExpiration) { - this(keyProvider, cacheExpiration, Clock.systemUTC()); - } - - public CachingKeyProvider(KeyProvider keyProvider, Duration cacheExpiration, Clock clock) { - - this.decoratedProvider = keyProvider; - this.cacheExpiration = cacheExpiration; - this.clock = clock; - } - - @Override - public T getEncryptionKey() { - checkCache(); - return cachedKeys.getEncryptionKey(); - } - - @Override - public Stream getDecryptionKeySet() { - checkCache(); - return cachedKeys.getDecryptionKeys().stream(); - } - - private void checkCache() { - if (cachedKeys == null || cachedKeys.expiration.isBefore(clock.instant())) { - T encryptionKey = decoratedProvider.getEncryptionKey(); - List decryptionKeys = decoratedProvider.getDecryptionKeySet().collect(Collectors.toList()); - cachedKeys = - new CachedKeys<>(encryptionKey, decryptionKeys, clock.instant().plus(cacheExpiration)); + private final KeyProvider decoratedProvider; + private final Clock clock; + private final Duration cacheExpiration; + + private CachedKeys cachedKeys; + + public CachingKeyProvider(KeyProvider keyProvider, Duration cacheExpiration) { + this(keyProvider, cacheExpiration, Clock.systemUTC()); + } + + public CachingKeyProvider(KeyProvider keyProvider, Duration cacheExpiration, Clock clock) { + this.decoratedProvider = Objects.requireNonNull(keyProvider); + this.cacheExpiration = 
Objects.requireNonNull(cacheExpiration); + this.clock = Objects.requireNonNull(clock); + } + + @Override + public T getEncryptionKey() { + checkCache(); + return cachedKeys.getEncryptionKey(); + } + + @Override + public Stream getDecryptionKeySet() { + checkCache(); + return cachedKeys.getDecryptionKeys().stream(); + } + + private void checkCache() { + if (cachedKeys == null || cachedKeys.expiration.isBefore(clock.instant())) { + T encryptionKey = decoratedProvider.getEncryptionKey(); + List decryptionKeys = decoratedProvider.getDecryptionKeySet().collect(Collectors.toList()); + cachedKeys = + new CachedKeys<>(encryptionKey, decryptionKeys, clock.instant().plus(cacheExpiration)); + } + } + + + private static class CachedKeys { + private final T encryptionKey; + private final List decryptionKeys; + private final Instant expiration; + + private CachedKeys(T encryptionKey, List decryptionKeys, Instant expiration) { + this.encryptionKey = encryptionKey; + this.decryptionKeys = decryptionKeys; + this.expiration = Objects.requireNonNull(expiration); + } + + public List getDecryptionKeys() { + return decryptionKeys; + } + + public T getEncryptionKey() { + return encryptionKey; + } } - } - - @Value - private static class CachedKeys { - T encryptionKey; - List decryptionKeys; - @NonNull Instant expiration; - } } diff --git a/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java b/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java index 4d19927fb..683a06f08 100644 --- a/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java +++ b/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java @@ -19,7 +19,6 @@ */ package org.eclipse.tractusx.edc.data.encryption.algorithms.aes; -import lombok.SneakyThrows; import org.bouncycastle.util.encoders.Base64; import org.eclipse.tractusx.edc.data.encryption.data.CryptoDataFactory; import org.eclipse.tractusx.edc.data.encryption.data.CryptoDataFactoryImpl; @@ -31,62 +30,71 @@ class AesAlgorithmTest { - private static final byte[] KEY_128_BIT = Base64.decode("dVUjmYJzbwVcntkFZU+lNQ=="); - private static final byte[] KEY_196_BIT = Base64.decode("NcgHzzRTUC+z396tWG9hqIbeihujz0m8"); - private static final byte[] KEY_256_BIT = - Base64.decode("OSD+3NcZAmS/6UXbq6NL8UL+aQIAJDLL7BE2rBX5MtA="); - - private final AesAlgorithm strategy = new AesAlgorithm(new CryptoDataFactoryImpl()); - private final CryptoDataFactory cryptoDataFactory = new CryptoDataFactoryImpl(); - - @Test - void test128BitKey() { - testKey(KEY_128_BIT); - } - - @Test - void test196BitKey() { - testKey(KEY_196_BIT); - } - - @Test - void test256BitKey() { - testKey(KEY_256_BIT); - } - - @Test - @SneakyThrows - void testSameDataEncryptedDifferently() { - final AesKey aesKey = createKey(KEY_128_BIT); - final DecryptedData expected = cryptoDataFactory.decryptedFromText("same data"); - final EncryptedData result1 = strategy.encrypt(expected, aesKey); - final EncryptedData result2 = strategy.encrypt(expected, aesKey); - - Assertions.assertNotEquals(result1.getBase64(), result2.getBase64()); - } - - @SneakyThrows - void testKey(byte[] key) { - final AesKey aesKey = createKey(key); - final DecryptedData expected = cryptoDataFactory.decryptedFromText("I will be encrypted"); - final EncryptedData encryptedResult = strategy.encrypt(expected, aesKey); 
- final DecryptedData result = strategy.decrypt(encryptedResult, aesKey); - - Assertions.assertEquals(expected.getBase64(), result.getBase64()); - } - - AesKey createKey(byte[] key) { - return new AesKey() { - - @Override - public byte[] getBytes() { - return key; - } - - @Override - public String getBase64() { - return Base64.toBase64String(key); - } - }; - } + private static final byte[] KEY_128_BIT = Base64.decode("dVUjmYJzbwVcntkFZU+lNQ=="); + private static final byte[] KEY_196_BIT = Base64.decode("NcgHzzRTUC+z396tWG9hqIbeihujz0m8"); + private static final byte[] KEY_256_BIT = + Base64.decode("OSD+3NcZAmS/6UXbq6NL8UL+aQIAJDLL7BE2rBX5MtA="); + + private final AesAlgorithm strategy = new AesAlgorithm(new CryptoDataFactoryImpl()); + private final CryptoDataFactory cryptoDataFactory = new CryptoDataFactoryImpl(); + + @Test + void test128BitKey() { + testKey(KEY_128_BIT); + } + + @Test + void test196BitKey() { + testKey(KEY_196_BIT); + } + + @Test + void test256BitKey() { + testKey(KEY_256_BIT); + } + + @Test + void testSameDataEncryptedDifferently() { + final AesKey aesKey = createKey(KEY_128_BIT); + final DecryptedData expected = cryptoDataFactory.decryptedFromText("same data"); + + try { + final EncryptedData result1 = strategy.encrypt(expected, aesKey); + final EncryptedData result2 = strategy.encrypt(expected, aesKey); + + Assertions.assertNotEquals(result1.getBase64(), result2.getBase64()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + + void testKey(byte[] key) { + final AesKey aesKey = createKey(key); + final DecryptedData expected = cryptoDataFactory.decryptedFromText("I will be encrypted"); + try { + final EncryptedData encryptedResult = strategy.encrypt(expected, aesKey); + final DecryptedData result = strategy.decrypt(encryptedResult, aesKey); + + + Assertions.assertEquals(expected.getBase64(), result.getBase64()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + AesKey createKey(byte[] key) { + return new AesKey() { + + @Override + public byte[] getBytes() { + return key; + } + + @Override + public String getBase64() { + return Base64.toBase64String(key); + } + }; + } } diff --git a/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesInitializationVectorIteratorTest.java b/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesInitializationVectorIteratorTest.java index ceebf50d6..f70a3bf70 100644 --- a/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesInitializationVectorIteratorTest.java +++ b/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesInitializationVectorIteratorTest.java @@ -20,61 +20,57 @@ */ package org.eclipse.tractusx.edc.data.encryption.algorithms.aes; -import java.security.SecureRandom; -import java.util.ArrayList; -import java.util.List; -import java.util.NoSuchElementException; -import lombok.SneakyThrows; import org.eclipse.tractusx.edc.data.encryption.util.ArrayUtil; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.mockito.Mockito; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.List; +import java.util.NoSuchElementException; + class AesInitializationVectorIteratorTest { - @Test - @SneakyThrows - void testDistinctVectors() { - final int vectorCount = 100; - final SecureRandom secureRandom = new SecureRandom(); - 
AesInitializationVectorIterator iterator = new AesInitializationVectorIterator(secureRandom); + @Test + void testDistinctVectors() { + final int vectorCount = 100; + final SecureRandom secureRandom = new SecureRandom(); + AesInitializationVectorIterator iterator = new AesInitializationVectorIterator(secureRandom); - List vectors = new ArrayList<>(); - for (var i = 0; i < vectorCount; i++) { - vectors.add(iterator.next()); - } + List vectors = new ArrayList<>(); + for (var i = 0; i < vectorCount; i++) { + vectors.add(iterator.next()); + } - long distinctVectors = vectors.stream().map(ArrayUtil::byteArrayToHex).distinct().count(); - Assertions.assertEquals(vectorCount, distinctVectors); - } + long distinctVectors = vectors.stream().map(ArrayUtil::byteArrayToHex).distinct().count(); + Assertions.assertEquals(vectorCount, distinctVectors); + } - @Test - @SneakyThrows - void testHasNextTrueOnCounterContinuing() { - ByteCounter counter = Mockito.mock(ByteCounter.class); - AesInitializationVectorIterator iterator = new AesInitializationVectorIterator(counter); + @Test + void testHasNextTrueOnCounterContinuing() { + ByteCounter counter = Mockito.mock(ByteCounter.class); + AesInitializationVectorIterator iterator = new AesInitializationVectorIterator(counter); - Mockito.when(counter.isMaxed()).thenReturn(false); - Assertions.assertTrue(iterator.hasNext()); - } + Mockito.when(counter.isMaxed()).thenReturn(false); + Assertions.assertTrue(iterator.hasNext()); + } - @Test - @SneakyThrows - void testHasNextFalseOnCounterEnd() { - ByteCounter counter = Mockito.mock(ByteCounter.class); - AesInitializationVectorIterator iterator = new AesInitializationVectorIterator(counter); + @Test + void testHasNextFalseOnCounterEnd() { + ByteCounter counter = Mockito.mock(ByteCounter.class); + AesInitializationVectorIterator iterator = new AesInitializationVectorIterator(counter); - Mockito.when(counter.isMaxed()).thenReturn(true); - Assertions.assertFalse(iterator.hasNext()); - } + Mockito.when(counter.isMaxed()).thenReturn(true); + Assertions.assertFalse(iterator.hasNext()); + } - @Test - @SneakyThrows - void testNoSuchElementExceptionOnCounterEnd() { - ByteCounter counter = Mockito.mock(ByteCounter.class); - AesInitializationVectorIterator iterator = new AesInitializationVectorIterator(counter); + @Test + void testNoSuchElementExceptionOnCounterEnd() { + ByteCounter counter = Mockito.mock(ByteCounter.class); + AesInitializationVectorIterator iterator = new AesInitializationVectorIterator(counter); - Mockito.when(counter.isMaxed()).thenReturn(true); - Assertions.assertThrows(NoSuchElementException.class, iterator::next); - } + Mockito.when(counter.isMaxed()).thenReturn(true); + Assertions.assertThrows(NoSuchElementException.class, iterator::next); + } } diff --git a/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/encrypter/DataEncrypterAesComponentTest.java b/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/encrypter/DataEncrypterAesComponentTest.java index aa9140629..6dcd103cb 100644 --- a/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/encrypter/DataEncrypterAesComponentTest.java +++ b/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/encrypter/DataEncrypterAesComponentTest.java @@ -19,7 +19,6 @@ */ package org.eclipse.tractusx.edc.data.encryption.encrypter; -import lombok.SneakyThrows; import org.eclipse.edc.connector.transfer.dataplane.spi.security.DataEncrypter; 
import org.eclipse.edc.spi.monitor.Monitor; import org.eclipse.edc.spi.security.Vault; @@ -42,64 +41,68 @@ @SuppressWarnings("FieldCanBeLocal") class DataEncrypterAesComponentTest { - private static final String KEY_128_BIT_BASE_64 = "7h6sh6t6tchCmNnHjK2kFA=="; - private static final String KEY_256_BIT_BASE_64 = "OSD+3NcZAmS/6UXbq6NL8UL+aQIAJDLL7BE2rBX5MtA="; - - private DataEncrypter dataEncrypter; - private CryptoAlgorithm algorithm; - private KeyProvider keyProvider; - private CryptoKeyFactory cryptoKeyFactory; - private CryptoDataFactory cryptoDataFactory; - - // mocks - private Monitor monitor; - private Vault vault; - - @BeforeEach - void setup() { - monitor = Mockito.mock(Monitor.class); - vault = Mockito.mock(Vault.class); - - cryptoKeyFactory = new CryptoKeyFactoryImpl(); - cryptoDataFactory = new CryptoDataFactoryImpl(); - algorithm = new AesAlgorithm(cryptoDataFactory); - keyProvider = new AesKeyProvider(vault, "foo", cryptoKeyFactory); - - dataEncrypter = - new AesDataEncrypterImpl(algorithm, monitor, keyProvider, algorithm, cryptoDataFactory); - } - - @Test - @SneakyThrows - void testKeyRotation() { - Mockito.when(vault.resolveSecret(Mockito.anyString())) - .thenReturn( - String.format( - "%s, %s, %s, %s", - KEY_128_BIT_BASE_64, - KEY_128_BIT_BASE_64, - KEY_128_BIT_BASE_64, - KEY_256_BIT_BASE_64)); - - final AesKey key256Bit = cryptoKeyFactory.fromBase64(KEY_256_BIT_BASE_64); - final String expectedResult = "hello"; - final DecryptedData decryptedResult = cryptoDataFactory.decryptedFromText(expectedResult); - final EncryptedData encryptedResult = algorithm.encrypt(decryptedResult, key256Bit); - - var result = dataEncrypter.decrypt(encryptedResult.getBase64()); - - Assertions.assertEquals(expectedResult, result); - } - - @Test - void testEncryption() { - Mockito.when(vault.resolveSecret(Mockito.anyString())).thenReturn(KEY_128_BIT_BASE_64); - - final String expectedResult = "hello world!"; - - var encryptedResult = dataEncrypter.encrypt(expectedResult); - var result = dataEncrypter.decrypt(encryptedResult); - - Assertions.assertEquals(expectedResult, result); - } + private static final String KEY_128_BIT_BASE_64 = "7h6sh6t6tchCmNnHjK2kFA=="; + private static final String KEY_256_BIT_BASE_64 = "OSD+3NcZAmS/6UXbq6NL8UL+aQIAJDLL7BE2rBX5MtA="; + + private DataEncrypter dataEncrypter; + private CryptoAlgorithm algorithm; + private KeyProvider keyProvider; + private CryptoKeyFactory cryptoKeyFactory; + private CryptoDataFactory cryptoDataFactory; + + // mocks + private Monitor monitor; + private Vault vault; + + @BeforeEach + void setup() { + monitor = Mockito.mock(Monitor.class); + vault = Mockito.mock(Vault.class); + + cryptoKeyFactory = new CryptoKeyFactoryImpl(); + cryptoDataFactory = new CryptoDataFactoryImpl(); + algorithm = new AesAlgorithm(cryptoDataFactory); + keyProvider = new AesKeyProvider(vault, "foo", cryptoKeyFactory); + + dataEncrypter = + new AesDataEncrypterImpl(algorithm, monitor, keyProvider, algorithm, cryptoDataFactory); + } + + @Test + void testKeyRotation() { + Mockito.when(vault.resolveSecret(Mockito.anyString())) + .thenReturn( + String.format( + "%s, %s, %s, %s", + KEY_128_BIT_BASE_64, + KEY_128_BIT_BASE_64, + KEY_128_BIT_BASE_64, + KEY_256_BIT_BASE_64)); + + final AesKey key256Bit = cryptoKeyFactory.fromBase64(KEY_256_BIT_BASE_64); + final String expectedResult = "hello"; + final DecryptedData decryptedResult = cryptoDataFactory.decryptedFromText(expectedResult); + + try { + final EncryptedData encryptedResult = algorithm.encrypt(decryptedResult, 
key256Bit); + + var result = dataEncrypter.decrypt(encryptedResult.getBase64()); + + Assertions.assertEquals(expectedResult, result); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Test + void testEncryption() { + Mockito.when(vault.resolveSecret(Mockito.anyString())).thenReturn(KEY_128_BIT_BASE_64); + + final String expectedResult = "hello world!"; + + var encryptedResult = dataEncrypter.encrypt(expectedResult); + var result = dataEncrypter.decrypt(encryptedResult); + + Assertions.assertEquals(expectedResult, result); + } } From f52f5f148e8a5b4f5c2ca0e238aac41fa55a2ae4 Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Wed, 5 Apr 2023 09:33:01 +0200 Subject: [PATCH 39/92] Update edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java Co-authored-by: Florian Rusch (ZF Friedrichshafen AG) --- .../edc/data/encryption/algorithms/aes/AesAlgorithmTest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java b/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java index 683a06f08..d141887cf 100644 --- a/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java +++ b/edc-extensions/data-encryption/src/test/java/org/eclipse/tractusx/edc/data/encryption/algorithms/aes/AesAlgorithmTest.java @@ -75,8 +75,6 @@ void testKey(byte[] key) { try { final EncryptedData encryptedResult = strategy.encrypt(expected, aesKey); final DecryptedData result = strategy.decrypt(encryptedResult, aesKey); - - Assertions.assertEquals(expected.getBase64(), result.getBase64()); } catch (Exception e) { throw new RuntimeException(e); From 6ef8522fb91dfeb3337ed19985e562795d1472f7 Mon Sep 17 00:00:00 2001 From: Tuncay Tunc Date: Wed, 5 Apr 2023 10:22:13 +0200 Subject: [PATCH 40/92] Fix issue with sql pool --- edc-controlplane/edc-controlplane-postgresql/build.gradle.kts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts b/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts index 659aa6891..b69e3d010 100644 --- a/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts +++ b/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts @@ -12,9 +12,10 @@ dependencies { runtimeOnly(project(":edc-extensions:postgresql-migration")) runtimeOnly(edc.azure.vault) runtimeOnly(edc.bundles.sqlstores) + runtimeOnly(edc.transaction.local) + runtimeOnly(edc.sql.pool) runtimeOnly(edc.core.controlplane) runtimeOnly(edc.dpf.transfer) - } From 985a2f0f74cc3f574054fe47a42341965f402611 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Tue, 11 Apr 2023 09:02:51 +0200 Subject: [PATCH 41/92] fix: add newline to file --- .github/workflows/verify.yaml | 2 -- .../decision-records/2023-04-03_renaming_branches/README.md | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index b51c482f1..c9c8f1d5f 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -34,8 +34,6 @@ on: pull_request: paths-ignore: - 'charts/**' - - 'docs/**' - - '**/*.md' branches: - '*' workflow_dispatch: diff --git a/docs/development/decision-records/2023-04-03_renaming_branches/README.md 
b/docs/development/decision-records/2023-04-03_renaming_branches/README.md index 7bc3abd64..5638a79dd 100644 --- a/docs/development/decision-records/2023-04-03_renaming_branches/README.md +++ b/docs/development/decision-records/2023-04-03_renaming_branches/README.md @@ -58,4 +58,4 @@ like force-pushing. Write access to `upstream` is required!_ The new `releases` branch (note the plural) will serve the same purpose that `main` did up until now, which is to track all releases (via merge commits and tags) in chronological order. We will continue to have separate `release/x.y.z` -branches for every release. \ No newline at end of file +branches for every release. From 5975acb49de69e07aa003fa41bda6ed2bdd043f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Apr 2023 08:34:44 +0000 Subject: [PATCH 42/92] chore(deps): bump alpine Bumps alpine from 3.17.2 to 3.17.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .../edc-controlplane-postgresql/src/main/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile index b3e04fac7..c9af99a81 100644 --- a/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile @@ -18,7 +18,7 @@ # # SPDX-License-Identifier: Apache-2.0 # -FROM alpine:3.17.2 as otel +FROM alpine:3.17.3 as otel ENV OTEL_AGENT_LOCATION "https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.12.1/opentelemetry-javaagent.jar" From 9c6b0284fa6a5fff99c353fc896ea794f86acd1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Apr 2023 11:19:35 +0000 Subject: [PATCH 43/92] chore(deps): bump actions/setup-java from 3.10.0 to 3.11.0 Bumps [actions/setup-java](https://github.com/actions/setup-java) from 3.10.0 to 3.11.0. - [Release notes](https://github.com/actions/setup-java/releases) - [Commits](https://github.com/actions/setup-java/compare/v3.10.0...v3.11.0) --- updated-dependencies: - dependency-name: actions/setup-java dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/build.yaml | 8 ++++---- .github/workflows/business-tests.yaml | 2 +- .github/workflows/draft-new-release.yaml | 2 +- .github/workflows/publish-new-release.yml | 4 ++-- .github/workflows/veracode.yaml | 6 +++--- .github/workflows/verify.yaml | 12 ++++++------ 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index b8ab83bba..48e463202 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -74,7 +74,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -118,7 +118,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -185,7 +185,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -240,7 +240,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index 6a7cd2cbf..9273b781c 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -53,7 +53,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set-Up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index acb4412d9..59969decc 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -34,7 +34,7 @@ jobs: git config user.email noreply@github.com - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index 56148b82a..23455015d 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -58,7 +58,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -179,7 +179,7 @@ jobs: prerelease: false - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' diff --git a/.github/workflows/veracode.yaml b/.github/workflows/veracode.yaml index 0bfaac8b5..b980a2829 100644 --- a/.github/workflows/veracode.yaml +++ b/.github/workflows/veracode.yaml @@ -31,7 +31,7 @@ jobs: fetch-depth: 0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -60,7 +60,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -109,7 +109,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' 
diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index c9c8f1d5f..5cdaa5c9c 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -61,7 +61,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -94,7 +94,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -111,7 +111,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -128,7 +128,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -145,7 +145,7 @@ jobs: uses: actions/checkout@v3.3.0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' @@ -166,7 +166,7 @@ jobs: with: fetch-depth: 0 - name: Set up JDK 11 - uses: actions/setup-java@v3.10.0 + uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' From afc89ba42b9c95c3635e8d95376dd67b90600615 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Apr 2023 11:22:50 +0000 Subject: [PATCH 44/92] chore(deps): bump alpine Bumps alpine from 3.17.2 to 3.17.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .../src/main/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile index b3e04fac7..c9af99a81 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile @@ -18,7 +18,7 @@ # # SPDX-License-Identifier: Apache-2.0 # -FROM alpine:3.17.2 as otel +FROM alpine:3.17.3 as otel ENV OTEL_AGENT_LOCATION "https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.12.1/opentelemetry-javaagent.jar" From 0e57ad0b32aa8fe649e490287a76a1b4fa4ea53a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Apr 2023 11:37:22 +0000 Subject: [PATCH 45/92] chore(deps): bump alpine Bumps alpine from 3.17.2 to 3.17.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .../edc-controlplane-memory/src/main/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile index b3e04fac7..c9af99a81 100644 --- a/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile @@ -18,7 +18,7 @@ # # SPDX-License-Identifier: Apache-2.0 # -FROM alpine:3.17.2 as otel +FROM alpine:3.17.3 as otel ENV OTEL_AGENT_LOCATION "https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.12.1/opentelemetry-javaagent.jar" From 3684fbe1b0f09f170338f3900e3bc71c3f42ebbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Apr 2023 11:37:30 +0000 Subject: [PATCH 46/92] chore(deps): bump alpine Bumps alpine from 3.17.2 to 3.17.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .../edc-dataplane-azure-vault/src/main/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile b/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile index 5c3b12f11..5c9e65d5e 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile +++ b/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile @@ -18,7 +18,7 @@ # # SPDX-License-Identifier: Apache-2.0 # -FROM alpine:3.17.2 as otel +FROM alpine:3.17.3 as otel ENV OTEL_AGENT_LOCATION "https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.12.1/opentelemetry-javaagent.jar" From 0de7accb3fc237a5c74313d56e86c234aec8033d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Apr 2023 11:53:03 +0000 Subject: [PATCH 47/92] chore(deps): bump alpine Bumps alpine from 3.17.2 to 3.17.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .../edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile b/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile index 5c3b12f11..5c9e65d5e 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile @@ -18,7 +18,7 @@ # # SPDX-License-Identifier: Apache-2.0 # -FROM alpine:3.17.2 as otel +FROM alpine:3.17.3 as otel ENV OTEL_AGENT_LOCATION "https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.12.1/opentelemetry-javaagent.jar" From 94e2fb402b03dd40bd66dd810b4a9688aef7bd96 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Tue, 11 Apr 2023 15:36:36 +0200 Subject: [PATCH 48/92] docs: create decision-record about refactoring helm charts --- .../2023-04-11_refactor_helmcharts/README.md | 112 ++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 docs/development/decision-records/2023-04-11_refactor_helmcharts/README.md diff --git a/docs/development/decision-records/2023-04-11_refactor_helmcharts/README.md b/docs/development/decision-records/2023-04-11_refactor_helmcharts/README.md new file mode 100644 index 000000000..5cf59f958 --- /dev/null +++ b/docs/development/decision-records/2023-04-11_refactor_helmcharts/README.md @@ -0,0 +1,112 @@ +# Refactor TractusX-EDC Helm charts + +## Decision + +The Helm charts provided by Tractusx-EDC will be refactored to be more focused and opinionated. Specifically, there will +be the following charts: + +1. `tractusx-connector-memory`: all backing stores are memory-based and thus ephemeral. The vault will also be + memory-based. _This chart is intended for testing/demo purposes only!_ +2. `tractusx-connector`: this is the "production-ready" chart that uses PostgreSQL and Hashicorp-Vault +3. `tractusx-connector-azure-vault`: this is a variant of `tractusx-connector-azure-vault` that uses Azure KeyVault (" + AZKV") instead + of Hashicorp as some stakeholders still use AZKV. + +These charts and their default configuration will be fully [tested](#testing). + +In addition to that, the Docker images will undergo some [refactoring](#docker-image-refactoring) as well. + +## Rationale + +The current "dynamically composed" helm chart has proven to be a source for issues, and it is difficult to isolate +errors due to the great number of variations. Further, only one particular variant (i.e. postgres+hashicorp) is put to +any semblance of testing (i.e. business tests). + +The official recommendation of TractusX-EDC is to use PostgreSQL and HashiCorp Vault, and alongside it, we will provide +charts for easy testing and setting up demos as well as an Azure KeyVault variant for legacy use cases. + +> Note: using Azure KeyVault is not officially supported or recommended by TractusX-EDC! + +This will also reduce the number of Docker images that need to be published. + +## Approach + +### Variant 1: `tractusx-connector-memory` + +This chart is intended for blackbox-testing or for easily setting up demos etc. It is **not** recommended for anything +else. It will have the following properties: + +- all backing stores (Asset Index, Policy Store etc.) 
are ephemeral in-memory stores +- the vault implementation will either be based also on memory, or on the `FsVault`, which uses local storage to store + secrets +- an embedded data plane will be used +- no scalability or replication is possible +- DAPS will be used as identity provider, so there is an implicit dependency onto a DAPS instance +- the `edc-runtime-memory` Docker image will be used. That image contains both control plane and data plane. + +### Variant 2: `tractusx-connector` + +This is the production-ready chart that is published by TractusX-EDC, and it will actually consist of two charts. One is +the `tractusx-runtime` sub-chart, that contains all configuration for data plane and control plane, and the other one is +the top-level `tractusx-connector` chart, that pulls in other charts as dependencies that are needed for one TractusX +connector application. This is sometimes referred to +as ["umbrella chart"](https://helm.sh/docs/howto/charts_tips_and_tricks/#complex-charts-with-many-dependencies). + +> Note: this will **not** include sub-charts for DAPS or MinIO. + +```shell +tractusx-connector + |-> tractusx-runtime + |-> postgres + |-> hashicorp-vault +``` + +The `tractusx-runtime` chart has the following properties: + +- PostgreSQL is used as persistence backend +- HashiCorp Vault is used as secret store +- the data plane is a separate runtime, i.e. separate pod +- DAPS is used as identity provider +- the `edc-controlplane-postgresql-hashicorp-vault` and `edc-dataplane-hashicorp-vault` Docker images will be used + +### Variant 3: `tractusx-connector-azure-vault` + +This variant is essentially identical to `tractusx-connector` except for dropping the HashiCorp Vault chart, and +replacing the HashiCorp Vault configuration with Azure KeyVault configuration. + +For this, the `edc-controlplane-postgresql-azure-vault` and `edc-dataplane-azure-vault` Docker images will be used. + +### Testing + +There are several steps to testing our Helm charts: + +1. waiting for all pods to come up: using an exemplary configuration, this relies on the health checks, i.e. liveness + and readiness probe (i.e. the runtime`s observability endpoints) to ensure that (most of) the static + configuration is correct, no values are missing etc. +2. executing a set of HTTP requests against the management API and assert a successful HTTP status code. 
For that we + use [Helm chart tests](https://helm.sh/docs/topics/chart_tests/) + +> Note: we refer to this kind of testing as "deployment testing" + +### Docker image refactoring + +The following changes need to be made to our Docker images: + +- rename `edc-controlplane-memory` -> `-edc-runtime-memory` +- in `edc-runtime-memory` use `FsVault` instead of `AzureVault` +- `edc-runtime-memory` contains an embedded data plane +- rename `edc-controlplane-postgresql` -> `edc-controlplane-postgresql-azure-vault` +- delete `edc-controlplane-memory-hashicorp-vault` + +thus effectively resulting in the following structure: + +```shell +edc-controlplane +|-> edc-runtime-memory +|-> edc-controlplane-postgresql-hashicorp-vault +|-> edc-controlplane-postgresql-azure-vault + +edc-dataplane +|-> edc-dataplane-hashicorp-vault +|-> edc-dataplane-azure-vaul +``` From 302ce5dae04947d52c3c0e706a21b2ff665c74e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Apr 2023 02:02:36 +0000 Subject: [PATCH 49/92] chore(deps): bump crazy-max/ghaction-import-gpg from 1 to 5 Bumps [crazy-max/ghaction-import-gpg](https://github.com/crazy-max/ghaction-import-gpg) from 1 to 5. - [Release notes](https://github.com/crazy-max/ghaction-import-gpg/releases) - [Changelog](https://github.com/crazy-max/ghaction-import-gpg/blob/v5/CHANGELOG.md) - [Commits](https://github.com/crazy-max/ghaction-import-gpg/compare/v1...v5) --- updated-dependencies: - dependency-name: crazy-max/ghaction-import-gpg dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/build.yaml | 2 +- .github/workflows/publish-new-release.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 48e463202..093c7f6b4 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -246,7 +246,7 @@ jobs: distribution: 'temurin' cache: 'gradle' - name: Import GPG Key - uses: crazy-max/ghaction-import-gpg@v1 + uses: crazy-max/ghaction-import-gpg@v5 env: GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index 23455015d..e3d4d555f 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -65,7 +65,7 @@ jobs: cache: 'gradle' - name: Import GPG Key - uses: crazy-max/ghaction-import-gpg@v1 + uses: crazy-max/ghaction-import-gpg@v5 env: GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} From 4ad4322b36938ea20291f13dfd84bf7a17e788ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Apr 2023 02:02:38 +0000 Subject: [PATCH 50/92] chore(deps): bump helm/chart-testing-action from 2.3.1 to 2.4.0 Bumps [helm/chart-testing-action](https://github.com/helm/chart-testing-action) from 2.3.1 to 2.4.0. - [Release notes](https://github.com/helm/chart-testing-action/releases) - [Commits](https://github.com/helm/chart-testing-action/compare/v2.3.1...v2.4.0) --- updated-dependencies: - dependency-name: helm/chart-testing-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/helm-lint.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml index 624607533..bf1531cad 100644 --- a/.github/workflows/helm-lint.yaml +++ b/.github/workflows/helm-lint.yaml @@ -42,7 +42,7 @@ jobs: python-version: 3.7 - name: chart-testing (setup) - uses: helm/chart-testing-action@v2.3.1 + uses: helm/chart-testing-action@v2.4.0 ##################### ### Chart Testing ### ##################### From 2d7652da1d9b9aa2db445b40c951c9ddf8dc0b04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Apr 2023 02:02:45 +0000 Subject: [PATCH 51/92] chore(deps): bump mikefarah/yq from 4.31.2 to 4.33.3 Bumps [mikefarah/yq](https://github.com/mikefarah/yq) from 4.31.2 to 4.33.3. - [Release notes](https://github.com/mikefarah/yq/releases) - [Changelog](https://github.com/mikefarah/yq/blob/master/release_notes.txt) - [Commits](https://github.com/mikefarah/yq/compare/v4.31.2...v4.33.3) --- updated-dependencies: - dependency-name: mikefarah/yq dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/draft-new-release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index 59969decc..915f2b7a8 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -49,7 +49,7 @@ jobs: GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - name: Bump version in /charts - uses: mikefarah/yq@v4.31.2 + uses: mikefarah/yq@v4.33.3 with: cmd: |- find charts -name Chart.yaml | xargs -n1 yq -i '.appVersion = "${{ github.event.inputs.version }}" | .version = "${{ github.event.inputs.version }}"' From 14b6d87139aa9719c56babb066310bbea1c336ca Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 15:14:06 +0200 Subject: [PATCH 52/92] feature: publish docker images to DockerHub --- .github/workflows/publish-docker.yaml | 0 .../notice.md | 28 +++++++++++++++++ .../edc-controlplane-memory/notice.md | 28 +++++++++++++++++ .../notice.md | 31 +++++++++++++++++++ .../edc-controlplane-postgresql/notice.md | 28 +++++++++++++++++ 5 files changed, 115 insertions(+) create mode 100644 .github/workflows/publish-docker.yaml create mode 100644 edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md create mode 100644 edc-controlplane/edc-controlplane-memory/notice.md create mode 100644 edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md create mode 100644 edc-controlplane/edc-controlplane-postgresql/notice.md diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md new file mode 100644 index 000000000..1285fdc3d --- /dev/null +++ b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md @@ -0,0 +1,28 @@ +## Notice for Docker image + +This application provides container images for demonstration purposes. 
+ +DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-memory-hashicorp-vault + +Eclipse Tractus-X product(s) installed within the image: + +- GitHub: https://github.com/eclipse-tractusx/tractusx-edc +- Project home: https://projects.eclipse.org/projects/automotive.tractusx +- +Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) + +**Used base image** + +- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin +- Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin +- Additional information about the Eclipse Temurin + images: https://github.com/docker-library/repo-info/tree/master/repos/eclipse-temurin + +As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc +from the base distribution, along with any direct or indirect dependencies of the primary software being contained). + +As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies +with any relevant licenses for all software contained within. + diff --git a/edc-controlplane/edc-controlplane-memory/notice.md b/edc-controlplane/edc-controlplane-memory/notice.md new file mode 100644 index 000000000..639e0d366 --- /dev/null +++ b/edc-controlplane/edc-controlplane-memory/notice.md @@ -0,0 +1,28 @@ +## Notice for Docker image + +This application provides container images for demonstration purposes. + +DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-memory + +Eclipse Tractus-X product(s) installed within the image: + +- GitHub: https://github.com/eclipse-tractusx/tractusx-edc +- Project home: https://projects.eclipse.org/projects/automotive.tractusx +- +Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) + +**Used base image** + +- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin +- Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin +- Additional information about the Eclipse Temurin + images: https://github.com/docker-library/repo-info/tree/master/repos/eclipse-temurin + +As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc +from the base distribution, along with any direct or indirect dependencies of the primary software being contained). + +As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies +with any relevant licenses for all software contained within. 
+ diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md new file mode 100644 index 000000000..5cc869306 --- /dev/null +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md @@ -0,0 +1,31 @@ +## Notice for Docker image + +This application provides container images for demonstration purposes. + +DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-postgresql-hashicorp-vault + +Eclipse Tractus-X product(s) installed within the image: + +- GitHub: https://github.com/eclipse-tractusx/tractusx-edc +- Project home: https://projects.eclipse.org/projects/automotive.tractusx +- + +Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile + +- Project + license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) + +**Used base image** + +- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin +- Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin +- Additional information about the Eclipse Temurin + images: https://github.com/docker-library/repo-info/tree/master/repos/eclipse-temurin + +As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc +from the base distribution, along with any direct or indirect dependencies of the primary software being contained). + +As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies +with any relevant licenses for all software contained within. + diff --git a/edc-controlplane/edc-controlplane-postgresql/notice.md b/edc-controlplane/edc-controlplane-postgresql/notice.md new file mode 100644 index 000000000..ec36137a1 --- /dev/null +++ b/edc-controlplane/edc-controlplane-postgresql/notice.md @@ -0,0 +1,28 @@ +## Notice for Docker image + +This application provides container images for demonstration purposes. + +DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-postgresql + +Eclipse Tractus-X product(s) installed within the image: + +- GitHub: https://github.com/eclipse-tractusx/tractusx-edc +- Project home: https://projects.eclipse.org/projects/automotive.tractusx +- +Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) + +**Used base image** + +- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin +- Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin +- Additional information about the Eclipse Temurin + images: https://github.com/docker-library/repo-info/tree/master/repos/eclipse-temurin + +As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc +from the base distribution, along with any direct or indirect dependencies of the primary software being contained). 
+ +As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies +with any relevant licenses for all software contained within. + From cb4156a339dc30c0cc0bfd556d201414bddef26d Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 15:40:27 +0200 Subject: [PATCH 53/92] add manual docker-publish workflow --- .../actions/publish-docker-image/action.yml | 72 ++++++++++++++++++ .github/workflows/publish-docker.yaml | 76 +++++++++++++++++++ 2 files changed, 148 insertions(+) create mode 100644 .github/actions/publish-docker-image/action.yml diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml new file mode 100644 index 000000000..15638b13f --- /dev/null +++ b/.github/actions/publish-docker-image/action.yml @@ -0,0 +1,72 @@ +name: "Publish Docker Image" +description: "Build and publish a Docker Image to DockerHub" +inputs: + rootDir: + required: true + description: "The directory where the notice.md file and the src/main/docker directory are located" + namespace: + required: false + default: "tractusx" + description: "The Docker image namespace" + imagename: + required: true + description: "the name of the image" +runs: + using: "composite" + steps: + - name: Checkout + uses: actions/checkout@v3 + + ##################### + # Login to DockerHub + ##################### + - name: DockerHub login + if: github.event_name != 'pull_request' + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USER }} + password: ${{ secrets.DOCKER_HUB_TOKEN }} + + ############################### + # Set metadata of docker image + ############################### + # Create SemVer or ref tags dependent of trigger event + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: | + ${{ inputs.namespace }}/${{ inputs.imagename }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}} + type=semver,pattern={{major}}.{{minor}} + + ############################### + # Build and push the image + ############################### + - name: Build and push + uses: docker/build-push-action@v3 + with: + context: . 
+ file: ${{ inputs.rootDir }}/src/main/docker/Dockerfile + build-args: | + JAR=${{ inputs.rootDir }}/build/libs/${{ inputs.imagename }}.jar + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + ############################### + # Update the description + # https://github.com/peter-evans/dockerhub-description + ############################### + - name: Update Docker Hub description + if: github.event_name != 'pull_request' && ${{ secrets.DOCKER_HUB_USER }} && ${{ secrets.DOCKER_HUB_TOKEN }} + uses: peter-evans/dockerhub-description@v3 + with: + readme-filepath: ${{ inputs.rootDir }}/notice.md + username: ${{ secrets.DOCKER_HUB_USER }} + password: ${{ secrets.DOCKER_HUB_TOKEN }} + repository: ${{ inputs.namespace }}/${{ inputs.imagename }} \ No newline at end of file diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index e69de29bb..240895fed 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -0,0 +1,76 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +--- +name: "Create Docker images" + +on: + workflow_dispatch: + inputs: + rootDir: + required: true + description: "The directory where the notice.md file and the src/main/docker directory are located" + namespace: + required: false + default: "tractusx" + description: "The Docker image namespace" + imagename: + required: true + description: "the name of the image" + +jobs: + create-docker-image-controlplane: + name: "Create Docker Images for the ControlPlane" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + name: + - edc-controlplane-memory + - edc-controlplane-memory-hashicorp-vault + - edc-controlplane-postgresql + - edc-controlplane-postgresql-hashicorp-vault + permissions: + contents: write + packages: write + steps: + - uses: ./.github/actions/publish-docker-image + with: + rootDir: edc-controlplane/${{ matrix.name }} + imagename: ${{ matrix.name }} + + + create-docker-image-dataplane: + name: "Create Docker Images for the DataPlane" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + name: + - edc-dataplane-azure-vault + - edc-dataplane-hashicorp-vault + permissions: + contents: write + packages: write + steps: + - uses: ./.github/actions/publish-docker-image + with: + rootDir: edc-dataplane/${{ matrix.name }} + imagename: ${{ matrix.name }} \ No newline at end of file From 189b43458193035e22d3ce8a9016aa6132fbcf20 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 15:47:52 +0200 Subject: [PATCH 54/92] avoid input params, add concurrency --- .github/workflows/publish-docker.yaml | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index 240895fed..ffde68a0b 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -23,17 +23,10 @@ name: "Create Docker images" on: workflow_dispatch: - inputs: - rootDir: - required: true - description: "The directory where the notice.md file and the src/main/docker directory are located" - namespace: - required: false - default: "tractusx" - description: "The Docker image namespace" - imagename: - required: true - description: "the name of the image" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: create-docker-image-controlplane: From e58a3a0cd62bee276ad3781e2fff543700caa376 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 15:49:57 +0200 Subject: [PATCH 55/92] add checkout action --- .github/workflows/publish-docker.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index ffde68a0b..7b868b053 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -44,6 +44,8 @@ jobs: contents: write packages: write steps: + - name: Checkout + uses: actions/checkout@v3 - uses: ./.github/actions/publish-docker-image with: rootDir: edc-controlplane/${{ matrix.name }} @@ -63,6 +65,8 @@ jobs: contents: write packages: write steps: + - name: Checkout + uses: actions/checkout@v3 - uses: ./.github/actions/publish-docker-image with: rootDir: edc-dataplane/${{ matrix.name }} From f8d6ee2b9fa4b74d0d823d3df92cdeb6bbcb3ffc Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 15:54:42 +0200 Subject: [PATCH 56/92] creds as action inputs --- .github/actions/publish-docker-image/action.yml | 16 +++++++++++----- 
.github/workflows/publish-docker.yaml | 6 +++++- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index 15638b13f..c8cc42629 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -11,6 +11,12 @@ inputs: imagename: required: true description: "the name of the image" + docker_user: + required: false + description: "DockerHub user name. No push is done if omitted" + docker_token: + required: false + description: "DockerHub Token. No push is done if omitted" runs: using: "composite" steps: @@ -24,8 +30,8 @@ runs: if: github.event_name != 'pull_request' uses: docker/login-action@v2 with: - username: ${{ secrets.DOCKER_HUB_USER }} - password: ${{ secrets.DOCKER_HUB_TOKEN }} + username: ${{ inputs.docker_user }} + password: ${{ inputs.docker_token }} ############################### # Set metadata of docker image @@ -63,10 +69,10 @@ runs: # https://github.com/peter-evans/dockerhub-description ############################### - name: Update Docker Hub description - if: github.event_name != 'pull_request' && ${{ secrets.DOCKER_HUB_USER }} && ${{ secrets.DOCKER_HUB_TOKEN }} + if: github.event_name != 'pull_request' && ${{ inputs.docker_user }} && ${{ inputs.docker_token }} uses: peter-evans/dockerhub-description@v3 with: readme-filepath: ${{ inputs.rootDir }}/notice.md - username: ${{ secrets.DOCKER_HUB_USER }} - password: ${{ secrets.DOCKER_HUB_TOKEN }} + username: ${{ inputs.docker_user }} + password: ${{ inputs.docker_token }} repository: ${{ inputs.namespace }}/${{ inputs.imagename }} \ No newline at end of file diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index 7b868b053..8c25a3fab 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -50,6 +50,8 @@ jobs: with: rootDir: edc-controlplane/${{ matrix.name }} imagename: ${{ matrix.name }} + docker_user: ${{ secrets.DOCKER_HUB_USER }} + docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} create-docker-image-dataplane: @@ -70,4 +72,6 @@ jobs: - uses: ./.github/actions/publish-docker-image with: rootDir: edc-dataplane/${{ matrix.name }} - imagename: ${{ matrix.name }} \ No newline at end of file + imagename: ${{ matrix.name }} + docker_user: ${{ secrets.DOCKER_HUB_USER }} + docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} \ No newline at end of file From 6e93812cbb1c222f441e4b484d254ec68a28baa1 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 16:02:59 +0200 Subject: [PATCH 57/92] add jar build step --- .github/actions/publish-docker-image/action.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index c8cc42629..595fdda35 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -33,6 +33,20 @@ runs: username: ${{ inputs.docker_user }} password: ${{ inputs.docker_token }} + ##################### + # Build JAR file + ##################### + - name: Set up JDK 11 + uses: actions/setup-java@v3.11.0 + with: + java-version: '11' + distribution: 'temurin' + cache: 'gradle' + - name: Build Controlplane + shell: bash + run: |- + ./gradlew -p ${{ inputs.rootDir }} shadowJar + ############################### # Set metadata of docker image ############################### From 
e56a86b21b2a9cbaf98a9fcb82b5f3923f667db2 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 16:09:46 +0200 Subject: [PATCH 58/92] make namespace overridable --- .github/workflows/publish-docker.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index 8c25a3fab..4bb7a4045 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -23,6 +23,11 @@ name: "Create Docker images" on: workflow_dispatch: + inputs: + namespace: + description: 'The namespace (=repo) in DockerHub' + required: false + default: "tractusx" concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -50,6 +55,7 @@ jobs: with: rootDir: edc-controlplane/${{ matrix.name }} imagename: ${{ matrix.name }} + namespace: ${{ inputs.namespace }} docker_user: ${{ secrets.DOCKER_HUB_USER }} docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} @@ -73,5 +79,6 @@ jobs: with: rootDir: edc-dataplane/${{ matrix.name }} imagename: ${{ matrix.name }} + namespace: ${{ inputs.namespace }} docker_user: ${{ secrets.DOCKER_HUB_USER }} docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} \ No newline at end of file From 062ddb75e287ff784017df381cbe3124ba496fb6 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 16:30:07 +0200 Subject: [PATCH 59/92] updated notices --- .../notice.md | 2 +- .../notice.md | 2 +- .../edc-controlplane-postgresql/notice.md | 2 +- .../edc-dataplane-azure-vault/notice.md | 27 ++++++++++++++++++ .../edc-dataplane-hashicorp-vault/notice.md | 28 +++++++++++++++++++ 5 files changed, 58 insertions(+), 3 deletions(-) create mode 100644 edc-dataplane/edc-dataplane-azure-vault/notice.md create mode 100644 edc-dataplane/edc-dataplane-hashicorp-vault/notice.md diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md index 1285fdc3d..2508b9a8e 100644 --- a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md @@ -1,6 +1,6 @@ ## Notice for Docker image -This application provides container images for demonstration purposes. +An EDC Control Plane using memory-based storage, and HashiCorp Vault as secret store. DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-memory-hashicorp-vault diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md index 5cc869306..169a58361 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md @@ -1,6 +1,6 @@ ## Notice for Docker image -This application provides container images for demonstration purposes. +An EDC Control Plane using PostgreSQL as persistence backend, and HashiCorp Vault as secret store. DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-postgresql-hashicorp-vault diff --git a/edc-controlplane/edc-controlplane-postgresql/notice.md b/edc-controlplane/edc-controlplane-postgresql/notice.md index ec36137a1..5aafea71a 100644 --- a/edc-controlplane/edc-controlplane-postgresql/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql/notice.md @@ -1,6 +1,6 @@ ## Notice for Docker image -This application provides container images for demonstration purposes. 
+An EDC Control Plane using PostgreSQL as persistence backend, and Azure KeyVault as secret store. DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-postgresql diff --git a/edc-dataplane/edc-dataplane-azure-vault/notice.md b/edc-dataplane/edc-dataplane-azure-vault/notice.md new file mode 100644 index 000000000..ec2afd457 --- /dev/null +++ b/edc-dataplane/edc-dataplane-azure-vault/notice.md @@ -0,0 +1,27 @@ +## Notice for Docker image + +An EDC Data Plane using the Azure KeyVault. + +DockerHub: https://hub.docker.com/r/tractusx/edc-dataplane-azure-vault + +Eclipse Tractus-X product(s) installed within the image: + +- GitHub: https://github.com/eclipse-tractusx/tractusx-edc +- Project home: https://projects.eclipse.org/projects/automotive.tractusx +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) + +**Used base image** + +- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin +- Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin +- Additional information about the Eclipse Temurin + images: https://github.com/docker-library/repo-info/tree/master/repos/eclipse-temurin + +As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc +from the base distribution, along with any direct or indirect dependencies of the primary software being contained). + +As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies +with any relevant licenses for all software contained within. + diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md new file mode 100644 index 000000000..afcd9c8a6 --- /dev/null +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md @@ -0,0 +1,28 @@ +## Notice for Docker image + +An EDC Data Plane using the HashiCorp Vault + +DockerHub: https://hub.docker.com/r/tractusx/edc-dataplane-hashicorp-vault + +Eclipse Tractus-X product(s) installed within the image: + +- GitHub: https://github.com/eclipse-tractusx/tractusx-edc +- Project home: https://projects.eclipse.org/projects/automotive.tractusx +- +Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) + +**Used base image** + +- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin +- Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin +- Additional information about the Eclipse Temurin + images: https://github.com/docker-library/repo-info/tree/master/repos/eclipse-temurin + +As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc +from the base distribution, along with any direct or indirect dependencies of the primary software being contained). 
+ +As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies +with any relevant licenses for all software contained within. + From 03dd9dbcee9239a46c2372052a54fd3535e335ba Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 16:54:30 +0200 Subject: [PATCH 60/92] incorporate new docker publish flow --- .github/workflows/build.yaml | 153 +++++++---------------------------- 1 file changed, 31 insertions(+), 122 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 48e463202..c52b671f5 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -55,8 +55,7 @@ jobs: GPG_PRIVATE_KEY: ${{ steps.secret-presence.outputs.GPG_PRIVATE_KEY }} GPG_PASSPHRASE: ${{ steps.secret-presence.outputs.GPG_PASSPHRASE }} steps: - - - name: Check whether secrets exist + - name: Check whether secrets exist id: secret-presence run: | [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "::set-output name=SONAR_TOKEN::true" @@ -66,22 +65,19 @@ jobs: build-extensions: runs-on: ubuntu-latest - needs: [ secret-presence] + needs: [ secret-presence ] steps: # Set-Up - - - name: Checkout + - name: Checkout uses: actions/checkout@v3.3.0 - - - name: Set up JDK 11 + - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: java-version: '11' distribution: 'temurin' cache: 'gradle' # Build - - - name: Build Extensions + - name: Build Extensions run: |- ./gradlew -p edc-extensions build env: @@ -89,11 +85,9 @@ jobs: GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} build-controlplane: + name: "Create Docker Images for the ControlPlane" runs-on: ubuntu-latest - permissions: - contents: read - packages: write - needs: [ secret-presence] + needs: [ secrets-presence ] strategy: fail-fast: false matrix: @@ -102,134 +96,49 @@ jobs: - edc-controlplane-memory-hashicorp-vault - edc-controlplane-postgresql - edc-controlplane-postgresql-hashicorp-vault + permissions: + contents: write + packages: write steps: - # Set-Up - - - name: Checkout - uses: actions/checkout@v3.3.0 - - - name: Login to GitHub Container Registry - if: | - github.event_name != 'pull_request' - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '11' - distribution: 'temurin' - cache: 'gradle' - # Build - - - name: Build Controlplane - run: |- - ./gradlew -p edc-controlplane/${{ matrix.name }} shadowJar - env: - GITHUB_PACKAGE_USERNAME: ${{ github.actor }} - GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - - - name: edc-controlplane Docker Metadata - id: edc_controlplane_meta - uses: docker/metadata-action@v4 - with: - images: | - ghcr.io/${{ github.repository }}/${{ matrix.name }} - tags: | - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{raw}} - type=match,pattern=\d.\d.\d - type=sha - - - name: Build Docker Image - uses: docker/build-push-action@v4 + - name: Checkout + uses: actions/checkout@v3 + - uses: ./.github/actions/publish-docker-image with: - context: . 
- file: edc-controlplane/${{ matrix.name }}/src/main/docker/Dockerfile - build-args: | - JAR=edc-controlplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.jar - push: | - ${{ (github.event_name != 'pull_request' && 'true') || 'false' }} - tags: ${{ steps.edc_controlplane_meta.outputs.tags }} - labels: ${{ steps.edc_controlplane_meta.outputs.labels }} + rootDir: edc-controlplane/${{ matrix.name }} + imagename: ${{ matrix.name }} + namespace: ${{ inputs.namespace }} + docker_user: ${{ secrets.DOCKER_HUB_USER }} + docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} build-dataplane: runs-on: ubuntu-latest - permissions: - contents: read - packages: write - needs: [ secret-presence] + needs: [ secret-presence ] strategy: fail-fast: false matrix: name: - edc-dataplane-azure-vault - edc-dataplane-hashicorp-vault + permissions: + contents: write + packages: write steps: - # Set-Up - - - name: Checkout - uses: actions/checkout@v3.3.0 - - - name: Login to GitHub Container Registry - if: | - github.event_name != 'pull_request' - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '11' - distribution: 'temurin' - cache: 'gradle' - # Build - - - name: Build Dataplane - run: |- - ./gradlew -p edc-dataplane/${{ matrix.name }} shadowJar - env: - GITHUB_PACKAGE_USERNAME: ${{ github.actor }} - GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - - - name: edc-dataplane Docker Metadata - id: edc_dataplane_meta - uses: docker/metadata-action@v4 - with: - images: | - ghcr.io/${{ github.repository }}/${{ matrix.name }} - tags: | - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{raw}} - type=match,pattern=\d.\d.\d - type=sha - - - name: Build Docker Image - uses: docker/build-push-action@v4 + - name: Checkout + uses: actions/checkout@v3 + - uses: ./.github/actions/publish-docker-image with: - context: . 
- file: edc-dataplane/${{ matrix.name }}/src/main/docker/Dockerfile - build-args: | - JAR=edc-dataplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.jar - push: | - ${{ (github.event_name != 'pull_request' && 'true') || 'false' }} - tags: ${{ steps.edc_dataplane_meta.outputs.tags }} - labels: ${{ steps.edc_dataplane_meta.outputs.labels }} + rootDir: edc-dataplane/${{ matrix.name }} + imagename: ${{ matrix.name }} + namespace: ${{ inputs.namespace }} + docker_user: ${{ secrets.DOCKER_HUB_USER }} + docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} publish-to-github-packages: runs-on: ubuntu-latest permissions: contents: read packages: write - needs: [secret-presence, build-controlplane, build-dataplane, build-extensions] + needs: [ secret-presence, build-controlplane, build-dataplane, build-extensions ] # do not run on PR branches, do not run on main if: | From 69171f8fde3dc925fa8ea22afe777b9700bd1bed Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 16:56:12 +0200 Subject: [PATCH 61/92] update chart deployment specs --- .../actions/publish-docker-image/action.yml | 21 ++++++++++++ .../templates/deployment-controlplane.yaml | 8 ++--- .../templates/deployment-dataplane.yaml | 34 +++++++++++++++---- 3 files changed, 53 insertions(+), 10 deletions(-) diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index 595fdda35..7cbe07297 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -1,3 +1,24 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +--- name: "Publish Docker Image" description: "Build and publish a Docker Image to DockerHub" inputs: diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 691047c0f..dc708a8a7 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -62,13 +62,13 @@ spec: {{- if .Values.controlplane.image.repository }} image: "{{ .Values.controlplane.image.repository }}:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if and .Values.postgresql.enabled .Values.vault.hashicorp.enabled }} - image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-controlplane-postgresql-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "tractusx/edc-controlplane-postgresql-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if and .Values.postgresql.enabled .Values.vault.azure.enabled }} - image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-controlplane-postgresql:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "tractusx/edc-controlplane-postgresql:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if .Values.vault.hashicorp.enabled }} - image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-controlplane-memory-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "tractusx/edc-controlplane-memory-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if .Values.vault.azure.enabled }} - image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-controlplane-memory:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "tractusx/edc-controlplane-memory:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else }} {{- fail "cannot choose control-plane image automatically based on configuration" }} {{- end }} diff --git a/charts/tractusx-connector/templates/deployment-dataplane.yaml b/charts/tractusx-connector/templates/deployment-dataplane.yaml index ff5f6a5ce..bd375b295 100644 --- a/charts/tractusx-connector/templates/deployment-dataplane.yaml +++ b/charts/tractusx-connector/templates/deployment-dataplane.yaml @@ -1,3 +1,25 @@ +# +# Copyright (c) 2023 ZF Friedrichshafen AG +# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + --- apiVersion: apps/v1 kind: Deployment @@ -40,9 +62,9 @@ spec: {{- if .Values.dataplane.image.repository }} image: "{{ .Values.dataplane.image.repository }}:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" {{- else if and .Values.vault.hashicorp }} - image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-dataplane-hashicorp-vault:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" + image: "tractusx/edc-dataplane-hashicorp-vault:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" {{- else if .Values.vault.azure }} - image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-dataplane-azure-vault:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" + image: "tractusx/edc-dataplane-azure-vault:{{ .Values.dataplane.image.tag | default .Chart.AppVersion }}" {{- else }} {{- fail "cannot choose data-plane image automatically based on configuration" }} {{- end }} @@ -109,7 +131,7 @@ spec: - name: "WEB_HTTP_PUBLIC_PATH" value: {{ .Values.dataplane.endpoints.public.path | quote }} - name: "EDC_DATAPLANE_TOKEN_VALIDATION_ENDPOINT" - value: {{ include "txdc.controlplane.url.validation" .}} + value: {{ include "txdc.controlplane.url.validation" .}} ####### # AWS # @@ -162,9 +184,9 @@ spec: value: {{ .Values.vault.azure.certificate | quote }} {{- end }} - ###################################### - ## Additional environment variables ## - ###################################### + ###################################### + ## Additional environment variables ## + ###################################### {{- range $key, $value := .Values.dataplane.envValueFrom }} - name: {{ $key | quote }} valueFrom: From 2ba15ea32b3c48e37f9d98d14e44c9a5538975a6 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 17:19:56 +0200 Subject: [PATCH 62/92] fix formatting --- .github/workflows/build.yaml | 2 -- .../edc-controlplane-memory-hashicorp-vault/notice.md | 5 +++-- edc-controlplane/edc-controlplane-memory/notice.md | 7 ++++--- .../notice.md | 10 ++++------ edc-controlplane/edc-controlplane-postgresql/notice.md | 5 +++-- edc-dataplane/edc-dataplane-azure-vault/notice.md | 2 ++ edc-dataplane/edc-dataplane-hashicorp-vault/notice.md | 5 +++-- 7 files changed, 19 insertions(+), 17 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c52b671f5..660600a73 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -98,7 +98,6 @@ jobs: - edc-controlplane-postgresql-hashicorp-vault permissions: contents: write - packages: write steps: - name: Checkout uses: actions/checkout@v3 @@ -121,7 +120,6 @@ jobs: - edc-dataplane-hashicorp-vault permissions: contents: write - packages: write steps: - name: Checkout uses: actions/checkout@v3 diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md index 2508b9a8e..d1db501b3 100644 --- a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md @@ -6,10 +6,11 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-memory-hashicorp-v Eclipse Tractus-X product(s) installed within the image: +### TractusX-EDC Control Plane + - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- -Dockerfile: 
https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) **Used base image** diff --git a/edc-controlplane/edc-controlplane-memory/notice.md b/edc-controlplane/edc-controlplane-memory/notice.md index 639e0d366..5806989b4 100644 --- a/edc-controlplane/edc-controlplane-memory/notice.md +++ b/edc-controlplane/edc-controlplane-memory/notice.md @@ -1,15 +1,16 @@ ## Notice for Docker image -This application provides container images for demonstration purposes. +An EDC Control Plane using memory-based storage, and Azure KeyVault as secret store. DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-memory Eclipse Tractus-X product(s) installed within the image: +### TractusX-EDC Control Plane + - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- -Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) **Used base image** diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md index 169a58361..ac8288747 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md @@ -6,14 +6,12 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-postgresql-hashico Eclipse Tractus-X product(s) installed within the image: +### TractusX-EDC Control Plane + - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- - -Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile - -- Project - license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) **Used base image** diff --git a/edc-controlplane/edc-controlplane-postgresql/notice.md b/edc-controlplane/edc-controlplane-postgresql/notice.md index 5aafea71a..edb15438d 100644 --- a/edc-controlplane/edc-controlplane-postgresql/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql/notice.md @@ -6,10 +6,11 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-postgresql Eclipse Tractus-X product(s) installed within the image: +### TractusX-EDC Control Plane + - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- -Dockerfile: 
https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) **Used base image** diff --git a/edc-dataplane/edc-dataplane-azure-vault/notice.md b/edc-dataplane/edc-dataplane-azure-vault/notice.md index ec2afd457..a161899ef 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/notice.md +++ b/edc-dataplane/edc-dataplane-azure-vault/notice.md @@ -6,6 +6,8 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-dataplane-azure-vault Eclipse Tractus-X product(s) installed within the image: +### TractusX-EDC Data Plane + - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx - Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md index afcd9c8a6..90c281a2f 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md @@ -6,10 +6,11 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-dataplane-hashicorp-vault Eclipse Tractus-X product(s) installed within the image: +### TractusX-EDC Data Plane + - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- -Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) **Used base image** From 26fb63f35e48674df2afd5887b51680734ba970e Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 17:27:44 +0200 Subject: [PATCH 63/92] markdown lint --- .markdownlint.yaml | 1 + .../edc-controlplane-memory-hashicorp-vault/notice.md | 7 +++---- edc-controlplane/edc-controlplane-memory/notice.md | 7 +++---- .../edc-controlplane-postgresql-hashicorp-vault/notice.md | 7 +++---- edc-controlplane/edc-controlplane-postgresql/notice.md | 7 +++---- edc-dataplane/edc-dataplane-azure-vault/notice.md | 7 +++---- edc-dataplane/edc-dataplane-hashicorp-vault/notice.md | 7 +++---- 7 files changed, 19 insertions(+), 24 deletions(-) diff --git a/.markdownlint.yaml b/.markdownlint.yaml index ace38e3d4..d060f2264 100644 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -19,6 +19,7 @@ "default": true # Do not restrict line length: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#MD013 "MD013": false +"MD034": # Allow same content on headlines on siblings: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#MD024 "MD024": "siblings_only": true diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md index d1db501b3..cf6aa8d92 100644 --- a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md +++ 
b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md @@ -1,4 +1,4 @@ -## Notice for Docker image +# Notice for Docker image An EDC Control Plane using memory-based storage, and HashiCorp Vault as secret store. @@ -6,14 +6,14 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-memory-hashicorp-v Eclipse Tractus-X product(s) installed within the image: -### TractusX-EDC Control Plane +## TractusX-EDC Control Plane - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx - Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) -**Used base image** +## Used base image - [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin @@ -26,4 +26,3 @@ from the base distribution, along with any direct or indirect dependencies of th As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within. - diff --git a/edc-controlplane/edc-controlplane-memory/notice.md b/edc-controlplane/edc-controlplane-memory/notice.md index 5806989b4..d8bcac50b 100644 --- a/edc-controlplane/edc-controlplane-memory/notice.md +++ b/edc-controlplane/edc-controlplane-memory/notice.md @@ -1,4 +1,4 @@ -## Notice for Docker image +# Notice for Docker image An EDC Control Plane using memory-based storage, and Azure KeyVault as secret store. @@ -6,14 +6,14 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-memory Eclipse Tractus-X product(s) installed within the image: -### TractusX-EDC Control Plane +## TractusX-EDC Control Plane - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx - Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) -**Used base image** +## Used base image - [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin @@ -26,4 +26,3 @@ from the base distribution, along with any direct or indirect dependencies of th As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within. - diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md index ac8288747..cd46028a5 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md @@ -1,4 +1,4 @@ -## Notice for Docker image +# Notice for Docker image An EDC Control Plane using PostgreSQL as persistence backend, and HashiCorp Vault as secret store. 
@@ -6,14 +6,14 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-postgresql-hashico Eclipse Tractus-X product(s) installed within the image: -### TractusX-EDC Control Plane +## TractusX-EDC Control Plane - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx - Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) -**Used base image** +## Used base image - [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin @@ -26,4 +26,3 @@ from the base distribution, along with any direct or indirect dependencies of th As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within. - diff --git a/edc-controlplane/edc-controlplane-postgresql/notice.md b/edc-controlplane/edc-controlplane-postgresql/notice.md index edb15438d..a73966fa9 100644 --- a/edc-controlplane/edc-controlplane-postgresql/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql/notice.md @@ -1,4 +1,4 @@ -## Notice for Docker image +# Notice for Docker image An EDC Control Plane using PostgreSQL as persistence backend, and Azure KeyVault as secret store. @@ -6,14 +6,14 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-postgresql Eclipse Tractus-X product(s) installed within the image: -### TractusX-EDC Control Plane +## TractusX-EDC Control Plane - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx - Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) -**Used base image** +## Used base image - [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin @@ -26,4 +26,3 @@ from the base distribution, along with any direct or indirect dependencies of th As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within. - diff --git a/edc-dataplane/edc-dataplane-azure-vault/notice.md b/edc-dataplane/edc-dataplane-azure-vault/notice.md index a161899ef..5c95dfd5b 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/notice.md +++ b/edc-dataplane/edc-dataplane-azure-vault/notice.md @@ -1,4 +1,4 @@ -## Notice for Docker image +# Notice for Docker image An EDC Data Plane using the Azure KeyVault. 
@@ -6,14 +6,14 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-dataplane-azure-vault Eclipse Tractus-X product(s) installed within the image: -### TractusX-EDC Data Plane +## TractusX-EDC Data Plane - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx - Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) -**Used base image** +## Used base image - [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin @@ -26,4 +26,3 @@ from the base distribution, along with any direct or indirect dependencies of th As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within. - diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md index 90c281a2f..f734642ad 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md @@ -1,4 +1,4 @@ -## Notice for Docker image +# Notice for Docker image An EDC Data Plane using the HashiCorp Vault @@ -6,14 +6,14 @@ DockerHub: https://hub.docker.com/r/tractusx/edc-dataplane-hashicorp-vault Eclipse Tractus-X product(s) installed within the image: -### TractusX-EDC Data Plane +## TractusX-EDC Data Plane - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx - Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) -**Used base image** +## Used base image - [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin @@ -26,4 +26,3 @@ from the base distribution, along with any direct or indirect dependencies of th As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within. 
- From cbed53492212f9ae8847733fc636a044f1c443a8 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 17:32:58 +0200 Subject: [PATCH 64/92] fix workflow --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 660600a73..34a003d31 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -87,7 +87,7 @@ jobs: build-controlplane: name: "Create Docker Images for the ControlPlane" runs-on: ubuntu-latest - needs: [ secrets-presence ] + needs: [ secret-presence ] strategy: fail-fast: false matrix: From f036a59b05c7399a4d7eb838afc272e175dbf8ff Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 17:35:30 +0200 Subject: [PATCH 65/92] remove image namespace --- .github/workflows/build.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 34a003d31..24b6ff01e 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -105,7 +105,6 @@ jobs: with: rootDir: edc-controlplane/${{ matrix.name }} imagename: ${{ matrix.name }} - namespace: ${{ inputs.namespace }} docker_user: ${{ secrets.DOCKER_HUB_USER }} docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} @@ -127,7 +126,6 @@ jobs: with: rootDir: edc-dataplane/${{ matrix.name }} imagename: ${{ matrix.name }} - namespace: ${{ inputs.namespace }} docker_user: ${{ secrets.DOCKER_HUB_USER }} docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} From fe6891e3ea11fd73854607d4e199d2e87c664642 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 12 Apr 2023 17:54:05 +0200 Subject: [PATCH 66/92] prevent all interaction with dockerhub on pull requests --- .github/actions/publish-docker-image/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index 7cbe07297..0e1fee7c4 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -104,7 +104,7 @@ runs: # https://github.com/peter-evans/dockerhub-description ############################### - name: Update Docker Hub description - if: github.event_name != 'pull_request' && ${{ inputs.docker_user }} && ${{ inputs.docker_token }} + if: github.event_name != 'pull_request' uses: peter-evans/dockerhub-description@v3 with: readme-filepath: ${{ inputs.rootDir }}/notice.md From d38c16364822d6ddd98d1368af03b19867e3f1ef Mon Sep 17 00:00:00 2001 From: "Florian Rusch (ZF Friedrichshafen AG)" Date: Thu, 13 Apr 2023 16:38:57 +0200 Subject: [PATCH 67/92] docs: add technical committer to pr_etiquette.md (#182) --- pr_etiquette.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/pr_etiquette.md b/pr_etiquette.md index ce9ec73f8..aaaf16761 100644 --- a/pr_etiquette.md +++ b/pr_etiquette.md @@ -58,6 +58,13 @@ Submitting pull requests in EDC should be done while adhering to a couple of sim - Be civil and objective. No foul language, insulting or otherwise abusive language will be tolerated. The goal is to _encourage_ contributions. 
-## The technical committers +## The technical committers (as of April 05, 2023) -- TBD +Main committers for the TractusX-EDC project: + +- @paullatzelsperger +- @florianrusch-zf + +Alternatively, the following Tractus-X committers can also step in: + +- @SebastianBezold From 79789ba82a11fbd7c8116e70cd806b4e38267780 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Date: Fri, 14 Apr 2023 10:27:58 +0200 Subject: [PATCH 68/92] chore: update to temurin 17 (#212) * chore: update dockerfiles and GH Actions to temurin 17 * pin specific version --- .github/actions/publish-docker-image/action.yml | 2 +- .github/workflows/build.yaml | 4 ++-- .github/workflows/business-tests.yaml | 2 +- .github/workflows/draft-new-release.yaml | 2 +- .github/workflows/publish-new-release.yml | 4 ++-- .github/workflows/veracode.yaml | 6 +++--- .github/workflows/verify.yaml | 12 ++++++------ .../notice.md | 2 +- .../src/main/docker/Dockerfile | 2 +- edc-controlplane/edc-controlplane-memory/notice.md | 2 +- .../src/main/docker/Dockerfile | 2 +- .../notice.md | 2 +- .../src/main/docker/Dockerfile | 2 +- .../edc-controlplane-postgresql/notice.md | 2 +- .../src/main/docker/Dockerfile | 2 +- edc-dataplane/edc-dataplane-azure-vault/notice.md | 2 +- .../src/main/docker/Dockerfile | 2 +- .../edc-dataplane-hashicorp-vault/notice.md | 2 +- .../src/main/docker/Dockerfile | 2 +- 19 files changed, 28 insertions(+), 28 deletions(-) diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index 4cd57dfe8..0dfefd5f0 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -60,7 +60,7 @@ runs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' - name: Build Controlplane diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 5b37d55b4..f3e63414a 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -73,7 +73,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' # Build @@ -147,7 +147,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' - name: Import GPG Key diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index 9273b781c..7c64b29f5 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -55,7 +55,7 @@ jobs: name: Set-Up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' - diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index 915f2b7a8..82e956c76 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -36,7 +36,7 @@ jobs: name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' - diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index e3d4d555f..c626bba84 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -60,7 +60,7 @@ jobs: name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - 
java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' @@ -181,7 +181,7 @@ jobs: name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' - diff --git a/.github/workflows/veracode.yaml b/.github/workflows/veracode.yaml index b980a2829..a58d3fa4d 100644 --- a/.github/workflows/veracode.yaml +++ b/.github/workflows/veracode.yaml @@ -33,7 +33,7 @@ jobs: name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' - @@ -62,7 +62,7 @@ jobs: name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' # Build @@ -111,7 +111,7 @@ jobs: name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' # Build diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index 5cdaa5c9c..01064f35d 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -63,7 +63,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' - name: Verify proper formatting @@ -96,7 +96,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' @@ -113,7 +113,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' @@ -130,7 +130,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' @@ -147,7 +147,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' @@ -168,7 +168,7 @@ jobs: - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: - java-version: '11' + java-version: '17' distribution: 'temurin' cache: 'gradle' - name: Cache SonarCloud packages diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md index cf6aa8d92..fdcc88583 100644 --- a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md @@ -15,7 +15,7 @@ Eclipse Tractus-X product(s) installed within the image: ## Used base image -- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- [eclipse-temurin:17.0.6_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin - Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin - Additional information about the Eclipse Temurin diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile index 229c44868..149256182 100644 --- a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile @@ -26,7 +26,7 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM 
eclipse-temurin:11.0.18_10-jre-alpine +FROM eclipse-temurin:17.0.6_10-jre-alpine ARG JAR ARG APP_USER=docker diff --git a/edc-controlplane/edc-controlplane-memory/notice.md b/edc-controlplane/edc-controlplane-memory/notice.md index d8bcac50b..cee9fe5ed 100644 --- a/edc-controlplane/edc-controlplane-memory/notice.md +++ b/edc-controlplane/edc-controlplane-memory/notice.md @@ -15,7 +15,7 @@ Eclipse Tractus-X product(s) installed within the image: ## Used base image -- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- [eclipse-temurin:17.0.6_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin - Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin - Additional information about the Eclipse Temurin diff --git a/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile index c9af99a81..d248e8131 100644 --- a/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile @@ -26,7 +26,7 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM eclipse-temurin:11.0.18_10-jre-alpine +FROM eclipse-temurin:17.0.6_10-jre-alpine ARG JAR ARG APP_USER=docker diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md index cd46028a5..3b5e517f0 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md @@ -15,7 +15,7 @@ Eclipse Tractus-X product(s) installed within the image: ## Used base image -- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- [eclipse-temurin:17.0.6_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin - Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin - Additional information about the Eclipse Temurin diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile index c9af99a81..d248e8131 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile @@ -26,7 +26,7 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM eclipse-temurin:11.0.18_10-jre-alpine +FROM eclipse-temurin:17.0.6_10-jre-alpine ARG JAR ARG APP_USER=docker diff --git a/edc-controlplane/edc-controlplane-postgresql/notice.md b/edc-controlplane/edc-controlplane-postgresql/notice.md index a73966fa9..d9e1b58b1 100644 --- a/edc-controlplane/edc-controlplane-postgresql/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql/notice.md @@ -15,7 +15,7 @@ Eclipse Tractus-X product(s) installed within the image: ## Used base image -- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- [eclipse-temurin:17.0.6_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin - Eclipse Temurin Project: 
https://projects.eclipse.org/projects/adoptium.temurin - Additional information about the Eclipse Temurin diff --git a/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile b/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile index c9af99a81..d248e8131 100644 --- a/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile @@ -26,7 +26,7 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM eclipse-temurin:11.0.18_10-jre-alpine +FROM eclipse-temurin:17.0.6_10-jre-alpine ARG JAR ARG APP_USER=docker diff --git a/edc-dataplane/edc-dataplane-azure-vault/notice.md b/edc-dataplane/edc-dataplane-azure-vault/notice.md index 5c95dfd5b..7023f7ab7 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/notice.md +++ b/edc-dataplane/edc-dataplane-azure-vault/notice.md @@ -15,7 +15,7 @@ Eclipse Tractus-X product(s) installed within the image: ## Used base image -- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- [eclipse-temurin:17.0.6_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin - Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin - Additional information about the Eclipse Temurin diff --git a/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile b/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile index 5c9e65d5e..cb3e3f817 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile +++ b/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile @@ -26,7 +26,7 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM eclipse-temurin:11.0.18_10-jre-alpine +FROM eclipse-temurin:17.0.6_10-jre-alpine ARG JAR ARG APP_USER=docker diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md index f734642ad..8b18d0a4b 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md @@ -15,7 +15,7 @@ Eclipse Tractus-X product(s) installed within the image: ## Used base image -- [eclipse-temurin:11.0.18_10-jre-alpine](https://github.com/adoptium/containers) +- [eclipse-temurin:17.0.6_10-jre-alpine](https://github.com/adoptium/containers) - Official Eclipse Temurin DockerHub page: https://hub.docker.com/_/eclipse-temurin - Eclipse Temurin Project: https://projects.eclipse.org/projects/adoptium.temurin - Additional information about the Eclipse Temurin diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile b/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile index 5c9e65d5e..cb3e3f817 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile @@ -26,7 +26,7 @@ HEALTHCHECK NONE RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar -FROM eclipse-temurin:11.0.18_10-jre-alpine +FROM eclipse-temurin:17.0.6_10-jre-alpine ARG JAR ARG APP_USER=docker From 0ac05fe9793520ddb20f4debb6d236abbb27741c Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Fri, 14 Apr 2023 10:40:35 +0200 Subject: [PATCH 69/92] feat(tests): removes lombok from edc-tests module (#159) --- .../eclipse/tractusx/edc/tests/Connector.java | 55 +- 
.../tractusx/edc/tests/ConnectorFactory.java | 27 +- .../eclipse/tractusx/edc/tests/Constants.java | 25 +- .../edc/tests/ControlPlaneAdapterSteps.java | 82 +- .../tractusx/edc/tests/DataManagementAPI.java | 1309 +++++++++-------- .../tractusx/edc/tests/Environment.java | 203 ++- .../edc/tests/HttpProxyTransferSteps.java | 6 +- .../tractusx/edc/tests/NegotiationSteps.java | 89 +- .../tractusx/edc/tests/PolicyStepDefs.java | 65 +- .../edc/tests/S3FileTransferStepsDefs.java | 227 ++- .../tractusx/edc/tests/data/Asset.java | 26 +- .../data/BusinessPartnerNumberConstraint.java | 14 +- .../edc/tests/data/ContractDefinition.java | 41 +- .../edc/tests/data/ContractNegotiation.java | 29 +- .../edc/tests/data/ContractOffer.java | 28 +- .../data/HttpProxySourceDataAddress.java | 60 +- .../tractusx/edc/tests/data/Negotiation.java | 57 +- .../tractusx/edc/tests/data/OrConstraint.java | 15 +- .../edc/tests/data/PayMeConstraint.java | 14 +- .../tractusx/edc/tests/data/Permission.java | 29 +- .../tractusx/edc/tests/data/Policy.java | 22 +- .../edc/tests/data/S3DataAddress.java | 28 +- .../tractusx/edc/tests/data/Transfer.java | 46 +- .../edc/tests/data/TransferProcess.java | 21 +- .../edc/tests/util/DatabaseCleaner.java | 45 +- .../tractusx/edc/tests/util/S3Client.java | 166 +-- 26 files changed, 1577 insertions(+), 1152 deletions(-) diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Connector.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Connector.java index 5c7a42c5d..d4e2ea7a8 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Connector.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Connector.java @@ -20,36 +20,56 @@ package org.eclipse.tractusx.edc.tests; -import lombok.Getter; -import lombok.NonNull; -import lombok.RequiredArgsConstructor; + import org.eclipse.tractusx.edc.tests.util.DatabaseCleaner; import org.eclipse.tractusx.edc.tests.util.S3Client; import static org.mockito.Mockito.mock; -@RequiredArgsConstructor public class Connector { - @NonNull - @Getter private final String name; - @Getter - @NonNull private final Environment environment; - @Getter(lazy = true) - private final DataManagementAPI dataManagementAPI = loadDataManagementAPI(); + private final DataManagementAPI dataManagementAPI; + + private final DatabaseCleaner databaseCleaner; + + + private final S3Client s3Client; + + public Connector(String name, Environment environment) { + this.name = name; + this.environment = environment; + dataManagementAPI = loadDataManagementAPI(); + databaseCleaner = loadDatabaseCleaner(); + s3Client = createS3Client(); + } + + public BackendDataService getBackendServiceBackendAPI() { + return mock(BackendDataService.class); + } + + public DatabaseCleaner getDatabaseCleaner() { + return databaseCleaner; + } - @Getter(lazy = true) - private final BackendDataService backendServiceBackendAPI = loadBackendServiceBackendAPI(); + public DataManagementAPI getDataManagementAPI() { + return dataManagementAPI; + } - @Getter(lazy = true) - private final DatabaseCleaner databaseCleaner = loadDatabaseCleaner(); + public Environment getEnvironment() { + return environment; + } - @Getter(lazy = true) - private final S3Client s3Client = createS3Client(); + public S3Client getS3Client() { + return s3Client; + } + + public String getName() { + return name; + } private DataManagementAPI loadDataManagementAPI() { return new DataManagementAPI( @@ -63,9 +83,6 @@ private DatabaseCleaner loadDatabaseCleaner() { 
environment.getDatabasePassword()); } - private BackendDataService loadBackendServiceBackendAPI() { - return mock(BackendDataService.class); - } private S3Client createS3Client() { return new S3Client(environment); diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/ConnectorFactory.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/ConnectorFactory.java index 7a8ef81a1..364e266f3 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/ConnectorFactory.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/ConnectorFactory.java @@ -20,24 +20,25 @@ package org.eclipse.tractusx.edc.tests; -import java.util.HashMap; import java.util.Locale; import java.util.Map; -import lombok.NonNull; -import lombok.experimental.UtilityClass; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; + -@UtilityClass public class ConnectorFactory { - private static final Map CONNECTOR_CACHE = new HashMap<>(); + private static final Map CONNECTOR_CACHE = new ConcurrentHashMap<>(); - public static Connector byName(@NonNull final String name) { - return CONNECTOR_CACHE.computeIfAbsent( - name.toUpperCase(Locale.ROOT), k -> createConnector(name)); - } + public static Connector byName(String name) { + Objects.requireNonNull(name); + return CONNECTOR_CACHE.computeIfAbsent( + name.toUpperCase(Locale.ROOT), k -> createConnector(name)); + } - private static Connector createConnector(@NonNull final String name) { - final Environment environment = Environment.byName(name); + private static Connector createConnector(String name) { + Objects.requireNonNull(name); + Environment environment = Environment.byName(name); - return new Connector(name, environment); - } + return new Connector(name, environment); + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Constants.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Constants.java index 67b484e38..6a7de2ceb 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Constants.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Constants.java @@ -20,19 +20,16 @@ package org.eclipse.tractusx.edc.tests; -import lombok.experimental.UtilityClass; - -@UtilityClass public final class Constants { - public static final String DATA_MANAGEMENT_URL = "DATA_MANAGEMENT_URL"; - public static final String DATA_MANAGEMENT_API_AUTH_KEY = "DATA_MANAGEMENT_API_AUTH_KEY"; - public static final String IDS_URL = "IDS_URL"; - public static final String DATA_PLANE_URL = "DATA_PLANE_URL"; - public static final String BACKEND_SERVICE_BACKEND_API_URL = "BACKEND_SERVICE_BACKEND_API_URL"; - public static final String DATABASE_URL = "DATABASE_URL"; - public static final String DATABASE_USER = "DATABASE_USER"; - public static final String DATABASE_PASSWORD = "DATABASE_PASSWORD"; - public static final String EDC_AWS_ENDPOINT_OVERRIDE = "EDC_AWS_ENDPOINT_OVERRIDE"; - public static final String AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID"; - public static final String AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY"; + public static final String DATA_MANAGEMENT_URL = "DATA_MANAGEMENT_URL"; + public static final String DATA_MANAGEMENT_API_AUTH_KEY = "DATA_MANAGEMENT_API_AUTH_KEY"; + public static final String IDS_URL = "IDS_URL"; + public static final String DATA_PLANE_URL = "DATA_PLANE_URL"; + public static final String BACKEND_SERVICE_BACKEND_API_URL = "BACKEND_SERVICE_BACKEND_API_URL"; + public static final String 
DATABASE_URL = "DATABASE_URL"; + public static final String DATABASE_USER = "DATABASE_USER"; + public static final String DATABASE_PASSWORD = "DATABASE_PASSWORD"; + public static final String EDC_AWS_ENDPOINT_OVERRIDE = "EDC_AWS_ENDPOINT_OVERRIDE"; + public static final String AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID"; + public static final String AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY"; } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/ControlPlaneAdapterSteps.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/ControlPlaneAdapterSteps.java index d29a13aa4..e786c789a 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/ControlPlaneAdapterSteps.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/ControlPlaneAdapterSteps.java @@ -22,64 +22,66 @@ import com.google.gson.Gson; import io.cucumber.datatable.DataTable; -import java.io.IOException; -import java.util.Map; -import lombok.extern.slf4j.Slf4j; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.HttpClientBuilder; import org.eclipse.edc.spi.system.health.HealthStatus; import org.eclipse.edc.spi.types.domain.edr.EndpointDataReference; import org.junit.jupiter.api.Assertions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; -@Slf4j public class ControlPlaneAdapterSteps { - private EndpointDataReference endpointDataReference; + private static final Logger log = LoggerFactory.getLogger(ControlPlaneAdapterSteps.class); + private EndpointDataReference endpointDataReference; - /* - * TODO: see of EndToEndTransfer.feature - * the current Bussinnes test is not running, because of a possible rare condition in the CI pipeline - * regarding the contract validity: see https://github.com/eclipse-edc/Connector/issues/2514 - */ + /* + * TODO: see of EndToEndTransfer.feature + * the current Bussinnes test is not running, because of a possible rare condition in the CI pipeline + * regarding the contract validity: see https://github.com/eclipse-edc/Connector/issues/2514 + */ - // @When("'{connector}' gets a request endpoint from '{connector}'") - public void getEndPointFromGetRequest(Connector consumer, Connector receiver, DataTable table) - throws IOException { + // @When("'{connector}' gets a request endpoint from '{connector}'") + public void getEndPointFromGetRequest(Connector consumer, Connector receiver, DataTable table) + throws IOException { - final DataManagementAPI dataManagementAPI = consumer.getDataManagementAPI(); - final String receiverIdsUrl = receiver.getEnvironment().getIdsUrl() + "/data"; + DataManagementAPI dataManagementAPI = consumer.getDataManagementAPI(); + String receiverIdsUrl = receiver.getEnvironment().getIdsUrl() + "/data"; - for (Map map : table.asMaps()) { - final String assetId = map.get("asset id"); + for (Map map : table.asMaps()) { + String assetId = map.get("asset id"); - endpointDataReference = dataManagementAPI.getEdcEndpoint(assetId, receiverIdsUrl); + endpointDataReference = dataManagementAPI.getEdcEndpoint(assetId, receiverIdsUrl); - log.debug("endpointDataReference in controlplane" + endpointDataReference.toString()); + log.debug("endpointDataReference in controlplane" + endpointDataReference.toString()); + } } - } - /* - * TODO: see EndToEndTransfer.feature - * the current Bussinnes test is not running, because of a possible rare condition in the CI pipeline - * 
regarding the contract validity: see https://github.com/eclipse-edc/Connector/issues/2514 - */ + /* + * TODO: see EndToEndTransfer.feature + * the current Bussinnes test is not running, because of a possible rare condition in the CI pipeline + * regarding the contract validity: see https://github.com/eclipse-edc/Connector/issues/2514 + */ - // @Then("'{connector}' asks for the asset from the endpoint") - public void receiveEndpoint(Connector consumer) throws IOException { + // @Then("'{connector}' asks for the asset from the endpoint") + public void receiveEndpoint(Connector consumer) throws IOException { - var requestUrl = endpointDataReference.getEndpoint(); - var key = endpointDataReference.getAuthKey(); - var value = endpointDataReference.getAuthCode(); - var httpClient = HttpClientBuilder.create().build(); - var get = new HttpGet(requestUrl); - get.addHeader(key, value); - final CloseableHttpResponse response = httpClient.execute(get); - var bytes = response.getEntity().getContent().readAllBytes(); - var result = new String(bytes); - var resultTransformed = new Gson().fromJson(result, HealthStatus.class); + var requestUrl = endpointDataReference.getEndpoint(); + var key = endpointDataReference.getAuthKey(); + var value = endpointDataReference.getAuthCode(); + var httpClient = HttpClientBuilder.create().build(); + var get = new HttpGet(requestUrl); + get.addHeader(key, value); + CloseableHttpResponse response = httpClient.execute(get); + var bytes = response.getEntity().getContent().readAllBytes(); + var result = new String(bytes); + var resultTransformed = new Gson().fromJson(result, HealthStatus.class); - Assertions.assertTrue(resultTransformed.isHealthy()); - Assertions.assertFalse(resultTransformed.getComponentResults().isEmpty()); - } + Assertions.assertTrue(resultTransformed.isHealthy()); + Assertions.assertFalse(resultTransformed.getComponentResults().isEmpty()); + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/DataManagementAPI.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/DataManagementAPI.java index 735cbf175..d67dc77ca 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/DataManagementAPI.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/DataManagementAPI.java @@ -23,17 +23,6 @@ import com.google.gson.Gson; import com.google.gson.annotations.SerializedName; import com.google.gson.reflect.TypeToken; -import java.io.IOException; -import java.net.URLEncoder; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import lombok.Data; -import lombok.NonNull; -import lombok.extern.slf4j.Slf4j; import org.apache.http.Header; import org.apache.http.HttpResponse; import org.apache.http.client.methods.CloseableHttpResponse; @@ -45,628 +34,706 @@ import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.message.BasicHeader; import org.eclipse.edc.spi.types.domain.edr.EndpointDataReference; -import org.eclipse.tractusx.edc.tests.data.*; +import org.eclipse.tractusx.edc.tests.data.Asset; +import org.eclipse.tractusx.edc.tests.data.BusinessPartnerNumberConstraint; +import org.eclipse.tractusx.edc.tests.data.Constraint; +import org.eclipse.tractusx.edc.tests.data.ContractDefinition; +import org.eclipse.tractusx.edc.tests.data.ContractNegotiation; +import org.eclipse.tractusx.edc.tests.data.ContractNegotiationState; +import 
org.eclipse.tractusx.edc.tests.data.ContractOffer; +import org.eclipse.tractusx.edc.tests.data.DataAddress; +import org.eclipse.tractusx.edc.tests.data.HttpProxySinkDataAddress; +import org.eclipse.tractusx.edc.tests.data.HttpProxySourceDataAddress; +import org.eclipse.tractusx.edc.tests.data.Negotiation; +import org.eclipse.tractusx.edc.tests.data.NullDataAddress; +import org.eclipse.tractusx.edc.tests.data.OrConstraint; +import org.eclipse.tractusx.edc.tests.data.PayMeConstraint; +import org.eclipse.tractusx.edc.tests.data.Permission; +import org.eclipse.tractusx.edc.tests.data.Policy; +import org.eclipse.tractusx.edc.tests.data.S3DataAddress; +import org.eclipse.tractusx.edc.tests.data.Transfer; +import org.eclipse.tractusx.edc.tests.data.TransferProcess; +import org.eclipse.tractusx.edc.tests.data.TransferProcessState; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; -@Slf4j public class DataManagementAPI { - private static final String ASSET_PATH = "/assets"; - private static final String POLICY_PATH = "/policydefinitions"; - private static final String CONTRACT_DEFINITIONS_PATH = "/contractdefinitions"; - private static final String CATALOG_PATH = "/catalog"; - private static final String NEGOTIATIONS_PATH = "/contractnegotiations"; - private static final String TRANSFER_PATH = "/transferprocess"; - private static final String ADAPTER_PATH = "/adapter/asset/sync/"; - - private final String dataMgmtUrl; - private final String dataMgmtAuthKey; - private final CloseableHttpClient httpClient; - - public DataManagementAPI(String dataManagementUrl, String dataMgmtAuthKey) { - this.httpClient = HttpClientBuilder.create().build(); - this.dataMgmtUrl = dataManagementUrl; - this.dataMgmtAuthKey = dataMgmtAuthKey; - } - - public List requestCatalogFrom(String receivingConnectorUrl) throws IOException { - final String encodedUrl = URLEncoder.encode(receivingConnectorUrl, StandardCharsets.UTF_8); - final ManagementApiContractOfferCatalog catalog = - get( - CATALOG_PATH, - "providerUrl=" + encodedUrl, - new TypeToken() {}); - - log.debug("Received " + catalog.contractOffers.size() + " offers"); - - return catalog.contractOffers.stream().map(this::mapOffer).collect(Collectors.toList()); - } - - public Negotiation initiateNegotiation( - String receivingConnectorUrl, String definitionId, String assetId, Policy policy) - throws IOException { - final ManagementApiOffer offer = new ManagementApiOffer(); - offer.offerId = definitionId + ":foo"; - offer.assetId = assetId; - offer.policy = mapPolicy(policy); - offer.policy.permissions.forEach(p -> p.target = assetId); - - final ManagementApiNegotiationPayload negotiationPayload = - new ManagementApiNegotiationPayload(); - negotiationPayload.connectorAddress = receivingConnectorUrl; - negotiationPayload.offer = offer; - - final ManagementApiNegotiationResponse response = - post( - NEGOTIATIONS_PATH, - negotiationPayload, - new TypeToken() {}); - - if (response == null) - throw new RuntimeException( - "Initiated negotiation. 
Connector did not answer with negotiation ID."); - - log.info(String.format("Initiated negotiation (id=%s)", response.getId())); - - final String negotiationId = response.getId(); - return new Negotiation(negotiationId); - } - - public Transfer initiateTransferProcess( - String receivingConnectorUrl, - String contractAgreementId, - String assetId, - DataAddress dataAddress) - throws IOException { - final ManagementApiTransfer transfer = new ManagementApiTransfer(); - - transfer.connectorAddress = receivingConnectorUrl; - transfer.contractId = contractAgreementId; - transfer.assetId = assetId; - transfer.transferType = new ManagementApiTransferType(); - transfer.managedResources = false; - transfer.dataDestination = mapDataAddress(dataAddress); - transfer.protocol = "ids-multipart"; - - return initiateTransferProcess(transfer); - } - - public Transfer initiateTransferProcess( - String receivingConnectorUrl, - String contractAgreementId, - String assetId, - DataAddress dataAddress, - String receiverEndpoint) - throws IOException { - final ManagementApiTransfer transfer = new ManagementApiTransfer(); - - transfer.connectorAddress = receivingConnectorUrl; - transfer.contractId = contractAgreementId; - transfer.assetId = assetId; - transfer.transferType = new ManagementApiTransferType(); - transfer.managedResources = false; - transfer.dataDestination = mapDataAddress(dataAddress); - transfer.protocol = "ids-multipart"; - transfer.properties = new ManagementApiProperties(receiverEndpoint); - - return initiateTransferProcess(transfer); - } - - private Transfer initiateTransferProcess(ManagementApiTransfer transfer) throws IOException { - final ManagementApiTransferResponse response = - post(TRANSFER_PATH, transfer, new TypeToken() {}); - - if (response == null) - throw new RuntimeException( - "Initiated transfer process. 
Connector did not answer with transfer process ID."); - - log.info(String.format("Initiated transfer process (id=%s)", response.getId())); - - final String transferId = response.getId(); - return new Transfer(transferId); - } - - public Asset initiateTransferProcess( - String endpointUrl, String endpointAuthKey, String endpointAuthCode) throws IOException { - Header header = new BasicHeader(endpointAuthKey, endpointAuthCode); - return get(endpointUrl, header, new TypeToken() {}); - } - - public TransferProcess getTransferProcess(String id) throws IOException { - final ManagementApiTransferProcess transferProcess = - get(TRANSFER_PATH + "/" + id, new TypeToken() {}); - return mapTransferProcess(transferProcess); - } - - public ContractNegotiation getNegotiation(String id) throws IOException { - final ManagementApiNegotiation negotiation = - get(NEGOTIATIONS_PATH + "/" + id, new TypeToken() {}); - return mapNegotiation(negotiation); - } - - public List getNegotiations() throws IOException { - final List negotiations = - get(NEGOTIATIONS_PATH + "/", new TypeToken>() {}); - return negotiations.stream().map(this::mapNegotiation).collect(Collectors.toList()); - } + private static final Logger log = LoggerFactory.getLogger(DataManagementAPI.class); + private static final String ASSET_PATH = "/assets"; + private static final String POLICY_PATH = "/policydefinitions"; + private static final String CONTRACT_DEFINITIONS_PATH = "/contractdefinitions"; + private static final String CATALOG_PATH = "/catalog"; + private static final String NEGOTIATIONS_PATH = "/contractnegotiations"; + private static final String TRANSFER_PATH = "/transferprocess"; + private static final String ADAPTER_PATH = "/adapter/asset/sync/"; + + private final String dataMgmtUrl; + private final String dataMgmtAuthKey; + private final CloseableHttpClient httpClient; + + public DataManagementAPI(String dataManagementUrl, String dataMgmtAuthKey) { + httpClient = HttpClientBuilder.create().build(); + dataMgmtUrl = dataManagementUrl; + this.dataMgmtAuthKey = dataMgmtAuthKey; + } + + public List requestCatalogFrom(String receivingConnectorUrl) throws IOException { + String encodedUrl = URLEncoder.encode(receivingConnectorUrl, StandardCharsets.UTF_8); + ManagementApiContractOfferCatalog catalog = + get( + CATALOG_PATH, + "providerUrl=" + encodedUrl, + new TypeToken() { + }); + + log.debug("Received " + catalog.contractOffers.size() + " offers"); + + return catalog.contractOffers.stream().map(this::mapOffer).collect(Collectors.toList()); + } + + public Negotiation initiateNegotiation( + String receivingConnectorUrl, String definitionId, String assetId, Policy policy) + throws IOException { + ManagementApiOffer offer = new ManagementApiOffer(); + offer.offerId = definitionId + ":foo"; + offer.assetId = assetId; + offer.policy = mapPolicy(policy); + offer.policy.permissions.forEach(p -> p.target = assetId); + + ManagementApiNegotiationPayload negotiationPayload = + new ManagementApiNegotiationPayload(); + negotiationPayload.connectorAddress = receivingConnectorUrl; + negotiationPayload.offer = offer; + + ManagementApiNegotiationResponse response = + post( + NEGOTIATIONS_PATH, + negotiationPayload, + new TypeToken() { + }); + + if (response == null) { + throw new RuntimeException( + "Initiated negotiation. 
Connector did not answer with negotiation ID."); + } + + log.info(String.format("Initiated negotiation (id=%s)", response.getId())); + + String negotiationId = response.getId(); + return new Negotiation(negotiationId); + } + + public Transfer initiateTransferProcess( + String receivingConnectorUrl, + String contractAgreementId, + String assetId, + DataAddress dataAddress) + throws IOException { + ManagementApiTransfer transfer = new ManagementApiTransfer(); + + transfer.connectorAddress = receivingConnectorUrl; + transfer.contractId = contractAgreementId; + transfer.assetId = assetId; + transfer.transferType = new ManagementApiTransferType(); + transfer.managedResources = false; + transfer.dataDestination = mapDataAddress(dataAddress); + transfer.protocol = "ids-multipart"; + + return initiateTransferProcess(transfer); + } + + public Transfer initiateTransferProcess( + String receivingConnectorUrl, + String contractAgreementId, + String assetId, + DataAddress dataAddress, + String receiverEndpoint) + throws IOException { + ManagementApiTransfer transfer = new ManagementApiTransfer(); + + transfer.connectorAddress = receivingConnectorUrl; + transfer.contractId = contractAgreementId; + transfer.assetId = assetId; + transfer.transferType = new ManagementApiTransferType(); + transfer.managedResources = false; + transfer.dataDestination = mapDataAddress(dataAddress); + transfer.protocol = "ids-multipart"; + transfer.properties = new ManagementApiProperties(receiverEndpoint); + + return initiateTransferProcess(transfer); + } + + public Asset initiateTransferProcess( + String endpointUrl, String endpointAuthKey, String endpointAuthCode) throws IOException { + Header header = new BasicHeader(endpointAuthKey, endpointAuthCode); + return get(endpointUrl, header, new TypeToken() { + }); + } + + public TransferProcess getTransferProcess(String id) throws IOException { + ManagementApiTransferProcess transferProcess = + get(TRANSFER_PATH + "/" + id, new TypeToken() { + }); + return mapTransferProcess(transferProcess); + } + + public ContractNegotiation getNegotiation(String id) throws IOException { + ManagementApiNegotiation negotiation = + get(NEGOTIATIONS_PATH + "/" + id, new TypeToken() { + }); + return mapNegotiation(negotiation); + } + + public List getNegotiations() throws IOException { + List negotiations = + get(NEGOTIATIONS_PATH + "/", new TypeToken>() { + }); + return negotiations.stream().map(this::mapNegotiation).collect(Collectors.toList()); + } + + public void createAsset(Asset asset) throws IOException { + ManagementApiAssetCreate assetCreate = new ManagementApiAssetCreate(); + + assetCreate.asset = mapAsset(asset); + assetCreate.dataAddress = mapDataAddress(asset.getDataAddress()); + + post(ASSET_PATH, assetCreate); + } + + public void createPolicy(Policy policy) throws IOException { + post(POLICY_PATH, mapPolicyDefinition(policy)); + } + + public void createContractDefinition(ContractDefinition contractDefinition) throws IOException { + post(CONTRACT_DEFINITIONS_PATH, mapContractDefinition(contractDefinition)); + } + + public EndpointDataReference getEdcEndpoint(String assetId, String receivingConnectorUrl) + throws IOException { + String encodedUrl = ADAPTER_PATH + assetId + "?providerUrl=" + receivingConnectorUrl; + + EndpointDataReference endpoint = + get(encodedUrl, new TypeToken() { + }); + + return endpoint; + } + + private Transfer initiateTransferProcess(ManagementApiTransfer transfer) throws IOException { + ManagementApiTransferResponse response = + post(TRANSFER_PATH, 
transfer, new TypeToken() { + }); + + if (response == null) { + throw new RuntimeException( + "Initiated transfer process. Connector did not answer with transfer process ID."); + } - public void createAsset(Asset asset) throws IOException { - final ManagementApiAssetCreate assetCreate = new ManagementApiAssetCreate(); + log.info(String.format("Initiated transfer process (id=%s)", response.getId())); - assetCreate.asset = mapAsset(asset); - assetCreate.dataAddress = mapDataAddress(asset.getDataAddress()); + String transferId = response.getId(); + return new Transfer(transferId); + } + + private T get(String path, String params, TypeToken typeToken) throws IOException { + return get(path + "?" + params, typeToken); + } + + private T get(String path, TypeToken typeToken) throws IOException { + + HttpGet get = new HttpGet(dataMgmtUrl + path); + HttpResponse response = sendRequest(get); + byte[] json = response.getEntity().getContent().readAllBytes(); + + log.debug("Received response: {}", new String(json, StandardCharsets.UTF_8)); + return new Gson().fromJson(new String(json, StandardCharsets.UTF_8), typeToken.getType()); + } + + private T get(String path, Header header, TypeToken typeToken) throws IOException { + + HttpGet get = new HttpGet(path); + get.addHeader(header); + HttpResponse response = sendRequest(get); + byte[] json = response.getEntity().getContent().readAllBytes(); + + log.debug("Received response: {}", new String(json, StandardCharsets.UTF_8)); + return new Gson().fromJson(new String(json, StandardCharsets.UTF_8), typeToken.getType()); + } + + private void post(String path, Object object) throws IOException { + post(path, object, new TypeToken() { + }); + } + + private T post(String path, Object object, TypeToken typeToken) throws IOException { + String url = String.format("%s%s", dataMgmtUrl, path); + HttpPost post = new HttpPost(url); + post.addHeader("Content-Type", "application/json"); + + var json = new Gson().toJson(object); + + log.debug("POST Payload: " + json); + + post.setEntity(new StringEntity(json)); + CloseableHttpResponse response = sendRequest(post); + + T responseJson = null; + if (!typeToken.equals(new TypeToken() { + })) { + byte[] responseBytes = response.getEntity().getContent().readAllBytes(); + responseJson = + new Gson() + .fromJson(new String(responseBytes, StandardCharsets.UTF_8), typeToken.getType()); + } + + response.close(); + + return responseJson; + } + + private CloseableHttpResponse sendRequest(HttpRequestBase request) throws IOException { + request.addHeader("X-Api-Key", dataMgmtAuthKey); + + log.debug(String.format("Send %-6s %s", request.getMethod(), request.getURI())); + + CloseableHttpResponse response = httpClient.execute(request); + if (200 > response.getStatusLine().getStatusCode() + || response.getStatusLine().getStatusCode() >= 300) { + throw new RuntimeException( + String.format("Unexpected response: %s", response.getStatusLine())); + } + + return response; + } + + private ContractNegotiation mapNegotiation(ManagementApiNegotiation negotiation) { + + ContractNegotiationState state; + + switch (negotiation.state) { + case "ERROR": + state = ContractNegotiationState.ERROR; + break; + case "INITIAL": + state = ContractNegotiationState.INITIAL; + break; + case "DECLINED": + state = ContractNegotiationState.DECLINED; + break; + case "CONFIRMED": + state = ContractNegotiationState.CONFIRMED; + break; + default: + state = ContractNegotiationState.UNKNOWN; + } + + return new ContractNegotiation(negotiation.id, state, 
negotiation.contractAgreementId); + } + + private TransferProcess mapTransferProcess(ManagementApiTransferProcess transferProcess) { - post(ASSET_PATH, assetCreate); - } - - public void createPolicy(Policy policy) throws IOException { - post(POLICY_PATH, mapPolicyDefinition(policy)); - } - - public void createContractDefinition(ContractDefinition contractDefinition) throws IOException { - post(CONTRACT_DEFINITIONS_PATH, mapContractDefinition(contractDefinition)); - } - - public EndpointDataReference getEdcEndpoint(String assetId, String receivingConnectorUrl) - throws IOException { - final String encodedUrl = ADAPTER_PATH + assetId + "?providerUrl=" + receivingConnectorUrl; - - final EndpointDataReference endpoint = - get(encodedUrl, new TypeToken() {}); + TransferProcessState state; - return endpoint; - } + switch (transferProcess.state) { + case "COMPLETED": + state = TransferProcessState.COMPLETED; + break; + case "ERROR": + state = TransferProcessState.ERROR; + break; + default: + state = TransferProcessState.UNKNOWN; + } - private T get(String path, String params, TypeToken typeToken) throws IOException { - return get(path + "?" + params, typeToken); - } - - private T get(String path, TypeToken typeToken) throws IOException { - - final HttpGet get = new HttpGet(dataMgmtUrl + path); - final HttpResponse response = sendRequest(get); - final byte[] json = response.getEntity().getContent().readAllBytes(); + return new TransferProcess(transferProcess.id, state); + } + + private ManagementApiDataAddress mapDataAddress(DataAddress dataAddress) { + Objects.requireNonNull(dataAddress); + ManagementApiDataAddress apiObject = new ManagementApiDataAddress(); + + if (dataAddress instanceof HttpProxySourceDataAddress) { + var address = (HttpProxySourceDataAddress) dataAddress; + var properties = new HashMap(); + properties.put("type", "HttpData"); + properties.put("baseUrl", address.getBaseUrl()); + var oauth2Provision = address.getOauth2Provision(); + if (oauth2Provision != null) { + properties.put("oauth2:tokenUrl", oauth2Provision.getTokenUrl()); + properties.put("oauth2:clientId", oauth2Provision.getClientId()); + properties.put("oauth2:clientSecret", oauth2Provision.getClientSecret()); + properties.put("oauth2:scope", oauth2Provision.getScope()); + } + apiObject.setProperties(properties); + } else if (dataAddress instanceof HttpProxySinkDataAddress) { + apiObject.setProperties(Map.of("type", "HttpProxy")); + } else if (dataAddress instanceof S3DataAddress) { + S3DataAddress a = (S3DataAddress) dataAddress; + apiObject.setProperties( + Map.of( + "type", + "AmazonS3", + "bucketName", + a.getBucketName(), + "region", + a.getRegion(), + "keyName", + a.getKeyName())); + } else if (dataAddress instanceof NullDataAddress) { + // set something that passes validation + apiObject.setProperties(Map.of("type", "HttpData", "baseUrl", "http://localhost")); + } else { + throw new UnsupportedOperationException( + String.format( + "Cannot map data address of type %s to EDC domain", dataAddress.getClass())); + } + + return apiObject; + } + + private ManagementApiAsset mapAsset(Asset asset) { + Map properties = + Map.of( + ManagementApiAsset.ID, asset.getId(), + ManagementApiAsset.DESCRIPTION, asset.getDescription()); + + ManagementApiAsset apiObject = new ManagementApiAsset(); + apiObject.setProperties(properties); + return apiObject; + } + + private Policy mapPolicy(ManagementApiPolicy managementApiPolicy) { + String id = managementApiPolicy.uid; + List permissions = + managementApiPolicy.permissions.stream() 
+ .map(this::mapPermission) + .collect(Collectors.toList()); + + return new Policy(id, permissions); + } - log.debug("Received response: {}", new String(json, StandardCharsets.UTF_8)); - return new Gson().fromJson(new String(json, StandardCharsets.UTF_8), typeToken.getType()); - } + private ManagementApiPolicy mapPolicy(Policy policy) { + List permissions = + policy.getPermission().stream().map(this::mapPermission).collect(Collectors.toList()); + ManagementApiPolicy managementApiPolicy = new ManagementApiPolicy(); + managementApiPolicy.permissions = permissions; - private T get(String path, Header header, TypeToken typeToken) throws IOException { + return managementApiPolicy; + } + + private ManagementApiPolicyDefinition mapPolicyDefinition(Policy policy) { + ManagementApiPolicyDefinition apiObject = new ManagementApiPolicyDefinition(); + apiObject.id = policy.getId(); + apiObject.policy = mapPolicy(policy); + return apiObject; + } + + private Permission mapPermission(ManagementApiPermission managementApiPermission) { + String target = managementApiPermission.target; + String action = managementApiPermission.action.type; + return new Permission(action, new ArrayList<>(), target); + } + + private ManagementApiPermission mapPermission(Permission permission) { + String target = permission.getTarget(); + String action = permission.getAction(); + + ManagementApiRuleAction apiAction = new ManagementApiRuleAction(); + apiAction.type = action; + + var constraints = + permission.getConstraints().stream().map(this::mapConstraint).collect(Collectors.toList()); + + ManagementApiPermission apiObject = new ManagementApiPermission(); + apiObject.target = target; + apiObject.action = apiAction; + apiObject.constraints = constraints; + return apiObject; + } + + private ManagementConstraint mapConstraint(Constraint constraint) { + if (OrConstraint.class.equals(constraint.getClass())) { + return mapConstraint((OrConstraint) constraint); + } else if (BusinessPartnerNumberConstraint.class.equals(constraint.getClass())) { + return mapConstraint((BusinessPartnerNumberConstraint) constraint); + } else if (PayMeConstraint.class.equals(constraint.getClass())) { + return mapConstraint((PayMeConstraint) constraint); + } else { + throw new UnsupportedOperationException( + "Unsupported constraint type: " + constraint.getClass().getName()); + } + } - final HttpGet get = new HttpGet(path); - get.addHeader(header); - final HttpResponse response = sendRequest(get); - final byte[] json = response.getEntity().getContent().readAllBytes(); + private ManagementAtomicConstraint mapConstraint(PayMeConstraint constraint) { + ManagementApiLiteralExpression leftExpression = new ManagementApiLiteralExpression(); + leftExpression.value = "PayMe"; - log.debug("Received response: {}", new String(json, StandardCharsets.UTF_8)); - return new Gson().fromJson(new String(json, StandardCharsets.UTF_8), typeToken.getType()); - } + ManagementApiLiteralExpression rightExpression = new ManagementApiLiteralExpression(); + rightExpression.value = String.valueOf(constraint.getAmount()); - private void post(String path, Object object) throws IOException { - post(path, object, new TypeToken() {}); - } + ManagementAtomicConstraint dataManagementApiConstraint = new ManagementAtomicConstraint(); + dataManagementApiConstraint.leftExpression = leftExpression; + dataManagementApiConstraint.rightExpression = rightExpression; + dataManagementApiConstraint.operator = "EQ"; - private T post(String path, Object object, TypeToken typeToken) throws IOException { - 
final String url = String.format("%s%s", dataMgmtUrl, path);
-    final HttpPost post = new HttpPost(url);
-    post.addHeader("Content-Type", "application/json");
-
-    var json = new Gson().toJson(object);
-
-    log.debug("POST Payload: " + json);
-
-    post.setEntity(new StringEntity(json));
-    final CloseableHttpResponse response = sendRequest(post);
-
-    T responseJson = null;
-    if (!typeToken.equals(new TypeToken() {})) {
-      final byte[] responseBytes = response.getEntity().getContent().readAllBytes();
-      responseJson =
-          new Gson()
-              .fromJson(new String(responseBytes, StandardCharsets.UTF_8), typeToken.getType());
-    }
-
-    response.close();
-
-    return responseJson;
-  }
-
-  private CloseableHttpResponse sendRequest(HttpRequestBase request) throws IOException {
-    request.addHeader("X-Api-Key", dataMgmtAuthKey);
-
-    log.debug(String.format("Send %-6s %s", request.getMethod(), request.getURI()));
-
-    final CloseableHttpResponse response = httpClient.execute(request);
-    if (200 > response.getStatusLine().getStatusCode()
-        || response.getStatusLine().getStatusCode() >= 300) {
-      throw new RuntimeException(
-          String.format("Unexpected response: %s", response.getStatusLine()));
-    }
-
-    return response;
-  }
-
-  private ContractNegotiation mapNegotiation(ManagementApiNegotiation negotiation) {
-
-    ContractNegotiationState state;
-
-    switch (negotiation.state) {
-      case "ERROR":
-        state = ContractNegotiationState.ERROR;
-        break;
-      case "INITIAL":
-        state = ContractNegotiationState.INITIAL;
-        break;
-      case "DECLINED":
-        state = ContractNegotiationState.DECLINED;
-        break;
-      case "CONFIRMED":
-        state = ContractNegotiationState.CONFIRMED;
-        break;
-      default:
-        state = ContractNegotiationState.UNKNOWN;
-    }
-
-    return new ContractNegotiation(negotiation.id, negotiation.contractAgreementId, state);
-  }
-
-  private TransferProcess mapTransferProcess(ManagementApiTransferProcess transferProcess) {
-
-    TransferProcessState state;
-
-    switch (transferProcess.state) {
-      case "COMPLETED":
-        state = TransferProcessState.COMPLETED;
-        break;
-      case "ERROR":
-        state = TransferProcessState.ERROR;
-        break;
-      default:
-        state = TransferProcessState.UNKNOWN;
-    }
-
-    return new TransferProcess(transferProcess.id, state);
-  }
-
-  private ManagementApiDataAddress mapDataAddress(@NonNull DataAddress dataAddress) {
-    final ManagementApiDataAddress apiObject = new ManagementApiDataAddress();
-
-    if (dataAddress instanceof HttpProxySourceDataAddress) {
-      final var address = (HttpProxySourceDataAddress) dataAddress;
-      var properties = new HashMap();
-      properties.put("type", "HttpData");
-      properties.put("baseUrl", address.getBaseUrl());
-      var oauth2Provision = address.getOauth2Provision();
-      if (oauth2Provision != null) {
-        properties.put("oauth2:tokenUrl", oauth2Provision.getTokenUrl());
-        properties.put("oauth2:clientId", oauth2Provision.getClientId());
-        properties.put("oauth2:clientSecret", oauth2Provision.getClientSecret());
-        properties.put("oauth2:scope", oauth2Provision.getScope());
-      }
-      apiObject.setProperties(properties);
-    } else if (dataAddress instanceof HttpProxySinkDataAddress) {
-      apiObject.setProperties(Map.of("type", "HttpProxy"));
-    } else if (dataAddress instanceof S3DataAddress) {
-      final S3DataAddress a = (S3DataAddress) dataAddress;
-      apiObject.setProperties(
-          Map.of(
-              "type",
-              "AmazonS3",
-              "bucketName",
-              a.getBucketName(),
-              "region",
-              a.getRegion(),
-              "keyName",
-              a.getKeyName()));
-    } else if (dataAddress instanceof NullDataAddress) {
-      // set something that passes validation
-      apiObject.setProperties(Map.of("type", "HttpData", "baseUrl", "http://localhost"));
-    } else {
-      throw new UnsupportedOperationException(
-          String.format(
-              "Cannot map data address of type %s to EDC domain", dataAddress.getClass()));
-    }
-
-    return apiObject;
-  }
-
-  private ManagementApiAsset mapAsset(Asset asset) {
-    final Map properties =
-        Map.of(
-            ManagementApiAsset.ID, asset.getId(),
-            ManagementApiAsset.DESCRIPTION, asset.getDescription());
-
-    final ManagementApiAsset apiObject = new ManagementApiAsset();
-    apiObject.setProperties(properties);
-    return apiObject;
-  }
-
-  private Policy mapPolicy(ManagementApiPolicy managementApiPolicy) {
-    final String id = managementApiPolicy.uid;
-    final List permissions =
-        managementApiPolicy.permissions.stream()
-            .map(this::mapPermission)
-            .collect(Collectors.toList());
-
-    return new Policy(id, permissions);
-  }
-
-  private ManagementApiPolicy mapPolicy(Policy policy) {
-    final List permissions =
-        policy.getPermission().stream().map(this::mapPermission).collect(Collectors.toList());
-    final ManagementApiPolicy managementApiPolicy = new ManagementApiPolicy();
-    managementApiPolicy.permissions = permissions;
-
-    return managementApiPolicy;
-  }
-
-  private ManagementApiPolicyDefinition mapPolicyDefinition(Policy policy) {
-    final ManagementApiPolicyDefinition apiObject = new ManagementApiPolicyDefinition();
-    apiObject.id = policy.getId();
-    apiObject.policy = mapPolicy(policy);
-    return apiObject;
-  }
-
-  private Permission mapPermission(ManagementApiPermission managementApiPermission) {
-    final String target = managementApiPermission.target;
-    final String action = managementApiPermission.action.type;
-    return new Permission(action, target, new ArrayList<>());
-  }
-
-  private ManagementApiPermission mapPermission(Permission permission) {
-    final String target = permission.getTarget();
-    final String action = permission.getAction();
-
-    final ManagementApiRuleAction apiAction = new ManagementApiRuleAction();
-    apiAction.type = action;
-
-    var constraints =
-        permission.getConstraints().stream().map(this::mapConstraint).collect(Collectors.toList());
-
-    final ManagementApiPermission apiObject = new ManagementApiPermission();
-    apiObject.target = target;
-    apiObject.action = apiAction;
-    apiObject.constraints = constraints;
-    return apiObject;
-  }
-
-  private ManagementConstraint mapConstraint(Constraint constraint) {
-    if (OrConstraint.class.equals(constraint.getClass())) {
-      return mapConstraint((OrConstraint) constraint);
-    } else if (BusinessPartnerNumberConstraint.class.equals(constraint.getClass())) {
-      return mapConstraint((BusinessPartnerNumberConstraint) constraint);
-    } else if (PayMeConstraint.class.equals(constraint.getClass())) {
-      return mapConstraint((PayMeConstraint) constraint);
-    } else {
-      throw new UnsupportedOperationException(
-          "Unsupported constraint type: " + constraint.getClass().getName());
-    }
-  }
-
-  private ManagementAtomicConstraint mapConstraint(PayMeConstraint constraint) {
-    final ManagementApiLiteralExpression leftExpression = new ManagementApiLiteralExpression();
-    leftExpression.value = "PayMe";
-
-    final ManagementApiLiteralExpression rightExpression = new ManagementApiLiteralExpression();
-    rightExpression.value = String.valueOf(constraint.getAmount());
-
-    final ManagementAtomicConstraint dataManagementApiConstraint = new ManagementAtomicConstraint();
-    dataManagementApiConstraint.leftExpression = leftExpression;
-    dataManagementApiConstraint.rightExpression = rightExpression;
-    dataManagementApiConstraint.operator = "EQ";
-
-    return dataManagementApiConstraint;
-  }
-
-  private ManagementAtomicConstraint mapConstraint(BusinessPartnerNumberConstraint constraint) {
-    final ManagementApiLiteralExpression leftExpression = new ManagementApiLiteralExpression();
-    leftExpression.value = "BusinessPartnerNumber";
-
-    final ManagementApiLiteralExpression rightExpression = new ManagementApiLiteralExpression();
-    rightExpression.value = constraint.getBusinessPartnerNumber();
-
-    final ManagementAtomicConstraint dataManagementApiConstraint = new ManagementAtomicConstraint();
-    dataManagementApiConstraint.leftExpression = leftExpression;
-    dataManagementApiConstraint.rightExpression = rightExpression;
-    dataManagementApiConstraint.operator = "EQ";
-
-    return dataManagementApiConstraint;
-  }
-
-  private ManagementOrConstraint mapConstraint(OrConstraint constraint) {
-    var orConstraint = new ManagementOrConstraint();
-    orConstraint.constraints =
-        constraint.getConstraints().stream().map(this::mapConstraint).collect(Collectors.toList());
-    return orConstraint;
-  }
-
-  private ContractOffer mapOffer(ManagementApiContractOffer managementApiContractOffer) {
-    final String id = managementApiContractOffer.id;
-    final String assetId =
-        managementApiContractOffer.assetId != null
-            ? managementApiContractOffer.assetId
-            : (String) managementApiContractOffer.asset.getProperties().get(ManagementApiAsset.ID);
-
-    final Policy policy = mapPolicy(managementApiContractOffer.getPolicy());
-
-    return new ContractOffer(id, policy, assetId);
-  }
-
-  private ManagementApiContractDefinition mapContractDefinition(
-      ContractDefinition contractDefinition) {
-
-    final ManagementApiContractDefinition apiObject = new ManagementApiContractDefinition();
-    apiObject.id = contractDefinition.getId();
-    apiObject.accessPolicyId = contractDefinition.getAcccessPolicyId();
-    apiObject.contractPolicyId = contractDefinition.getContractPolicyId();
-    apiObject.criteria = new ArrayList<>();
-
-    for (final String assetId : contractDefinition.getAssetIds()) {
-      ManagementApiCriterion criterion = new ManagementApiCriterion();
-      criterion.operandLeft = ManagementApiAsset.ID;
-      criterion.operator = "=";
-      criterion.operandRight = assetId;
-
-      apiObject.criteria.add(criterion);
-    }
-
-    return apiObject;
-  }
-
-  @Data
-  private static class ManagementApiNegotiationResponse {
-    private String id;
-  }
-
-  @Data
-  private static class ManagementApiNegotiationPayload {
-    private String connectorId = "foo";
-    private String connectorAddress;
-    private ManagementApiOffer offer;
-  }
-
-  @Data
-  private static class ManagementApiNegotiation {
-    private String id;
-    private String state;
-    private String contractAgreementId;
-  }
-
-  @Data
-  private static class ManagementApiTransferProcess {
-    private String id;
-    private String state;
-  }
-
-  @Data
-  private static class ManagementApiOffer {
-    private String offerId;
-    private String assetId;
-    private ManagementApiPolicy policy;
-  }
-
-  @Data
-  private static class ManagementApiTransfer {
-    private String connectorId = "foo";
-    private String connectorAddress;
-    private String contractId;
-    private String assetId;
-    private String protocol;
-    private ManagementApiDataAddress dataDestination;
-    private boolean managedResources;
-    private ManagementApiTransferType transferType;
-    private ManagementApiProperties properties;
-  }
-
-  @Data
-  private static class ManagementApiTransferType {
-    private String contentType = "application/octet-stream";
-    private boolean isFinite = true;
-  }
- - @Data - private static class ManagementApiTransferResponse { - private String id; - } - - @Data - private static class ManagementApiAssetCreate { - private ManagementApiAsset asset; - private ManagementApiDataAddress dataAddress; - } - - @Data - private static class ManagementApiAsset { - public static final String ID = "asset:prop:id"; - public static final String DESCRIPTION = "asset:prop:description"; - - private Map properties; - } - - @Data - private static class ManagementApiDataAddress { - public static final String TYPE = "type"; - private Map properties; - } - - @Data - private static class ManagementApiProperties { - @SerializedName(value = "receiver.http.endpoint") - private final String receiverHttpEndpoint; - } - - @Data - private static class ManagementApiPolicyDefinition { - private String id; - private ManagementApiPolicy policy; - } - - @Data - private static class ManagementApiPolicy { - private String uid; - private List permissions = new ArrayList<>(); - } - - @Data - private static class ManagementApiPermission { - private String edctype = "dataspaceconnector:permission"; - private ManagementApiRuleAction action; - private String target; - private List constraints = new ArrayList<>(); - } - - @Data - private static class ManagementAtomicConstraint implements ManagementConstraint { - private String edctype = "AtomicConstraint"; - private ManagementApiLiteralExpression leftExpression; - private ManagementApiLiteralExpression rightExpression; - private String operator; - } - - @Data - private static class ManagementOrConstraint implements ManagementConstraint { - private String edctype = "dataspaceconnector:orconstraint"; - private List constraints; - } - - private interface ManagementConstraint {} - - @Data - private static class ManagementApiLiteralExpression { - private String edctype = "dataspaceconnector:literalexpression"; - private String value; - } - - @Data - private static class ManagementApiRuleAction { - private String type; - } - - @Data - private static class ManagementApiContractDefinition { - private String id; - private String accessPolicyId; - private String contractPolicyId; - private List criteria = new ArrayList<>(); - } - - @Data - private static class ManagementApiCriterion { - private Object operandLeft; - private String operator; - private Object operandRight; - } - - @Data - private static class ManagementApiContractOffer { - private String id; - private ManagementApiPolicy policy; - private ManagementApiAsset asset; - private String assetId; - } - - @Data - private static class ManagementApiContractOfferCatalog { - private String id; - private List contractOffers = new ArrayList<>(); - } + return dataManagementApiConstraint; + } + + private ManagementAtomicConstraint mapConstraint(BusinessPartnerNumberConstraint constraint) { + ManagementApiLiteralExpression leftExpression = new ManagementApiLiteralExpression(); + leftExpression.value = "BusinessPartnerNumber"; + + ManagementApiLiteralExpression rightExpression = new ManagementApiLiteralExpression(); + rightExpression.value = constraint.getBusinessPartnerNumber(); + + ManagementAtomicConstraint dataManagementApiConstraint = new ManagementAtomicConstraint(); + dataManagementApiConstraint.leftExpression = leftExpression; + dataManagementApiConstraint.rightExpression = rightExpression; + dataManagementApiConstraint.operator = "EQ"; + + return dataManagementApiConstraint; + } + + private ManagementOrConstraint mapConstraint(OrConstraint constraint) { + var orConstraint = new 
ManagementOrConstraint(); + orConstraint.constraints = + constraint.getConstraints().stream().map(this::mapConstraint).collect(Collectors.toList()); + return orConstraint; + } + + private ContractOffer mapOffer(ManagementApiContractOffer managementApiContractOffer) { + String id = managementApiContractOffer.id; + String assetId = + managementApiContractOffer.assetId != null + ? managementApiContractOffer.assetId + : (String) managementApiContractOffer.asset.getProperties().get(ManagementApiAsset.ID); + + Policy policy = mapPolicy(managementApiContractOffer.getPolicy()); + + return new ContractOffer(id, policy, assetId); + } + + private ManagementApiContractDefinition mapContractDefinition( + ContractDefinition contractDefinition) { + + ManagementApiContractDefinition apiObject = new ManagementApiContractDefinition(); + apiObject.id = contractDefinition.getId(); + apiObject.accessPolicyId = contractDefinition.getAcccessPolicyId(); + apiObject.contractPolicyId = contractDefinition.getContractPolicyId(); + apiObject.criteria = new ArrayList<>(); + + for (String assetId : contractDefinition.getAssetIds()) { + ManagementApiCriterion criterion = new ManagementApiCriterion(); + criterion.operandLeft = ManagementApiAsset.ID; + criterion.operator = "="; + criterion.operandRight = assetId; + + apiObject.criteria.add(criterion); + } + + return apiObject; + } + + private interface ManagementConstraint { + } + + + private static class ManagementApiNegotiationResponse { + private String id; + + + public String getId() { + return id; + } + } + + + private static class ManagementApiNegotiationPayload { + private final String connectorId = "foo"; + private String connectorAddress; + private ManagementApiOffer offer; + } + + private static class ManagementApiNegotiation { + private String id; + private String state; + private String contractAgreementId; + } + + private static class ManagementApiTransferProcess { + private String id; + private String state; + } + + + private static class ManagementApiOffer { + private String offerId; + private String assetId; + private ManagementApiPolicy policy; + } + + + private static class ManagementApiTransfer { + private final String connectorId = "foo"; + private String connectorAddress; + private String contractId; + private String assetId; + private String protocol; + private ManagementApiDataAddress dataDestination; + private boolean managedResources; + private ManagementApiTransferType transferType; + private ManagementApiProperties properties; + } + + + private static class ManagementApiTransferType { + private final String contentType = "application/octet-stream"; + private final boolean isFinite = true; + } + + + private static class ManagementApiTransferResponse { + private String id; + + + public String getId() { + return id; + } + } + + private static class ManagementApiAssetCreate { + private ManagementApiAsset asset; + private ManagementApiDataAddress dataAddress; + } + + private static class ManagementApiAsset { + public static final String ID = "asset:prop:id"; + public static final String DESCRIPTION = "asset:prop:description"; + + private Map properties; + + + public Map getProperties() { + return properties; + } + + public void setProperties(Map properties) { + this.properties = properties; + } + } + + + private static class ManagementApiDataAddress { + public static final String TYPE = "type"; + private Map properties; + + + public Map getProperties() { + return properties; + } + + public void setProperties(Map properties) { + this.properties = 
properties; + } + } + + + private static class ManagementApiProperties { + @SerializedName(value = "receiver.http.endpoint") + private final String receiverHttpEndpoint; + + private ManagementApiProperties(String receiverHttpEndpoint) { + this.receiverHttpEndpoint = receiverHttpEndpoint; + } + } + + + private static class ManagementApiPolicyDefinition { + private String id; + private ManagementApiPolicy policy; + } + + + private static class ManagementApiPolicy { + private String uid; + private List permissions = new ArrayList<>(); + } + + + private static class ManagementApiPermission { + private final String edctype = "dataspaceconnector:permission"; + private ManagementApiRuleAction action; + private String target; + private List constraints = new ArrayList<>(); + } + + + private static class ManagementAtomicConstraint implements ManagementConstraint { + private final String edctype = "AtomicConstraint"; + private ManagementApiLiteralExpression leftExpression; + private ManagementApiLiteralExpression rightExpression; + private String operator; + } + + + private static class ManagementOrConstraint implements ManagementConstraint { + private final String edctype = "dataspaceconnector:orconstraint"; + private List constraints; + } + + + private static class ManagementApiLiteralExpression { + private final String edctype = "dataspaceconnector:literalexpression"; + private String value; + } + + + private static class ManagementApiRuleAction { + private String type; + } + + + private static class ManagementApiContractDefinition { + private String id; + private String accessPolicyId; + private String contractPolicyId; + private List criteria = new ArrayList<>(); + } + + + private static class ManagementApiCriterion { + private Object operandLeft; + private String operator; + private Object operandRight; + } + + + private static class ManagementApiContractOffer { + private String id; + private ManagementApiPolicy policy; + private ManagementApiAsset asset; + private String assetId; + + + public ManagementApiPolicy getPolicy() { + return policy; + } + } + + + private static class ManagementApiContractOfferCatalog { + private final List contractOffers = new ArrayList<>(); + private String id; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Environment.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Environment.java index d1a199fd1..49a2353d1 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Environment.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/Environment.java @@ -20,6 +20,9 @@ package org.eclipse.tractusx.edc.tests; +import java.util.Locale; +import java.util.Objects; + import static org.eclipse.tractusx.edc.tests.Constants.AWS_ACCESS_KEY_ID; import static org.eclipse.tractusx.edc.tests.Constants.AWS_SECRET_ACCESS_KEY; import static org.eclipse.tractusx.edc.tests.Constants.BACKEND_SERVICE_BACKEND_API_URL; @@ -32,45 +35,165 @@ import static org.eclipse.tractusx.edc.tests.Constants.EDC_AWS_ENDPOINT_OVERRIDE; import static org.eclipse.tractusx.edc.tests.Constants.IDS_URL; -import java.util.Locale; -import lombok.AccessLevel; -import lombok.Builder; -import lombok.Getter; -import lombok.NonNull; -import lombok.ToString; - -@Builder(access = AccessLevel.PRIVATE) -@Getter -@ToString public class Environment { - @NonNull private final String dataManagementAuthKey; - @NonNull private final String dataManagementUrl; - @NonNull private final String idsUrl; - @NonNull private final String 
dataPlaneUrl; - @NonNull private final String backendServiceBackendApiUrl; - @NonNull private final String databaseUrl; - @NonNull private final String databaseUser; - @NonNull private final String databasePassword; - @NonNull private final String awsEndpointOverride; - @NonNull private final String awsAccessKey; - @NonNull private final String awsSecretAccessKey; - - public static Environment byName(String name) { - name = name.toUpperCase(Locale.ROOT); - - return Environment.builder() - .dataManagementUrl(System.getenv(String.join("_", name, DATA_MANAGEMENT_URL))) - .dataManagementAuthKey(System.getenv(String.join("_", name, DATA_MANAGEMENT_API_AUTH_KEY))) - .idsUrl(System.getenv(String.join("_", name, IDS_URL))) - .dataPlaneUrl(System.getenv(String.join("_", name, DATA_PLANE_URL))) - .backendServiceBackendApiUrl( - System.getenv(String.join("_", name, BACKEND_SERVICE_BACKEND_API_URL))) - .databaseUrl(System.getenv(String.join("_", name, DATABASE_URL))) - .databaseUser(System.getenv(String.join("_", name, DATABASE_USER))) - .databasePassword(System.getenv(String.join("_", name, DATABASE_PASSWORD))) - .awsEndpointOverride(System.getenv(EDC_AWS_ENDPOINT_OVERRIDE)) - .awsAccessKey(System.getenv(String.join("_", name, AWS_ACCESS_KEY_ID))) - .awsSecretAccessKey(System.getenv(String.join("_", name, AWS_SECRET_ACCESS_KEY))) - .build(); - } + + private String awsEndpointOverride; + private String awsAccessKey; + private String awsSecretAccessKey; + private String dataManagementAuthKey; + private String dataManagementUrl; + private String idsUrl; + private String dataPlaneUrl; + private String backendServiceBackendApiUrl; + private String databaseUrl; + private String databaseUser; + private String databasePassword; + + private Environment() { + + } + + + public static Environment byName(String name) { + var upperName = name.toUpperCase(Locale.ROOT); + + return Environment.Builder.newInstance() + .dataManagementUrl(System.getenv(String.join("_", upperName, DATA_MANAGEMENT_URL))) + .dataManagementAuthKey(System.getenv(String.join("_", upperName, DATA_MANAGEMENT_API_AUTH_KEY))) + .idsUrl(System.getenv(String.join("_", upperName, IDS_URL))) + .dataPlaneUrl(System.getenv(String.join("_", upperName, DATA_PLANE_URL))) + .backendServiceBackendApiUrl( + System.getenv(String.join("_", upperName, BACKEND_SERVICE_BACKEND_API_URL))) + .databaseUrl(System.getenv(String.join("_", upperName, DATABASE_URL))) + .databaseUser(System.getenv(String.join("_", upperName, DATABASE_USER))) + .databasePassword(System.getenv(String.join("_", upperName, DATABASE_PASSWORD))) + .awsEndpointOverride(System.getenv(EDC_AWS_ENDPOINT_OVERRIDE)) + .awsAccessKey(System.getenv(String.join("_", upperName, AWS_ACCESS_KEY_ID))) + .awsSecretAccessKey(System.getenv(String.join("_", upperName, AWS_SECRET_ACCESS_KEY))) + .build(); + } + + public String getIdsUrl() { + return idsUrl; + } + + public String getAwsEndpointOverride() { + return awsEndpointOverride; + } + + public String getAwsSecretAccessKey() { + return awsSecretAccessKey; + } + + public String getAwsAccessKey() { + return awsAccessKey; + } + + public String getBackendServiceBackendApiUrl() { + return backendServiceBackendApiUrl; + } + + public String getDatabasePassword() { + return databasePassword; + } + + public String getDatabaseUrl() { + return databaseUrl; + } + + public String getDatabaseUser() { + return databaseUser; + } + + public String getDataManagementAuthKey() { + return dataManagementAuthKey; + } + + public String getDataManagementUrl() { + return 
dataManagementUrl; + } + + private static class Builder { + + + private final Environment environment; + + private Builder() { + environment = new Environment(); + } + + public static Builder newInstance() { + return new Builder(); + } + + public Builder awsEndpointOverride(String val) { + environment.awsEndpointOverride = val; + return this; + } + + public Builder awsAccessKey(String val) { + environment.awsAccessKey = val; + return this; + } + + public Builder awsSecretAccessKey(String val) { + environment.awsSecretAccessKey = val; + return this; + } + + public Builder dataManagementAuthKey(String val) { + environment.dataManagementAuthKey = val; + return this; + } + + public Builder dataManagementUrl(String val) { + environment.dataManagementUrl = val; + return this; + } + + public Builder idsUrl(String val) { + environment.idsUrl = val; + return this; + } + + public Builder dataPlaneUrl(String val) { + environment.dataPlaneUrl = val; + return this; + } + + public Builder backendServiceBackendApiUrl(String val) { + environment.backendServiceBackendApiUrl = val; + return this; + } + + public Builder databaseUrl(String val) { + environment.databaseUrl = val; + return this; + } + + public Builder databaseUser(String val) { + environment.databaseUser = val; + return this; + } + + public Builder databasePassword(String val) { + environment.databasePassword = val; + return this; + } + + public Environment build() { + Objects.requireNonNull(environment.awsAccessKey); + Objects.requireNonNull(environment.awsEndpointOverride); + Objects.requireNonNull(environment.awsSecretAccessKey); + Objects.requireNonNull(environment.backendServiceBackendApiUrl); + Objects.requireNonNull(environment.databaseUrl); + Objects.requireNonNull(environment.databasePassword); + Objects.requireNonNull(environment.databaseUser); + Objects.requireNonNull(environment.dataManagementUrl); + Objects.requireNonNull(environment.dataPlaneUrl); + Objects.requireNonNull(environment.dataManagementAuthKey); + Objects.requireNonNull(environment.idsUrl); + return environment; + } + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java index 24f68e3a1..39a743ab5 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/HttpProxyTransferSteps.java @@ -4,12 +4,13 @@ import io.cucumber.java.en.Given; import io.cucumber.java.en.Then; import io.cucumber.java.en.When; -import lombok.extern.slf4j.Slf4j; import org.eclipse.tractusx.edc.tests.data.Asset; import org.eclipse.tractusx.edc.tests.data.DataAddress; import org.eclipse.tractusx.edc.tests.data.HttpProxySinkDataAddress; import org.eclipse.tractusx.edc.tests.data.HttpProxySourceDataAddress; import org.junit.jupiter.api.Assertions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.time.Duration; @@ -20,9 +21,10 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; -@Slf4j public class HttpProxyTransferSteps { + private static final Logger log = LoggerFactory.getLogger(HttpProxyTransferSteps.class); + private static final String ID = "id"; private static final String DESCRIPTION = "description"; private static final String BASE_URL = "baseUrl"; diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/NegotiationSteps.java 
b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/NegotiationSteps.java index 5872d2dfe..7a713ff1b 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/NegotiationSteps.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/NegotiationSteps.java @@ -23,11 +23,6 @@ import io.cucumber.datatable.DataTable; import io.cucumber.java.en.Then; import io.cucumber.java.en.When; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import lombok.extern.slf4j.Slf4j; import org.eclipse.tractusx.edc.tests.data.ContractNegotiation; import org.eclipse.tractusx.edc.tests.data.ContractNegotiationState; import org.eclipse.tractusx.edc.tests.data.Negotiation; @@ -35,60 +30,66 @@ import org.eclipse.tractusx.edc.tests.data.Policy; import org.junit.jupiter.api.Assertions; -@Slf4j +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + + public class NegotiationSteps { - private static final String DEFINITION_ID = "definition id"; - private static final String ASSET_ID = "asset id"; - private ContractNegotiation lastInitiatedNegotiation; + private static final String DEFINITION_ID = "definition id"; + private static final String ASSET_ID = "asset id"; - @When("'{connector}' sends '{connector}' an offer without constraints") - public void sendAnOfferWithoutConstraints(Connector sender, Connector receiver, DataTable table) - throws IOException { + private ContractNegotiation lastInitiatedNegotiation; - final DataManagementAPI dataManagementAPI = sender.getDataManagementAPI(); - final String receiverIdsUrl = receiver.getEnvironment().getIdsUrl() + "/data"; + @When("'{connector}' sends '{connector}' an offer without constraints") + public void sendAnOfferWithoutConstraints(Connector sender, Connector receiver, DataTable table) + throws IOException { - for (Map map : table.asMaps()) { - final String definitionId = map.get(DEFINITION_ID); - final String assetId = map.get(ASSET_ID); + DataManagementAPI dataManagementAPI = sender.getDataManagementAPI(); + String receiverIdsUrl = receiver.getEnvironment().getIdsUrl() + "/data"; - final Permission permission = new Permission("USE", null, new ArrayList<>()); - final Policy policy = new Policy("foo", List.of(permission)); + for (Map map : table.asMaps()) { + String definitionId = map.get(DEFINITION_ID); + String assetId = map.get(ASSET_ID); - final Negotiation negotiation = - dataManagementAPI.initiateNegotiation(receiverIdsUrl, definitionId, assetId, policy); + Permission permission = new Permission("USE", new ArrayList<>(), null); + Policy policy = new Policy("foo", List.of(permission)); - // wait for negotiation to complete - negotiation.waitUntilComplete(dataManagementAPI); + Negotiation negotiation = + dataManagementAPI.initiateNegotiation(receiverIdsUrl, definitionId, assetId, policy); - lastInitiatedNegotiation = dataManagementAPI.getNegotiation(negotiation.getId()); + // wait for negotiation to complete + negotiation.waitUntilComplete(dataManagementAPI); + + lastInitiatedNegotiation = dataManagementAPI.getNegotiation(negotiation.getId()); + } } - } - @When("'{connector}' successfully negotiation a contract agreement with '{connector}'") - public void sokratesSuccessfullyNegotiationAContractAgreementPlatoFor( - Connector consumer, Connector provider, DataTable table) throws IOException { - final DataManagementAPI api = consumer.getDataManagementAPI(); + @When("'{connector}' successfully negotiation a 
contract agreement with '{connector}'") + public void sokratesSuccessfullyNegotiationAContractAgreementPlatoFor( + Connector consumer, Connector provider, DataTable table) throws IOException { + DataManagementAPI api = consumer.getDataManagementAPI(); - final Map map = table.asMap(); - final String definitionId = map.get(DEFINITION_ID); - final String assetId = map.get(ASSET_ID); + Map map = table.asMap(); + String definitionId = map.get(DEFINITION_ID); + String assetId = map.get(ASSET_ID); - // as default always the "allow all" policy is used. So we can assume this here, too. - final Permission permission = new Permission("USE", null, new ArrayList<>()); - final Policy policy = new Policy("policy-id", List.of(permission)); + // as default always the "allow all" policy is used. So we can assume this here, too. + Permission permission = new Permission("USE", new ArrayList<>(), null); + Policy policy = new Policy("policy-id", List.of(permission)); - final String receiverUrl = provider.getEnvironment().getIdsUrl(); - final Negotiation negotiation = - api.initiateNegotiation(receiverUrl, assetId, definitionId, policy); + String receiverUrl = provider.getEnvironment().getIdsUrl(); + Negotiation negotiation = + api.initiateNegotiation(receiverUrl, assetId, definitionId, policy); - negotiation.waitUntilComplete(api); - } + negotiation.waitUntilComplete(api); + } - @Then("the negotiation is declined") - public void assertLastNegotiationDeclined() { - Assertions.assertEquals(ContractNegotiationState.DECLINED, lastInitiatedNegotiation.getState()); - } + @Then("the negotiation is declined") + public void assertLastNegotiationDeclined() { + Assertions.assertEquals(ContractNegotiationState.DECLINED, lastInitiatedNegotiation.getState()); + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/PolicyStepDefs.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/PolicyStepDefs.java index a7ede22be..d8bac4466 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/PolicyStepDefs.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/PolicyStepDefs.java @@ -20,45 +20,54 @@ package org.eclipse.tractusx.edc.tests; -import static java.util.Arrays.stream; -import static java.util.stream.Collectors.toList; - import io.cucumber.datatable.DataTable; import io.cucumber.java.en.Given; +import org.eclipse.tractusx.edc.tests.data.BusinessPartnerNumberConstraint; +import org.eclipse.tractusx.edc.tests.data.Constraint; +import org.eclipse.tractusx.edc.tests.data.OrConstraint; +import org.eclipse.tractusx.edc.tests.data.PayMeConstraint; +import org.eclipse.tractusx.edc.tests.data.Permission; +import org.eclipse.tractusx.edc.tests.data.Policy; + import java.util.ArrayList; import java.util.List; import java.util.Map; -import org.eclipse.tractusx.edc.tests.data.*; + +import static java.util.Arrays.stream; +import static java.util.stream.Collectors.toList; public class PolicyStepDefs { - @Given("'{connector}' has the following policies") - public void hasPolicies(Connector connector, DataTable table) throws Exception { - var api = connector.getDataManagementAPI(); - var policies = table.asMaps().stream().map(this::parseRow).collect(toList()); + @Given("'{connector}' has the following policies") + public void hasPolicies(Connector connector, DataTable table) throws Exception { + var api = connector.getDataManagementAPI(); + var policies = table.asMaps().stream().map(this::parseRow).collect(toList()); - for (var policy : policies) 
api.createPolicy(policy); - } + for (var policy : policies) { + api.createPolicy(policy); + } + } - private Policy parseRow(Map row) { - var id = row.get("id"); - var action = row.get("action"); - var constraints = new ArrayList(); + private Policy parseRow(Map row) { + var id = row.get("id"); + var action = row.get("action"); + var constraints = new ArrayList(); - var businessPartnerNumber = row.get("businessPartnerNumber"); - if (businessPartnerNumber != null && !businessPartnerNumber.isBlank()) { - var bpnConstraints = - stream(businessPartnerNumber.split(",")) - .map(BusinessPartnerNumberConstraint::new) - .collect(toList()); - constraints.add(new OrConstraint(bpnConstraints)); - } + var businessPartnerNumber = row.get("businessPartnerNumber"); + if (businessPartnerNumber != null && !businessPartnerNumber.isBlank()) { + var bpnConstraints = + stream(businessPartnerNumber.split(",")) + .map(BusinessPartnerNumberConstraint::new) + .collect(toList()); + constraints.add(new OrConstraint(bpnConstraints)); + } - var payMe = row.get("payMe"); - if (payMe != null && !payMe.isBlank()) - constraints.add(new PayMeConstraint(Double.parseDouble(payMe))); + var payMe = row.get("payMe"); + if (payMe != null && !payMe.isBlank()) { + constraints.add(new PayMeConstraint(Double.parseDouble(payMe))); + } - var permission = new Permission(action, null, constraints); - return new Policy(id, List.of(permission)); - } + var permission = new Permission(action, constraints, null); + return new Policy(id, List.of(permission)); + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/S3FileTransferStepsDefs.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/S3FileTransferStepsDefs.java index c4bc85a27..05f5f1242 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/S3FileTransferStepsDefs.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/S3FileTransferStepsDefs.java @@ -19,21 +19,10 @@ package org.eclipse.tractusx.edc.tests; -import static org.awaitility.Awaitility.await; -import static org.junit.jupiter.api.Assertions.fail; - import io.cucumber.datatable.DataTable; import io.cucumber.java.AfterAll; import io.cucumber.java.en.Given; import io.cucumber.java.en.Then; -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; import org.eclipse.tractusx.edc.tests.data.Asset; import org.eclipse.tractusx.edc.tests.data.DataAddress; import org.eclipse.tractusx.edc.tests.data.Negotiation; @@ -45,135 +34,145 @@ import org.eclipse.tractusx.edc.tests.util.Timeouts; import org.junit.jupiter.api.Assertions; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.awaitility.Awaitility.await; +import static org.junit.jupiter.api.Assertions.fail; + public class S3FileTransferStepsDefs { - @Given("'{connector}' has an empty storage bucket called {string}") - public void hasEmptyStorageBucket(Connector connector, String bucketName) { - S3Client s3 = connector.getS3Client(); + private static final String COMPLETION_MARKER = ".complete"; + private File fileToTransfer; + private String assetId; + private String agreementId; - s3.createBucket(bucketName); + @AfterAll + public static void bucketsCleanup() { + S3Client s3 = new 
S3Client(Environment.byName("Sokrates")); + s3.deleteAllBuckets(); + } - Assertions.assertTrue(s3.listBuckets().contains(bucketName)); - Assertions.assertEquals(0, s3.listBucketContent(bucketName).size()); - } + @Given("'{connector}' has an empty storage bucket called {string}") + public void hasEmptyStorageBucket(Connector connector, String bucketName) { + S3Client s3 = connector.getS3Client(); - private File fileToTransfer; + s3.createBucket(bucketName); - @Given("'{connector}' has a storage bucket called {string} with the file called {string}") - public void hasAStorageBucketWithTheFile(Connector connector, String bucketName, String fileName) - throws IOException { + Assertions.assertTrue(s3.listBuckets().contains(bucketName)); + Assertions.assertEquals(0, s3.listBucketContent(bucketName).size()); + } - S3Client s3 = connector.getS3Client(); - s3.createBucket(bucketName); - fileToTransfer = s3.uploadFile(bucketName, fileName); + @Given("'{connector}' has a storage bucket called {string} with the file called {string}") + public void hasAStorageBucketWithTheFile(Connector connector, String bucketName, String fileName) + throws IOException { - Set bucketContent = s3.listBucketContent(bucketName); + S3Client s3 = connector.getS3Client(); + s3.createBucket(bucketName); + fileToTransfer = s3.uploadFile(bucketName, fileName); - Assertions.assertEquals(1, bucketContent.size()); - Assertions.assertTrue(bucketContent.contains(fileName)); - } + Set bucketContent = s3.listBucketContent(bucketName); - @Given("'{connector}' has the following S3 assets") - public void hasAssets(Connector connector, DataTable table) { - final DataManagementAPI api = connector.getDataManagementAPI(); + Assertions.assertEquals(1, bucketContent.size()); + Assertions.assertTrue(bucketContent.contains(fileName)); + } - parseDataTable(table) - .forEach( - asset -> { - try { - api.createAsset(asset); - } catch (IOException e) { - fail(e.getMessage()); - } - }); - } + @Given("'{connector}' has the following S3 assets") + public void hasAssets(Connector connector, DataTable table) { + DataManagementAPI api = connector.getDataManagementAPI(); + + parseDataTable(table) + .forEach( + asset -> { + try { + api.createAsset(asset); + } catch (IOException e) { + fail(e.getMessage()); + } + }); + } - private String assetId; - private String agreementId; + @Then("'{connector}' negotiates the contract successfully with '{connector}'") + public void negotiateContract(Connector sender, Connector receiver, DataTable dataTable) + throws IOException { - @Then("'{connector}' negotiates the contract successfully with '{connector}'") - public void negotiateContract(Connector sender, Connector receiver, DataTable dataTable) - throws IOException { + String definitionId = dataTable.asMaps().get(0).get("contract offer id"); + assetId = dataTable.asMaps().get(0).get("asset id"); + String policyId = dataTable.asMaps().get(0).get("policy id"); - String definitionId = dataTable.asMaps().get(0).get("contract offer id"); - assetId = dataTable.asMaps().get(0).get("asset id"); - String policyId = dataTable.asMaps().get(0).get("policy id"); + Policy policy = + new Policy(policyId, List.of(new Permission("USE", new ArrayList<>(), null))); - final Policy policy = - new Policy(policyId, List.of(new Permission("USE", null, new ArrayList<>()))); + DataManagementAPI dataManagementAPI = sender.getDataManagementAPI(); + String receiverIdsUrl = receiver.getEnvironment().getIdsUrl() + "/data"; - final DataManagementAPI dataManagementAPI = sender.getDataManagementAPI(); 
- final String receiverIdsUrl = receiver.getEnvironment().getIdsUrl() + "/data"; + Negotiation negotiation = + dataManagementAPI.initiateNegotiation(receiverIdsUrl, definitionId, assetId, policy); + negotiation.waitUntilComplete(dataManagementAPI); - final Negotiation negotiation = - dataManagementAPI.initiateNegotiation(receiverIdsUrl, definitionId, assetId, policy); - negotiation.waitUntilComplete(dataManagementAPI); + agreementId = dataManagementAPI.getNegotiation(negotiation.getId()).getAgreementId(); + } - agreementId = dataManagementAPI.getNegotiation(negotiation.getId()).getAgreementId(); - } + @Then("'{connector}' initiate S3 transfer process from '{connector}'") + public void initiateTransferProcess(Connector sender, Connector receiver, DataTable dataTable) + throws IOException { + DataAddress dataAddress = createDataAddress(dataTable.asMaps().get(0)); - @Then("'{connector}' initiate S3 transfer process from '{connector}'") - public void initiateTransferProcess(Connector sender, Connector receiver, DataTable dataTable) - throws IOException { - DataAddress dataAddress = createDataAddress(dataTable.asMaps().get(0)); + DataManagementAPI dataManagementAPI = sender.getDataManagementAPI(); + String receiverIdsUrl = receiver.getEnvironment().getIdsUrl() + "/data"; - final DataManagementAPI dataManagementAPI = sender.getDataManagementAPI(); - final String receiverIdsUrl = receiver.getEnvironment().getIdsUrl() + "/data"; + Transfer transferProcess = + dataManagementAPI.initiateTransferProcess( + receiverIdsUrl, agreementId, assetId, dataAddress); + transferProcess.waitUntilComplete(dataManagementAPI); - final Transfer transferProcess = - dataManagementAPI.initiateTransferProcess( - receiverIdsUrl, agreementId, assetId, dataAddress); - transferProcess.waitUntilComplete(dataManagementAPI); + Assertions.assertNotNull(transferProcess.getId()); + } - Assertions.assertNotNull(transferProcess.getId()); - } + @Then("'{connector}' has a storage bucket called {string} with transferred file called {string}") + public void consumerHasAStorageBucketWithFileTransferred( + Connector connector, String bucketName, String fileName) throws IOException { + S3Client s3 = connector.getS3Client(); + await() + .pollDelay(Duration.ofMillis(500)) + .atMost(Timeouts.FILE_TRANSFER) + .until(() -> isFilePresent(s3, bucketName, fileName + COMPLETION_MARKER)); + + Set bucketContent = s3.listBucketContent(bucketName); + + Assertions.assertEquals(2, bucketContent.size()); + Assertions.assertTrue(bucketContent.contains(fileName)); + Assertions.assertArrayEquals( + Files.readAllBytes(fileToTransfer.toPath()), + Files.readAllBytes(s3.downloadFile(bucketName, fileName).toPath())); + } - private static final String COMPLETION_MARKER = ".complete"; + private boolean isFilePresent(S3Client s3, String bucketName, String fileName) { + return s3.listBucketContent(bucketName).contains(fileName); + } - @Then("'{connector}' has a storage bucket called {string} with transferred file called {string}") - public void consumerHasAStorageBucketWithFileTransferred( - Connector connector, String bucketName, String fileName) throws IOException { - S3Client s3 = connector.getS3Client(); - await() - .pollDelay(Duration.ofMillis(500)) - .atMost(Timeouts.FILE_TRANSFER) - .until(() -> isFilePresent(s3, bucketName, fileName + COMPLETION_MARKER)); + private List parseDataTable(DataTable table) { + List assetsWithDataAddresses = new ArrayList<>(); - Set bucketContent = s3.listBucketContent(bucketName); + for (Map map : table.asMaps()) { + String id = 
map.get("id"); + String description = map.get("description"); + assetsWithDataAddresses.add(new Asset(id, description, createDataAddress(map))); + } - Assertions.assertEquals(2, bucketContent.size()); - Assertions.assertTrue(bucketContent.contains(fileName)); - Assertions.assertArrayEquals( - Files.readAllBytes(fileToTransfer.toPath()), - Files.readAllBytes(s3.downloadFile(bucketName, fileName).toPath())); - } - - private boolean isFilePresent(S3Client s3, String bucketName, String fileName) { - return s3.listBucketContent(bucketName).contains(fileName); - } - - private List parseDataTable(DataTable table) { - final List assetsWithDataAddresses = new ArrayList<>(); - - for (Map map : table.asMaps()) { - String id = map.get("id"); - String description = map.get("description"); - assetsWithDataAddresses.add(new Asset(id, description, createDataAddress(map))); + return assetsWithDataAddresses; } - return assetsWithDataAddresses; - } - - private DataAddress createDataAddress(Map map) { - final String bucketName = map.get("data_address_s3_bucket_name"); - final String region = map.get("data_address_s3_region"); - final String keyName = map.get("data_address_s3_key_name"); - return new S3DataAddress(bucketName, region, keyName); - } - - @AfterAll - public static void bucketsCleanup() { - S3Client s3 = new S3Client(Environment.byName("Sokrates")); - s3.deleteAllBuckets(); - } + private DataAddress createDataAddress(Map map) { + String bucketName = map.get("data_address_s3_bucket_name"); + String region = map.get("data_address_s3_region"); + String keyName = map.get("data_address_s3_key_name"); + return new S3DataAddress(bucketName, region, keyName); + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Asset.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Asset.java index acccef8d8..47142d0e9 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Asset.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Asset.java @@ -19,14 +19,28 @@ */ package org.eclipse.tractusx.edc.tests.data; -import lombok.NonNull; -import lombok.Value; +import java.util.Objects; -@Value public class Asset { - @NonNull String Id; + private final String id; + private final String description; + private final DataAddress dataAddress; - @NonNull String description; + public Asset(String id, String description, DataAddress dataAddress) { + this.id = Objects.requireNonNull(id); + this.description = Objects.requireNonNull(description); + this.dataAddress = Objects.requireNonNull(dataAddress); + } - @NonNull DataAddress dataAddress; + public String getId() { + return id; + } + + public String getDescription() { + return description; + } + + public DataAddress getDataAddress() { + return dataAddress; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/BusinessPartnerNumberConstraint.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/BusinessPartnerNumberConstraint.java index b9c64d158..f276b3d34 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/BusinessPartnerNumberConstraint.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/BusinessPartnerNumberConstraint.java @@ -1,10 +1,16 @@ package org.eclipse.tractusx.edc.tests.data; -import lombok.NonNull; -import lombok.Value; +import java.util.Objects; -@Value public class BusinessPartnerNumberConstraint implements Constraint { - @NonNull String 
businessPartnerNumber; + private final String businessPartnerNumber; + + public BusinessPartnerNumberConstraint(String businessPartnerNumber) { + this.businessPartnerNumber = Objects.requireNonNull(businessPartnerNumber); + } + + public String getBusinessPartnerNumber() { + return businessPartnerNumber; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractDefinition.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractDefinition.java index a9fca04a1..c90fe1788 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractDefinition.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractDefinition.java @@ -20,17 +20,42 @@ package org.eclipse.tractusx.edc.tests.data; import java.util.List; -import lombok.NonNull; -import lombok.Value; +import java.util.Objects; + -@Value public class ContractDefinition { - @NonNull String id; + private final String id; + + private final String contractPolicyId; + private final String acccessPolicyId; + + private final List assetIds; + private final Long validity; + + public ContractDefinition(String id, String contractPolicyId, String acccessPolicyId, List assetIds, Long validity) { + this.id = Objects.requireNonNull(id); + this.contractPolicyId = Objects.requireNonNull(contractPolicyId); + this.acccessPolicyId = Objects.requireNonNull(acccessPolicyId); + this.assetIds = assetIds; + this.validity = validity; + } + + public String getId() { + return id; + } + + public String getContractPolicyId() { + return contractPolicyId; + } + + public String getAcccessPolicyId() { + return acccessPolicyId; + } + + public List getAssetIds() { + return assetIds; + } - @NonNull String contractPolicyId; - @NonNull String acccessPolicyId; - List assetIds; - Long validity; } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractNegotiation.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractNegotiation.java index 109249744..67f9dafb0 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractNegotiation.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractNegotiation.java @@ -20,12 +20,29 @@ package org.eclipse.tractusx.edc.tests.data; -import lombok.NonNull; -import lombok.Value; +import java.util.Objects; -@Value public class ContractNegotiation { - @NonNull String id; - String agreementId; - @NonNull ContractNegotiationState state; + private final String id; + private final ContractNegotiationState state; + private final String agreementId; + + + public ContractNegotiation(String id, ContractNegotiationState state, String agreementId) { + this.id = Objects.requireNonNull(id); + this.state = Objects.requireNonNull(state); + this.agreementId = agreementId; + } + + public String getId() { + return id; + } + + public ContractNegotiationState getState() { + return state; + } + + public String getAgreementId() { + return agreementId; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractOffer.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractOffer.java index 75dfd8d27..7ac87cb9a 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractOffer.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/ContractOffer.java @@ -19,12 +19,28 @@ */ package org.eclipse.tractusx.edc.tests.data; 
-import lombok.NonNull; -import lombok.Value; +import java.util.Objects; -@Value public class ContractOffer { - @NonNull String id; - Policy policy; - String assetId; + private final String id; + private final Policy policy; + private final String assetId; + + public ContractOffer(String id, Policy policy, String assetId) { + this.id = Objects.requireNonNull(id); + this.policy = policy; + this.assetId = assetId; + } + + public String getId() { + return id; + } + + public Policy getPolicy() { + return policy; + } + + public String getAssetId() { + return assetId; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/HttpProxySourceDataAddress.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/HttpProxySourceDataAddress.java index 4a5946cc9..97f300611 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/HttpProxySourceDataAddress.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/HttpProxySourceDataAddress.java @@ -1,18 +1,52 @@ package org.eclipse.tractusx.edc.tests.data; -import lombok.NonNull; -import lombok.Value; +import java.util.Objects; + -@Value public class HttpProxySourceDataAddress implements DataAddress { - @NonNull String baseUrl; - Oauth2Provision oauth2Provision; - - @Value - public static class Oauth2Provision { - @NonNull String tokenUrl; - @NonNull String clientId; - @NonNull String clientSecret; - String scope; - } + private final String baseUrl; + private final Oauth2Provision oauth2Provision; + + public HttpProxySourceDataAddress(String baseUrl, Oauth2Provision oauth2Provision) { + this.baseUrl = Objects.requireNonNull(baseUrl); + this.oauth2Provision = oauth2Provision; + } + + public String getBaseUrl() { + return baseUrl; + } + + public Oauth2Provision getOauth2Provision() { + return oauth2Provision; + } + + public static class Oauth2Provision { + private final String tokenUrl; + private final String clientId; + private final String clientSecret; + private final String scope; + + public Oauth2Provision(String tokenUrl, String clientId, String clientSecret, String scope) { + this.tokenUrl = Objects.requireNonNull(tokenUrl); + this.clientId = Objects.requireNonNull(clientId); + this.clientSecret = Objects.requireNonNull(clientSecret); + this.scope = scope; + } + + public String getTokenUrl() { + return tokenUrl; + } + + public String getScope() { + return scope; + } + + public String getClientId() { + return clientId; + } + + public String getClientSecret() { + return clientSecret; + } + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Negotiation.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Negotiation.java index 40845e4c0..ddc715c9b 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Negotiation.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Negotiation.java @@ -1,34 +1,43 @@ package org.eclipse.tractusx.edc.tests.data; -import static org.awaitility.Awaitility.await; +import org.eclipse.tractusx.edc.tests.DataManagementAPI; +import org.eclipse.tractusx.edc.tests.util.Timeouts; -import groovyjarjarantlr4.v4.runtime.misc.NotNull; import java.io.IOException; import java.time.Duration; +import java.util.Objects; import java.util.stream.Stream; -import lombok.Value; -import org.eclipse.tractusx.edc.tests.DataManagementAPI; -import org.eclipse.tractusx.edc.tests.util.Timeouts; -@Value +import static 
org.awaitility.Awaitility.await; + + public class Negotiation { - @NotNull String id; - - public void waitUntilComplete(DataManagementAPI dataManagementAPI) { - await() - .pollDelay(Duration.ofMillis(2000)) - .atMost(Timeouts.CONTRACT_NEGOTIATION) - .until(() -> isComplete(dataManagementAPI)); - } - - public boolean isComplete(DataManagementAPI dataManagementAPI) throws IOException { - var negotiation = dataManagementAPI.getNegotiation(id); - return negotiation != null - && Stream.of( - ContractNegotiationState.ERROR, - ContractNegotiationState.CONFIRMED, - ContractNegotiationState.DECLINED) - .anyMatch((l) -> l.equals(negotiation.getState())); - } + + private final String id; + + public Negotiation(String id) { + this.id = Objects.requireNonNull(id); + } + + public void waitUntilComplete(DataManagementAPI dataManagementAPI) { + await() + .pollDelay(Duration.ofMillis(2000)) + .atMost(Timeouts.CONTRACT_NEGOTIATION) + .until(() -> isComplete(dataManagementAPI)); + } + + public boolean isComplete(DataManagementAPI dataManagementAPI) throws IOException { + var negotiation = dataManagementAPI.getNegotiation(id); + return negotiation != null + && Stream.of( + ContractNegotiationState.ERROR, + ContractNegotiationState.CONFIRMED, + ContractNegotiationState.DECLINED) + .anyMatch((l) -> l.equals(negotiation.getState())); + } + + public String getId() { + return id; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/OrConstraint.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/OrConstraint.java index 88bf3438d..66ffd4396 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/OrConstraint.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/OrConstraint.java @@ -1,11 +1,18 @@ package org.eclipse.tractusx.edc.tests.data; import java.util.List; -import lombok.NonNull; -import lombok.Value; +import java.util.Objects; + -@Value public class OrConstraint implements Constraint { - @NonNull List constraints; + private final List constraints; + + public OrConstraint(List constraints) { + this.constraints = Objects.requireNonNull(constraints); + } + + public List getConstraints() { + return constraints; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/PayMeConstraint.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/PayMeConstraint.java index 4376346b2..1412b8d76 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/PayMeConstraint.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/PayMeConstraint.java @@ -20,13 +20,19 @@ package org.eclipse.tractusx.edc.tests.data; -import lombok.Value; - /** * The PayMe constraint should be used when no constraint validation/enforcement in the EDC is * intended. 
 */
-@Value
+
 public class PayMeConstraint implements Constraint {
-  double amount;
+    private final double amount;
+
+    public PayMeConstraint(double amount) {
+        this.amount = amount;
+    }
+
+    public double getAmount() {
+        return amount;
+    }
 }
diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Permission.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Permission.java
index 128e6686f..e90cbfaf0 100644
--- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Permission.java
+++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Permission.java
@@ -20,13 +20,30 @@ package org.eclipse.tractusx.edc.tests.data;
 import java.util.List;
-import lombok.NonNull;
-import lombok.Value;
+import java.util.Objects;
+
-@Value
 public class Permission {
-  @NonNull String action;
-  String target;
+    private final String action;
+    private final List constraints;
+    private final String target;
+
+
+    public Permission(String action, List constraints, String target) {
+        this.action = Objects.requireNonNull(action);
+        this.constraints = Objects.requireNonNull(constraints);
+        this.target = target;
+    }
+
+    public String getAction() {
+        return action;
+    }
+
+    public List getConstraints() {
+        return constraints;
+    }
-  @NonNull List constraints;
+    public String getTarget() {
+        return target;
+    }
 }
diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Policy.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Policy.java
index 27ea65d7a..c58c79206 100644
--- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Policy.java
+++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Policy.java
@@ -21,11 +21,23 @@ package org.eclipse.tractusx.edc.tests.data;
 import java.util.List;
-import lombok.NonNull;
-import lombok.Value;
+import java.util.Objects;
+
-@Value
 public class Policy {
-  String id;
-  @NonNull List Permission;
+    private final String id;
+    private final List Permission;
+
+    public Policy(String id, List permission) {
+        this.id = id;
+        Permission = Objects.requireNonNull(permission);
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public List getPermission() {
+        return Permission;
+    }
 }
diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/S3DataAddress.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/S3DataAddress.java
index a59843447..93fe5ce8b 100644
--- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/S3DataAddress.java
+++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/S3DataAddress.java
@@ -1,12 +1,28 @@ package org.eclipse.tractusx.edc.tests.data;
-import lombok.NonNull;
-import lombok.Value;
+import java.util.Objects;
-@Value
 public class S3DataAddress implements DataAddress {
-  @NonNull String bucketName;
-  @NonNull String region;
-  @NonNull String keyName;
+    private final String bucketName;
+    private final String region;
+    private final String keyName;
+
+    public S3DataAddress(String bucketName, String region, String keyName) {
+        this.bucketName = Objects.requireNonNull(bucketName);
+        this.region = Objects.requireNonNull(region);
+        this.keyName = Objects.requireNonNull(keyName);
+    }
+
+    public String getBucketName() {
+        return bucketName;
+    }
+
+    public String getRegion() {
+        return region;
+    }
+
+    public String getKeyName() {
+        return keyName;
+    }
 }
diff --git
a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Transfer.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Transfer.java index ebd722d07..ea38442a3 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Transfer.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Transfer.java @@ -1,31 +1,41 @@ package org.eclipse.tractusx.edc.tests.data; -import static org.awaitility.Awaitility.await; +import org.eclipse.tractusx.edc.tests.DataManagementAPI; +import org.eclipse.tractusx.edc.tests.util.Timeouts; import java.io.IOException; import java.time.Duration; -import lombok.Value; -import org.eclipse.tractusx.edc.tests.DataManagementAPI; -import org.eclipse.tractusx.edc.tests.util.Timeouts; -@Value +import static org.awaitility.Awaitility.await; + + public class Transfer { - String id; + private final String id; + + public Transfer(String id) { + this.id = id; + } + + public void waitUntilComplete(DataManagementAPI dataManagementAPI) { + await() + .pollDelay(Duration.ofMillis(2000)) + .atMost(Timeouts.FILE_TRANSFER) + .until(() -> isComplete(dataManagementAPI)); + } - public void waitUntilComplete(DataManagementAPI dataManagementAPI) { - await() - .pollDelay(Duration.ofMillis(2000)) - .atMost(Timeouts.FILE_TRANSFER) - .until(() -> isComplete(dataManagementAPI)); - } + public boolean isComplete(DataManagementAPI dataManagementAPI) throws IOException { + var transferProcess = dataManagementAPI.getTransferProcess(id); + if (transferProcess == null) { + return false; + } - public boolean isComplete(DataManagementAPI dataManagementAPI) throws IOException { - var transferProcess = dataManagementAPI.getTransferProcess(id); - if (transferProcess == null) return false; + var state = transferProcess.getState(); - var state = transferProcess.getState(); + return state == TransferProcessState.COMPLETED || state == TransferProcessState.ERROR; + } - return state == TransferProcessState.COMPLETED || state == TransferProcessState.ERROR; - } + public String getId() { + return id; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/TransferProcess.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/TransferProcess.java index 28be5157a..1c00e86c3 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/TransferProcess.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/TransferProcess.java @@ -19,11 +19,22 @@ package org.eclipse.tractusx.edc.tests.data; -import lombok.NonNull; -import lombok.Value; +import java.util.Objects; -@Value public class TransferProcess { - @NonNull String id; - @NonNull TransferProcessState state; + private final String id; + private final TransferProcessState state; + + public TransferProcess(String id, TransferProcessState state) { + this.id = Objects.requireNonNull(id); + this.state = Objects.requireNonNull(state); + } + + public String getId() { + return id; + } + + public TransferProcessState getState() { + return state; + } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/util/DatabaseCleaner.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/util/DatabaseCleaner.java index 53aececa9..e03f38e98 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/util/DatabaseCleaner.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/util/DatabaseCleaner.java @@ -24,28 +24,33 @@ import 
java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; -import lombok.RequiredArgsConstructor; -@RequiredArgsConstructor + public class DatabaseCleaner { - private static final String SQL = - "DELETE FROM edc_contract_negotiation;\n" - + "DELETE FROM edc_contract_agreement;\n" - + "DELETE FROM edc_transfer_process;\n" - + "DELETE FROM edc_contract_definitions;\n" - + "DELETE FROM edc_policydefinitions;\n" - + "DELETE FROM edc_asset;\n" - + "DELETE FROM edc_lease;"; - - private final String url; - private final String user; - private final String password; - - public void run() throws SQLException { - try (Connection con = DriverManager.getConnection(url, user, password)) { - Statement st = con.createStatement(); - st.executeUpdate(SQL); + private static final String SQL = + "DELETE FROM edc_contract_negotiation;\n" + + "DELETE FROM edc_contract_agreement;\n" + + "DELETE FROM edc_transfer_process;\n" + + "DELETE FROM edc_contract_definitions;\n" + + "DELETE FROM edc_policydefinitions;\n" + + "DELETE FROM edc_asset;\n" + + "DELETE FROM edc_lease;"; + + private final String url; + private final String user; + private final String password; + + public DatabaseCleaner(String url, String user, String password) { + this.url = url; + this.user = user; + this.password = password; + } + + public void run() throws SQLException { + try (Connection con = DriverManager.getConnection(url, user, password)) { + Statement st = con.createStatement(); + st.executeUpdate(SQL); + } } - } } diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/util/S3Client.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/util/S3Client.java index c2779ce0d..63ab60324 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/util/S3Client.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/util/S3Client.java @@ -19,16 +19,9 @@ package org.eclipse.tractusx.edc.tests.util; -import java.io.File; -import java.io.IOException; -import java.net.URI; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; -import lombok.extern.slf4j.Slf4j; import org.eclipse.tractusx.edc.tests.Environment; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.core.ResponseBytes; @@ -45,80 +38,89 @@ import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Object; -@Slf4j +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + + public class S3Client { + private static final Logger log = LoggerFactory.getLogger(S3Client.class); + private final software.amazon.awssdk.services.s3.S3Client s3; + + public S3Client(Environment environment) { + + s3 = + software.amazon.awssdk.services.s3.S3Client.builder() + .region(Region.US_EAST_1) + .forcePathStyle(true) + .endpointOverride(URI.create(environment.getAwsEndpointOverride())) + .credentialsProvider( + StaticCredentialsProvider.create( + AwsBasicCredentials.create( + environment.getAwsAccessKey(), environment.getAwsSecretAccessKey()))) + .build(); + } + + public void createBucket(String bucketName) { 
+ try { + s3.createBucket(CreateBucketRequest.builder().bucket(bucketName).build()); + } catch (BucketAlreadyOwnedByYouException e) { + log.info("'{}' bucket already owned - skipped bucket creation", bucketName); + } + } + + public File uploadFile(String bucketName, String fileName) throws IOException { + File tempFile = File.createTempFile(fileName, null); + Files.write( + tempFile.toPath(), "Will fail if the file has no content".getBytes(StandardCharsets.UTF_8)); + + s3.putObject( + PutObjectRequest.builder().bucket(bucketName).key(fileName).build(), + RequestBody.fromFile(tempFile)); + + return tempFile; + } + + public List listBuckets() { + return s3.listBuckets().buckets().stream().map(Bucket::name).collect(Collectors.toList()); + } + + public Set listBucketContent(String bucketName) { + return s3 + .listObjects(ListObjectsRequest.builder().bucket(bucketName).build()) + .contents() + .stream() + .map(S3Object::key) + .collect(Collectors.toSet()); + } + + public File downloadFile(String bucketName, String fileName) throws IOException { + ResponseBytes objectAsBytes = + s3.getObjectAsBytes(GetObjectRequest.builder().bucket(bucketName).key(fileName).build()); + + return Files.write(File.createTempFile(fileName, null).toPath(), objectAsBytes.asByteArray()) + .toFile(); + } + + public void deleteAllBuckets() { + List buckets = s3.listBuckets().buckets(); + buckets.forEach(this::clearBucket); + buckets.forEach( + bucket -> s3.deleteBucket(DeleteBucketRequest.builder().bucket(bucket.name()).build())); + } - private final software.amazon.awssdk.services.s3.S3Client s3; - - public S3Client(Environment environment) { - - s3 = - software.amazon.awssdk.services.s3.S3Client.builder() - .region(Region.US_EAST_1) - .forcePathStyle(true) - .endpointOverride(URI.create(environment.getAwsEndpointOverride())) - .credentialsProvider( - StaticCredentialsProvider.create( - AwsBasicCredentials.create( - environment.getAwsAccessKey(), environment.getAwsSecretAccessKey()))) - .build(); - } - - public void createBucket(String bucketName) { - try { - s3.createBucket(CreateBucketRequest.builder().bucket(bucketName).build()); - } catch (BucketAlreadyOwnedByYouException e) { - log.info("'{}' bucket already owned - skipped bucket creation", bucketName); + private void clearBucket(Bucket bucket) { + String bucketName = bucket.name(); + s3.listObjects(ListObjectsRequest.builder().bucket(bucketName).build()) + .contents() + .forEach( + s3Object -> + s3.deleteObject( + DeleteObjectRequest.builder().bucket(bucketName).key(s3Object.key()).build())); } - } - - public File uploadFile(String bucketName, String fileName) throws IOException { - File tempFile = File.createTempFile(fileName, null); - Files.write( - tempFile.toPath(), "Will fail if the file has no content".getBytes(StandardCharsets.UTF_8)); - - s3.putObject( - PutObjectRequest.builder().bucket(bucketName).key(fileName).build(), - RequestBody.fromFile(tempFile)); - - return tempFile; - } - - public List listBuckets() { - return s3.listBuckets().buckets().stream().map(Bucket::name).collect(Collectors.toList()); - } - - public Set listBucketContent(String bucketName) { - return s3 - .listObjects(ListObjectsRequest.builder().bucket(bucketName).build()) - .contents() - .stream() - .map(S3Object::key) - .collect(Collectors.toSet()); - } - - public File downloadFile(String bucketName, String fileName) throws IOException { - ResponseBytes objectAsBytes = - s3.getObjectAsBytes(GetObjectRequest.builder().bucket(bucketName).key(fileName).build()); - - return 
Files.write(File.createTempFile(fileName, null).toPath(), objectAsBytes.asByteArray()) - .toFile(); - } - - public void deleteAllBuckets() { - List buckets = s3.listBuckets().buckets(); - buckets.forEach(this::clearBucket); - buckets.forEach( - bucket -> s3.deleteBucket(DeleteBucketRequest.builder().bucket(bucket.name()).build())); - } - - private void clearBucket(Bucket bucket) { - String bucketName = bucket.name(); - s3.listObjects(ListObjectsRequest.builder().bucket(bucketName).build()) - .contents() - .forEach( - s3Object -> - s3.deleteObject( - DeleteObjectRequest.builder().bucket(bucketName).key(s3Object.key()).build())); - } } From ecccbb0333a6d93ab0bbb13c69ed019636d7917b Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Date: Fri, 14 Apr 2023 14:18:43 +0200 Subject: [PATCH 70/92] chore: add a template for pull request descriptions (#213) --- .github/PULL_REQUEST_TEMPLATE.md | 13 +++++++++++++ .github/workflows/verify.yaml | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..fe4467a54 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,13 @@ +## WHAT + +_Briefly describe what your PR changes, which features it adds/modifies._ + +## WHY + +_Briefly state why the change was necessary._ + +## FURTHER NOTES + +_List other areas of code that have changed but are not necessarily linked to the main feature. This could be method signature changes, package declarations, bugs that were encountered and were fixed inline, etc._ + +Closes # <-- _insert Issue number if one exists_ diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index 01064f35d..c844d9249 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -84,7 +84,7 @@ jobs: - name: Run markdownlint run: | - markdownlint-cli2-config .markdownlint.yaml "**/*.md" + markdownlint-cli2-config .markdownlint.yaml "**/*.md" "#.github" unit-tests: runs-on: ubuntu-latest From 406e1373b61e3bde6050e0d36465c0f201b86104 Mon Sep 17 00:00:00 2001 From: "Tuncay Tunc (ZF Friedrichshafen AG)" <100704677+tuncaytunc-zf@users.noreply.github.com> Date: Fri, 14 Apr 2023 15:04:11 +0200 Subject: [PATCH 71/92] fix: Adapt Helm Chart for version 0.3.x (#211) * Adapt Charts for version 0.3.x * fix business-tests * add edc.receiver.http.dynamic.endpoint * fix business-tests * code-review findings --- .github/workflows/business-tests.yaml | 8 ++-- .../tractusx-connector/templates/_helpers.tpl | 12 +++--- .../templates/deployment-controlplane.yaml | 43 ++++++------------- .../templates/deployment-dataplane.yaml | 24 +++++++---- .../templates/service-controlplane.yaml | 16 +++---- .../templates/service-dataplane.yaml | 4 ++ charts/tractusx-connector/values.yaml | 24 +++++------ .../edc-dataplane-base/build.gradle.kts | 22 +++++----- 8 files changed, 72 insertions(+), 81 deletions(-) diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index 7c64b29f5..248927db0 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -128,14 +128,14 @@ jobs: run: |- # Define endpoints echo "SOKRATES_DATA_MANAGEMENT_API_AUTH_KEY=password" | tee -a ${GITHUB_ENV} - echo "SOKRATES_DATA_MANAGEMENT_URL=http://sokrates-controlplane:8081/data" | tee -a ${GITHUB_ENV} + echo 
"SOKRATES_DATA_MANAGEMENT_URL=http://sokrates-controlplane:8081/management" | tee -a ${GITHUB_ENV} echo "SOKRATES_IDS_URL=http://sokrates-controlplane:8084/api/v1/ids" | tee -a ${GITHUB_ENV} echo "SOKRATES_DATA_PLANE_URL=http://sokrates-dataplane:8081/api/public/" | tee -a ${GITHUB_ENV} echo "SOKRATES_DATABASE_URL=jdbc:postgresql://plato-postgresql:5432/edc" | tee -a ${GITHUB_ENV} echo "SOKRATES_DATABASE_USER=user" | tee -a ${GITHUB_ENV} echo "SOKRATES_DATABASE_PASSWORD=password" | tee -a ${GITHUB_ENV} echo "PLATO_DATA_MANAGEMENT_API_AUTH_KEY=password" | tee -a ${GITHUB_ENV} - echo "PLATO_DATA_MANAGEMENT_URL=http://plato-controlplane:8081/data" | tee -a ${GITHUB_ENV} + echo "PLATO_DATA_MANAGEMENT_URL=http://plato-controlplane:8081/management" | tee -a ${GITHUB_ENV} echo "PLATO_IDS_URL=http://plato-controlplane:8084/api/v1/ids" | tee -a ${GITHUB_ENV} echo "PLATO_DATA_PLANE_URL=http://plato-dataplane:8081/api/public/" | tee -a ${GITHUB_ENV} echo "PLATO_DATABASE_URL=jdbc:postgresql://plato-postgresql:5432/edc" | tee -a ${GITHUB_ENV} @@ -176,7 +176,7 @@ jobs: helm install plato charts/tractusx-connector \ --set fullnameOverride=plato \ --set controlplane.service.type=NodePort \ - --set controlplane.endpoints.data.authKey=password \ + --set controlplane.endpoints.management.authKey=password \ --set controlplane.image.tag=business-test \ --set controlplane.image.pullPolicy=Never \ --set controlplane.image.repository=docker.io/library/edc-controlplane-postgresql-hashicorp-vault \ @@ -209,7 +209,7 @@ jobs: helm install sokrates charts/tractusx-connector \ --set fullnameOverride=sokrates \ --set controlplane.service.type=NodePort \ - --set controlplane.endpoints.data.authKey=password \ + --set controlplane.endpoints.management.authKey=password \ --set controlplane.image.tag=business-test \ --set controlplane.image.pullPolicy=Never \ --set controlplane.image.repository=docker.io/library/edc-controlplane-postgresql-hashicorp-vault \ diff --git a/charts/tractusx-connector/templates/_helpers.tpl b/charts/tractusx-connector/templates/_helpers.tpl index ecc8ff1d2..701e6fc75 100644 --- a/charts/tractusx-connector/templates/_helpers.tpl +++ b/charts/tractusx-connector/templates/_helpers.tpl @@ -110,9 +110,9 @@ Create the name of the service account to use {{/* Control IDS URL */}} -{{- define "txdc.controlplane.url.ids" -}} -{{- if .Values.controlplane.url.ids }}{{/* if ids api url has been specified explicitly */}} -{{- .Values.controlplane.url.ids }} +{{- define "txdc.controlplane.url.protocol" -}} +{{- if .Values.controlplane.url.protocol }}{{/* if ids api url has been specified explicitly */}} +{{- .Values.controlplane.url.protocol }} {{- else }}{{/* else when ids api url has not been specified explicitly */}} {{- with (index .Values.controlplane.ingresses 0) }} {{- if .enabled }}{{/* if ingress enabled */}} @@ -122,17 +122,17 @@ Control IDS URL {{- printf "http://%s" .hostname -}} {{- end }}{{/* end if tls */}} {{- else }}{{/* else when ingress not enabled */}} -{{- printf "http://%s-controlplane:%v" ( include "txdc.fullname" $ ) $.Values.controlplane.endpoints.ids.port -}} +{{- printf "http://%s-controlplane:%v" ( include "txdc.fullname" $ ) $.Values.controlplane.endpoints.protocol.port -}} {{- end }}{{/* end if ingress */}} {{- end }}{{/* end with ingress */}} -{{- end }}{{/* end if .Values.controlplane.url.ids */}} +{{- end }}{{/* end if .Values.controlplane.url.protocol */}} {{- end }} {{/* Validation URL */}} {{- define "txdc.controlplane.url.validation" -}} -{{- printf 
"http://%s-controlplane:%v%s/token" ( include "txdc.fullname" $ ) $.Values.controlplane.endpoints.validation.port $.Values.controlplane.endpoints.validation.path -}} +{{- printf "http://%s-controlplane:%v%s/token" ( include "txdc.fullname" $ ) $.Values.controlplane.endpoints.control.port $.Values.controlplane.endpoints.control.path -}} {{- end }} {{/* diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index dc708a8a7..338feebe3 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -128,45 +128,30 @@ spec: value: {{ printf "%s%s" .Values.daps.url .Values.daps.paths.token }} - name: EDC_OAUTH_PRIVATE_KEY_ALIAS value: {{ .Values.vault.secretNames.dapsPrivateKey | required ".Values.vault.secretNames.dapsPrivateKey is required" | quote }} - - name: EDC_OAUTH_PUBLIC_KEY_ALIAS + - name: EDC_OAUTH_CERTIFICATE_ALIAS value: {{ .Values.vault.secretNames.dapsPublicKey | required ".Values.vault.secretNames.dapsPublicKey is required" | quote }} ####### # API # ####### - name: "EDC_API_AUTH_KEY" - value: {{ .Values.controlplane.endpoints.data.authKey | required ".Values.controlplane.endpoints.data.authKey is required" | quote }} + value: {{ .Values.controlplane.endpoints.management.authKey | required ".Values.controlplane.endpoints.mangement.authKey is required" | quote }} - name: "WEB_HTTP_DEFAULT_PORT" value: {{ .Values.controlplane.endpoints.default.port | quote }} - name: "WEB_HTTP_DEFAULT_PATH" value: {{ .Values.controlplane.endpoints.default.path | quote }} - {{- if or (eq (substr 0 3 .Values.controlplane.image.tag) "0.1") (eq (substr 0 3 .Values.controlplane.image.tag) "0.2") }} - # WEB_HTTP_DATA_PORT is renamed to WEB_HTTP_MANAGEMENT_PORT from version 0.2.1 and newer - # we will keep both settings for downward capabilities - - name: "WEB_HTTP_DATA_PORT" - value: {{ .Values.controlplane.endpoints.data.port | quote }} - # WEB_HTTP_DATA_PATH is renamed to WEB_HTTP_MANAGEMENT_PATH from version 0.2.1 and newer - # we will keep both settings for downward capabilities - - name: "WEB_HTTP_DATA_PATH" - value: {{ .Values.controlplane.endpoints.data.path | quote }} - {{- else }} - name: "WEB_HTTP_MANAGEMENT_PORT" - value: {{ .Values.controlplane.endpoints.data.port | quote }} + value: {{ .Values.controlplane.endpoints.management.port | quote }} - name: "WEB_HTTP_MANAGEMENT_PATH" - value: {{ .Values.controlplane.endpoints.data.path | quote }} - {{- end }} - - name: "WEB_HTTP_VALIDATION_PORT" - value: {{ .Values.controlplane.endpoints.validation.port | quote }} - - name: "WEB_HTTP_VALIDATION_PATH" - value: {{ .Values.controlplane.endpoints.validation.path | quote }} + value: {{ .Values.controlplane.endpoints.management.path | quote }} - name: "WEB_HTTP_CONTROL_PORT" value: {{ .Values.controlplane.endpoints.control.port | quote }} - name: "WEB_HTTP_CONTROL_PATH" value: {{ .Values.controlplane.endpoints.control.path | quote }} - - name: "WEB_HTTP_IDS_PORT" - value: {{ .Values.controlplane.endpoints.ids.port | quote }} - - name: "WEB_HTTP_IDS_PATH" - value: {{ .Values.controlplane.endpoints.ids.path | quote }} + - name: "WEB_HTTP_PROTOCOL_PORT" + value: {{ .Values.controlplane.endpoints.protocol.port | quote }} + - name: "WEB_HTTP_PROTOCOL_PATH" + value: {{ .Values.controlplane.endpoints.protocol.path | quote }} - name: "WEB_HTTP_OBSERVABILITY_PORT" value: {{ .Values.controlplane.endpoints.observability.port | quote}} - 
name: "WEB_HTTP_OBSERVABILITY_PATH" @@ -178,9 +163,9 @@ spec: ## IDS ## ######### - name: "IDS_WEBHOOK_ADDRESS" - value: {{ include "txdc.controlplane.url.ids" . | quote }} + value: {{ include "txdc.controlplane.url.protocol" . | quote }} - name: "EDC_IDS_ENDPOINT" - value: {{ printf "%s%s" (include "txdc.controlplane.url.ids" .) .Values.controlplane.endpoints.ids.path | quote }} + value: {{ printf "%s%s" (include "txdc.controlplane.url.protocol" .) .Values.controlplane.endpoints.protocol.path | quote }} - name: "EDC_IDS_ID" value: {{ printf "urn:connector:%s" (lower .Values.controlplane.internationalDataSpaces.id) | quote }} - name: "EDC_IDS_DESCRIPTION" @@ -196,10 +181,10 @@ spec: - name: "EDC_OAUTH_PROVIDER_AUDIENCE" value: "idsc:IDS_CONNECTORS_ALL" - name: "EDC_OAUTH_ENDPOINT_AUDIENCE" - value: {{ printf "%s%s%s" (include "txdc.controlplane.url.ids" . ) .Values.controlplane.endpoints.ids.path "/data" | quote }} + value: {{ printf "%s%s%s" (include "txdc.controlplane.url.protocol" . ) .Values.controlplane.endpoints.protocol.path "/data" | quote }} # this is the old setting name for 'EDC_OAUTH_ENDPOINT_AUDIENCE' and is mandatory for Produce EDC v0.1.2 and older - name: "EDC_IDS_ENDPOINT_AUDIENCE" - value: {{ printf "%s%s%s" (include "txdc.controlplane.url.ids" . ) .Values.controlplane.endpoints.ids.path "/data" | quote }} + value: {{ printf "%s%s%s" (include "txdc.controlplane.url.protocol" . ) .Values.controlplane.endpoints.protocol.path "/data" | quote }} {{- if .Values.postgresql.enabled }} @@ -281,9 +266,9 @@ spec: - name: "EDC_TRANSFER_PROXY_TOKEN_VERIFIER_PUBLICKEY_ALIAS" value: {{ .Values.vault.secretNames.transferProxyTokenSignerPublicKey | quote }} - # see extension https://github.com/eclipse-edc/Connector/tree/main/extensions/control-plane/http-receiver + # see extension https://github.com/eclipse-edc/Connector/tree/main/extensions/control-plane/transfer/transfer-pull-http-dynamic-receiver - - name: "EDC_RECEIVER_HTTP_ENDPOINT" + - name: "EDC_RECEIVER_HTTP_DYNAMIC_ENDPOINT" value: {{ .Values.backendService.httpProxyTokenReceiverUrl | required ".Values.backendService.httpProxyTokenReceiverUrl is required" | quote }} ########### diff --git a/charts/tractusx-connector/templates/deployment-dataplane.yaml b/charts/tractusx-connector/templates/deployment-dataplane.yaml index bd375b295..c83742cba 100644 --- a/charts/tractusx-connector/templates/deployment-dataplane.yaml +++ b/charts/tractusx-connector/templates/deployment-dataplane.yaml @@ -78,8 +78,8 @@ spec: {{- if .Values.dataplane.livenessProbe.enabled }} livenessProbe: httpGet: - path: {{ .Values.dataplane.endpoints.default.path }}/check/liveness - port: {{ .Values.dataplane.endpoints.default.port }} + path: {{ .Values.dataplane.endpoints.observability.path }}/check/liveness + port: {{ .Values.dataplane.endpoints.observability.port }} initialDelaySeconds: {{ .Values.dataplane.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.dataplane.livenessProbe.periodSeconds }} timeoutSeconds: {{ .Values.dataplane.livenessProbe.timeoutSeconds }} @@ -89,8 +89,8 @@ spec: {{- if .Values.dataplane.readinessProbe.enabled }} readinessProbe: httpGet: - path: {{ .Values.dataplane.endpoints.default.path }}/check/readiness - port: {{ .Values.dataplane.endpoints.default.port }} + path: {{ .Values.dataplane.endpoints.observability.path }}/check/readiness + port: {{ .Values.dataplane.endpoints.observability.port }} initialDelaySeconds: {{ .Values.dataplane.readinessProbe.initialDelaySeconds }} periodSeconds: {{ 
.Values.dataplane.readinessProbe.periodSeconds }} timeoutSeconds: {{ .Values.dataplane.readinessProbe.timeoutSeconds }} @@ -122,16 +122,18 @@ spec: value: {{ .Values.dataplane.endpoints.control.port | quote }} - name: "WEB_HTTP_CONTROL_PATH" value: {{ .Values.dataplane.endpoints.control.path | quote }} - - name: "WEB_HTTP_VALIDATION_PORT" - value: {{ .Values.dataplane.endpoints.validation.port | quote }} - - name: "WEB_HTTP_VALIDATION_PATH" - value: {{ .Values.dataplane.endpoints.validation.path | quote }} - name: "WEB_HTTP_PUBLIC_PORT" value: {{ .Values.dataplane.endpoints.public.port | quote }} - name: "WEB_HTTP_PUBLIC_PATH" value: {{ .Values.dataplane.endpoints.public.path | quote }} - name: "EDC_DATAPLANE_TOKEN_VALIDATION_ENDPOINT" value: {{ include "txdc.controlplane.url.validation" .}} + - name: "WEB_HTTP_OBSERVABILITY_PORT" + value: {{ .Values.dataplane.endpoints.observability.port | quote }} + - name: "WEB_HTTP_OBSERVABILITY_PATH" + value: {{ .Values.dataplane.endpoints.observability.path | quote }} + - name: "TRACTUSX_API_OBSERVABILITY_ALLOW-INSECURE" + value: {{ .Values.dataplane.endpoints.observability.insecure | quote }} ####### # AWS # @@ -178,10 +180,16 @@ spec: value: {{ .Values.vault.azure.tenant | quote }} - name: "EDC_VAULT_NAME" value: {{ .Values.vault.azure.name | quote }} + # only set the env var if config value not null + {{- if .Values.vault.azure.secret }} - name: "EDC_VAULT_CLIENTSECRET" value: {{ .Values.vault.azure.secret | quote }} + {{- end }} + # only set the env var if config value not null + {{- if .Values.vault.azure.certificate }} - name: "EDC_VAULT_CERTIFICATE" value: {{ .Values.vault.azure.certificate | quote }} + {{- end }} {{- end }} ###################################### diff --git a/charts/tractusx-connector/templates/service-controlplane.yaml b/charts/tractusx-connector/templates/service-controlplane.yaml index 94a02fa1e..acab58343 100644 --- a/charts/tractusx-connector/templates/service-controlplane.yaml +++ b/charts/tractusx-connector/templates/service-controlplane.yaml @@ -39,18 +39,14 @@ spec: targetPort: control protocol: TCP name: control - - port: {{ .Values.controlplane.endpoints.data.port }} - targetPort: data + - port: {{ .Values.controlplane.endpoints.management.port }} + targetPort: management protocol: TCP - name: data - - port: {{ .Values.controlplane.endpoints.validation.port }} - targetPort: validation + name: management + - port: {{ .Values.controlplane.endpoints.protocol.port }} + targetPort: protocol protocol: TCP - name: validation - - port: {{ .Values.controlplane.endpoints.ids.port }} - targetPort: ids - protocol: TCP - name: ids + name: protocol - port: {{ .Values.controlplane.endpoints.metrics.port }} targetPort: metrics protocol: TCP diff --git a/charts/tractusx-connector/templates/service-dataplane.yaml b/charts/tractusx-connector/templates/service-dataplane.yaml index 26fa9c203..5644f7fbe 100644 --- a/charts/tractusx-connector/templates/service-dataplane.yaml +++ b/charts/tractusx-connector/templates/service-dataplane.yaml @@ -21,6 +21,10 @@ spec: targetPort: public protocol: TCP name: public + - port: {{ .Values.dataplane.endpoints.observability.port }} + targetPort: observability + protocol: TCP + name: observability - port: {{ .Values.dataplane.endpoints.metrics.port }} targetPort: metrics protocol: TCP diff --git a/charts/tractusx-connector/values.yaml b/charts/tractusx-connector/values.yaml index cbc266a94..aebd45481 100644 --- a/charts/tractusx-connector/values.yaml +++ b/charts/tractusx-connector/values.yaml @@ 
-89,19 +89,13 @@ controlplane: # -- path for incoming api calls path: /api # -- data management api, used by internal users, can be added to an ingress and must not be internet facing - data: + management: # -- port for incoming api calls port: 8081 # -- path for incoming api calls - path: /data + path: /management # -- authentication key, must be attached to each 'X-Api-Key' request header authKey: "" - # -- validation api, only used by the data plane and should not be added to any ingress - validation: - # -- port for incoming api calls - port: 8082 - # -- path for incoming api calls - path: /validation # -- control api, used for internal control calls. can be added to the internal ingress, but should probably not control: # -- port for incoming api calls @@ -109,7 +103,7 @@ controlplane: # -- path for incoming api calls path: /control # -- ids api, used for inter connector communication and must be internet facing - ids: + protocol: # -- port for incoming api calls port: 8084 # -- path for incoming api calls @@ -221,7 +215,7 @@ controlplane: annotations: {} # -- EDC endpoints exposed by this ingress resource endpoints: - - data + - management - control # -- Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use className: "" @@ -340,12 +334,16 @@ dataplane: public: port: 8081 path: /api/public - validation: - port: 8082 - path: /validation control: port: 8083 path: /api/dataplane/control + observability: + # -- port for incoming API calls + port: 8085 + # -- observability api, provides /health /readiness and /liveness endpoints + path: /observability + # -- allow or disallow insecure access, i.e. access without authentication + insecure: true metrics: port: 9090 path: /metrics diff --git a/edc-dataplane/edc-dataplane-base/build.gradle.kts b/edc-dataplane/edc-dataplane-base/build.gradle.kts index 686e5fd06..cc873dcea 100644 --- a/edc-dataplane/edc-dataplane-base/build.gradle.kts +++ b/edc-dataplane/edc-dataplane-base/build.gradle.kts @@ -4,18 +4,18 @@ plugins { } dependencies { - implementation(edc.config.filesystem) - implementation(edc.dpf.awss3) - implementation(edc.dpf.oauth2) - implementation(edc.dpf.http) + runtimeOnly(project(":edc-extensions:observability-api-customization")) - implementation(edc.dpf.framework) - implementation(edc.dpf.api) - implementation(edc.api.observability) - implementation(edc.core.connector) - implementation(edc.boot) + runtimeOnly(edc.config.filesystem) + runtimeOnly(edc.dpf.awss3) + runtimeOnly(edc.dpf.oauth2) + runtimeOnly(edc.dpf.http) + runtimeOnly(edc.dpf.framework) + runtimeOnly(edc.dpf.api) + runtimeOnly(edc.core.connector) + runtimeOnly(edc.boot) - implementation(edc.bundles.monitoring) - implementation(edc.ext.http) + runtimeOnly(edc.bundles.monitoring) + runtimeOnly(edc.ext.http) } \ No newline at end of file From e566646168b401a7bdc632619ef67502810a8851 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Date: Sat, 15 Apr 2023 07:49:08 +0200 Subject: [PATCH 72/92] refactor: rename git branches (#218) * refactor: update branch names and references in our documentation * publish packages to tractus-x --- .../actions/publish-docker-image/action.yml | 1 + .github/dependabot.yml | 16 +++++----- .github/workflows/build.yaml | 23 ++++++++------- .github/workflows/business-tests.yaml | 5 ++-- .github/workflows/draft-new-release.yaml | 2 +- .github/workflows/helm-chart-release.yaml | 2 +- .github/workflows/helm-lint.yaml | 4 +-- 
.github/workflows/kics.yml | 4 +-- .github/workflows/publish-docker.yaml | 3 +- .github/workflows/publish-new-release.yml | 29 +++++++++---------- .github/workflows/trivy.yml | 4 +-- .github/workflows/verify.yaml | 3 +- .../templates/deployment-controlplane.yaml | 6 ++-- .../templates/deployment-dataplane.yaml | 2 +- .../2023-02-09-release-process/README.md | 18 ++++++------ .../2023-02-27_testing/README.md | 2 +- .../notice.md | 4 +-- .../edc-controlplane-memory/notice.md | 4 +-- .../notice.md | 4 +-- .../edc-controlplane-postgresql/notice.md | 4 +-- .../edc-dataplane-azure-vault/notice.md | 4 +-- .../edc-dataplane-hashicorp-vault/notice.md | 4 +-- pr_etiquette.md | 2 +- 23 files changed, 77 insertions(+), 73 deletions(-) diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index 0dfefd5f0..600383f81 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -86,6 +86,7 @@ runs: type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{raw}} type=match,pattern=\d.\d.\d + type=raw,value=latest,enable={{is_default_branch}} type=sha ############################### diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 16f8582cb..b59a06386 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,8 +3,8 @@ version: 2 updates: # Maven - - package-ecosystem: "maven" - target-branch: develop + package-ecosystem: "gradle" + target-branch: main directory: / labels: - "dependabot" @@ -15,7 +15,7 @@ updates: # Github Actions - package-ecosystem: "github-actions" - target-branch: develop + target-branch: main directory: / labels: - "dependabot" @@ -26,7 +26,7 @@ updates: # Docker - package-ecosystem: "docker" - target-branch: develop + target-branch: main directory: ./edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/ labels: - "dependabot" @@ -35,7 +35,7 @@ updates: interval: "daily" - package-ecosystem: "docker" - target-branch: develop + target-branch: main directory: ./edc-controlplane/edc-controlplane-postgresql/src/main/docker/ labels: - "dependabot" @@ -44,7 +44,7 @@ updates: interval: "daily" - package-ecosystem: "docker" - target-branch: develop + target-branch: main directory: ./edc-controlplane/edc-controlplane-memory/src/main/docker/ labels: - "dependabot" @@ -53,7 +53,7 @@ updates: interval: "daily" - package-ecosystem: "docker" - target-branch: develop + target-branch: main directory: ./edc-dataplane/edc-dataplane-azure-vault/src/main/docker/ labels: - "dependabot" @@ -62,7 +62,7 @@ updates: interval: "daily" - package-ecosystem: "docker" - target-branch: develop + target-branch: main directory: ./edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/ labels: - "dependabot" diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index f3e63414a..76d0e01c5 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -27,7 +27,7 @@ on: push: branches: - main - - develop + - releases tags: - '[0-9]+.[0-9]+.[0-9]+' release: @@ -44,7 +44,8 @@ on: concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + # cancel only running jobs on pull requests + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: @@ -136,9 +137,9 @@ jobs: packages: write needs: [ secret-presence, build-controlplane, build-dataplane, build-extensions ] - # do not run on PR branches, do not run on main + # do not run on PR branches, do not run on releases 
if: | - needs.secret-presence.outputs.GPG_PASSPHRASE && needs.secret-presence.outputs.GPG_PRIVATE_KEY && github.event_name != 'pull_request' && github.ref != 'refs/heads/main' + needs.secret-presence.outputs.GPG_PASSPHRASE && needs.secret-presence.outputs.GPG_PRIVATE_KEY && github.event_name != 'pull_request' && github.ref != 'refs/heads/releases' steps: # Set-Up - name: Checkout @@ -152,9 +153,9 @@ jobs: cache: 'gradle' - name: Import GPG Key uses: crazy-max/ghaction-import-gpg@v5 - env: - GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} - PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + passphrase: ${{ secrets.GPG_PASSPHRASE }} # publish snapshots - name: Publish snapshot versions @@ -162,7 +163,7 @@ jobs: echo "Publishing Version $(grep -e "version" gradle.properties | cut -f2 -d"=") to Github Packages" ./gradlew publishAllPublicationsToGitHubPackagesRepository env: - #REPO: ${{ github.repository }} - REPO: "catenax-ng/product-edc" - GITHUB_PACKAGE_USERNAME: ${{ secrets.TEMP_GHPKG_USER }} - GITHUB_PACKAGE_PASSWORD: ${{ secrets.TEMP_GHPKG_PASSWORD }} + REPO: ${{ github.repository }} + GITHUB_PACKAGE_USERNAME: ${{ github.actor }} + GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} + diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index 248927db0..97a3044fb 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -29,13 +29,14 @@ on: - 'docs/**' - '**/*.md' branches: - - develop + - releases - release/** - main workflow_dispatch: concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + # cancel only running jobs on pull requests + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index 82e956c76..248f61bc4 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -79,7 +79,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: head: release/${{ github.event.inputs.version }} - base: main + base: releases title: Release version ${{ github.event.inputs.version }} reviewers: ${{ github.actor }} body: |- diff --git a/.github/workflows/helm-chart-release.yaml b/.github/workflows/helm-chart-release.yaml index 819f4f0ec..bd5e55302 100644 --- a/.github/workflows/helm-chart-release.yaml +++ b/.github/workflows/helm-chart-release.yaml @@ -23,7 +23,7 @@ on: paths: - 'charts/**' branches: - - main + - releases workflow_dispatch: jobs: diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml index bf1531cad..ae94c84a7 100644 --- a/.github/workflows/helm-lint.yaml +++ b/.github/workflows/helm-lint.yaml @@ -5,7 +5,7 @@ on: push: branches: - main - - develop + - releases tags: - '[0-9]+.[0-9]+.[0-9]+' paths-ignore: @@ -50,7 +50,7 @@ jobs: name: chart-testing (list-changed) id: list-changed run: | - changed=$(ct list-changed --config ct.yaml --target-branch develop) + changed=$(ct list-changed --config ct.yaml --target-branch main) if [[ -n "$changed" ]]; then echo "::set-output name=changed::true" fi diff --git a/.github/workflows/kics.yml b/.github/workflows/kics.yml index c009ab2de..1b922064a 100644 --- a/.github/workflows/kics.yml +++ b/.github/workflows/kics.yml @@ -2,9 +2,9 @@ name: "KICS" on: push: - branches: [main, master, develop] + branches: [main, releases] pull_request: - branches: [main, master, develop] + branches: [main, 
releases] workflow_dispatch: schedule: diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index 4bb7a4045..e2a7a5384 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -30,7 +30,8 @@ on: default: "tractusx" concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + # cancel only running jobs on pull requests + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index c626bba84..373c892e7 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -4,7 +4,7 @@ name: "Publish new release" on: pull_request: branches: - - main + - releases - support/* types: - closed @@ -67,18 +67,17 @@ jobs: - name: Import GPG Key uses: crazy-max/ghaction-import-gpg@v5 env: - GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} - PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + passphrase: ${{ secrets.GPG_PASSPHRASE }} - name: Publish release version run: | echo "Publishing Version $(grep -e "version" gradle.properties | cut -f2 -d"=") to Github Packages" ./gradlew publishAllPublicationsToGithubPackagesRepository env: - #REPO: ${{ github.repository }} - REPO: "catenax-ng/product-edc" - GITHUB_PACKAGE_USERNAME: ${{ secrets.TEMP_GHPKG_USER }} - GITHUB_PACKAGE_PASSWORD: ${{ secrets.TEMP_GHPKG_PASSWORD }} + REPO: ${{ github.repository }} + GITHUB_PACKAGE_USERNAME: ${{ github.actor }} + GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} # Release: Helm Charts helm-release: @@ -128,7 +127,7 @@ jobs: git push origin gh-pages - # Release: GitHub tag & release; Merges back main into develop; Starts a new development cycle; + # Release: GitHub tag & release; Merges back releases into main; Starts a new development cycle; github-release: name: Publish new github release needs: [ release-version ] @@ -148,7 +147,7 @@ jobs: name: Checkout uses: actions/checkout@v3.3.0 with: - # 0 to fetch the full history due to upcoming merge of main into develop branch + # 0 to fetch the full history due to upcoming merge of releases into main branch fetch-depth: 0 - name: Create Release Tag @@ -185,15 +184,15 @@ jobs: distribution: 'temurin' cache: 'gradle' - - name: Merge main back into develop and set new snapshot version - if: github.event.pull_request.base.ref == 'main' + name: Merge releases back into main and set new snapshot version + if: github.event.pull_request.base.ref == 'releases' run: | # Prepare git env git config user.name "GitHub actions" git config user.email noreply@github.com - # Merge main into develop - git checkout develop && git merge -X theirs main --no-commit --no-ff + # Merge releases into main + git checkout main && git merge -X theirs releases --no-commit --no-ff # Extract release version IFS=. 
read -r RELEASE_VERSION_MAJOR RELEASE_VERSION_MINOR RELEASE_VERSION_PATCH<<<"${{ env.RELEASE_VERSION }}" @@ -204,8 +203,8 @@ jobs: # Persist the "version" in the gradle.properties sed -i "s/version=.*/version=$SNAPSHOT_VERSION/g" gradle.properties - # Commit and push to origin develop + # Commit and push to origin main git add gradle.properties git commit --message "Introduce new snapshot version $SNAPSHOT_VERSION" - git push origin develop + git push origin main diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index b82acaf66..714ed4c74 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -9,7 +9,7 @@ on: workflows: ["Build"] branches: - main - - develop + - releases - release/* - hotfix/* tags: @@ -84,7 +84,7 @@ jobs: if: always() uses: aquasecurity/trivy-action@master with: - image-ref: "ghcr.io/${{ github.repository }}/${{ matrix.image }}:sha-${{ needs.git-sha7.outputs.value }}" + image-ref: "tractusx/${{ matrix.image }}:sha-${{ needs.git-sha7.outputs.value }}" format: "sarif" output: "trivy-results-${{ matrix.image }}.sarif" exit-code: "1" diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index c844d9249..d9dda3844 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -25,7 +25,7 @@ on: push: branches: - main - - develop + - releases tags: - '[0-9]+.[0-9]+.[0-9]+' release: @@ -39,6 +39,7 @@ on: workflow_dispatch: concurrency: + # cancel older running jobs on the same branch group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 338feebe3..1161db178 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -247,7 +247,7 @@ spec: ## DATA PLANE ## ################ - # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/dataplane-selector-configuration + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/main/edc-extensions/dataplane-selector-configuration - name: "EDC_DATAPLANE_SELECTOR_DEFAULTPLANE_URL" value: {{ include "txdc.dataplane.url.control" . 
}}/transfer - name: "EDC_DATAPLANE_SELECTOR_DEFAULTPLANE_SOURCETYPES" @@ -276,7 +276,7 @@ spec: ########### {{- if .Values.vault.hashicorp.enabled }} - # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/hashicorp-vault + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/main/edc-extensions/hashicorp-vault - name: "EDC_VAULT_HASHICORP_URL" value: {{ .Values.vault.hashicorp.url | required ".Values.vault.hashicorp.url is required" | quote }} - name: "EDC_VAULT_HASHICORP_TOKEN" @@ -317,7 +317,7 @@ spec: ## DATA ENCRYPTION ## ##################### - # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/data-encryption + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/main/edc-extensions/data-encryption - name: "EDC_DATA_ENCRYPTION_KEYS_ALIAS" value: {{ .Values.vault.secretNames.transferProxyTokenEncryptionAesKey | quote }} - name: "EDC_DATA_ENCRYPTION_ALGORITHM" diff --git a/charts/tractusx-connector/templates/deployment-dataplane.yaml b/charts/tractusx-connector/templates/deployment-dataplane.yaml index c83742cba..bbc48c434 100644 --- a/charts/tractusx-connector/templates/deployment-dataplane.yaml +++ b/charts/tractusx-connector/templates/deployment-dataplane.yaml @@ -156,7 +156,7 @@ spec: ########### {{- if .Values.vault.hashicorp.enabled }} - # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/hashicorp-vault + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/main/edc-extensions/hashicorp-vault - name: "EDC_VAULT_HASHICORP_URL" value: {{ .Values.vault.hashicorp.url | required ".Values.vault.hashicorp.url is required" | quote }} - name: "EDC_VAULT_HASHICORP_TOKEN" diff --git a/docs/development/decision-records/2023-02-09-release-process/README.md b/docs/development/decision-records/2023-02-09-release-process/README.md index aee5bac5a..4b2771c0a 100644 --- a/docs/development/decision-records/2023-02-09-release-process/README.md +++ b/docs/development/decision-records/2023-02-09-release-process/README.md @@ -8,7 +8,7 @@ To improve stability, reproducibility and maintainability of releases, tractusx- - use release versions of EDC in releases. Release branches must not change upstream dependency versions, unless there is a clear and concise reason to do so. - slightly update branching model -- if possible, bugs/defects should be fixed on `develop` and be backported to the respective `hotfix/` branch +- if possible, bugs/defects should be fixed on `main` and be backported to the respective `hotfix/` branch - only hotfixes for critical security bugs will be provided as defined by the committers for the currently released version. Nothing else. - feature development happens _in developers' forks only_ to keep the Git reflog of the `origin` clean. @@ -31,15 +31,15 @@ Every release version published by tractusx-edc must be reproducible at any time During feature development we only use `-SNAPSHOT` versions of EDC packages. It is assumed that when the build breaks due to changes in upstream, the fix can be done quickly and easily, much more so than working off technical -debt that would otherwise accumulate over several months. Builds on `develop` are therefore _not repeatable_, but that +debt that would otherwise accumulate over several months. 
Builds on `main` are therefore _not repeatable_, but that downside is easily offset by the tighter alignment with and smaller technical debt and integration pain with the upstream EDC. ### Use release versions of EDC in releases -First, a new branch `releases/X.Y.Z` based off of `develop` is created. This can either be done +First, a new branch `release/X.Y.Z` based off of `main` is created. This can either be done on `HEAD`, or - if desired - on a particular ref. The latter case is relevant if there are already features -in `develop` that are not scoped for a particular release. +in `main` that are not scoped for a particular release. Second, the dependency onto EDC is updated to the most recent build. For example, if a release is created on March 27th 2023, the most recent nightly would be `0.0.1-20230326`. @@ -79,13 +79,13 @@ Once a release is published, for example `0.3.1` it will receive no further deve hotfix branches are created based off of the release branch, here `releases/0.3.1`, thus, `hotfix/0.3.1`. From this, three scenarios emerge: -1. The actual fix is done on `develop` and can be cherry-picked into the `hotfix/0.3.1` branch. No new commits are +1. The actual fix is done on `main` and can be cherry-picked into the `hotfix/0.3.1` branch. No new commits are made directly in that branch. -2. The actual fix is done on `develop` and must be manually ported into the `hotfix/0.3.1` branch. One or several new +2. The actual fix is done on `main` and must be manually ported into the `hotfix/0.3.1` branch. One or several new commits are made on `hotfix/0.3.1`. This is needed when cherry-picking is not available due to incompatibilities - between `develop` and the hotfix branch due to intermittent changes. -3. The fix is only relevant for the `0.3.1` hotfix, it is not needed in `develop`. This can happen, when the problem is - not present on `develop`, because it was already implicitly fixed, or otherwise doesn't exist. + between `main` and the hotfix branch due to intermittent changes. +3. The fix is only relevant for the `0.3.1` hotfix, it is not needed in `main`. This can happen, when the problem is + not present on `main`, because it was already implicitly fixed, or otherwise doesn't exist. This might produce many branches, and the first `hotfix` makes the release obsolete, but it will greatly help readability and make a release's history readily apparent. diff --git a/docs/development/decision-records/2023-02-27_testing/README.md b/docs/development/decision-records/2023-02-27_testing/README.md index 45844203d..0d12ab353 100644 --- a/docs/development/decision-records/2023-02-27_testing/README.md +++ b/docs/development/decision-records/2023-02-27_testing/README.md @@ -82,5 +82,5 @@ This section explains _at which point in time_ we should execute which test. Thi | Unit test | when running tests locally, without any parameters, on every commit on every branch | | | Integration test | on every commit on every branch | | | System/End-To-End test | on pull request branches except when marked as `draft` | | -| Deployment test | before merging pull requests and on every commit on `develop` | | +| Deployment test | before merging pull requests and on every commit on `main` | | | Performance test | Only on a specific schedule, e.g. 
once per day or week | | diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md index fdcc88583..e6088f521 100644 --- a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/notice.md @@ -10,8 +10,8 @@ Eclipse Tractus-X product(s) installed within the image: - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile -- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/edc-controlplane/edc-controlplane-memory-hashicorp-vault/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) ## Used base image diff --git a/edc-controlplane/edc-controlplane-memory/notice.md b/edc-controlplane/edc-controlplane-memory/notice.md index cee9fe5ed..c58f81b30 100644 --- a/edc-controlplane/edc-controlplane-memory/notice.md +++ b/edc-controlplane/edc-controlplane-memory/notice.md @@ -10,8 +10,8 @@ Eclipse Tractus-X product(s) installed within the image: - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile -- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) ## Used base image diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md index 3b5e517f0..a1c9978ab 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/notice.md @@ -10,8 +10,8 @@ Eclipse Tractus-X product(s) installed within the image: - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile -- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) ## Used base image diff --git a/edc-controlplane/edc-controlplane-postgresql/notice.md b/edc-controlplane/edc-controlplane-postgresql/notice.md index d9e1b58b1..e1662d799 100644 --- a/edc-controlplane/edc-controlplane-postgresql/notice.md +++ 
b/edc-controlplane/edc-controlplane-postgresql/notice.md @@ -10,8 +10,8 @@ Eclipse Tractus-X product(s) installed within the image: - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile -- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/edc-controlplane/edc-controlplane-postgresql/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) ## Used base image diff --git a/edc-dataplane/edc-dataplane-azure-vault/notice.md b/edc-dataplane/edc-dataplane-azure-vault/notice.md index 7023f7ab7..065833ed2 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/notice.md +++ b/edc-dataplane/edc-dataplane-azure-vault/notice.md @@ -10,8 +10,8 @@ Eclipse Tractus-X product(s) installed within the image: - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile -- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/edc-dataplane/edc-dataplane-azure-vault/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) ## Used base image diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md index 8b18d0a4b..054c5e35f 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/notice.md @@ -10,8 +10,8 @@ Eclipse Tractus-X product(s) installed within the image: - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile -- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/develop/LICENSE) +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/edc-dataplane/edc-dataplane-hashicorp-vault/src/main/docker/Dockerfile +- Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) ## Used base image diff --git a/pr_etiquette.md b/pr_etiquette.md index aaaf16761..348ebf000 100644 --- a/pr_etiquette.md +++ b/pr_etiquette.md @@ -10,7 +10,7 @@ Submitting pull requests in EDC should be done while adhering to a couple of sim - No surprise PRs please. Before you submit a PR, open a discussion or an issue outlining your planned work and give people time to comment. It may even be advisable to contact committers using the `@mention` feature. Unsolicited PRs may get ignored or rejected. 
-- Create your working branch in your fork of TractusX-EDC, and create the PR against the upstream `develop` branch +- Create your working branch in your fork of TractusX-EDC, and create the PR against the upstream `main` branch - Create focused PRs: your work should be focused on one particular feature or bug. Do not create broad-scoped PRs that solve multiple issues as reviewers may reject those PR bombs outright. - Provide a clear description and motivation in the PR description in GitHub. This makes the reviewer's life much From 85876568a12590406251ce0ecbab1de38e3d0b48 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Apr 2023 07:29:23 +0200 Subject: [PATCH 73/92] chore(deps): bump io.cucumber:cucumber-junit-platform-engine from 7.11.1 to 7.11.2 (#221) * refactor: rename git branches (#218) * refactor: update branch names and references in our documentation * publish packages to tractus-x * chore(deps): bump io.cucumber:cucumber-junit-platform-engine Bumps [io.cucumber:cucumber-junit-platform-engine](https://github.com/cucumber/cucumber-jvm) from 7.11.1 to 7.11.2. - [Release notes](https://github.com/cucumber/cucumber-jvm/releases) - [Changelog](https://github.com/cucumber/cucumber-jvm/blob/main/CHANGELOG.md) - [Commits](https://github.com/cucumber/cucumber-jvm/compare/v7.11.1...v7.11.2) --- updated-dependencies: - dependency-name: io.cucumber:cucumber-junit-platform-engine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- edc-tests/cucumber/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-tests/cucumber/build.gradle.kts b/edc-tests/cucumber/build.gradle.kts index 69ba71aa5..431ebcbb7 100644 --- a/edc-tests/cucumber/build.gradle.kts +++ b/edc-tests/cucumber/build.gradle.kts @@ -20,7 +20,7 @@ dependencies { testImplementation("org.apache.httpcomponents:httpclient:4.5.14") testImplementation("org.junit.platform:junit-platform-suite:1.9.2") testImplementation("io.cucumber:cucumber-java:7.11.1") - testImplementation("io.cucumber:cucumber-junit-platform-engine:7.11.1") + testImplementation("io.cucumber:cucumber-junit-platform-engine:7.11.2") testImplementation("org.slf4j:slf4j-api:2.0.3") testImplementation(libs.restAssured) testImplementation(libs.postgres) From 568b0cf29b3f85c40b2530ffac8103c92471a96d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Apr 2023 07:56:38 +0200 Subject: [PATCH 74/92] chore(deps): bump io.cucumber:cucumber-java from 7.11.1 to 7.11.2 (#225) Bumps [io.cucumber:cucumber-java](https://github.com/cucumber/cucumber-jvm) from 7.11.1 to 7.11.2. - [Release notes](https://github.com/cucumber/cucumber-jvm/releases) - [Changelog](https://github.com/cucumber/cucumber-jvm/blob/main/CHANGELOG.md) - [Commits](https://github.com/cucumber/cucumber-jvm/compare/v7.11.1...v7.11.2) --- updated-dependencies: - dependency-name: io.cucumber:cucumber-java dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- edc-tests/cucumber/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-tests/cucumber/build.gradle.kts b/edc-tests/cucumber/build.gradle.kts index 431ebcbb7..17f5233d7 100644 --- a/edc-tests/cucumber/build.gradle.kts +++ b/edc-tests/cucumber/build.gradle.kts @@ -19,7 +19,7 @@ dependencies { testImplementation("com.google.code.gson:gson:2.10") testImplementation("org.apache.httpcomponents:httpclient:4.5.14") testImplementation("org.junit.platform:junit-platform-suite:1.9.2") - testImplementation("io.cucumber:cucumber-java:7.11.1") + testImplementation("io.cucumber:cucumber-java:7.11.2") testImplementation("io.cucumber:cucumber-junit-platform-engine:7.11.2") testImplementation("org.slf4j:slf4j-api:2.0.3") testImplementation(libs.restAssured) From d47f39e6ce7e27f36952ab21374cbfe04bb3d353 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Apr 2023 07:56:48 +0200 Subject: [PATCH 75/92] chore(deps): bump org.testcontainers:junit-jupiter from 1.17.6 to 1.18.0 (#224) Bumps [org.testcontainers:junit-jupiter](https://github.com/testcontainers/testcontainers-java) from 1.17.6 to 1.18.0. - [Release notes](https://github.com/testcontainers/testcontainers-java/releases) - [Changelog](https://github.com/testcontainers/testcontainers-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/testcontainers/testcontainers-java/compare/1.17.6...1.18.0) --- updated-dependencies: - dependency-name: org.testcontainers:junit-jupiter dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- edc-extensions/hashicorp-vault/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-extensions/hashicorp-vault/build.gradle.kts b/edc-extensions/hashicorp-vault/build.gradle.kts index 90758edd0..8433e74a3 100644 --- a/edc-extensions/hashicorp-vault/build.gradle.kts +++ b/edc-extensions/hashicorp-vault/build.gradle.kts @@ -9,7 +9,7 @@ dependencies { implementation(edc.junit) implementation(libs.bouncyCastle.bcpkix) implementation(libs.okhttp) - implementation("org.testcontainers:junit-jupiter:1.17.6") + implementation("org.testcontainers:junit-jupiter:1.18.0") implementation("org.testcontainers:vault:1.17.6") testImplementation(libs.mockito.inline) } From bdd8482c9b5742b1dd58dc9acc869400aca6d573 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Apr 2023 07:57:25 +0200 Subject: [PATCH 76/92] chore(deps): bump com.bmuschko.docker-remote-api from 9.2.1 to 9.3.1 (#222) Bumps com.bmuschko.docker-remote-api from 9.2.1 to 9.3.1. --- updated-dependencies: - dependency-name: com.bmuschko.docker-remote-api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle.kts b/build.gradle.kts index 33408a8b1..13dc385b0 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -8,7 +8,7 @@ plugins { id("io.freefair.lombok") version "6.6.2" id("com.diffplug.spotless") version "6.15.0" id("com.github.johnrengelman.shadow") version "8.0.0" - id("com.bmuschko.docker-remote-api") version "9.2.1" + id("com.bmuschko.docker-remote-api") version "9.3.1" id("org.sonarqube") version "4.0.0.2929" } From 945ef0c27d7a5262925735af1042ad4eda72cc8e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Apr 2023 07:58:20 +0200 Subject: [PATCH 77/92] chore(deps): bump org.testcontainers:vault from 1.17.6 to 1.18.0 (#223) Bumps [org.testcontainers:vault](https://github.com/testcontainers/testcontainers-java) from 1.17.6 to 1.18.0. - [Release notes](https://github.com/testcontainers/testcontainers-java/releases) - [Changelog](https://github.com/testcontainers/testcontainers-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/testcontainers/testcontainers-java/compare/1.17.6...1.18.0) --- updated-dependencies: - dependency-name: org.testcontainers:vault dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> --- edc-extensions/hashicorp-vault/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-extensions/hashicorp-vault/build.gradle.kts b/edc-extensions/hashicorp-vault/build.gradle.kts index 8433e74a3..1fc98b31b 100644 --- a/edc-extensions/hashicorp-vault/build.gradle.kts +++ b/edc-extensions/hashicorp-vault/build.gradle.kts @@ -9,7 +9,7 @@ dependencies { implementation(edc.junit) implementation(libs.bouncyCastle.bcpkix) implementation(libs.okhttp) + implementation("org.testcontainers:vault:1.18.0") implementation("org.testcontainers:junit-jupiter:1.18.0") - implementation("org.testcontainers:vault:1.17.6") testImplementation(libs.mockito.inline) } From fd241d069609e421060f79811cf8df34553f3894 Mon Sep 17 00:00:00 2001 From: "Sascha Isele (ZF Friedrichshafen AG)" <127207440+saschaisele-zf@users.noreply.github.com> Date: Mon, 17 Apr 2023 10:16:13 +0200 Subject: [PATCH 78/92] docs(control-plane-adapter): improve documentation on how to use the control-plane adapter extension (#210) --- edc-extensions/control-plane-adapter/README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/edc-extensions/control-plane-adapter/README.md b/edc-extensions/control-plane-adapter/README.md index fe9d4787c..5f2d0d890 100644 --- a/edc-extensions/control-plane-adapter/README.md +++ b/edc-extensions/control-plane-adapter/README.md @@ -39,9 +39,17 @@ To run CP-Adapter in "PERSISTENT" mode, You need to create a proper tables with 1. 
Client sends a GET request with two parameters: assetId and the url of the provider controlplane: ```plain - /adapter/asset/sync/{assetId}?providerUrl={providerUrl} + {controlplaneUrl}:{web.http.management.port}/{web.http.management.path}/adapter/asset/sync/{assetId}?providerUrl={providerUrl} ``` + | Name | Description | + |----------------------------|----------------------------------------------------------------------------------| + | `controlplaneUrl` | The URL where the control plane of the consumer connector is available | + | `web.http.management.port` | Port of the management API provided by the control plane | + | `web.http.management.path` | Path of the management API provided by the control plane | + | `assetId` | ID of the wanted asset | + | `providerUrl` | URL pointing to the `data` endpoint of the IDS context of the provider connector | + The example URL could be: ```plain From ec424a847d950e3ebb17e984e13b26ca26e2837a Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Date: Mon, 17 Apr 2023 10:49:41 +0200 Subject: [PATCH 79/92] feature: create in-mem helm chart (#219) * feature: create the tractusx-connector-memory chart * pr remarks * pr remarks * increase waiting for negotiation, sometimes takes longer than 2 seconds * Apply suggestions from code review Co-authored-by: Florian Rusch (ZF Friedrichshafen AG) * pr remarks * Update charts/tractusx-connector-memory/templates/deployment-runtime.yaml Co-authored-by: Florian Rusch (ZF Friedrichshafen AG) --------- Co-authored-by: Florian Rusch (ZF Friedrichshafen AG) --- .../actions/publish-docker-image/action.yml | 2 - .../actions/run-deployment-test/action.yml | 103 ++++++ .github/workflows/build.yaml | 11 +- .github/workflows/business-tests.yaml | 1 - .github/workflows/deploy-test-secrets | 51 +++ .github/workflows/deployment-test.yaml | 67 ++++ .github/workflows/publish-docker.yaml | 2 +- .github/workflows/trivy.yml | 25 +- .github/workflows/veracode.yaml | 44 +-- CHANGELOG.md | 38 ++- README.md | 30 +- charts/tractusx-connector-memory/.helmignore | 23 ++ charts/tractusx-connector-memory/Chart.yaml | 45 +++ charts/tractusx-connector-memory/README.md | 241 ++++++++++++++ .../README.md.gotmpl | 26 ++ charts/tractusx-connector-memory/example.yaml | 65 ++++ .../templates/NOTES.txt | 22 ++ .../templates/_helpers.tpl | 157 +++++++++ .../templates/configmap-runtime.yaml | 33 ++ .../templates/deployment-runtime.yaml | 302 +++++++++++++++++ .../templates/hpa-runtime.yaml | 29 ++ .../templates/ingress-runtime.yaml | 77 +++++ .../templates/service-runtime.yaml | 59 ++++ .../templates/serviceaccount.yaml | 16 + .../templates/tests/test-readiness.yaml | 15 + charts/tractusx-connector-memory/values.yaml | 313 ++++++++++++++++++ .../templates/deployment-controlplane.yaml | 2 +- docs/README.md | 12 +- docs/migration/Version_0.3.1_0.3.2.md | 2 +- edc-controlplane/build.gradle.kts | 3 +- .../README.md | 109 +++--- .../build.gradle.kts | 11 +- .../notice.md | 4 +- .../src/main/docker/Dockerfile | 22 +- .../edc/vault/memory/InMemoryVault.java | 53 +++ .../vault/memory/VaultMemoryExtension.java | 54 +++ ...rg.eclipse.edc.spi.system.ServiceExtension | 21 ++ .../edc/vault/memory/InMemoryVaultTest.java | 56 ++++ .../memory/VaultMemoryExtensionTest.java | 52 +++ .../supporting-infrastructure/values.yaml | 13 - .../tractusx/edc/tests/data/Negotiation.java | 2 +- .../main/resources/helm/omejdn/.helmignore | 23 ++ .../src/main/resources/helm/omejdn/Chart.yaml | 25 ++
.../src/main/resources/helm/omejdn/README.md | 21 ++ .../helm/omejdn/templates/_helpers.tpl | 62 ++++ .../helm/omejdn/templates/configmap.yaml | 73 ++++ .../helm/omejdn/templates/deployment.yaml | 149 +++++++++ .../resources/helm/omejdn/templates/hpa.yaml | 28 ++ .../omejdn/templates/imagepullsecret.yaml | 13 + .../helm/omejdn/templates/service.yaml | 15 + .../helm/omejdn/templates/serviceaccount.yaml | 12 + .../main/resources/helm/omejdn/values.yaml | 91 +++++ .../helm/test-infrastructure/.gitignore | 4 + .../helm/test-infrastructure/.helmignore | 24 ++ .../helm/test-infrastructure/Chart.yaml | 54 +++ .../helm/test-infrastructure/README.md | 54 +++ .../helm/test-infrastructure/values.yaml | 185 +++++++++++ settings.gradle.kts | 36 +- 58 files changed, 2883 insertions(+), 199 deletions(-) create mode 100644 .github/actions/run-deployment-test/action.yml create mode 100644 .github/workflows/deploy-test-secrets create mode 100644 .github/workflows/deployment-test.yaml create mode 100644 charts/tractusx-connector-memory/.helmignore create mode 100644 charts/tractusx-connector-memory/Chart.yaml create mode 100644 charts/tractusx-connector-memory/README.md create mode 100644 charts/tractusx-connector-memory/README.md.gotmpl create mode 100644 charts/tractusx-connector-memory/example.yaml create mode 100644 charts/tractusx-connector-memory/templates/NOTES.txt create mode 100644 charts/tractusx-connector-memory/templates/_helpers.tpl create mode 100644 charts/tractusx-connector-memory/templates/configmap-runtime.yaml create mode 100644 charts/tractusx-connector-memory/templates/deployment-runtime.yaml create mode 100644 charts/tractusx-connector-memory/templates/hpa-runtime.yaml create mode 100644 charts/tractusx-connector-memory/templates/ingress-runtime.yaml create mode 100644 charts/tractusx-connector-memory/templates/service-runtime.yaml create mode 100644 charts/tractusx-connector-memory/templates/serviceaccount.yaml create mode 100644 charts/tractusx-connector-memory/templates/tests/test-readiness.yaml create mode 100644 charts/tractusx-connector-memory/values.yaml rename edc-controlplane/{edc-controlplane-memory => edc-runtime-memory}/README.md (54%) rename edc-controlplane/{edc-controlplane-memory => edc-runtime-memory}/build.gradle.kts (77%) rename edc-controlplane/{edc-controlplane-memory => edc-runtime-memory}/notice.md (90%) rename edc-controlplane/{edc-controlplane-memory => edc-runtime-memory}/src/main/docker/Dockerfile (61%) create mode 100644 edc-controlplane/edc-runtime-memory/src/main/java/org/eclipse/tractusx/edc/vault/memory/InMemoryVault.java create mode 100644 edc-controlplane/edc-runtime-memory/src/main/java/org/eclipse/tractusx/edc/vault/memory/VaultMemoryExtension.java create mode 100644 edc-controlplane/edc-runtime-memory/src/main/resources/META-INF/services/org.eclipse.edc.spi.system.ServiceExtension create mode 100644 edc-controlplane/edc-runtime-memory/src/test/java/org/eclipse/tractusx/edc/vault/memory/InMemoryVaultTest.java create mode 100644 edc-controlplane/edc-runtime-memory/src/test/java/org/eclipse/tractusx/edc/vault/memory/VaultMemoryExtensionTest.java create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/.helmignore create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/Chart.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/README.md create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/templates/_helpers.tpl create mode 100644 
edc-tests/deployment/src/main/resources/helm/omejdn/templates/configmap.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/templates/deployment.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/templates/hpa.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/templates/imagepullsecret.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/templates/service.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/templates/serviceaccount.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/omejdn/values.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/test-infrastructure/.gitignore create mode 100644 edc-tests/deployment/src/main/resources/helm/test-infrastructure/.helmignore create mode 100644 edc-tests/deployment/src/main/resources/helm/test-infrastructure/Chart.yaml create mode 100644 edc-tests/deployment/src/main/resources/helm/test-infrastructure/README.md create mode 100644 edc-tests/deployment/src/main/resources/helm/test-infrastructure/values.yaml diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index 600383f81..206e13d4c 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -48,7 +48,6 @@ runs: # Login to DockerHub ##################### - name: DockerHub login - if: github.event_name != 'pull_request' uses: docker/login-action@v2 with: username: ${{ inputs.docker_user }} @@ -108,7 +107,6 @@ runs: # https://github.com/peter-evans/dockerhub-description ############################### - name: Update Docker Hub description - if: github.event_name != 'pull_request' uses: peter-evans/dockerhub-description@v3 with: readme-filepath: ${{ inputs.rootDir }}/notice.md diff --git a/.github/actions/run-deployment-test/action.yml b/.github/actions/run-deployment-test/action.yml new file mode 100644 index 000000000..ed720b4be --- /dev/null +++ b/.github/actions/run-deployment-test/action.yml @@ -0,0 +1,103 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +--- +name: "Run Deployment Test" +description: "Build and publish a Docker Image to DockerHub" +inputs: + imagename: + required: true + description: "name of the docker image, e.g. edc-runtime-memory" + + image_tag: + required: false + default: "latest" + description: "docker image tag, defaults to 'latest'" + + helm_command: + required: true + description: "command which is executed to install the chart. must also include verification commands, such as 'helm test'" + + rootDir: + required: true + description: "The directory that contains the docker file, e.g. 
edc-controlplane/edc-runtime-memory" + +runs: + using: "composite" + steps: + - name: Checkout + uses: actions/checkout@v3.3.0 + + - name: Cache ContainerD Image Layers + uses: actions/cache@v3 + with: + path: /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs + key: ${{ runner.os }}-io.containerd.snapshotter.v1.overlayfs + + - name: Set up JDK 11 + uses: actions/setup-java@v3.11.0 + with: + java-version: '11' + distribution: 'temurin' + cache: 'gradle' + + - name: Build docker images + shell: bash + run: |- + ./gradlew -p ${{ inputs.rootDir }} dockerize + + - name: Setup Helm + uses: azure/setup-helm@v3.5 + with: + version: v3.8.1 + + - name: Setup Kubectl + uses: azure/setup-kubectl@v3.2 + + - name: Create k8s Kind Cluster + uses: helm/kind-action@v1.5.0 + + - name: Load images into KinD + shell: bash + run: | + kind get clusters | xargs -n1 kind load docker-image ${{ inputs.imagename }}:${{ inputs.image_tag }} --name + + ################################################### + # Install the test infrastructure + ################################################### + - name: Install Infrastructure + shell: bash + run: |- + helm install infra edc-tests/deployment/src/main/resources/helm/test-infrastructure \ + --wait-for-jobs --timeout=30s --dependency-update + + - name: Install Runtime + shell: bash + run: ${{ inputs.helm_command }} + + + ################# + ### Tear Down ### + ################# + - name: Destroy the kind cluster + if: always() + shell: bash + run: >- + kind get clusters | xargs -n1 kind delete cluster --name \ No newline at end of file diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 76d0e01c5..2c2dda9c2 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -55,6 +55,7 @@ jobs: SONAR_TOKEN: ${{ steps.secret-presence.outputs.SONAR_TOKEN }} GPG_PRIVATE_KEY: ${{ steps.secret-presence.outputs.GPG_PRIVATE_KEY }} GPG_PASSPHRASE: ${{ steps.secret-presence.outputs.GPG_PASSPHRASE }} + DOCKER_HUB_TOKEN: ${{ steps.secret-presence.outputs.DOCKER_HUB_TOKEN }} steps: - name: Check whether secrets exist id: secret-presence @@ -62,6 +63,7 @@ jobs: [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "::set-output name=SONAR_TOKEN::true" [ ! -z "${{ secrets.GPG_PRIVATE_KEY }}" ] && echo "::set-output name=GPG_PRIVATE_KEY::true" [ ! -z "${{ secrets.GPG_PASSPHRASE }}" ] && echo "::set-output name=GPG_PASSPHRASE::true" + [ ! 
-z "${{ secrets.DOCKER_HUB_TOKEN }}" ] && echo "::set-output name=DOCKER_HUB_TOKEN::true" exit 0 build-extensions: @@ -89,11 +91,13 @@ jobs: name: "Create Docker Images for the ControlPlane" runs-on: ubuntu-latest needs: [ secret-presence ] + if: | + needs.secret-presence.outputs.DOCKER_HUB_TOKEN strategy: fail-fast: false matrix: name: - - edc-controlplane-memory + - edc-runtime-memory - edc-controlplane-memory-hashicorp-vault - edc-controlplane-postgresql - edc-controlplane-postgresql-hashicorp-vault @@ -110,8 +114,11 @@ jobs: docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} build-dataplane: + name: "Create Docker Images for the DataPlane" runs-on: ubuntu-latest needs: [ secret-presence ] + if: | + needs.secret-presence.outputs.DOCKER_HUB_TOKEN strategy: fail-fast: false matrix: @@ -135,7 +142,7 @@ jobs: permissions: contents: read packages: write - needs: [ secret-presence, build-controlplane, build-dataplane, build-extensions ] + needs: [ secret-presence, build-extensions ] # do not run on PR branches, do not run on releases if: | diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index 97a3044fb..39caaadb1 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -166,7 +166,6 @@ jobs: sleep 5s # Wait for supporting infrastructure to become ready (control-/data-plane, backend service) - kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=backend --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=backend --tail 500 && exit 1 ) kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=idsdaps --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=idsdaps --tail 500 && exit 1 ) kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=vault --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=vault --tail 500 && exit 1 ) kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=sokrates-postgresql --timeout=120s || ( kubectl logs -l app.kubernetes.io/name=sokrates-postgresql --tail 500 && exit 1 ) diff --git a/.github/workflows/deploy-test-secrets b/.github/workflows/deploy-test-secrets new file mode 100644 index 000000000..28596b459 --- /dev/null +++ b/.github/workflows/deploy-test-secrets @@ -0,0 +1,51 @@ +daps-key:-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCv+NUvK7ppJPiM +wZPaQQxE745T5pV38O/Mkay5m82nnd5BoMoCdhhRTy3Efy79FhvBfGruFBLLGzsQ +FOEUY53Albeumo2gmpZSKjJR/M2ifK4MTaRniVOWL5mEcZSKPhsItKpxdLaiYfB6 +8uzqkqNICtmAQRSclYKzLBM9xHLEtxDWCbnzYFCHoOELGi+PTNIFsUnsT3QuKaJ/ +ejb47vdA/EZbwCQdtTyJ6i54jGhZUp0WMwq1Go2uhzJsygPmT2da/ZZZc7BNNEQE +sUSMZSpMH807TG/TunstotrzO4ShhpV4zbJ2FV/VlxH7yuCawmnR84F/KnXs9fUc +RSrQfuYBAgMBAAECggEAO+KjsjTgcG3bhBNQnMLsSP15Y0Yicbn18ZlVvaivGS7Z +d14fwSytY+ZdPfTGaey/L16HCVSdfK9cr0Fbw9OO2P5ajzobnp9dLsMbctlkpbpm +hNtbarzKTF8QkIkSsuUl0BWjt46vpJ1N+Jl5VO7oUFkY4dPEDvG2lAEY3zlekWDm +cQeOC/YgpoW4xfRwPPS6QE0w3Q+H5NfNjfz+mSHeItTlVfTKDRliWQLPWeRZFuXh +FlRFUQnTmEE/9wpIe3Hn7WXJ3fQqcYDzxU7/zwwY9I7bB15SgVHlR0ENDPAD5X8F +MVZ3EcLlqGBy+WvTWALp6pc8YfhW3fiTWyuamXtNrQKBgQDonsIzBKEOOKdKGW0e +uyw79ErmnmzkY5nuMrMxrmTA4WKCfJ/YRRA+4sxiltWsIJ3UkHe3OBCSSCdj79hb +ugb/+UzE70hOdgrct2NUQqbrj3gvsVvU8ZRQgTRMqKpmC0zY7KOMx6NU85z3IvS1 +z5fjszcUv4kLQlldYGSAuqPy+wKBgQDBqIkc8p/wcw7ygo1q/GerNeszfoxiIFp8 +h4RWLVhkwrcXFz30wBlUWuv5/kxU8tmJcmXxe72EmUstd6wvNOAnYwCiile6zQiJ +vsr1axavZnGOtNGUp6DUAsd2iviBl7IZ7kAcqCrQo4ivGhfHmahH3hmg8wuAMjYB +8f+FSPgaMwKBgQC7W4tMrjDOFIFhJEOIWfcRvvxI7VcFSNelS76aiDzsQVwnfxr7 
+hPzFucQmsBgfUBHvMADMWGK4f1cCnh5kGtwidXgIsjVJxLeQ+EAPkLOCzQZfW3l8 +dKshgD9QcxTzpaxal5ZPAEikVqaZQtVYToCmzCTUGETYBbOWitnH+Qut2wKBgQC6 +Y6DcSLUhc0xOotLDxv1sbu/aVxF8nFEbDD+Vxf0Otc4MnmUWPRHj+8KlkVkcZcR0 +IrP1kThd+EDAGS+TG9wmbIY+6tH3S8HM+eJUBWcHGJ1xUZ1p61DC3Y3nDWiTKlLT +3Fi+fCkBOHSku4Npq/2odh7Kp0JJd4o9oxJg0VNhuwKBgQDSFn7dqFE0Xmwc40Vr +0wJH8cPWXKGt7KJENpj894buk2DniLD4w2x874dzTjrOFi6fKxEzbBNA9Rq9UPo8 +u9gKvl/IyWmV0c4zFCNMjRwVdnkMEte/lXcJZ67T4FXZByqAZlhrr/v0FD442Z9B +AjWFbUiBCFOo+gpAFcQGrkOQHA== +-----END PRIVATE KEY-----;daps-crt:-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIUXFgjbN7jxGRUDkoUvEwcN3zcew8wDQYJKoZIhvcNAQEL +BQAwgZAxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJsaW4xDzANBgNVBAcMBkJl +cmxpbjEMMAoGA1UECgwDQk1XMSAwHgYDVQQLDBdlZGMtcGxheWdyb3VuZC1wYXJ0 +bmVyMTEvMC0GA1UEAwwmc29rcmF0ZXMtZWRjLmRlbW8uY2F0ZW5hLXgubmV0L0JQ +TjEyMzQwHhcNMjIwNTEwMDc1NzMzWhcNMjMwNTEwMDc1NzMzWjCBkDELMAkGA1UE +BhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVybGluMQwwCgYDVQQK +DANCTVcxIDAeBgNVBAsMF2VkYy1wbGF5Z3JvdW5kLXBhcnRuZXIxMS8wLQYDVQQD +DCZzb2tyYXRlcy1lZGMuZGVtby5jYXRlbmEteC5uZXQvQlBOMTIzNDCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAK/41S8rumkk+IzBk9pBDETvjlPmlXfw +78yRrLmbzaed3kGgygJ2GFFPLcR/Lv0WG8F8au4UEssbOxAU4RRjncCVt66ajaCa +llIqMlH8zaJ8rgxNpGeJU5YvmYRxlIo+Gwi0qnF0tqJh8Hry7OqSo0gK2YBBFJyV +grMsEz3EcsS3ENYJufNgUIeg4QsaL49M0gWxSexPdC4pon96Nvju90D8RlvAJB21 +PInqLniMaFlSnRYzCrUaja6HMmzKA+ZPZ1r9lllzsE00RASxRIxlKkwfzTtMb9O6 +ey2i2vM7hKGGlXjNsnYVX9WXEfvK4JrCadHzgX8qdez19RxFKtB+5gECAwEAAaNT +MFEwHQYDVR0OBBYEFOcHLXRWZjHwexDqtgMGTCN/7aZlMB8GA1UdIwQYMBaAFOcH +LXRWZjHwexDqtgMGTCN/7aZlMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAD2a5kuIdICNXfYLpSe7AIONwZVucaArYtpXBxHEy5lMJsTEJgjZzypd +iIMU7onEQGVbii6yVNpWfIpJYM4e8ytVdJuk5evclVKZs/lZ2IshLyWFVj+ITh2E +28X4C/Hnmt4MPBCNowQf71nMp4LEziBgXp54qFV9C+qSTEVdrherRE0PU/zKyX10 +S/P5o42weTHnAO/pBN/8AmL3AymynKVgcPaW46IjjRAuc6kfZWCrYQ0M4+/7Ws5r +uM55Zae/L+C82OTNNaaK324ogsCkORPeQ23OCrRD8rZJmQ9bpoOGglPminfwEOhB +UHtyKgmvqCyOV3G/4G93W/xsLV0kxLA= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/.github/workflows/deployment-test.yaml b/.github/workflows/deployment-test.yaml new file mode 100644 index 000000000..7d38b24ac --- /dev/null +++ b/.github/workflows/deployment-test.yaml @@ -0,0 +1,67 @@ +# +# Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH +# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +--- +name: "Deployment Tests" + +on: + push: + branches: + - main + - develop + tags: + - '[0-9]+.[0-9]+.[0-9]+' + release: + types: + - published + pull_request: + paths-ignore: + - 'docs/**' + - '**/*.md' + branches: + - '*' + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + deployment-test-memory: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3.3.0 + - uses: ./.github/actions/run-deployment-test + name: "Run deployment test using KinD and Helm" + with: + imagename: edc-runtime-memory + rootDir: edc-controlplane/edc-runtime-memory + helm_command: |- + helm install tx-inmem charts/tractusx-connector-memory \ + -f charts/tractusx-connector-memory/example.yaml \ + --set vault.secrets="$(cat ./.github/workflows/deploy-test-secrets)" \ + --wait-for-jobs --timeout=120s + + # wait for the pod to become ready + kubectl rollout status deployment tx-inmem + + # execute the helm test + helm test tx-inmem diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index e2a7a5384..794d15061 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -42,7 +42,7 @@ jobs: fail-fast: false matrix: name: - - edc-controlplane-memory + - edc-runtime-memory - edc-controlplane-memory-hashicorp-vault - edc-controlplane-postgresql - edc-controlplane-postgresql-hashicorp-vault diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 714ed4c74..c315e8a07 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -6,7 +6,7 @@ on: - cron: "0 0 * * *" workflow_dispatch: workflow_run: - workflows: ["Build"] + workflows: [ "Build" ] branches: - main - releases @@ -24,8 +24,7 @@ jobs: outputs: value: ${{ steps.git-sha7.outputs.SHA7 }} steps: - - - name: Resolve git 7-chars sha + - name: Resolve git 7-chars sha id: git-sha7 run: | echo "::set-output name=SHA7::${GITHUB_SHA::7}" @@ -37,11 +36,9 @@ jobs: contents: read security-events: write steps: - - - name: Checkout repository + - name: Checkout repository uses: actions/checkout@v3.3.0 - - - name: Run Trivy vulnerability scanner in repo mode + - name: Run Trivy vulnerability scanner in repo mode uses: aquasecurity/trivy-action@master with: scan-type: "config" @@ -51,8 +48,7 @@ jobs: format: "sarif" output: "trivy-results-config.sarif" severity: "CRITICAL,HIGH" - - - name: Upload Trivy scan results to GitHub Security tab + - name: Upload Trivy scan results to GitHub Security tab uses: github/codeql-action/upload-sarif@v2 if: always() with: @@ -69,18 +65,16 @@ jobs: fail-fast: false # continue scanning other images although if the other has been vulnerable matrix: image: - - edc-controlplane-memory + - edc-runtime-memory - edc-controlplane-memory-hashicorp-vault - edc-controlplane-postgresql - edc-controlplane-postgresql-hashicorp-vault - edc-dataplane-azure-vault - edc-dataplane-hashicorp-vault steps: - - - name: Checkout + - name: Checkout uses: actions/checkout@v3.3.0 - - - name: Run Trivy vulnerability scanner + - name: Run Trivy vulnerability scanner if: always() uses: aquasecurity/trivy-action@master with: @@ -90,8 +84,7 @@ jobs: exit-code: "1" severity: "CRITICAL,HIGH" timeout: "10m0s" - - - name: Upload Trivy scan results to GitHub Security tab + - name: Upload Trivy scan results to GitHub Security tab if: always() uses: github/codeql-action/upload-sarif@v2 with: diff --git 
a/.github/workflows/veracode.yaml b/.github/workflows/veracode.yaml index a58d3fa4d..bba9df1b5 100644 --- a/.github/workflows/veracode.yaml +++ b/.github/workflows/veracode.yaml @@ -13,8 +13,7 @@ jobs: ORG_VERACODE_API_ID: ${{ steps.secret-presence.outputs.ORG_VERACODE_API_ID }} ORG_VERACODE_API_KEY: ${{ steps.secret-presence.outputs.ORG_VERACODE_API_KEY }} steps: - - - name: Check whether secrets exist + - name: Check whether secrets exist id: secret-presence run: | [ ! -z "${{ secrets.ORG_VERACODE_API_ID }}" ] && echo "::set-output name=ORG_VERACODE_API_ID::true" @@ -24,20 +23,17 @@ jobs: verify-formatting: runs-on: ubuntu-latest steps: - - - name: Checkout + - name: Checkout uses: actions/checkout@v3.3.0 with: fetch-depth: 0 - - - name: Set up JDK 11 + - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: java-version: '17' distribution: 'temurin' cache: 'gradle' - - - name: Verify proper formatting + - name: Verify proper formatting run: ./gradlew spotlessCheck build-controlplane: @@ -49,36 +45,31 @@ jobs: fail-fast: false matrix: name: - - edc-controlplane-memory + - edc-runtime-memory - edc-controlplane-memory-hashicorp-vault - edc-controlplane-postgresql - edc-controlplane-postgresql-hashicorp-vault steps: # Set-Up - - - name: Checkout + - name: Checkout uses: actions/checkout@v3.3.0 - - - name: Set up JDK 11 + - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: java-version: '17' distribution: 'temurin' cache: 'gradle' # Build - - - name: Build Controlplane + - name: Build Controlplane run: |- ./gradlew -p edc-controlplane/${{ matrix.name }} shadowJar env: GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - - - name: Tar gzip files for veracode upload + - name: Tar gzip files for veracode upload run: |- tar -czvf edc-controlplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.tar.gz edc-controlplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.jar - - - name: Veracode Upload And Scan + - name: Veracode Upload And Scan uses: veracode/veracode-uploadandscan-action@v1.0 if: | needs.secret-presence.outputs.ORG_VERACODE_API_ID && needs.secret-presence.outputs.ORG_VERACODE_API_KEY @@ -104,30 +95,25 @@ jobs: - edc-dataplane-hashicorp-vault steps: # Set-Up - - - name: Checkout + - name: Checkout uses: actions/checkout@v3.3.0 - - - name: Set up JDK 11 + - name: Set up JDK 11 uses: actions/setup-java@v3.11.0 with: java-version: '17' distribution: 'temurin' cache: 'gradle' # Build - - - name: Build Dataplane + - name: Build Dataplane run: |- ./gradlew -p edc-dataplane/${{ matrix.name }} shadowJar env: GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - - - name: Tar gzip files for veracode upload + - name: Tar gzip files for veracode upload run: |- tar -czvf edc-dataplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.tar.gz edc-dataplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.jar - - - name: Veracode Upload And Scan + - name: Veracode Upload And Scan uses: veracode/veracode-uploadandscan-action@v1.0 if: | needs.secret-presence.outputs.ORG_VERACODE_API_ID && needs.secret-presence.outputs.ORG_VERACODE_API_KEY diff --git a/CHANGELOG.md b/CHANGELOG.md index 79051eb6d..e84846211 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -69,7 +69,7 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). 
- update control plane docu (#623) - update postgresql version in Chart.yaml supporting-infrastructure (#622) - update link to edc logo in README.md (#612) -- update description of supporting infrastructure deployment (#616) +- update description of supporting infrastructure deployment (#616) ### Fixed @@ -84,7 +84,7 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). - Bump alpine (#749) - Bump alpine (#750) - Bump alpine (#752) -- Bump alpine in /edc-controlplane/edc-controlplane-memory/src/main/docker (#753) +- Bump alpine in /edc-controlplane/edc-runtime-memory/src/main/docker (#753) - Bump maven-deploy-plugin from 3.0.0 to 3.1.0 (#735) - Bump actions/setup-java from 3.9.0 to 3.10.0 (#730) - Bump s3 from 2.19.33 to 2.20.0 @@ -117,7 +117,7 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). - Bump s3 from 2.19.11 to 2.19.15 (#668) - Bump maven-surefire-plugin from 3.0.0-M7 to 3.0.0-M8 (#670) - Bump edc version to 0.0.1-20230109-SNAPSHOT (#666) -- Bump alpine in /edc-controlplane/edc-controlplane-memory/src/main/docker (#659) +- Bump alpine in /edc-controlplane/edc-runtime-memory/src/main/docker (#659) - Bump alpine in /edc-dataplane/edc-dataplane-azure-vault/src/main/docker (#660) - Bump alpine (#658) - Bump alpine (#661) @@ -171,7 +171,8 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). ## [0.1.1] - 2022-09-04 -**Important Note**: Please consolidate the migration documentation before updating your connector. [documentation](/docs/migration/Version_0.1.0_0.1.1.md). +**Important Note**: Please consolidate the migration documentation before updating your +connector. [documentation](/docs/migration/Version_0.1.0_0.1.1.md). ### Added @@ -184,7 +185,8 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). ### Fixed -- Connectors with Azure Vault extension are now starting again [link](https://github.com/eclipse-edc/Connector/issues/1892) +- Connectors with Azure Vault extension are now starting + again [link](https://github.com/eclipse-edc/Connector/issues/1892) ## [0.1.0] - 2022-08-19 @@ -193,11 +195,13 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). ### Added -- Control-Plane extension ([data-plane-selector-client](https://github.com/eclipse-edc/Connector/tree/v0.0.1-milestone-5/extensions/data-plane-selector/selector-client)) +- Control-Plane + extension ([data-plane-selector-client](https://github.com/eclipse-edc/Connector/tree/v0.0.1-milestone-5/extensions/data-plane-selector/selector-client)) - run the EDC with multiple data planes at once - Control-Plane extension ([dataplane-selector-configuration](edc-extensions/dataplane-selector-configuration)) - add data plane instances to the control plane by configuration -- Data-Plane extension ([s3-data-plane](https://github.com/eclipse-edc/Connector/tree/main/extensions/aws/data-plane-s3)) +- Data-Plane + extension ([s3-data-plane](https://github.com/eclipse-edc/Connector/tree/main/extensions/aws/data-plane-s3)) - transfer from and to AWS S3 buckets - Control-Plane extension ([data-encryption](edc-extensions/data-encryption)) - Data-Plane authentication attribute transmitted during data-plane-transfer can be encrypted symmetrically (AES) @@ -205,19 +209,27 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). 
### Changed - Update setting name (`edc.dataplane.token.validation.endpoint` -> `edc.dataplane.token.validation.endpoint`) -- EDC has been updated to version [0.0.1-20220818-SNAPSHOT](https://oss.sonatype.org/#nexus-search;gav~org.eclipse.dataspaceconnector~~0.0.1-20220818-SNAPSHOT~~) - implications to the behavior of the connector have been covered in the [corresponding migration guide](docs/migration/Version_0.0.x_0.1.x.md) +- EDC has been updated to + version [0.0.1-20220818-SNAPSHOT](https://oss.sonatype.org/#nexus-search;gav~org.eclipse.dataspaceconnector~~0.0.1-20220818-SNAPSHOT~~) - + implications to the behavior of the connector have been covered in + the [corresponding migration guide](docs/migration/Version_0.0.x_0.1.x.md) ### Fixed -- Contract-Offer-Receiving-Connectors must also pass the ContractPolicy of the ContractDefinition before receiving offers([issue](https://github.com/eclipse-edc/Connector/issues/1331)) -- Deletion of Asset becomes impossible when Contract Negotiation exists([issue](https://github.com/eclipse-edc/Connector/issues/1403)) -- Deletion of Policy becomes impossible when Contract Definition exists([issue](https://github.com/eclipse-edc/Connector/issues/1410)) +- Contract-Offer-Receiving-Connectors must also pass the ContractPolicy of the ContractDefinition before receiving + offers([issue](https://github.com/eclipse-edc/Connector/issues/1331)) +- Deletion of Asset becomes impossible when Contract Negotiation + exists([issue](https://github.com/eclipse-edc/Connector/issues/1403)) +- Deletion of Policy becomes impossible when Contract Definition + exists([issue](https://github.com/eclipse-edc/Connector/issues/1410)) ## [0.0.6] - 2022-07-29 ### Fixed -- Fixes [release 0.0.5](https://github.com/eclipse-tractusx/tractusx-edc/releases/tag/0.0.5), which introduced classpath issues due to usage of [net.jodah:failsafe:2.4.3](https://search.maven.org/artifact/net.jodah/failsafe/2.4.3/jar) library +- Fixes [release 0.0.5](https://github.com/eclipse-tractusx/tractusx-edc/releases/tag/0.0.5), which introduced classpath + issues due to usage of [net.jodah:failsafe:2.4.3](https://search.maven.org/artifact/net.jodah/failsafe/2.4.3/jar) + library ## [0.0.5] - 2022-07-28 @@ -245,7 +257,7 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). ### Fixed - [#1515](https://github.com/eclipse-edc/Connector/issues/1515) SQL: Connector sends out 50 - contract offers max. + contract offers max. ### Removed diff --git a/README.md b/README.md index 566e42f5a..d1c6a0422 100644 --- a/README.md +++ b/README.md @@ -16,24 +16,25 @@ Please also refer to: ## About The Project -The project provides pre-built control- and data-plane [docker](https://www.docker.com/) images and [helm](https://helm.sh/) charts of the [Eclipse DataSpaceConnector Project](https://github.com/eclipse-edc/Connector). +The project provides pre-built control- and data-plane [docker](https://www.docker.com/) images +and [helm](https://helm.sh/) charts of +the [Eclipse DataSpaceConnector Project](https://github.com/eclipse-edc/Connector). ## Inventory -The eclipse data space connector is split up into Control-Plane and Data-Plane, whereas the Control-Plane functions as administration layer -and has responsibility of resource management, contract negotiation and administer data transfer. 
+The eclipse data space connector is split up into Control-Plane and Data-Plane, whereas the Control-Plane functions as +administration layer and has responsibility of resource management, contract negotiation and administer data transfer. The Data-Plane does the heavy lifting of transferring and receiving data streams. Depending on your environment there are different derivatives of the control-plane prepared: -- [edc-controlplane-memory](edc-controlplane/edc-controlplane-memory) with dependency onto - - [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) - [edc-controlplane-postgresql](edc-controlplane/edc-controlplane-postgresql) with dependency onto - [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) - [PostgreSQL 8.2 or newer](https://www.postgresql.org/) -- [edc-controlplane-postgresql-hashicorp-vault](edc-controlplane/edc-controlplane-postgresql-hashicorp-vault) with dependency onto +- [edc-controlplane-postgresql-hashicorp-vault](edc-controlplane/edc-controlplane-postgresql-hashicorp-vault) with + dependency onto - [Hashicorp Vault](https://www.vaultproject.io/) - -[PostgreSQL 8.2 or newer](https://www.postgresql.org/) + - [PostgreSQL 8.2 or newer](https://www.postgresql.org/) Derivatives of the Data-Plane can be found here @@ -42,6 +43,10 @@ Derivatives of the Data-Plane can be found here - [edc-dataplane-hashicorp-vault](edc-dataplane/edc-dataplane-hashicorp-vault) with dependency onto - [Hashicorp Vault](https://www.vaultproject.io/) +For testing/development purposes: + +- [edc-runtime-memory](edc-controlplane/edc-runtime-memory) + ## Getting Started ### Build @@ -54,15 +59,24 @@ Build Tractus-X EDC together with its Container Images ## License -Distributed under the Apache 2.0 License. See [LICENSE](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) for more information. +Distributed under the Apache 2.0 License. +See [LICENSE](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) for more information. + [contributors-shield]: https://img.shields.io/github/contributors/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge + [contributors-url]: https://github.com/eclipse-tractusx/tractusx-edc/graphs/contributors + [stars-shield]: https://img.shields.io/github/stars/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge + [stars-url]: https://github.com/eclipse-tractusx/tractusx-edc/stargazers + [license-shield]: https://img.shields.io/github/license/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge + [license-url]: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE + [release-shield]: https://img.shields.io/github/v/release/eclipse-tractusx/tractusx-edc.svg?style=for-the-badge + [release-url]: https://github.com/eclipse-tractusx/tractusx-edc/releases diff --git a/charts/tractusx-connector-memory/.helmignore b/charts/tractusx-connector-memory/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/tractusx-connector-memory/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/tractusx-connector-memory/Chart.yaml b/charts/tractusx-connector-memory/Chart.yaml new file mode 100644 index 000000000..42b139a55 --- /dev/null +++ b/charts/tractusx-connector-memory/Chart.yaml @@ -0,0 +1,45 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +--- +apiVersion: v2 +name: tractusx-connector-memory +description: A Helm chart for Tractus-X Eclipse Data Space Connector based on memory +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.3.2 +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.3.2" +home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector-memory +sources: + - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector-memory diff --git a/charts/tractusx-connector-memory/README.md b/charts/tractusx-connector-memory/README.md new file mode 100644 index 000000000..1e37bc286 --- /dev/null +++ b/charts/tractusx-connector-memory/README.md @@ -0,0 +1,241 @@ +# tractusx-connector + +![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.2](https://img.shields.io/badge/AppVersion-0.3.2-informational?style=flat-square) + +A Helm chart for Tractus-X Eclipse Data Space Connector + +**Homepage:** + +## TL;DR + +```shell +helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev +helm install my-release tractusx-edc/tractusx-connector --version 0.3.2 +``` + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|---------------------------------------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| backendService.httpProxyTokenReceiverUrl | string | `""` | | +| runtime.affinity | object | `{}` | | +| runtime.autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | +| runtime.autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | +| runtime.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | +| runtime.autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | +| runtime.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | +| runtime.debug.enabled | bool | `false` | | +| runtime.debug.port | int | `1044` | | +| runtime.debug.suspendOnStart | bool | `false` | | +| runtime.endpoints | object | `{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"metrics":{"path":"/metrics","port":9090},"observability":{"insecure":true,"path":"/observability","port":8085},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane | +| runtime.endpoints.control | object | `{"path":"/control","port":8083}` | control api, used for internal control calls. 
can be added to the internal ingress, but should probably not | +| runtime.endpoints.control.path | string | `"/control"` | path for incoming api calls | +| runtime.endpoints.control.port | int | `8083` | port for incoming api calls | +| runtime.endpoints.data | object | `{"authKey":"","path":"/data","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing | +| runtime.endpoints.data.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header | +| runtime.endpoints.data.path | string | `"/data"` | path for incoming api calls | +| runtime.endpoints.data.port | int | `8081` | port for incoming api calls | +| runtime.endpoints.default | object | `{"path":"/api","port":8080}` | default api for health checks, should not be added to any ingress | +| runtime.endpoints.default.path | string | `"/api"` | path for incoming api calls | +| runtime.endpoints.default.port | int | `8080` | port for incoming api calls | +| runtime.endpoints.ids | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing | +| runtime.endpoints.ids.path | string | `"/api/v1/ids"` | path for incoming api calls | +| runtime.endpoints.ids.port | int | `8084` | port for incoming api calls | +| runtime.endpoints.metrics | object | `{"path":"/metrics","port":9090}` | metrics api, used for application metrics, must not be internet facing | +| runtime.endpoints.metrics.path | string | `"/metrics"` | path for incoming api calls | +| runtime.endpoints.metrics.port | int | `9090` | port for incoming api calls | +| runtime.endpoints.observability | object | `{"insecure":true,"path":"/observability","port":8085}` | observability api with unsecured access, must not be internet facing | +| runtime.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. access without authentication | +| runtime.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints | +| runtime.endpoints.observability.port | int | `8085` | port for incoming API calls | +| runtime.endpoints.validation | object | `{"path":"/validation","port":8082}` | validation api, only used by the data plane and should not be added to any ingress | +| runtime.endpoints.validation.path | string | `"/validation"` | path for incoming api calls | +| runtime.endpoints.validation.port | int | `8082` | port for incoming api calls | +| runtime.env | object | `{}` | | +| runtime.envConfigMapNames | list | `[]` | | +| runtime.envSecretNames | list | `[]` | | +| runtime.envValueFrom | object | `{}` | | +| runtime.image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | +| runtime.image.repository | string | `""` | Which derivate of the control plane to use. 
when left empty the deployment will select the correct image automatically | +| runtime.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | +| runtime.ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | +| runtime.ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | +| runtime.ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | +| runtime.ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | +| runtime.ingresses[0].enabled | bool | `false` | | +| runtime.ingresses[0].endpoints | list | `["ids"]` | EDC endpoints exposed by this ingress resource | +| runtime.ingresses[0].hostname | string | `"edc-control.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | +| runtime.ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | +| runtime.ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | +| runtime.ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | +| runtime.ingresses[1].annotations | object | `{}` | Additional ingress annotations to add | +| runtime.ingresses[1].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | +| runtime.ingresses[1].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | +| runtime.ingresses[1].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | +| runtime.ingresses[1].enabled | bool | `false` | | +| runtime.ingresses[1].endpoints | list | `["data","control"]` | EDC endpoints exposed by this ingress resource | +| runtime.ingresses[1].hostname | string | `"edc-control.intranet"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | +| runtime.ingresses[1].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | +| runtime.ingresses[1].tls.enabled | bool | `false` | Enables TLS on the ingress resource | +| runtime.ingresses[1].tls.secretName | string | `""` | If present overwrites the default secret name | +| runtime.initContainers | list | `[]` | | +| runtime.internationalDataSpaces.catalogId | string | `"TXDC-Catalog"` | | +| runtime.internationalDataSpaces.curator | string | `""` | | +| runtime.internationalDataSpaces.description | string | `"Tractus-X Eclipse IDS Data Space Connector"` | | +| runtime.internationalDataSpaces.id | string | `"TXDC"` | | +| runtime.internationalDataSpaces.maintainer | string | `""` | | +| runtime.internationalDataSpaces.title | string | `""` | | +| runtime.livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | +| runtime.livenessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | +| 
runtime.livenessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first liveness check | +| runtime.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | +| runtime.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | +| runtime.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | +| runtime.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | +| runtime.nodeSelector | object | `{}` | | +| runtime.opentelemetry | string | `"otel.javaagent.enabled=false\notel.javaagent.debug=false"` | configuration of the [Open Telemetry Agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) to collect and expose metrics | +| runtime.podAnnotations | object | `{}` | additional annotations for the pod | +| runtime.podLabels | object | `{}` | additional labels for the pod | +| runtime.podSecurityContext | object | `{"fsGroup":10001,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment | +| runtime.podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | +| runtime.podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | +| runtime.podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | +| runtime.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | +| runtime.readinessProbe.enabled | bool | `true` | Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | +| runtime.readinessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | +| runtime.readinessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first readiness check | +| runtime.readinessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a readiness check every 10 seconds | +| runtime.readinessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | +| runtime.readinessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | +| runtime.replicaCount | int | `1` | | +| runtime.resources | object | `{}` | [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container | +| runtime.securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid 
binaries changing the effective user ID | +| runtime.securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | +| runtime.securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | +| runtime.securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | +| runtime.securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | +| runtime.securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | +| runtime.service.annotations | object | `{}` | | +| runtime.service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | +| runtime.tolerations | list | `[]` | | +| runtime.url.ids | string | `""` | Explicitly declared url for reaching the ids api (e.g. if ingresses not used) | +| runtime.volumeMounts | list | `[]` | declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container | +| runtime.volumes | list | `[]` | [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories | +| customLabels | object | `{}` | | +| daps.clientId | string | `""` | | +| daps.paths.jwks | string | `"/jwks.json"` | | +| daps.paths.token | string | `"/token"` | | +| daps.url | string | `""` | | +| dataplane.affinity | object | `{}` | | +| dataplane.autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | +| dataplane.autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | +| dataplane.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | +| dataplane.autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | +| dataplane.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | +| dataplane.aws.accessKeyId | string | `""` | | +| dataplane.aws.endpointOverride | string | `""` | | +| dataplane.aws.secretAccessKey | string | `""` | | +| dataplane.debug.enabled | bool | `false` | | +| dataplane.debug.port | int | `1044` | | +| dataplane.debug.suspendOnStart | bool | `false` | | +| dataplane.endpoints.control.path | string | `"/api/dataplane/control"` | | +| dataplane.endpoints.control.port | int | `8083` | | +| dataplane.endpoints.default.path | string | `"/api"` | | +| dataplane.endpoints.default.port | int | `8080` | | +| dataplane.endpoints.metrics.path | string | `"/metrics"` | | +| dataplane.endpoints.metrics.port | int | `9090` | | +| dataplane.endpoints.public.path | string | `"/api/public"` | | +| dataplane.endpoints.public.port | int | `8081` | | +| dataplane.endpoints.validation.path | string | `"/validation"` | | +| dataplane.endpoints.validation.port | int | `8082` | | +| dataplane.env | object | `{}` | | +| dataplane.envConfigMapNames | list | `[]` | | +| dataplane.envSecretNames | list | `[]` | | +| dataplane.envValueFrom | object | `{}` | | +| dataplane.image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull 
policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | +| dataplane.image.repository | string | `""` | Which derivate of the data plane to use. when left empty the deployment will select the correct image automatically | +| dataplane.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | +| dataplane.ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | +| dataplane.ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | +| dataplane.ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | +| dataplane.ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | +| dataplane.ingresses[0].enabled | bool | `false` | | +| dataplane.ingresses[0].endpoints | list | `["public"]` | EDC endpoints exposed by this ingress resource | +| dataplane.ingresses[0].hostname | string | `"edc-data.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | +| dataplane.ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | +| dataplane.ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | +| dataplane.ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | +| dataplane.initContainers | list | `[]` | | +| dataplane.livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | +| dataplane.livenessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | +| dataplane.livenessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first liveness check | +| dataplane.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | +| dataplane.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | +| dataplane.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | +| dataplane.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | +| dataplane.nodeSelector | object | `{}` | | +| dataplane.opentelemetry | string | `"otel.javaagent.enabled=false\notel.javaagent.debug=false"` | configuration of the [Open Telemetry Agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) to collect and expose metrics | +| dataplane.podAnnotations | object | `{}` | additional annotations for the pod | +| dataplane.podLabels | object | `{}` | additional labels for the pod | +| dataplane.podSecurityContext | object | 
`{"fsGroup":10001,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment | +| dataplane.podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | +| dataplane.podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | +| dataplane.podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | +| dataplane.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | +| dataplane.readinessProbe.enabled | bool | `true` | Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | +| dataplane.readinessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | +| dataplane.readinessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first readiness check | +| dataplane.readinessProbe.periodSeconds | int | `10` | this field specifies that kubernetes should perform a readiness check every 10 seconds | +| dataplane.readinessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | +| dataplane.readinessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | +| dataplane.replicaCount | int | `1` | | +| dataplane.resources | object | `{}` | [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container | +| dataplane.securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID | +| dataplane.securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | +| dataplane.securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | +| dataplane.securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | +| dataplane.securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | +| dataplane.securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | +| dataplane.service.port | int | `80` | | +| dataplane.service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | +| dataplane.tolerations | list | `[]` | | +| dataplane.url.public | string | `""` | Explicitly declared url for reaching the public api (e.g. 
if ingresses not used) | +| dataplane.volumeMounts | list | `[]` | declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container | +| dataplane.volumes | list | `[]` | [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories | +| fullnameOverride | string | `""` | | +| imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | +| nameOverride | string | `""` | | +| postgresql.enabled | bool | `false` | | +| postgresql.jdbcUrl | string | `""` | | +| postgresql.password | string | `""` | | +| postgresql.username | string | `""` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.imagePullSecrets | list | `[]` | Existing image pull secret bound to the service account to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | +| serviceAccount.name | string | `""` | | +| vault.azure.certificate | string | `nil` | | +| vault.azure.client | string | `""` | | +| vault.azure.enabled | bool | `false` | | +| vault.azure.name | string | `""` | | +| vault.azure.secret | string | `nil` | | +| vault.azure.tenant | string | `""` | | +| vault.hashicorp.enabled | bool | `false` | | +| vault.hashicorp.healthCheck.enabled | bool | `true` | | +| vault.hashicorp.healthCheck.standbyOk | bool | `true` | | +| vault.hashicorp.paths.health | string | `"/v1/sys/health"` | | +| vault.hashicorp.paths.secret | string | `"/v1/secret"` | | +| vault.hashicorp.timeout | int | `30` | | +| vault.hashicorp.token | string | `""` | | +| vault.hashicorp.url | string | `""` | | +| vault.secretNames.dapsPrivateKey | string | `"daps-private-key"` | | +| vault.secretNames.dapsPublicKey | string | `"daps-public-key"` | | +| vault.secretNames.transferProxyTokenEncryptionAesKey | string | `"transfer-proxy-token-encryption-aes-key"` | | +| vault.secretNames.transferProxyTokenSignerPrivateKey | string | `"transfer-proxy-token-signer-private-key"` | | +| vault.secretNames.transferProxyTokenSignerPublicKey | string | `"transfer-proxy-token-signer-public-key"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0) diff --git a/charts/tractusx-connector-memory/README.md.gotmpl b/charts/tractusx-connector-memory/README.md.gotmpl new file mode 100644 index 000000000..b1671f5a2 --- /dev/null +++ b/charts/tractusx-connector-memory/README.md.gotmpl @@ -0,0 +1,26 @@ +{{ template "chart.header" . }} + +{{ template "chart.deprecationWarning" . }} + +{{ template "chart.badgesSection" . }} + +{{ template "chart.description" . }} + +{{ template "chart.homepageLine" . }} + +## TL;DR + +```shell +helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev +helm install my-release tractusx-edc/tractusx-connector --version {{ .Version }} +``` + +{{ template "chart.maintainersSection" . }} + +{{ template "chart.sourcesSection" . }} + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +{{ template "helm-docs.versionFooter" . 
}} diff --git a/charts/tractusx-connector-memory/example.yaml b/charts/tractusx-connector-memory/example.yaml new file mode 100644 index 000000000..57d12b039 --- /dev/null +++ b/charts/tractusx-connector-memory/example.yaml @@ -0,0 +1,65 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +## This file can be used to verify that the chart is working properly. It provides an exemplary configuration +## that is intended to be used with the supporting infrastructure. +## 1. install DAPS: +## helm install infrastructure edc-tests/deployment/src/main/resources/helm/test-infrastructure \ ─╯ +## --wait-for-jobs +## +## 2. install in-mem runtime. Note that the key and crt must match exactly the DAPS setup, c.f. edc-tests/deployment/src/main/resources/helm/test-infrastructure/values.yaml +## export DAPSKEY="" +## export DAPSCRT="" +## export YOUR_VAULT_SECRETS="daps-key:$DAPSKEY;daps-crt:$DAPSCRT" +## helm install trudy charts/tractusx-connector-memory -f charts/tractusx-connector-memory/example.yaml --set vault.secrets=$YOUR_VAULT_SECRETS + +fullnameOverride: tx-inmem +runtime: + service: + type: NodePort + endpoints: + data: + authKey: password + image: + pullPolicy: Never + tag: "latest" + repository: "edc-runtime-memory" + securityContext: + # avoids some errors in the log: cannot write temp files of large multipart requests when R/O + readOnlyRootFilesystem: false + +vault: + secretNames: + transferProxyTokenSignerPublicKey: daps-crt + transferProxyTokenSignerPrivateKey: daps-key + transferProxyTokenEncryptionAesKey: aes-keysc + dapsPrivateKey: daps-key + dapsPublicKey: daps-crt + + # this must be set through CLI args: --set vault.secrets=$YOUR_VAULT_SECRETS where YOUR_VAULT_SECRETS should + # be a string in the format "key1:secret1;key2:secret2;..." + secrets: + +daps: + url: "http://ids-daps:4567" + clientId: "E7:07:2D:74:56:66:31:F0:7B:10:EA:B6:03:06:4C:23:7F:ED:A6:65:keyid:E7:07:2D:74:56:66:31:F0:7B:10:EA:B6:03:06:4C:23:7F:ED:A6:65" + +backendService: + httpProxyTokenReceiverUrl: "http://backend:8080" diff --git a/charts/tractusx-connector-memory/templates/NOTES.txt b/charts/tractusx-connector-memory/templates/NOTES.txt new file mode 100644 index 000000000..cd49a4d15 --- /dev/null +++ b/charts/tractusx-connector-memory/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. 
Get the runtime URL by running these commands: +{{ with index .Values.runtime.ingresses 0}} +{{- if .enabled }} +{{- range .paths }} + http{{ if .tls }}s{{ end }}://{{ .hostname }}{{ .path }} +{{- end }} +{{- else if contains "NodePort" $.Values.runtime.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ $.Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "txdc.fullname" $ }}) + export NODE_IP=$(kubectl get nodes --namespace {{ $.Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" $.Values.runtime.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "txdc.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "txdc.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ $.Values.runtime.service.port }} +{{- else if contains "ClusterIP" $.Values.runtime.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ $.Release.Namespace }} -l "app.kubernetes.io/name={{ include "txdc.name" $ }},app.kubernetes.io/instance={{ $.Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ $.Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ $.Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} +{{- end }} diff --git a/charts/tractusx-connector-memory/templates/_helpers.tpl b/charts/tractusx-connector-memory/templates/_helpers.tpl new file mode 100644 index 000000000..1b70bf13b --- /dev/null +++ b/charts/tractusx-connector-memory/templates/_helpers.tpl @@ -0,0 +1,157 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "txdc.name" -}} +{{- default .Chart.Name .Values.nameOverride | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "txdc.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "txdc.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Control Common labels +*/}} +{{- define "txdc.labels" -}} +helm.sh/chart: {{ include "txdc.chart" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Control Common labels +*/}} +{{- define "txdc.runtime.labels" -}} +helm.sh/chart: {{ include "txdc.chart" . }} +{{ include "txdc.runtime.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: edc-runtime +app.kubernetes.io/part-of: edc +{{- end }} + +{{/* +Control Selector labels +*/}} +{{- define "txdc.runtime.selectorLabels" -}} +app.kubernetes.io/name: {{ include "txdc.name" . }}-runtime +app.kubernetes.io/instance: {{ .Release.Name }}-runtime +{{- end }} + +{{/* +Data Selector labels +*/}} +{{- define "txdc.dataplane.selectorLabels" -}} +app.kubernetes.io/name: {{ include "txdc.name" . }}-dataplane +app.kubernetes.io/instance: {{ .Release.Name }}-dataplane +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "txdc.runtime.serviceaccount.name" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "txdc.fullname" . ) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Control IDS URL +*/}} +{{- define "txdc.runtime.url.ids" -}} +{{- if .Values.runtime.url.ids }}{{/* if ids api url has been specified explicitly */}} +{{- .Values.runtime.url.ids }} +{{- else }}{{/* else when ids api url has not been specified explicitly */}} +{{- with (index .Values.runtime.ingresses 0) }} +{{- if .enabled }}{{/* if ingress enabled */}} +{{- if .tls.enabled }}{{/* if TLS enabled */}} +{{- printf "https://%s" .hostname -}} +{{- else }}{{/* else when TLS not enabled */}} +{{- printf "http://%s" .hostname -}} +{{- end }}{{/* end if tls */}} +{{- else }}{{/* else when ingress not enabled */}} +{{- printf "http://%s-runtime:%v" ( include "txdc.fullname" $ ) $.Values.runtime.endpoints.ids.port -}} +{{- end }}{{/* end if ingress */}} +{{- end }}{{/* end with ingress */}} +{{- end }}{{/* end if .Values.runtime.url.ids */}} +{{- end }} + +{{/* +Observability URL +*/}} +{{- define "tdxc.runtime.url.readiness" -}} +{{- printf "http://%s-runtime:%v%s/check/readiness" (include "txdc.fullname" $ ) $.Values.runtime.endpoints.observability.port $.Values.runtime.endpoints.observability.path -}} +{{- end }} + +{{/* +Validation URL +*/}} +{{- define "txdc.runtime.url.validation" -}} +{{- printf "http://%s-runtime:%v%s/token" ( include "txdc.fullname" $ ) $.Values.runtime.endpoints.validation.port $.Values.runtime.endpoints.validation.path -}} +{{- end }} + +{{/* +Data Control URL +*/}} +{{- define "txdc.dataplane.url.control" -}} +{{- printf "http://%s-dataplane:%v%s" (include "txdc.fullname" . 
) .Values.runtime.endpoints.control.port .Values.runtime.endpoints.control.path -}} +{{- end }} + +{{/* +Data Public URL +*/}} +{{- define "txdc.dataplane.url.public" -}} +{{- if .Values.runtime.url.public }}{{/* if public api url has been specified explicitly */}} +{{- .Values.runtime.url.public }} +{{- else }}{{/* else when public api url has not been specified explicitly */}} +{{- with (index .Values.runtime.ingresses 0) }} +{{- if .enabled }}{{/* if ingress enabled */}} +{{- if .tls.enabled }}{{/* if TLS enabled */}} +{{- printf "https://%s%s" .hostname $.Values.runtime.endpoints.public.path -}} +{{- else }}{{/* else when TLS not enabled */}} +{{- printf "http://%s%s" .hostname $.Values.runtime.endpoints.public.path -}} +{{- end }}{{/* end if tls */}} +{{- else }}{{/* else when ingress not enabled */}} +{{- printf "http://%s-dataplane:%v%s" (include "txdc.fullname" $ ) $.Values.runtime.endpoints.public.port $.Values.runtime.endpoints.public.path -}} +{{- end }}{{/* end if ingress */}} +{{- end }}{{/* end with ingress */}} +{{- end }}{{/* end if .Values.dataplane.url.public */}} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "txdc.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "txdc.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/tractusx-connector-memory/templates/configmap-runtime.yaml b/charts/tractusx-connector-memory/templates/configmap-runtime.yaml new file mode 100644 index 000000000..8b6067e06 --- /dev/null +++ b/charts/tractusx-connector-memory/templates/configmap-runtime.yaml @@ -0,0 +1,33 @@ +# + # Copyright (c) 2023 ZF Friedrichshafen AG + # Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH + # Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + # Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License, Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. + # + # SPDX-License-Identifier: Apache-2.0 + # + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "txdc.fullname" . }}-runtime + namespace: {{ .Release.Namespace | default "default" | quote }} + labels: + {{- include "txdc.runtime.labels" . 
| nindent 4 }} +data: + logging.properties: |- + {{- .Values.runtime.logging | nindent 4 }} diff --git a/charts/tractusx-connector-memory/templates/deployment-runtime.yaml b/charts/tractusx-connector-memory/templates/deployment-runtime.yaml new file mode 100644 index 000000000..04386678c --- /dev/null +++ b/charts/tractusx-connector-memory/templates/deployment-runtime.yaml @@ -0,0 +1,302 @@ +# + # Copyright (c) 2023 ZF Friedrichshafen AG + # Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH + # Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + # Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License, Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. + # + # SPDX-License-Identifier: Apache-2.0 + # + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "txdc.fullname" . }} + labels: + {{- include "txdc.runtime.labels" . | nindent 4 }} +spec: + {{- if not .Values.runtime.autoscaling.enabled }} + replicas: {{ .Values.runtime.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "txdc.runtime.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.runtime.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "txdc.runtime.selectorLabels" . | nindent 8 }} + {{- with .Values.runtime.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "txdc.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.runtime.podSecurityContext | nindent 8 }} + initContainers: + {{- toYaml .Values.runtime.initContainers | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.runtime.securityContext | nindent 12 }} + # either use the specified image, or use the default one + {{- if .Values.runtime.image.repository }} + image: "{{ .Values.runtime.image.repository }}:{{ .Values.runtime.image.tag | default .Chart.AppVersion }}" + {{- else }} + image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-runtime-memory:{{ .Values.runtime.image.tag | default .Chart.AppVersion }}" + {{- end }} + + imagePullPolicy: {{ .Values.runtime.image.pullPolicy }} + ports: + {{- range $key,$value := .Values.runtime.endpoints }} + - name: {{ $key }} + containerPort: {{ $value.port }} + protocol: TCP + {{- end }} + {{- if .Values.runtime.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.runtime.endpoints.observability.path }}/check/liveness + port: {{ .Values.runtime.endpoints.observability.port }} + initialDelaySeconds: {{ .Values.runtime.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.runtime.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.runtime.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.runtime.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.runtime.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.runtime.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.runtime.endpoints.observability.path }}/check/readiness + port: {{ .Values.runtime.endpoints.observability.port }} + initialDelaySeconds: {{ .Values.runtime.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.runtime.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.runtime.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.runtime.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.runtime.readinessProbe.successThreshold }} + {{- end }} + resources: + {{- toYaml .Values.runtime.resources | nindent 12 }} + env: + {{- if .Values.runtime.debug.enabled }} + - name: "JAVA_TOOL_OPTIONS" + {{- if and .Values.runtime.debug.enabled .Values.runtime.debug.suspendOnStart }} + value: >- + {{ printf "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=%v" .Values.runtime.debug.port }} + {{- else }} + value: >- + {{ printf "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=%v" .Values.runtime.debug.port }} + {{- end }} + {{- end }} + + ######################## + ## DAPS CONFIGURATION ## + ######################## + + # see extension https://github.com/eclipse-edc/Connector/tree/main/extensions/iam/oauth2/oauth2-core + - name: EDC_OAUTH_CLIENT_ID + value: {{ .Values.daps.clientId | required ".Values.daps.clientId is required" | quote }} + - name: EDC_OAUTH_PROVIDER_JWKS_URL + value: {{ printf "%s%s" .Values.daps.url .Values.daps.paths.jwks }} + - name: EDC_OAUTH_TOKEN_URL + value: {{ printf "%s%s" .Values.daps.url .Values.daps.paths.token }} + - name: EDC_OAUTH_PRIVATE_KEY_ALIAS + value: {{ .Values.vault.secretNames.dapsPrivateKey | required ".Values.vault.secretNames.dapsPrivateKey is required" | quote }} + - name: EDC_OAUTH_PUBLIC_KEY_ALIAS + value: {{ .Values.vault.secretNames.dapsPublicKey | required ".Values.vault.secretNames.dapsPublicKey is required" | quote }} + + ####### + # API # + ####### + - name: "EDC_API_AUTH_KEY" + value: {{ .Values.runtime.endpoints.data.authKey | required 
".Values.runtime.endpoints.data.authKey is required" | quote }} + - name: "WEB_HTTP_DEFAULT_PORT" + value: {{ .Values.runtime.endpoints.default.port | quote }} + - name: "WEB_HTTP_DEFAULT_PATH" + value: {{ .Values.runtime.endpoints.default.path | quote }} + {{- if or (eq (substr 0 3 .Values.runtime.image.tag) "0.1") (eq (substr 0 3 .Values.runtime.image.tag) "0.2") }} + # WEB_HTTP_DATA_PORT is renamed to WEB_HTTP_MANAGEMENT_PORT from version 0.2.1 and newer + # we will keep both settings for downward capabilities + - name: "WEB_HTTP_DATA_PORT" + value: {{ .Values.runtime.endpoints.data.port | quote }} + # WEB_HTTP_DATA_PATH is renamed to WEB_HTTP_MANAGEMENT_PATH from version 0.2.1 and newer + # we will keep both settings for downward capabilities + - name: "WEB_HTTP_DATA_PATH" + value: {{ .Values.runtime.endpoints.data.path | quote }} + {{- else }} + - name: "WEB_HTTP_MANAGEMENT_PORT" + value: {{ .Values.runtime.endpoints.data.port | quote }} + - name: "WEB_HTTP_MANAGEMENT_PATH" + value: {{ .Values.runtime.endpoints.data.path | quote }} + {{- end }} + - name: "WEB_HTTP_VALIDATION_PORT" + value: {{ .Values.runtime.endpoints.validation.port | quote }} + - name: "WEB_HTTP_VALIDATION_PATH" + value: {{ .Values.runtime.endpoints.validation.path | quote }} + - name: "WEB_HTTP_CONTROL_PORT" + value: {{ .Values.runtime.endpoints.control.port | quote }} + - name: "WEB_HTTP_CONTROL_PATH" + value: {{ .Values.runtime.endpoints.control.path | quote }} + - name: "WEB_HTTP_IDS_PORT" + value: {{ .Values.runtime.endpoints.ids.port | quote }} + - name: "WEB_HTTP_IDS_PATH" + value: {{ .Values.runtime.endpoints.ids.path | quote }} + - name: "WEB_HTTP_OBSERVABILITY_PORT" + value: {{ .Values.runtime.endpoints.observability.port | quote}} + - name: "WEB_HTTP_OBSERVABILITY_PATH" + value: {{ .Values.runtime.endpoints.observability.path | quote}} + - name: "TRACTUSX_API_OBSERVABILITY_ALLOW-INSECURE" + value: {{ .Values.runtime.endpoints.observability.insecure | quote }} + - name: "WEB_HTTP_PUBLIC_PORT" + value: {{ .Values.runtime.endpoints.public.port | quote }} + - name: "WEB_HTTP_PUBLIC_PATH" + value: {{ .Values.runtime.endpoints.public.path | quote }} + - name: "EDC_DATAPLANE_TOKEN_VALIDATION_ENDPOINT" + value: {{ include "txdc.runtime.url.validation" .}} + + ######### + ## IDS ## + ######### + - name: "IDS_WEBHOOK_ADDRESS" + value: {{ include "txdc.runtime.url.ids" . | quote }} + - name: "EDC_IDS_ENDPOINT" + value: {{ printf "%s%s" (include "txdc.runtime.url.ids" .) .Values.runtime.endpoints.ids.path | quote }} + - name: "EDC_IDS_ID" + value: {{ printf "urn:connector:%s" (lower .Values.runtime.internationalDataSpaces.id) | quote }} + - name: "EDC_IDS_DESCRIPTION" + value: {{ .Values.runtime.internationalDataSpaces.description | quote }} + - name: "EDC_IDS_TITLE" + value: {{ .Values.runtime.internationalDataSpaces.title | quote }} + - name: "EDC_IDS_MAINTAINER" + value: {{ .Values.runtime.internationalDataSpaces.maintainer | quote }} + - name: "EDC_IDS_CURATOR" + value: {{ .Values.runtime.internationalDataSpaces.curator | quote }} + - name: "EDC_IDS_CATALOG_ID" + value: {{ printf "urn:catalog:%s" (lower .Values.runtime.internationalDataSpaces.catalogId) | quote }} + - name: "EDC_OAUTH_PROVIDER_AUDIENCE" + value: "idsc:IDS_CONNECTORS_ALL" + - name: "EDC_OAUTH_ENDPOINT_AUDIENCE" + value: {{ printf "%s%s%s" (include "txdc.runtime.url.ids" . 
) .Values.runtime.endpoints.ids.path "/data" | quote }} + # this is the old setting name for 'EDC_OAUTH_ENDPOINT_AUDIENCE' and is mandatory for Produce EDC v0.1.2 and older + - name: "EDC_IDS_ENDPOINT_AUDIENCE" + value: {{ printf "%s%s%s" (include "txdc.runtime.url.ids" . ) .Values.runtime.endpoints.ids.path "/data" | quote }} + + ################ + ## DATA PLANE ## + ################ + + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/dataplane-selector-configuration + - name: "EDC_DATAPLANE_SELECTOR_DEFAULTPLANE_URL" + value: {{ include "txdc.dataplane.url.control" . }}/transfer + - name: "EDC_DATAPLANE_SELECTOR_DEFAULTPLANE_SOURCETYPES" + value: "HttpData,AmazonS3" + - name: "EDC_DATAPLANE_SELECTOR_DEFAULTPLANE_DESTINATIONTYPES" + value: "HttpProxy,AmazonS3" + - name: "EDC_DATAPLANE_SELECTOR_DEFAULTPLANE_PROPERTIES" + value: |- + {{ printf "{ \"publicApiUrl\": \"%s\" }" (include "txdc.dataplane.url.public" . ) }} + + # see extension https://github.com/eclipse-edc/Connector/tree/main/extensions/control-plane/data-plane-transfer + - name: "EDC_TRANSFER_PROXY_ENDPOINT" + value: {{ include "txdc.dataplane.url.public" . }} + - name: "EDC_TRANSFER_PROXY_TOKEN_SIGNER_PRIVATEKEY_ALIAS" + value: {{ .Values.vault.secretNames.transferProxyTokenSignerPrivateKey | quote }} + - name: "EDC_TRANSFER_PROXY_TOKEN_VERIFIER_PUBLICKEY_ALIAS" + value: {{ .Values.vault.secretNames.transferProxyTokenSignerPublicKey | quote }} + + # see extension https://github.com/eclipse-edc/Connector/tree/main/extensions/control-plane/http-receiver + - name: "EDC_RECEIVER_HTTP_ENDPOINT" + value: {{ .Values.backendService.httpProxyTokenReceiverUrl | required ".Values.backendService.httpProxyTokenReceiverUrl is required" | quote }} + + ########### + ## VAULT ## + ########### + + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/hashicorp-vault + - name: "SECRETS" + value: {{ .Values.vault.secrets | quote}} + + ##################### + ## DATA ENCRYPTION ## + ##################### + + # see extension https://github.com/eclipse-tractusx/tractusx-edc/tree/develop/edc-extensions/data-encryption + - name: "EDC_DATA_ENCRYPTION_KEYS_ALIAS" + value: {{ .Values.vault.secretNames.transferProxyTokenEncryptionAesKey | quote }} + - name: "EDC_DATA_ENCRYPTION_ALGORITHM" + value: "AES" + + ########################### + ## AAS WRAPPER EXTENSION ## + ########################### + - name: "EDC_CP_ADAPTER_CACHE_CATALOG_EXPIRE_AFTER" + value: "0" + - name: "EDC_CP_ADAPTER_REUSE_CONTRACT_AGREEMENT" + value: "0" + + ###################################### + ## Additional environment variables ## + ###################################### + {{- range $key, $value := .Values.runtime.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 16 }} + {{- end }} + {{- range $key, $value := .Values.runtime.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + {{- if and (or .Values.runtime.envSecretNames .Values.runtime.envConfigMapNames) (or (gt (len .Values.runtime.envSecretNames) 0) (gt (len .Values.runtime.envConfigMapNames) 0)) }} + envFrom: + {{- range $value := .Values.runtime.envSecretNames }} + - secretRef: + name: {{ $value | quote }} + {{- end }} + {{- range $value := .Values.runtime.envConfigMapNames }} + - configMapRef: + name: {{ $value | quote }} + {{- end }} + {{- end }} + volumeMounts: + - name: "configuration" + mountPath: "/app/logging.properties" + subPath: "logging.properties" + volumes: 
+ - name: "configuration" + configMap: + name: {{ include "txdc.fullname" . }}-runtime + items: + - key: "logging.properties" + path: "logging.properties" + {{- with .Values.runtime.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.runtime.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.runtime.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/charts/tractusx-connector-memory/templates/hpa-runtime.yaml b/charts/tractusx-connector-memory/templates/hpa-runtime.yaml new file mode 100644 index 000000000..a373dfb63 --- /dev/null +++ b/charts/tractusx-connector-memory/templates/hpa-runtime.yaml @@ -0,0 +1,29 @@ +{{- if .Values.runtime.autoscaling.enabled }} +--- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "txdc.fullname" . }}-runtime + labels: + {{- include "txdc.runtime.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "txdc.fullname" . }}-runtime + minReplicas: {{ .Values.runtime.autoscaling.minReplicas }} + maxReplicas: {{ .Values.runtime.autoscaling.maxReplicas }} + metrics: + {{- if .Values.runtime.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.runtime.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.runtime.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.runtime.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/tractusx-connector-memory/templates/ingress-runtime.yaml b/charts/tractusx-connector-memory/templates/ingress-runtime.yaml new file mode 100644 index 000000000..06c6f5c68 --- /dev/null +++ b/charts/tractusx-connector-memory/templates/ingress-runtime.yaml @@ -0,0 +1,77 @@ +{{- $fullName := include "txdc.fullname" . }} +{{- $controlLabels := include "txdc.runtime.labels" . | nindent 4 }} +{{- $controlEdcEndpoints := .Values.runtime.endpoints }} +{{- $gitVersion := .Capabilities.KubeVersion.GitVersion }} +{{- $namespace := .Release.Namespace }} + +{{- range .Values.runtime.ingresses }} +{{- if and .enabled .endpoints }} +{{- $controlIngressName := printf "%s-runtime-%s" $fullName .hostname }} +--- +{{- if semverCompare ">=1.19-0" $gitVersion }} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" $gitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $controlIngressName }} + namespace: {{ $namespace | default "default" | quote }} + labels: + {{- $controlLabels | nindent 2 }} + annotations: + {{- if and .className (not (semverCompare ">=1.18-0" $gitVersion)) }} + {{- if not (hasKey .annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .annotations "kubernetes.io/ingress.class" .className}} + {{- end }} + {{- end }} + {{- if .certManager }} + {{- if .certManager.issuer }} + {{- $_ := set .annotations "cert-manager.io/issuer" .certManager.issuer}} + {{- end }} + {{- if .certManager.clusterIssuer }} + {{- $_ := set .annotations "cert-manager.io/cluster-issuer" .certManager.clusterIssuer}} + {{- end }} + {{- end }} + {{- with .annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .className (semverCompare ">=1.18-0" $gitVersion) }} + ingressClassName: {{ .className }} + {{- end }} + {{- if .hostname }} + {{- if .tls.enabled }} + tls: + - hosts: + - {{ .hostname }} + {{- if .tls.secretName }} + secretName: {{ .tls.secretName }} + {{- else }} + secretName: {{ $controlIngressName }}-tls + {{- end }} + {{- end }} + rules: + - host: {{ .hostname }} + http: + paths: + {{- $ingressEdcEndpoints := .endpoints }} + {{- range $name, $mapping := $controlEdcEndpoints }} + {{- if (has $name $ingressEdcEndpoints) }} + - path: {{ $mapping.path }} + pathType: Prefix + backend: + {{- if semverCompare ">=1.19-0" $gitVersion }} + service: + name: {{ $fullName }}-runtime + port: + number: {{ $mapping.port }} + {{- else }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} +{{- end }}{{- /* end: if .enabled */}} +{{- end }}{{- /* end: range .Values.ingresses */}} diff --git a/charts/tractusx-connector-memory/templates/service-runtime.yaml b/charts/tractusx-connector-memory/templates/service-runtime.yaml new file mode 100644 index 000000000..241e28885 --- /dev/null +++ b/charts/tractusx-connector-memory/templates/service-runtime.yaml @@ -0,0 +1,59 @@ +# + # Copyright (c) 2023 ZF Friedrichshafen AG + # Copyright (c) 2023 Mercedes-Benz Tech Innovation GmbH + # Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + # Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation + # + # See the NOTICE file(s) distributed with this work for additional + # information regarding copyright ownership. + # + # This program and the accompanying materials are made available under the + # terms of the Apache License, Version 2.0 which is available at + # https://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. + # + # SPDX-License-Identifier: Apache-2.0 + # + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "txdc.fullname" . }}-runtime + namespace: {{ .Release.Namespace | default "default" | quote }} + labels: + {{- include "txdc.runtime.labels" . | nindent 4 }} +spec: + type: {{ .Values.runtime.service.type }} + ports: + - port: {{ .Values.runtime.endpoints.default.port }} + targetPort: default + protocol: TCP + name: default + - port: {{ .Values.runtime.endpoints.control.port }} + targetPort: control + protocol: TCP + name: control + - port: {{ .Values.runtime.endpoints.data.port }} + targetPort: data + protocol: TCP + name: data + - port: {{ .Values.runtime.endpoints.validation.port }} + targetPort: validation + protocol: TCP + name: validation + - port: {{ .Values.runtime.endpoints.ids.port }} + targetPort: ids + protocol: TCP + name: ids + - port: {{ .Values.runtime.endpoints.observability.port}} + targetPort: observability + protocol: TCP + name: observability + selector: + {{- include "txdc.runtime.selectorLabels" . 
| nindent 4 }} diff --git a/charts/tractusx-connector-memory/templates/serviceaccount.yaml b/charts/tractusx-connector-memory/templates/serviceaccount.yaml new file mode 100644 index 000000000..c650bcd68 --- /dev/null +++ b/charts/tractusx-connector-memory/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "txdc.serviceAccountName" . }} + labels: + {{- include "txdc.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- with .Values.serviceAccount.imagePullSecrets }} +imagePullSecrets: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} diff --git a/charts/tractusx-connector-memory/templates/tests/test-readiness.yaml b/charts/tractusx-connector-memory/templates/tests/test-readiness.yaml new file mode 100644 index 000000000..d98493953 --- /dev/null +++ b/charts/tractusx-connector-memory/templates/tests/test-readiness.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "txdc.fullname" . }}-test-readiness" + labels: + {{- include "txdc.runtime.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: curlimages/curl + command: ['curl'] + args: ['{{ include "tdxc.runtime.url.readiness" . }}'] + restartPolicy: Never diff --git a/charts/tractusx-connector-memory/values.yaml b/charts/tractusx-connector-memory/values.yaml new file mode 100644 index 000000000..66fa1b7fe --- /dev/null +++ b/charts/tractusx-connector-memory/values.yaml @@ -0,0 +1,313 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2021, 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +--- +# Default values for eclipse-dataspace-connector. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
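+# Illustrative example only (release name and chart path below are placeholders, not part of this chart):
+# any of the values in this file can be overridden at install time, e.g.
+#   helm install my-inmem-edc charts/tractusx-connector-memory \
+#     --set runtime.endpoints.data.authKey=password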
+ +fullnameOverride: "" +nameOverride: "" + +# -- Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) +imagePullSecrets: [] + +customLabels: {} + +runtime: + image: + repository: "" + # -- [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use + pullPolicy: IfNotPresent + # -- Overrides the image tag whose default is the chart appVersion + tag: "" + initContainers: [] + debug: + enabled: false + port: 1044 + suspendOnStart: false + internationalDataSpaces: + id: TXDC + description: Tractus-X Eclipse IDS Data Space Connector + title: "" + maintainer: "" + curator: "" + catalogId: TXDC-Catalog + livenessProbe: + # -- Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) + enabled: true + # -- seconds to wait before performing the first liveness check + initialDelaySeconds: 30 + # -- this fields specifies that kubernetes should perform a liveness check every 10 seconds + periodSeconds: 10 + # -- number of seconds after which the probe times out + timeoutSeconds: 5 + # -- when a probe fails kubernetes will try 6 times before giving up + failureThreshold: 6 + # -- number of consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + readinessProbe: + # -- Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) + enabled: true + # -- seconds to wait before performing the first readiness check + initialDelaySeconds: 30 + # -- this fields specifies that kubernetes should perform a readiness check every 10 seconds + periodSeconds: 10 + # -- number of seconds after which the probe times out + timeoutSeconds: 5 + # -- when a probe fails kubernetes will try 6 times before giving up + failureThreshold: 6 + # -- number of consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # -- endpoints of the control plane + endpoints: + # -- default api for health checks, should not be added to any ingress + default: + # -- port for incoming api calls + port: 8080 + # -- path for incoming api calls + path: /api + # -- data management api, used by internal users, can be added to an ingress and must not be internet facing + data: + # -- port for incoming api calls + port: 8081 + # -- path for incoming api calls + path: /data + # -- authentication key, must be attached to each 'X-Api-Key' request header + authKey: "" + # -- validation api, only used by the data plane and should not be added to any ingress + validation: + # -- port for incoming api calls + port: 8082 + # -- path for incoming api calls + path: /validation + # -- control api, used for internal control calls. 
can be added to the internal ingress, but should probably not + control: + # -- port for incoming api calls + port: 8083 + # -- path for incoming api calls + path: /control + # -- ids api, used for inter connector communication and must be internet facing + ids: + # -- port for incoming api calls + port: 8084 + # -- path for incoming api calls + path: /api/v1/ids + # -- observability api with unsecured access, must not be internet facing + observability: + # -- port for incoming API calls + port: 8085 + # -- observability api, provides /health /readiness and /liveness endpoints + path: /observability + # -- allow or disallow insecure access, i.e. access without authentication + insecure: true + public: + port: 8086 + path: /api/public + service: + # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. + type: ClusterIP + annotations: {} + # -- additional labels for the pod + podLabels: {} + # -- additional annotations for the pod + podAnnotations: {} + # -- The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment + podSecurityContext: + seccompProfile: + # -- Restrict a Container's Syscalls with seccomp + type: RuntimeDefault + # -- Runs all processes within a pod with a special uid + runAsUser: 10001 + # -- Processes within a pod will belong to this guid + runAsGroup: 10001 + # -- The owner for volumes and any files created within volumes will belong to this guid + fsGroup: 10001 + # The [container security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) defines privilege and access control settings for a Container within a pod + securityContext: + capabilities: + # -- Specifies which capabilities to drop to reduce syscall attack surface + drop: + - ALL + # -- Specifies which capabilities to add to issue specialized syscalls + add: [] + # -- Whether the root filesystem is mounted in read-only mode + readOnlyRootFilesystem: true + # -- Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID + allowPrivilegeEscalation: false + # -- Requires the container to run without root privileges + runAsNonRoot: true + # -- The container's process will run with the specified uid + runAsUser: 10001 + # Extra environment variables that will be pass onto deployment pods + env: {} + # ENV_NAME: value + + # "valueFrom" environment variable references that will be added to deployment pods. Name is templated. + # ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + # secretKeyRef: + # name: secret-name + # key: value_key + + # [Kubernetes Secret Resource](https://kubernetes.io/docs/concepts/configuration/secret/) names to load environment variables from + envSecretNames: [] + # - first-secret + # - second-secret + + # [Kubernetes ConfigMap Resource](https://kubernetes.io/docs/concepts/configuration/configmap/) names to load environment variables from + envConfigMapNames: [] + # - first-config-map + # - second-config-map + + ## Ingress declaration to expose the network service. 
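+  ## For illustration only: the first (internet-facing) entry below could be enabled at install time, e.g.
+  ##   --set "runtime.ingresses[0].enabled=true" --set "runtime.ingresses[0].hostname=edc.example.com"
+  ## (the hostname above is a placeholder).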
+ ingresses: + ## Public / Internet facing Ingress + - enabled: false + # -- The hostname to be used to precisely map incoming traffic onto the underlying network service + hostname: "edc-control.local" + # -- Additional ingress annotations to add + annotations: {} + # -- EDC endpoints exposed by this ingress resource + endpoints: + - ids + # -- Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use + className: "" + # -- TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource + tls: + # -- Enables TLS on the ingress resource + enabled: false + # -- If present overwrites the default secret name + secretName: "" + ## Adds [cert-manager](https://cert-manager.io/docs/) annotations to the ingress resource + certManager: + # -- If preset enables certificate generation via cert-manager namespace scoped issuer + issuer: "" + # -- If preset enables certificate generation via cert-manager cluster-wide issuer + clusterIssuer: "" + ## Private / Intranet facing Ingress + - enabled: false + # -- The hostname to be used to precisely map incoming traffic onto the underlying network service + hostname: "edc-control.intranet" + # -- Additional ingress annotations to add + annotations: {} + # -- EDC endpoints exposed by this ingress resource + endpoints: + - data + - control + # -- Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use + className: "" + # -- TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource + tls: + # -- Enables TLS on the ingress resource + enabled: false + # -- If present overwrites the default secret name + secretName: "" + ## Adds [cert-manager](https://cert-manager.io/docs/) annotations to the ingress resource + certManager: + # -- If preset enables certificate generation via cert-manager namespace scoped issuer + issuer: "" + # -- If preset enables certificate generation via cert-manager cluster-wide issuer + clusterIssuer: "" + # -- declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container + volumeMounts: [] + # -- [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories + volumes: [] + # -- [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container + resources: + {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + replicaCount: 1 + autoscaling: + # -- Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) + enabled: false + # -- Minimal replicas if resource consumption falls below resource thresholds + minReplicas: 1 + # -- Maximum replicas if resource consumption exceeds resource thresholds + maxReplicas: 100 + # -- targetAverageUtilization of cpu provided to a pod + targetCPUUtilizationPercentage: 80 + # -- targetAverageUtilization of memory provided to a pod + targetMemoryUtilizationPercentage: 80 + # -- configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) + logging: |- + .level=INFO + org.eclipse.edc.level=ALL + handlers=java.util.logging.ConsoleHandler + java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter + java.util.logging.ConsoleHandler.level=ALL + java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n + + # [node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) to constrain pods to nodes + nodeSelector: {} + # [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to configure preferred nodes + tolerations: [] + # [affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) to configure which nodes the pods can be scheduled on + affinity: {} + + url: + # -- Explicitly declared url for reaching the ids api (e.g. if ingresses not used) + ids: "" + public: "" + readiness: "" + +vault: + # secrets can be seeded by supplying them in a comma separated list key1:secret1,key2:secret2 + secrets: "" + secretNames: + transferProxyTokenSignerPrivateKey: transfer-proxy-token-signer-private-key + transferProxyTokenSignerPublicKey: transfer-proxy-token-signer-public-key + transferProxyTokenEncryptionAesKey: transfer-proxy-token-encryption-aes-key + dapsPrivateKey: daps-private-key + dapsPublicKey: daps-public-key + +daps: + url: "" + clientId: "" + paths: + jwks: /jwks.json + token: /token + +backendService: + httpProxyTokenReceiverUrl: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + # -- Existing image pull secret bound to the service account to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) + imagePullSecrets: [] diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 1161db178..6eded494c 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -68,7 +68,7 @@ spec: {{- else if .Values.vault.hashicorp.enabled }} image: "tractusx/edc-controlplane-memory-hashicorp-vault:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else if .Values.vault.azure.enabled }} - image: "tractusx/edc-controlplane-memory:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" + image: "tractusx/edc-runtime-memory:{{ .Values.controlplane.image.tag | default .Chart.AppVersion }}" {{- else }} {{- fail "cannot choose control-plane image automatically based on configuration" }} {{- end }} diff --git a/docs/README.md b/docs/README.md index 259c2560b..17ad45b14 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,8 +1,10 @@ # Tractus-X EDC -The Tractus-X EDC repository creates runnable applications out of EDC extensions from the [Eclipse DataSpace Connector](https://github.com/eclipse-edc/Connector) repository. +The Tractus-X EDC repository creates runnable applications out of EDC extensions from +the [Eclipse DataSpace Connector](https://github.com/eclipse-edc/Connector) repository. -When running a EDC connector from the Tractus-X EDC repository there are three setups to choose from. They only vary by using different extensions for +When running a EDC connector from the Tractus-X EDC repository there are three setups to choose from. They only vary by +using different extensions for - Resolving of Connector-Identities - Persistence of the Control-Plane-State @@ -12,11 +14,11 @@ When running a EDC connector from the Tractus-X EDC repository there are three s The three supported setups are. -- Setup 1: In Memory & Azure Vault - - [Control Plane](../edc-controlplane/edc-controlplane-memory/README.md) +- Setup 1: Pure in Memory **Not intended for production use!** + - [Control Plane](../edc-controlplane/edc-runtime-memory/README.md) - [IDS DAPS Extensions](https://github.com/eclipse-edc/Connector/tree/main/extensions/common/iam/oauth2/daps) - In Memory Persistence done by using no extension - - [Azure Key Vault Extension](https://github.com/eclipse-edc/Connector/tree/main/extensions/common/vault/azure-vault) + - In Memory Keyvault with seedable secrets. 
  - [Data Plane](../edc-dataplane/edc-dataplane-azure-vault/README.md)
    - [Azure Key Vault Extension](https://github.com/eclipse-edc/Connector/tree/main/extensions/common/vault/azure-vault)
 - Setup 2: PostgreSQL & Azure Vault
diff --git a/docs/migration/Version_0.3.1_0.3.2.md b/docs/migration/Version_0.3.1_0.3.2.md
index d6f49da29..4099e8c4b 100644
--- a/docs/migration/Version_0.3.1_0.3.2.md
+++ b/docs/migration/Version_0.3.1_0.3.2.md
@@ -2,7 +2,7 @@
 
 ## Configuration of Azure KeyVault
 
-When using Helm Charts that use the Azure KeyVault (`edc-controlplane-memory`, `edc-controlplane-postgres`)
+When using Helm Charts that use the Azure KeyVault (`edc-runtime-memory`, `edc-controlplane-postgres`)
 it is now possible to select _either_ authentication via Client Secret (`azure.vault.secret`) or via
 certificate (`azure.vault.certificate`).
 
diff --git a/edc-controlplane/build.gradle.kts b/edc-controlplane/build.gradle.kts
index d27dbdf36..d7306dd1e 100644
--- a/edc-controlplane/build.gradle.kts
+++ b/edc-controlplane/build.gradle.kts
@@ -1,11 +1,10 @@
-
 plugins {
     `java-library`
 }
 
 dependencies {
     implementation(project(":edc-controlplane:edc-controlplane-base"))
-    implementation(project(":edc-controlplane:edc-controlplane-memory"))
+    implementation(project(":edc-controlplane:edc-runtime-memory"))
     implementation(project(":edc-controlplane:edc-controlplane-memory-hashicorp-vault"))
     implementation(project(":edc-controlplane:edc-controlplane-postgresql"))
     implementation(project(":edc-controlplane:edc-controlplane-postgresql-hashicorp-vault"))
diff --git a/edc-controlplane/edc-controlplane-memory/README.md b/edc-controlplane/edc-runtime-memory/README.md
similarity index 54%
rename from edc-controlplane/edc-controlplane-memory/README.md
rename to edc-controlplane/edc-runtime-memory/README.md
index ca1f0bef7..caf7fe24e 100644
--- a/edc-controlplane/edc-controlplane-memory/README.md
+++ b/edc-controlplane/edc-runtime-memory/README.md
@@ -1,54 +1,64 @@
 # EDC Control-Plane backed by In-Memory Stores
 
+## Security
+
+### In-memory Vault implementation
+
+The goal of this extension is to provide an ephemeral, memory-based vault implementation that can be used in testing or
+demo scenarios.
+
+Please note that this vault does not encrypt the secrets; they are held in memory in plain text at runtime! In addition,
+its ephemeral nature makes it unsuitable for replicated/multi-instance scenarios, e.g. Kubernetes.
+
+> It is not a secure secret store, please do NOT use it in production workloads!
+
 ## Building
 
 ```shell
-./gradlew :edc-controlplane:edc-controlplane-memory:dockerize
+./gradlew :edc-controlplane:edc-runtime-memory:dockerize
 ```
 
 ## Configuration (configuration.properties)
 
-Listed below are configuration keys needed to get the `edc-controlplane-memory` up and running.
-Details regarding each configuration property can be found at the [documentary section of the EDC](https://github.com/eclipse-edc/Connector/tree/main/docs).
- -| Key | Required | Example | Description | -|--------------------------------------------------|----------|--------------------------------------|----------------------------| -| edc.api.auth.key | | password | default value: random UUID | -| web.http.default.port | X | 8080 | | -| web.http.default.path | X | /api | | -| web.http.data.port | X | 8181 | | -| web.http.data.path | X | /data | | -| web.http.validation.port | X | 8182 | | -| web.http.validation.path | X | /validation | | -| web.http.control.port | X | 9999 | | -| web.http.control.path | X | /api/controlplane/control | | -| web.http.ids.port | X | 8282 | | -| web.http.ids.path | X | /api/v1/ids | | -| edc.receiver.http.endpoint | X | | | -| edc.ids.title | | Eclipse Dataspace Connector | | -| edc.ids.description | | Eclipse Dataspace Connector | | -| edc.ids.id | | urn:connector:edc | | -| edc.ids.security.profile | | base | | -| edc.ids.endpoint | | | | -| edc.ids.maintainer | | | | -| edc.ids.curator | | | | -| edc.ids.catalog.id | | urn:catalog:default | | -| ids.webhook.address | | | | -| edc.hostname | | localhost | | -| edc.oauth.token.url | X | | | -| edc.oauth.public.key.alias | X | key-to-daps-certificate-in-keyvault | | -| edc.oauth.private.key.alias | X | key-to-private-key-in-keyvault | | -| edc.oauth.client.id | X | daps-oauth-client-id | | -| edc.vault.clientid | X | 00000000-1111-2222-3333-444444444444 | | -| edc.vault.tenantid | X | 55555555-6666-7777-8888-999999999999 | | -| edc.vault.name | X | my-vault-name | | -| edc.vault.clientsecret | X | 34-chars-secret | | -| edc.transfer.proxy.endpoint | X | | | -| edc.transfer.proxy.token.signer.privatekey.alias | X | | | +Listed below are configuration keys needed to get the `edc-runtime-memory` up and running. +Details regarding each configuration property can be found at +the [documentary section of the EDC](https://github.com/eclipse-edc/Connector/tree/main/docs). + +| Key | Required | Example | Description | +|--------------------------------------------------|----------|-------------------------------------|----------------------------| +| edc.api.auth.key | | password | default value: random UUID | +| web.http.default.port | X | 8080 | | +| web.http.default.path | X | /api | | +| web.http.data.port | X | 8181 | | +| web.http.data.path | X | /data | | +| web.http.validation.port | X | 8182 | | +| web.http.validation.path | X | /validation | | +| web.http.control.port | X | 9999 | | +| web.http.control.path | X | /api/controlplane/control | | +| web.http.ids.port | X | 8282 | | +| web.http.ids.path | X | /api/v1/ids | | +| edc.receiver.http.endpoint | X | | | +| edc.ids.title | | Eclipse Dataspace Connector | | +| edc.ids.description | | Eclipse Dataspace Connector | | +| edc.ids.id | | urn:connector:edc | | +| edc.ids.security.profile | | base | | +| edc.ids.endpoint | | | | +| edc.ids.maintainer | | | | +| edc.ids.curator | | | | +| edc.ids.catalog.id | | urn:catalog:default | | +| ids.webhook.address | | | | +| edc.hostname | | localhost | | +| edc.oauth.token.url | X | | | +| edc.oauth.public.key.alias | X | key-to-daps-certificate-in-keyvault | | +| edc.oauth.private.key.alias | X | key-to-private-key-in-keyvault | | +| edc.oauth.client.id | X | daps-oauth-client-id | | +| edc.transfer.proxy.endpoint | X | | | +| edc.transfer.proxy.token.signer.privatekey.alias | X | | | ### Example configuration.properties -JDK properties-style configuration of the EDC Control-Plane is expected to be mounted to `/app/configuration.properties` within the container. 
+JDK properties-style configuration of the EDC Control-Plane is expected to be mounted to `/app/configuration.properties` +within the container. ```shell # Create configuration.properties @@ -88,12 +98,6 @@ edc.oauth.public.key.alias=key-to-daps-certificate-in-keyvault edc.oauth.private.key.alias=key-to-private-key-in-keyvault edc.oauth.client.id=daps-oauth-client-id -# Azure vault related configuration -edc.vault.clientid=00000000-1111-2222-3333-444444444444 -edc.vault.tenantid=55555555-6666-7777-8888-999999999999 -edc.vault.name=my-vault-name -edc.vault.clientsecret=34-chars-secret - # Control- / Data- Plane configuration edc.transfer.proxy.endpoint=http://dataplane-public-endpoint/public edc.transfer.proxy.token.signer.privatekey.alias=azure-vault-token-signer-private-key @@ -115,24 +119,13 @@ java.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [ EOF ``` -### Example opentelemetry.properties - -```shell -# Create opentelemetry.properties -export OPENTELEMETRY_PROPERTIES_FILE=$(mktemp /tmp/opentelemetry.properties.XXXXXX) -cat << 'EOF' > ${OPENTELEMETRY_PROPERTIES_FILE} -otel.javaagent.enabled=true -otel.javaagent.debug=false -EOF -``` - ## Running ```shell docker run \ + -e SECRETS="key1:secret1,key2:secret2" \ -p 8080:8080 -p 8181:8181 -p 8182:8182 -p 8282:8282 -p 9090:9090 -p 9999:9999 \ -v ${CONFIGURATION_PROPERTIES_FILE:-/dev/null}:/app/configuration.properties \ -v ${LOGGING_PROPERTIES_FILE:-/dev/null}:/app/logging.properties \ - -v ${OPENTELEMETRY_PROPERTIES_FILE:-/dev/null}:/app/opentelemetry.properties \ - -i edc-controlplane-memory:latest + -i edc-runtime-memory:latest ``` diff --git a/edc-controlplane/edc-controlplane-memory/build.gradle.kts b/edc-controlplane/edc-runtime-memory/build.gradle.kts similarity index 77% rename from edc-controlplane/edc-controlplane-memory/build.gradle.kts rename to edc-controlplane/edc-runtime-memory/build.gradle.kts index 1254a3634..304584058 100644 --- a/edc-controlplane/edc-controlplane-memory/build.gradle.kts +++ b/edc-controlplane/edc-runtime-memory/build.gradle.kts @@ -1,5 +1,3 @@ -import com.bmuschko.gradle.docker.tasks.image.DockerBuildImage - plugins { `java-library` id("application") @@ -7,11 +5,12 @@ plugins { } dependencies { - runtimeOnly(project(":edc-controlplane:edc-controlplane-base")) + implementation(edc.spi.core) + runtimeOnly(project(":edc-controlplane:edc-controlplane-base")) { + exclude(module = "data-encryption") + } + runtimeOnly(project(":edc-dataplane:edc-dataplane-base")) runtimeOnly(edc.core.controlplane) - runtimeOnly(edc.azure.vault) - runtimeOnly(edc.azure.identity) - } tasks.withType { diff --git a/edc-controlplane/edc-controlplane-memory/notice.md b/edc-controlplane/edc-runtime-memory/notice.md similarity index 90% rename from edc-controlplane/edc-controlplane-memory/notice.md rename to edc-controlplane/edc-runtime-memory/notice.md index c58f81b30..f33bb6885 100644 --- a/edc-controlplane/edc-controlplane-memory/notice.md +++ b/edc-controlplane/edc-runtime-memory/notice.md @@ -2,7 +2,7 @@ An EDC Control Plane using memory-based storage, and Azure KeyVault as secret store. 
-DockerHub: https://hub.docker.com/r/tractusx/edc-controlplane-memory +DockerHub: https://hub.docker.com/r/tractusx/edc-runtime-memory Eclipse Tractus-X product(s) installed within the image: @@ -10,7 +10,7 @@ Eclipse Tractus-X product(s) installed within the image: - GitHub: https://github.com/eclipse-tractusx/tractusx-edc - Project home: https://projects.eclipse.org/projects/automotive.tractusx -- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile +- Dockerfile: https://github.com/eclipse-tractusx/tractusx-edc/blob/main/edc-controlplane/edc-runtime-memory/src/main/docker/Dockerfile - Project license: [Apache License, Version 2.0](https://github.com/eclipse-tractusx/tractusx-edc/blob/main/LICENSE) ## Used base image diff --git a/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile b/edc-controlplane/edc-runtime-memory/src/main/docker/Dockerfile similarity index 61% rename from edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile rename to edc-controlplane/edc-runtime-memory/src/main/docker/Dockerfile index d248e8131..de17857c4 100644 --- a/edc-controlplane/edc-controlplane-memory/src/main/docker/Dockerfile +++ b/edc-controlplane/edc-runtime-memory/src/main/docker/Dockerfile @@ -1,4 +1,5 @@ # +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) # Copyright (c) 2023 ZF Friedrichshafen AG # Copyright (c) 2022,2023 Mercedes-Benz Tech Innovation GmbH # Copyright (c) 2021,2023 Contributors to the Eclipse Foundation @@ -18,13 +19,6 @@ # # SPDX-License-Identifier: Apache-2.0 # -FROM alpine:3.17.3 as otel - -ENV OTEL_AGENT_LOCATION "https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v1.12.1/opentelemetry-javaagent.jar" - -HEALTHCHECK NONE - -RUN wget ${OTEL_AGENT_LOCATION} -O /tmp/opentelemetry-javaagent.jar FROM eclipse-temurin:17.0.6_10-jre-alpine ARG JAR @@ -46,18 +40,10 @@ RUN adduser \ USER "$APP_USER" WORKDIR /app -COPY --from=otel /tmp/opentelemetry-javaagent.jar . 
COPY ${JAR} edc-controlplane.jar HEALTHCHECK NONE -CMD ["java", \ - "-javaagent:/app/opentelemetry-javaagent.jar", \ - "-Dedc.fs.config=/app/configuration.properties", \ - "-Djava.util.logging.config.file=/app/logging.properties", \ - "-Dotel.javaagent.configuration-file=/app/opentelemetry.properties", \ - "-Dotel.metrics.exporter=prometheus", \ - "-Dotel.exporter.prometheus.port=9090", \ - "-Djava.security.egd=file:/dev/urandom", \ - "-jar", \ - "edc-controlplane.jar"] +# need the sh -c syntax so that the SECRETS variable gets expanded +# use the "exec" syntax so that SIGINT reaches the JVM -> graceful termination +CMD ["sh", "-c", "exec java -Dedc.fs.config=/app/configuration.properties -Dedc.vault.secrets=\"${SECRETS}\" -Djava.util.logging.config.file=/app/logging.properties -Djava.security.egd=file:/dev/urandom -jar edc-controlplane.jar"] diff --git a/edc-controlplane/edc-runtime-memory/src/main/java/org/eclipse/tractusx/edc/vault/memory/InMemoryVault.java b/edc-controlplane/edc-runtime-memory/src/main/java/org/eclipse/tractusx/edc/vault/memory/InMemoryVault.java new file mode 100644 index 000000000..9b92a83c0 --- /dev/null +++ b/edc-controlplane/edc-runtime-memory/src/main/java/org/eclipse/tractusx/edc/vault/memory/InMemoryVault.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - initial API and implementation + * + */ + +package org.eclipse.tractusx.edc.vault.memory; + +import org.eclipse.edc.spi.monitor.Monitor; +import org.eclipse.edc.spi.result.Result; +import org.eclipse.edc.spi.security.Vault; +import org.jetbrains.annotations.Nullable; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class InMemoryVault implements Vault { + private final Map secrets = new ConcurrentHashMap<>(); + private final Monitor monitor; + + public InMemoryVault(Monitor monitor) { + this.monitor = monitor; + } + + @Override + public @Nullable String resolveSecret(String s) { + monitor.debug("resolving secret " + s); + return secrets.getOrDefault(s, null); + } + + @Override + public Result storeSecret(String s, String s1) { + monitor.debug("storing secret " + s); + secrets.put(s, s1); + return Result.success(); + } + + @Override + public Result deleteSecret(String s) { + monitor.debug("deleting secret " + s); + return secrets.remove(s) == null ? 
+ Result.failure("Secret with key " + s + " does not exist") : + Result.success(); + } +} diff --git a/edc-controlplane/edc-runtime-memory/src/main/java/org/eclipse/tractusx/edc/vault/memory/VaultMemoryExtension.java b/edc-controlplane/edc-runtime-memory/src/main/java/org/eclipse/tractusx/edc/vault/memory/VaultMemoryExtension.java new file mode 100644 index 000000000..c8d2cc7d2 --- /dev/null +++ b/edc-controlplane/edc-runtime-memory/src/main/java/org/eclipse/tractusx/edc/vault/memory/VaultMemoryExtension.java @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - initial API and implementation + * + */ +package org.eclipse.tractusx.edc.vault.memory; + +import org.eclipse.edc.runtime.metamodel.annotation.Extension; +import org.eclipse.edc.runtime.metamodel.annotation.Provider; +import org.eclipse.edc.runtime.metamodel.annotation.Provides; +import org.eclipse.edc.runtime.metamodel.annotation.Setting; +import org.eclipse.edc.spi.security.*; +import org.eclipse.edc.spi.system.ServiceExtension; +import org.eclipse.edc.spi.system.ServiceExtensionContext; + +import java.util.stream.Stream; + +@Provides({PrivateKeyResolver.class, CertificateResolver.class}) +@Extension(value = "In-memory vault extension", categories = {"vault", "security"}) +public class VaultMemoryExtension implements ServiceExtension { + + @Setting(value = "Secrets with which the vault gets initially populated. Specify as comma-separated list of key:secret pairs.") + public static final String VAULT_MEMORY_SECRETS_PROPERTY = "edc.vault.secrets"; + public static final String NAME = "In-Memory Vault Extension"; + + @Override + public String name() { + return NAME; + } + + @Provider + public Vault createInMemVault(ServiceExtensionContext context) { + var seedSecrets = context.getSetting(VAULT_MEMORY_SECRETS_PROPERTY, null); + var vault = new InMemoryVault(context.getMonitor()); + context.registerService(PrivateKeyResolver.class, new VaultPrivateKeyResolver(vault)); + context.registerService(CertificateResolver.class, new VaultCertificateResolver(vault)); + if (seedSecrets != null) { + Stream.of(seedSecrets.split(";")) + .filter(pair -> pair.contains(":")) + .map(kvp -> kvp.split(":", 2)) + .filter(kvp -> kvp.length >= 2) + .forEach(pair -> vault.storeSecret(pair[0], pair[1])); + } + return vault; + } +} diff --git a/edc-controlplane/edc-runtime-memory/src/main/resources/META-INF/services/org.eclipse.edc.spi.system.ServiceExtension b/edc-controlplane/edc-runtime-memory/src/main/resources/META-INF/services/org.eclipse.edc.spi.system.ServiceExtension new file mode 100644 index 000000000..b105388ea --- /dev/null +++ b/edc-controlplane/edc-runtime-memory/src/main/resources/META-INF/services/org.eclipse.edc.spi.system.ServiceExtension @@ -0,0 +1,21 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2021,2022 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +org.eclipse.tractusx.edc.vault.memory.VaultMemoryExtension diff --git a/edc-controlplane/edc-runtime-memory/src/test/java/org/eclipse/tractusx/edc/vault/memory/InMemoryVaultTest.java b/edc-controlplane/edc-runtime-memory/src/test/java/org/eclipse/tractusx/edc/vault/memory/InMemoryVaultTest.java new file mode 100644 index 000000000..c00ae8180 --- /dev/null +++ b/edc-controlplane/edc-runtime-memory/src/test/java/org/eclipse/tractusx/edc/vault/memory/InMemoryVaultTest.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Bayerische Motoren Werke Aktiengesellschaft (BMW AG) - initial API and implementation + * + */ + +package org.eclipse.tractusx.edc.vault.memory; + +import org.eclipse.edc.spi.monitor.Monitor; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +class InMemoryVaultTest { + + private InMemoryVault vault; + + @BeforeEach + void setUp() { + vault = new InMemoryVault(mock(Monitor.class)); + } + + @Test + void resolveSecret() { + assertThat(vault.resolveSecret("key")).isNull(); + vault.storeSecret("key", "secret"); + assertThat(vault.resolveSecret("key")).isEqualTo("secret"); + } + + @Test + void storeSecret() { + assertThat(vault.storeSecret("key", "value1").succeeded()).isTrue(); + assertThat(vault.resolveSecret("key")).isEqualTo("value1"); + assertThat(vault.storeSecret("key", "value2").succeeded()).isTrue(); + assertThat(vault.resolveSecret("key")).isEqualTo("value2"); + } + + @Test + void deleteSecret() { + assertThat(vault.deleteSecret("key").succeeded()).isFalse(); + assertThat(vault.storeSecret("key", "value1").succeeded()).isTrue(); + assertThat(vault.deleteSecret("key").succeeded()).isTrue(); + assertThat(vault.resolveSecret("key")).isNull(); + + } +} diff --git a/edc-controlplane/edc-runtime-memory/src/test/java/org/eclipse/tractusx/edc/vault/memory/VaultMemoryExtensionTest.java b/edc-controlplane/edc-runtime-memory/src/test/java/org/eclipse/tractusx/edc/vault/memory/VaultMemoryExtensionTest.java new file mode 100644 index 000000000..bec589d79 --- /dev/null +++ b/edc-controlplane/edc-runtime-memory/src/test/java/org/eclipse/tractusx/edc/vault/memory/VaultMemoryExtensionTest.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + * + * Contributors: + * Bayerische Motoren Werke Aktiengesellschaft (BMW 
AG) - initial API and implementation + * + */ + +package org.eclipse.tractusx.edc.vault.memory; + +import org.eclipse.edc.spi.monitor.Monitor; +import org.eclipse.edc.spi.system.ServiceExtensionContext; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.*; + +class VaultMemoryExtensionTest { + private VaultMemoryExtension extension; + private ServiceExtensionContext context; + private Monitor monitor; + + @BeforeEach + void setup() { + extension = new VaultMemoryExtension(); + context = mock(ServiceExtensionContext.class); + monitor = mock(Monitor.class); + when(context.getMonitor()).thenReturn(monitor); + } + + @Test + void name() { + assertThat(extension.name()).isEqualTo("In-Memory Vault Extension"); + } + + @ParameterizedTest + @ValueSource(strings = {"key1:", "key1:value1", "key1:value1;", ";key1:value1", ";sdf;key1:value1"}) + void createInMemVault_validString(String secret) { + when(context.getSetting(eq(VaultMemoryExtension.VAULT_MEMORY_SECRETS_PROPERTY), eq(null))).thenReturn(secret); + extension.createInMemVault(context); + verify(monitor, times(1)).debug(anyString()); + } +} diff --git a/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/values.yaml b/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/values.yaml index 57779134b..07c1e0b3b 100644 --- a/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/values.yaml +++ b/edc-tests/cucumber/src/main/resources/deployment/helm/supporting-infrastructure/values.yaml @@ -8,19 +8,6 @@ install: postgresql: true vault: true minio: true - backendservice: true - -################### -# Backend Service # -################### -backend: - fullnameOverride: "backend" - service: - type: NodePort - frontend: - port: 8080 - backend: - port: 8081 ######## diff --git a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Negotiation.java b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Negotiation.java index ddc715c9b..0d380685e 100644 --- a/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Negotiation.java +++ b/edc-tests/cucumber/src/test/java/org/eclipse/tractusx/edc/tests/data/Negotiation.java @@ -22,7 +22,7 @@ public Negotiation(String id) { public void waitUntilComplete(DataManagementAPI dataManagementAPI) { await() - .pollDelay(Duration.ofMillis(2000)) + .pollDelay(Duration.ofMillis(5000)) .atMost(Timeouts.CONTRACT_NEGOTIATION) .until(() -> isComplete(dataManagementAPI)); } diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/.helmignore b/edc-tests/deployment/src/main/resources/helm/omejdn/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/omejdn/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/Chart.yaml b/edc-tests/deployment/src/main/resources/helm/omejdn/Chart.yaml
new file mode 100644
index 000000000..613b1f45c
--- /dev/null
+++ b/edc-tests/deployment/src/main/resources/helm/omejdn/Chart.yaml
@@ -0,0 +1,25 @@
+---
+apiVersion: v2
+name: ids-daps
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.0.1
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "0.0.1"
diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/README.md b/edc-tests/deployment/src/main/resources/helm/omejdn/README.md
new file mode 100644
index 000000000..f85a94889
--- /dev/null
+++ b/edc-tests/deployment/src/main/resources/helm/omejdn/README.md
@@ -0,0 +1,21 @@
+# Omejdn DAPS
+
+This chart deploys an [IDS Omejdn DAPS](https://github.com/Fraunhofer-AISEC/omejdn-server).
+
+Two Eclipse Dataspace Connectors need to be registered at the same DAPS instance to be able to talk to each other. Each connector is registered in the DAPS by a unique client ID and a corresponding client certificate.
+
+New connectors are configured in the omejdn _values.yaml_.
+
+In each Eclipse Dataspace Connector, configure the following properties to use the DAPS.
+
+```properties
+  edc.oauth.client.id=
+
+  edc.oauth.provider.jwks.url="http://:4567/.well-known/jwks.json"
+  edc.oauth.token.url="http://:4567/token"
+
+  edc.oauth.private.key.alias=
+  edc.oauth.public.key.alias=
+
+  edc.oauth.provider.audience=idsc:IDS_CONNECTORS_ALL
+```
diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/templates/_helpers.tpl b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/_helpers.tpl
new file mode 100644
index 000000000..95b115eee
--- /dev/null
+++ b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "omejdn.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}} +{{- define "omejdn.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "omejdn.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "omejdn.labels" -}} +helm.sh/chart: {{ include "omejdn.chart" . }} +{{ include "omejdn.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "omejdn.selectorLabels" -}} +app.kubernetes.io/name: {{ include "omejdn.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "omejdn.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "omejdn.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/templates/configmap.yaml b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/configmap.yaml new file mode 100644 index 000000000..3d3e17c1f --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/configmap.yaml @@ -0,0 +1,73 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "omejdn.fullname" . }} + labels: + {{- include "omejdn.labels" . 
| nindent 4 }} +data: + scope_mapping.yml: |- + --- + idsc:IDS_CONNECTOR_ATTRIBUTES_ALL: + - referringConnector + + omejdn.yml: |- + --- + host: http://ids-daps:4567/ + path_prefix: '' + bind_to: 0.0.0.0 + allow_origin: "*" + app_env: debug + openid: false + user_backend: + - yaml + user_backend_default: yaml + accept_audience: idsc:IDS_CONNECTORS_ALL + issuer: http://ids-daps:4567/ + environment: development + default_audience: + - idsc:IDS_CONNECTORS_ALL + access_token: + expiration: 3600 + algorithm: RS256 + id_token: + expiration: 3600 + algorithm: RS256 + + plugins.yml: |- + --- + plugins: + token_user_attributes: + + clients.yml: |- + --- + - client_id: data-plane-oauth2 + client_secret: supersecret + name: provision oauth2 + grant_types: + - client_credentials + token_endpoint_auth_method: client_secret_post + scope: openid +{{- range $i, $val := .Values.connectors }} + - client_id: {{ quote $val.id }} + name: {{ quote $val.name }} + token_endpoint_auth_method: private_key_jwt + grant_types: + - client_credentials + scope: + - idsc:IDS_CONNECTOR_ATTRIBUTES_ALL + attributes: + - key: idsc + value: IDS_CONNECTOR_ATTRIBUTES_ALL + - key: securityProfile + value: idsc:BASE_SECURITY_PROFILE + {{- range $key, $value := $val.attributes }} + - key: {{ $key }} + value: {{ $value }} + {{- end }} + redirect_uri: http://localhost:4200 +{{ end -}} + + +{{- range $i, $val := .Values.connectors }} + {{ $val.name }}: {{ quote $val.certificate | toString }} +{{ end -}} diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/templates/deployment.yaml b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/deployment.yaml new file mode 100644 index 000000000..289476122 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/deployment.yaml @@ -0,0 +1,149 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "omejdn.fullname" . }} + labels: + {{- include "omejdn.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "omejdn.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "omejdn.selectorLabels" . | nindent 8 }} + spec: + {{- if .Values.imagePullSecret.dockerconfigjson }} + imagePullSecrets: + - name: {{ include "omejdn.fullname" . }}-imagepullsecret + {{- else }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + serviceAccountName: {{ include "omejdn.serviceAccountName" . }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + initContainers: + - name: init-daps-pvc + image: alpine + command: + - "sh" + - "-c" + args: + - | + cp /opt/config/omejdn.yml /etc/daps/omejdn.yml + cp /opt/config/clients.yml /etc/daps/clients.yml + cp /opt/config/plugins.yml /etc/daps/plugins.yml + cp /opt/config/scope_mapping.yml /etc/daps/scope_mapping.yml + apk add --update openssl + openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout /etc/keys/omejdn/omejdn.key \ + -subj "/C=DE/ST=Berlin/L=Berlin/O=TractusX-EDC-Test, Inc./OU=DE" + volumeMounts: + - mountPath: /etc/daps + name: config-dir + - mountPath: /etc/keys/omejdn + name: omejdn-key-dir + - mountPath: /opt/config/omejdn.yml + name: omejdn-config + subPath: omejdn.yml + - mountPath: /opt/config/scope_mapping.yml + name: scope-mapping + subPath: scope_mapping.yml + - mountPath: /opt/config/clients.yml + name: clients-config + subPath: clients.yml + - mountPath: /opt/config/plugins.yml + name: plugins-config + subPath: plugins.yml + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - mountPath: /opt/config/ + name: config-dir + - mountPath: /opt/keys/omejdn/omejdn.key + name: omejdn-key-dir + subPath: omejdn.key + - mountPath: /opt/keys/clients/ + name: client-certificates + ports: + - name: http + containerPort: 4567 + protocol: TCP + livenessProbe: + httpGet: + path: /jwks.json + port: http + readinessProbe: + httpGet: + path: /jwks.json + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + env: + - name: OMEJDN_JWT_AUD_OVERRIDE + value: "idsc:IDS_CONNECTORS_ALL" + - name: OMEJDN_PLUGINS + value: "config/plugins.yml" + volumes: + - name: config-dir + emptyDir: {} + - name: omejdn-key-dir + emptyDir: {} + - name: omejdn-config + configMap: + name: {{ include "omejdn.fullname" . }} + items: + - key: omejdn.yml + path: omejdn.yml + - name: scope-mapping + configMap: + name: {{ include "omejdn.fullname" . }} + items: + - key: scope_mapping.yml + path: scope_mapping.yml + - name: clients-config + configMap: + name: {{ include "omejdn.fullname" . }} + items: + - key: clients.yml + path: clients.yml + - name: plugins-config + configMap: + name: {{ include "omejdn.fullname" . }} + items: + - key: plugins.yml + path: plugins.yml + - name: client-certificates + configMap: + name: {{ include "omejdn.fullname" . }} + items: + {{- range $i, $val := .Values.connectors }} + - key: {{ $val.name }} + path: {{ $val.id }}.cert + {{- end }} diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/templates/hpa.yaml b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/hpa.yaml new file mode 100644 index 000000000..ce2a70957 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "omejdn.fullname" . }} + labels: + {{- include "omejdn.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "omejdn.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/templates/imagepullsecret.yaml b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/imagepullsecret.yaml new file mode 100644 index 000000000..d7c1d31d7 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/imagepullsecret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.imagePullSecret.dockerconfigjson }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "edc-dataplane.fullname" . }}-imagepullsecret + namespace: {{ .Release.Namespace | default "default" | quote }} + labels: + {{- include "edc-dataplane.labels" . | nindent 4 }} +data: + .dockerconfigjson: {{ .Values.imagePullSecret.dockerconfigjson }} +type: kubernetes.io/dockerconfigjson +{{- end }} diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/templates/service.yaml b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/service.yaml new file mode 100644 index 000000000..57dfe3921 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "omejdn.fullname" . }} + labels: + {{- include "omejdn.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "omejdn.selectorLabels" . | nindent 4 }} diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/templates/serviceaccount.yaml b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/serviceaccount.yaml new file mode 100644 index 000000000..17baf8239 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/omejdn/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "omejdn.serviceAccountName" . }} + labels: + {{- include "omejdn.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/edc-tests/deployment/src/main/resources/helm/omejdn/values.yaml b/edc-tests/deployment/src/main/resources/helm/omejdn/values.yaml new file mode 100644 index 000000000..ae82cd1d6 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/omejdn/values.yaml @@ -0,0 +1,91 @@ +--- +# Default values for omejdn. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# -- Specifies how many replicas of a deployed pod shall be created during the deployment +# Note: If horizontal pod autoscaling is enabled this setting has no effect +replicaCount: 1 + +image: + # -- Which omjedn container image to use + repository: ghcr.io/fraunhofer-aisec/omejdn-server + # -- [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use + pullPolicy: IfNotPresent + # -- Overrides the image tag whose default is the chart appVersion + tag: "1.7.1" + +imagePullSecret: + # -- Image pull secret to create to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) + # Note: This value needs to adhere to the [(base64 encoded) .dockerconfigjson format](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). + # Furthermore, if 'imagePullSecret.dockerconfigjson' is defined, it takes precedence over 'imagePullSecrets'. + dockerconfigjson: "" + +# -- Overrides the charts name +nameOverride: "" + +# -- Overrides the releases full name +fullnameOverride: "" + +serviceAccount: + # -- Specifies whether a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) should be created per release + create: true + # -- [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to add to the service account + annotations: {} + # -- The name of the service account to use. If not set and create is true, a name is generated using the release's fullname template + name: "" + +# -- Whether to [automount kubernetes API credentials](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server) into the pod +automountServiceAccountToken: false + +# -- [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) added to deployed [pods](https://kubernetes.io/docs/concepts/workloads/pods/) +podAnnotations: {} + +# The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment +podSecurityContext: {} + +# The [container security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) defines privilege and access control settings for a Container within a pod +securityContext: {} + +service: + # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. + type: ClusterIP + # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service) to expose the running application on a set of Pods as a network service. 
+  port: 4567
+
+# -- [Resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) applied to the deployed pod
+resources: {}
+
+autoscaling:
+  # -- Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
+  enabled: false
+  # -- Minimal replicas if resource consumption falls below resource thresholds
+  minReplicas: 1
+  # -- Maximum replicas if resource consumption exceeds resource thresholds
+  maxReplicas: 100
+  # -- targetAverageUtilization of cpu provided to a pod
+  targetCPUUtilizationPercentage: 80
+  # -- targetAverageUtilization of memory provided to a pod
+  targetMemoryUtilizationPercentage: 80
+
+# -- [Node-Selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) to constrain the Pod to nodes with specific labels.
+nodeSelector: {}
+
+# -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) are applied to Pods to schedule onto nodes with matching taints.
+tolerations: []
+
+# -- [Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) constrains which nodes the Pod can be scheduled on based on node labels.
+affinity: {}
+
+# List of connector clients. Certificate and Client-ID must be configured in parallel.
+#
+# Example Connector: +# - id: grMsEz3EcsS3ENYJufNgUIeg4QsaL49M0gWxSexPdC4pon96Nvju90D8RlvAJB21 +# name: my-connector +# attributes: +# issuerConnector: http://localhost:8080/ +# certificate: |- +# -----BEGIN CERTIFICATE----- +# foo +# -----END CERTIFICATE----- +connectors: [] diff --git a/edc-tests/deployment/src/main/resources/helm/test-infrastructure/.gitignore b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/.gitignore new file mode 100644 index 000000000..8681aba50 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/.gitignore @@ -0,0 +1,4 @@ +# ignore downloaded helm depdencies +charts/ + +Chart.lock diff --git a/edc-tests/deployment/src/main/resources/helm/test-infrastructure/.helmignore b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/.helmignore new file mode 100644 index 000000000..8c60d7821 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +docs diff --git a/edc-tests/deployment/src/main/resources/helm/test-infrastructure/Chart.yaml b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/Chart.yaml new file mode 100644 index 000000000..6e7f24fe5 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/Chart.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: v2 +name: all-in-one +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0"
+
+dependencies:
+  # IDS Dynamic Attribute Provisioning Service (IAM)
+  - name: ids-daps
+    version: 0.0.1
+    repository: "file://../omejdn"
+    alias: idsdaps
+    condition: install.daps
+
+  # HashiCorp Vault
+  - name: vault
+    alias: vault
+    version: 0.20.0
+    repository: https://helm.releases.hashicorp.com
+    condition: install.vault
+
+  # PostgreSQL
+  - name: postgresql
+    alias: postgresql
+    version: 12.1.6
+    repository: https://charts.bitnami.com/bitnami
+    condition: install.postgresql
+
+  # MinIo
+  - name: minio
+    alias: minio
+    repository: https://charts.min.io
+    version: 4.1.0
+    condition: install.minio
diff --git a/edc-tests/deployment/src/main/resources/helm/test-infrastructure/README.md b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/README.md
new file mode 100644
index 000000000..e927d7bc5
--- /dev/null
+++ b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/README.md
@@ -0,0 +1,54 @@
+# Supporting Infrastructure Deployment
+
+The Supporting Infrastructure Deployment creates a complete, independent and already configured EDC test environment.
+During the automated business tests, these infrastructure components are deployed together with two connectors (Plato & Sokrates).
+
+This deployment could also be used as
+
+- reference setup for teams that want to create their own connector
+- standalone infrastructure to try things out
+
+This deployment should **never** be used
+
+- in **any** production or near-production environments
+- in **any** long-living, internet-facing connector setups
+
+## Omejdn DAPS
+
+The Dynamic Attribute Provisioning Service (DAPS) is a component of the IDS Ecosystem.
+The Fraunhofer Institute has created a DAPS reference implementation, the Omejdn
+DAPS ([link](https://github.com/Fraunhofer-AISEC/omejdn-server)). This deployment configures and deploys an instance of
+this reference implementation.
+
+Definition of DAPS from the IDS Reference architecture v3.0:
+
+> The Identity Provider acts as an agent for the International
+> Data Spaces Association. It is responsible for issuing technical identities to parties that have been approved to become
+> Participants in the International Data Spaces. The Identity
+> Provider is instructed to issue identities based on approved
+> roles (e.g., App Store or App Provider). Only if equipped with
+> such an identity, an entity is allowed to participate in the International Data Spaces
+
+Also, please note that the Omejdn DAPS is meant as a research sandbox and should not be used in any
+productive environment.
+
+> **IMPORTANT:** Omejdn is meant to be a research sandbox in which we can (re)implement standard protocols and
+> potentially extend and modify functionality under the hood to support research projects. Use at your own
+> risk! ([source](https://github.com/Fraunhofer-AISEC/omejdn-server))
+
+## HashiCorp Vault
+
+The Control Plane and the Data Plane persist confidential data in the vault and communicate using only the secret
+names. Hence, it is not possible to run a connector without an instance of a vault.
+
+## PostgreSQL
+
+This database is used to persist the state of the Control Plane.
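
Because the connectors only ever exchange secret *names*, application code obtains the actual values at runtime through the EDC `Vault` SPI introduced elsewhere in this patch. Below is a minimal, illustrative sketch of such a lookup: the class name is hypothetical, and the `daps-private-key` alias is simply the default from the connector chart's `vault.secretNames` earlier in this patch. Any vault backend (HashiCorp, Azure, or the in-memory one) can answer the call.

```java
import org.eclipse.edc.spi.security.Vault;

/**
 * Illustrative sketch only: resolves DAPS key material by alias via the EDC Vault SPI.
 * Which vault implementation backs the call is purely a deployment concern.
 */
public class DapsKeyLookup {

    private final Vault vault;

    public DapsKeyLookup(Vault vault) {
        this.vault = vault;
    }

    public String privateKeyPem() {
        // "daps-private-key" is the default alias from the connector chart's vault.secretNames
        var pem = vault.resolveSecret("daps-private-key");
        if (pem == null) {
            throw new IllegalStateException("Secret 'daps-private-key' not found in vault");
        }
        return pem;
    }
}
```

In the in-memory setup, the same alias would simply have to be part of the seeded `edc.vault.secrets` string; with HashiCorp Vault it has to be written into the vault beforehand, as the `postStart` script further below does for the Sokrates keys.
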
+ +## Setup + +Simply execute the following comment in a shell: + +```shell +helm install infra edc-tests/deployment/src/main/resources/helm/test-infrastructure --update-dependencies +``` diff --git a/edc-tests/deployment/src/main/resources/helm/test-infrastructure/values.yaml b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/values.yaml new file mode 100644 index 000000000..feba29cc4 --- /dev/null +++ b/edc-tests/deployment/src/main/resources/helm/test-infrastructure/values.yaml @@ -0,0 +1,185 @@ +--- + +########### +# Install # +########### +install: + daps: true + postgresql: true + vault: true + minio: false + + +######## +# DAPS # +######## +idsdaps: + fullnameOverride: "ids-daps" + connectors: + - id: E7:07:2D:74:56:66:31:F0:7B:10:EA:B6:03:06:4C:23:7F:ED:A6:65:keyid:E7:07:2D:74:56:66:31:F0:7B:10:EA:B6:03:06:4C:23:7F:ED:A6:65 + name: sokrates + attributes: + referringConnector: http://sokrates-controlplane/BPNSOKRATES + # Must be the same certificate that is stores in section 'sokrates-vault' + certificate: |- + -----BEGIN CERTIFICATE----- + MIIEAzCCAuugAwIBAgIUXFgjbN7jxGRUDkoUvEwcN3zcew8wDQYJKoZIhvcNAQEL + BQAwgZAxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJsaW4xDzANBgNVBAcMBkJl + cmxpbjEMMAoGA1UECgwDQk1XMSAwHgYDVQQLDBdlZGMtcGxheWdyb3VuZC1wYXJ0 + bmVyMTEvMC0GA1UEAwwmc29rcmF0ZXMtZWRjLmRlbW8uY2F0ZW5hLXgubmV0L0JQ + TjEyMzQwHhcNMjIwNTEwMDc1NzMzWhcNMjMwNTEwMDc1NzMzWjCBkDELMAkGA1UE + BhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVybGluMQwwCgYDVQQK + DANCTVcxIDAeBgNVBAsMF2VkYy1wbGF5Z3JvdW5kLXBhcnRuZXIxMS8wLQYDVQQD + DCZzb2tyYXRlcy1lZGMuZGVtby5jYXRlbmEteC5uZXQvQlBOMTIzNDCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAK/41S8rumkk+IzBk9pBDETvjlPmlXfw + 78yRrLmbzaed3kGgygJ2GFFPLcR/Lv0WG8F8au4UEssbOxAU4RRjncCVt66ajaCa + llIqMlH8zaJ8rgxNpGeJU5YvmYRxlIo+Gwi0qnF0tqJh8Hry7OqSo0gK2YBBFJyV + grMsEz3EcsS3ENYJufNgUIeg4QsaL49M0gWxSexPdC4pon96Nvju90D8RlvAJB21 + PInqLniMaFlSnRYzCrUaja6HMmzKA+ZPZ1r9lllzsE00RASxRIxlKkwfzTtMb9O6 + ey2i2vM7hKGGlXjNsnYVX9WXEfvK4JrCadHzgX8qdez19RxFKtB+5gECAwEAAaNT + MFEwHQYDVR0OBBYEFOcHLXRWZjHwexDqtgMGTCN/7aZlMB8GA1UdIwQYMBaAFOcH + LXRWZjHwexDqtgMGTCN/7aZlMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL + BQADggEBAD2a5kuIdICNXfYLpSe7AIONwZVucaArYtpXBxHEy5lMJsTEJgjZzypd + iIMU7onEQGVbii6yVNpWfIpJYM4e8ytVdJuk5evclVKZs/lZ2IshLyWFVj+ITh2E + 28X4C/Hnmt4MPBCNowQf71nMp4LEziBgXp54qFV9C+qSTEVdrherRE0PU/zKyX10 + S/P5o42weTHnAO/pBN/8AmL3AymynKVgcPaW46IjjRAuc6kfZWCrYQ0M4+/7Ws5r + uM55Zae/L+C82OTNNaaK324ogsCkORPeQ23OCrRD8rZJmQ9bpoOGglPminfwEOhB + UHtyKgmvqCyOV3G/4G93W/xsLV0kxLA= + -----END CERTIFICATE----- + +############## +# PostgreSQL # +############## +postgresql: + fullnameOverride: "postgresql" + primary: + persistence: + enabled: false + readReplicas: + persistence: + enabled: false + auth: + database: "edc" + username: "user" + password: "password" + +######### +# MINIO # +######### +minio: + fullnameOverride: minio + replicas: 2 + drivesPerNode: 0 + serviceAccount: + create: false + persistence: + size: 128Mi + resources: + requests: + memory: 128Mi + service: + type: NodePort + control: + port: 9000 + users: + - accessKey: qwerty123 + secretKey: qwerty123 + policy: customBucketPolicy + buckets: + # in some cases the minio API acts strange if there exists no bucket at all + - name: dummybucket + policy: none + purge: true + policies: + - name: customBucketPolicy + statements: + - resources: + - 'arn:aws:s3:::*' + actions: + - "s3:PutObject" + - "s3:ListBucket" + - "s3:CreateBucket" + - "s3:GetObject" + - "s3:DeleteObject" + - "s3:DeleteBucket" + +######### +# VAULT # +######### 
+vault: + fullnameOverride: "vault" + injector: + enabled: false + server: + dev: + enabled: true + devRootToken: "root" + # Must be the same certificate that is configured in section 'ids-daps' + postStart: + - "sh" + - "-c" + - | + { + + sleep 5 + + /bin/vault kv put secret/sokrates/data-encryption-aes-keys content=OcvxzWCK8ETSjt1jmZw3RA== + + cat << EOF | /bin/vault kv put secret/sokrates/daps/daps-key content=- + -----BEGIN PRIVATE KEY----- + MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCv+NUvK7ppJPiM + wZPaQQxE745T5pV38O/Mkay5m82nnd5BoMoCdhhRTy3Efy79FhvBfGruFBLLGzsQ + FOEUY53Albeumo2gmpZSKjJR/M2ifK4MTaRniVOWL5mEcZSKPhsItKpxdLaiYfB6 + 8uzqkqNICtmAQRSclYKzLBM9xHLEtxDWCbnzYFCHoOELGi+PTNIFsUnsT3QuKaJ/ + ejb47vdA/EZbwCQdtTyJ6i54jGhZUp0WMwq1Go2uhzJsygPmT2da/ZZZc7BNNEQE + sUSMZSpMH807TG/TunstotrzO4ShhpV4zbJ2FV/VlxH7yuCawmnR84F/KnXs9fUc + RSrQfuYBAgMBAAECggEAO+KjsjTgcG3bhBNQnMLsSP15Y0Yicbn18ZlVvaivGS7Z + d14fwSytY+ZdPfTGaey/L16HCVSdfK9cr0Fbw9OO2P5ajzobnp9dLsMbctlkpbpm + hNtbarzKTF8QkIkSsuUl0BWjt46vpJ1N+Jl5VO7oUFkY4dPEDvG2lAEY3zlekWDm + cQeOC/YgpoW4xfRwPPS6QE0w3Q+H5NfNjfz+mSHeItTlVfTKDRliWQLPWeRZFuXh + FlRFUQnTmEE/9wpIe3Hn7WXJ3fQqcYDzxU7/zwwY9I7bB15SgVHlR0ENDPAD5X8F + MVZ3EcLlqGBy+WvTWALp6pc8YfhW3fiTWyuamXtNrQKBgQDonsIzBKEOOKdKGW0e + uyw79ErmnmzkY5nuMrMxrmTA4WKCfJ/YRRA+4sxiltWsIJ3UkHe3OBCSSCdj79hb + ugb/+UzE70hOdgrct2NUQqbrj3gvsVvU8ZRQgTRMqKpmC0zY7KOMx6NU85z3IvS1 + z5fjszcUv4kLQlldYGSAuqPy+wKBgQDBqIkc8p/wcw7ygo1q/GerNeszfoxiIFp8 + h4RWLVhkwrcXFz30wBlUWuv5/kxU8tmJcmXxe72EmUstd6wvNOAnYwCiile6zQiJ + vsr1axavZnGOtNGUp6DUAsd2iviBl7IZ7kAcqCrQo4ivGhfHmahH3hmg8wuAMjYB + 8f+FSPgaMwKBgQC7W4tMrjDOFIFhJEOIWfcRvvxI7VcFSNelS76aiDzsQVwnfxr7 + hPzFucQmsBgfUBHvMADMWGK4f1cCnh5kGtwidXgIsjVJxLeQ+EAPkLOCzQZfW3l8 + dKshgD9QcxTzpaxal5ZPAEikVqaZQtVYToCmzCTUGETYBbOWitnH+Qut2wKBgQC6 + Y6DcSLUhc0xOotLDxv1sbu/aVxF8nFEbDD+Vxf0Otc4MnmUWPRHj+8KlkVkcZcR0 + IrP1kThd+EDAGS+TG9wmbIY+6tH3S8HM+eJUBWcHGJ1xUZ1p61DC3Y3nDWiTKlLT + 3Fi+fCkBOHSku4Npq/2odh7Kp0JJd4o9oxJg0VNhuwKBgQDSFn7dqFE0Xmwc40Vr + 0wJH8cPWXKGt7KJENpj894buk2DniLD4w2x874dzTjrOFi6fKxEzbBNA9Rq9UPo8 + u9gKvl/IyWmV0c4zFCNMjRwVdnkMEte/lXcJZ67T4FXZByqAZlhrr/v0FD442Z9B + AjWFbUiBCFOo+gpAFcQGrkOQHA== + -----END PRIVATE KEY----- + EOF + + cat << EOF | /bin/vault kv put secret/sokrates/daps/daps-crt content=- + -----BEGIN CERTIFICATE----- + MIIEAzCCAuugAwIBAgIUXFgjbN7jxGRUDkoUvEwcN3zcew8wDQYJKoZIhvcNAQEL + BQAwgZAxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJsaW4xDzANBgNVBAcMBkJl + cmxpbjEMMAoGA1UECgwDQk1XMSAwHgYDVQQLDBdlZGMtcGxheWdyb3VuZC1wYXJ0 + bmVyMTEvMC0GA1UEAwwmc29rcmF0ZXMtZWRjLmRlbW8uY2F0ZW5hLXgubmV0L0JQ + TjEyMzQwHhcNMjIwNTEwMDc1NzMzWhcNMjMwNTEwMDc1NzMzWjCBkDELMAkGA1UE + BhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVybGluMQwwCgYDVQQK + DANCTVcxIDAeBgNVBAsMF2VkYy1wbGF5Z3JvdW5kLXBhcnRuZXIxMS8wLQYDVQQD + DCZzb2tyYXRlcy1lZGMuZGVtby5jYXRlbmEteC5uZXQvQlBOMTIzNDCCASIwDQYJ + KoZIhvcNAQEBBQADggEPADCCAQoCggEBAK/41S8rumkk+IzBk9pBDETvjlPmlXfw + 78yRrLmbzaed3kGgygJ2GFFPLcR/Lv0WG8F8au4UEssbOxAU4RRjncCVt66ajaCa + llIqMlH8zaJ8rgxNpGeJU5YvmYRxlIo+Gwi0qnF0tqJh8Hry7OqSo0gK2YBBFJyV + grMsEz3EcsS3ENYJufNgUIeg4QsaL49M0gWxSexPdC4pon96Nvju90D8RlvAJB21 + PInqLniMaFlSnRYzCrUaja6HMmzKA+ZPZ1r9lllzsE00RASxRIxlKkwfzTtMb9O6 + ey2i2vM7hKGGlXjNsnYVX9WXEfvK4JrCadHzgX8qdez19RxFKtB+5gECAwEAAaNT + MFEwHQYDVR0OBBYEFOcHLXRWZjHwexDqtgMGTCN/7aZlMB8GA1UdIwQYMBaAFOcH + LXRWZjHwexDqtgMGTCN/7aZlMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL + BQADggEBAD2a5kuIdICNXfYLpSe7AIONwZVucaArYtpXBxHEy5lMJsTEJgjZzypd + iIMU7onEQGVbii6yVNpWfIpJYM4e8ytVdJuk5evclVKZs/lZ2IshLyWFVj+ITh2E + 
28X4C/Hnmt4MPBCNowQf71nMp4LEziBgXp54qFV9C+qSTEVdrherRE0PU/zKyX10 + S/P5o42weTHnAO/pBN/8AmL3AymynKVgcPaW46IjjRAuc6kfZWCrYQ0M4+/7Ws5r + uM55Zae/L+C82OTNNaaK324ogsCkORPeQ23OCrRD8rZJmQ9bpoOGglPminfwEOhB + UHtyKgmvqCyOV3G/4G93W/xsLV0kxLA= + -----END CERTIFICATE----- + EOF + } diff --git a/settings.gradle.kts b/settings.gradle.kts index 22ac0d73a..e22d4aacc 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -19,7 +19,7 @@ include(":edc-tests:cucumber") // modules for controlplane artifacts include(":edc-controlplane") include(":edc-controlplane:edc-controlplane-base") -include(":edc-controlplane:edc-controlplane-memory") +include(":edc-controlplane:edc-runtime-memory") include(":edc-controlplane:edc-controlplane-memory-hashicorp-vault") include(":edc-controlplane:edc-controlplane-postgresql") include(":edc-controlplane:edc-controlplane-postgresql-hashicorp-vault") @@ -137,38 +137,34 @@ dependencyResolutionManagement { library("micrometer-jersey", "org.eclipse.edc", "jersey-micrometer").versionRef("edc") library("micrometer-jetty", "org.eclipse.edc", "jetty-micrometer").versionRef("edc") library("monitor-jdklogger", "org.eclipse.edc", "monitor-jdk-logger").versionRef("edc") - library( - "transfer.dynamicreceiver", - "org.eclipse.edc", - "transfer-pull-http-dynamic-receiver" - ).versionRef("edc") + library("transfer.dynamicreceiver", "org.eclipse.edc", "transfer-pull-http-dynamic-receiver").versionRef("edc") library("transfer.receiver", "org.eclipse.edc", "transfer-pull-http-receiver").versionRef("edc") bundle( - "connector", - listOf("boot", "core-connector", "core-jersey", "core-controlplane", "api-observability") + "connector", + listOf("boot", "core-connector", "core-jersey", "core-controlplane", "api-observability") ) bundle( - "dpf", - listOf("dpf-transfer", "dpf-selector-core", "dpf-selector-client", "spi-dataplane-selector") + "dpf", + listOf("dpf-transfer", "dpf-selector-core", "dpf-selector-client", "spi-dataplane-selector") ) bundle( - "sqlstores", - listOf( - "sql-assetindex", - "sql-contract-definition", - "sql-contract-negotiation", - "sql-transferprocess", - "sql-policydef" - ) + "sqlstores", + listOf( + "sql-assetindex", + "sql-contract-definition", + "sql-contract-negotiation", + "sql-transferprocess", + "sql-policydef" + ) ) bundle( - "monitoring", - listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty") + "monitoring", + listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty") // listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty", "monitor-jdklogger") ) } From 689f778824da3a671c1ad13be817bdcd8c8e58b9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Apr 2023 07:54:30 +0200 Subject: [PATCH 80/92] chore(deps): bump org.slf4j:slf4j-api from 2.0.3 to 2.0.7 (#234) Bumps [org.slf4j:slf4j-api](https://github.com/qos-ch/slf4j) from 2.0.3 to 2.0.7. - [Release notes](https://github.com/qos-ch/slf4j/releases) - [Commits](https://github.com/qos-ch/slf4j/compare/v_2.0.3...v_2.0.7) --- updated-dependencies: - dependency-name: org.slf4j:slf4j-api dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle.kts | 2 +- edc-tests/cucumber/build.gradle.kts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle.kts b/build.gradle.kts index 13dc385b0..809528492 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -52,7 +52,7 @@ allprojects { } dependencies { implementation("org.projectlombok:lombok:1.18.26") - implementation("org.slf4j:slf4j-api:2.0.5") + implementation("org.slf4j:slf4j-api:2.0.7") // this is used to counter version conflicts between the JUnit version pulled in by the plugin, // and the one expected by IntelliJ testImplementation(platform("org.junit:junit-bom:5.9.2")) diff --git a/edc-tests/cucumber/build.gradle.kts b/edc-tests/cucumber/build.gradle.kts index 17f5233d7..f2e40439d 100644 --- a/edc-tests/cucumber/build.gradle.kts +++ b/edc-tests/cucumber/build.gradle.kts @@ -21,7 +21,7 @@ dependencies { testImplementation("org.junit.platform:junit-platform-suite:1.9.2") testImplementation("io.cucumber:cucumber-java:7.11.2") testImplementation("io.cucumber:cucumber-junit-platform-engine:7.11.2") - testImplementation("org.slf4j:slf4j-api:2.0.3") + testImplementation("org.slf4j:slf4j-api:2.0.7") testImplementation(libs.restAssured) testImplementation(libs.postgres) testImplementation(libs.awaitility) From abb352a8048fe85c4751a38649091fb12f18bf4e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Apr 2023 07:55:25 +0200 Subject: [PATCH 81/92] chore(deps): bump com.azure:azure-security-keyvault-secrets (#235) Bumps [com.azure:azure-security-keyvault-secrets](https://github.com/Azure/azure-sdk-for-java) from 4.5.4 to 4.6.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-security-keyvault-keys_4.5.4...azure-cosmos_4.6.0) --- updated-dependencies: - dependency-name: com.azure:azure-security-keyvault-secrets dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts b/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts index bde51831c..e360497a1 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts +++ b/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts @@ -9,7 +9,7 @@ dependencies { implementation(project(":edc-dataplane:edc-dataplane-base")) implementation(edc.azure.vault) implementation(edc.azure.identity) - implementation("com.azure:azure-security-keyvault-secrets:4.5.4") + implementation("com.azure:azure-security-keyvault-secrets:4.6.0") } tasks.withType { From 7c03aec007be01e1a226b864a5c3cac56a580fea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Apr 2023 07:55:45 +0200 Subject: [PATCH 82/92] chore(deps): bump com.diffplug.spotless from 6.15.0 to 6.18.0 (#236) Bumps com.diffplug.spotless from 6.15.0 to 6.18.0. --- updated-dependencies: - dependency-name: com.diffplug.spotless dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle.kts b/build.gradle.kts index 809528492..1d4e2be61 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -6,7 +6,7 @@ plugins { `maven-publish` `jacoco-report-aggregation` id("io.freefair.lombok") version "6.6.2" - id("com.diffplug.spotless") version "6.15.0" + id("com.diffplug.spotless") version "6.18.0" id("com.github.johnrengelman.shadow") version "8.0.0" id("com.bmuschko.docker-remote-api") version "9.3.1" id("org.sonarqube") version "4.0.0.2929" From 192c4421367b4cd12b56a9523e0ddcefb4b125d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Apr 2023 06:08:43 +0000 Subject: [PATCH 83/92] chore(deps): bump com.github.johnrengelman.shadow from 8.0.0 to 8.1.1 (#237) --- build.gradle.kts | 2 +- .../edc-controlplane-memory-hashicorp-vault/build.gradle.kts | 2 +- .../build.gradle.kts | 2 +- edc-controlplane/edc-controlplane-postgresql/build.gradle.kts | 2 +- edc-controlplane/edc-runtime-memory/build.gradle.kts | 2 +- edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts | 2 +- edc-dataplane/edc-dataplane-hashicorp-vault/build.gradle.kts | 2 +- edc-tests/runtime/build.gradle.kts | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/build.gradle.kts b/build.gradle.kts index 1d4e2be61..b494219b2 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -7,7 +7,7 @@ plugins { `jacoco-report-aggregation` id("io.freefair.lombok") version "6.6.2" id("com.diffplug.spotless") version "6.18.0" - id("com.github.johnrengelman.shadow") version "8.0.0" + id("com.github.johnrengelman.shadow") version "8.1.1" id("com.bmuschko.docker-remote-api") version "9.3.1" id("org.sonarqube") version "4.0.0.2929" } diff --git a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/build.gradle.kts b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/build.gradle.kts index eb7aef8fe..3fb52b522 100644 --- a/edc-controlplane/edc-controlplane-memory-hashicorp-vault/build.gradle.kts +++ b/edc-controlplane/edc-controlplane-memory-hashicorp-vault/build.gradle.kts @@ -3,7 +3,7 @@ import com.bmuschko.gradle.docker.tasks.image.DockerBuildImage plugins { `java-library` id("application") - id("com.github.johnrengelman.shadow") version "8.0.0" + id("com.github.johnrengelman.shadow") version "8.1.1" } dependencies { diff --git a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/build.gradle.kts b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/build.gradle.kts index 7c8a46f54..637b63765 100644 --- a/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/build.gradle.kts +++ b/edc-controlplane/edc-controlplane-postgresql-hashicorp-vault/build.gradle.kts @@ -4,7 +4,7 @@ import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar plugins { `java-library` id("application") - id("com.github.johnrengelman.shadow") version "8.0.0" + id("com.github.johnrengelman.shadow") version "8.1.1" } dependencies { diff --git a/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts b/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts index b69e3d010..5888c34c4 100644 --- a/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts +++ b/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts @@ -4,7 +4,7 @@ import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar plugins { 
`java-library` id("application") - id("com.github.johnrengelman.shadow") version "8.0.0" + id("com.github.johnrengelman.shadow") version "8.1.1" } dependencies { diff --git a/edc-controlplane/edc-runtime-memory/build.gradle.kts b/edc-controlplane/edc-runtime-memory/build.gradle.kts index 304584058..372ec486d 100644 --- a/edc-controlplane/edc-runtime-memory/build.gradle.kts +++ b/edc-controlplane/edc-runtime-memory/build.gradle.kts @@ -1,7 +1,7 @@ plugins { `java-library` id("application") - id("com.github.johnrengelman.shadow") version "8.0.0" + id("com.github.johnrengelman.shadow") version "8.1.1" } dependencies { diff --git a/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts b/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts index e360497a1..02d29b7db 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts +++ b/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts @@ -2,7 +2,7 @@ plugins { `java-library` id("application") - id("com.github.johnrengelman.shadow") version "8.0.0" + id("com.github.johnrengelman.shadow") version "8.1.1" } dependencies { diff --git a/edc-dataplane/edc-dataplane-hashicorp-vault/build.gradle.kts b/edc-dataplane/edc-dataplane-hashicorp-vault/build.gradle.kts index 7f53f8c5c..0ae98dd2d 100644 --- a/edc-dataplane/edc-dataplane-hashicorp-vault/build.gradle.kts +++ b/edc-dataplane/edc-dataplane-hashicorp-vault/build.gradle.kts @@ -2,7 +2,7 @@ plugins { `java-library` id("application") - id("com.github.johnrengelman.shadow") version "8.0.0" + id("com.github.johnrengelman.shadow") version "8.1.1" } dependencies { diff --git a/edc-tests/runtime/build.gradle.kts b/edc-tests/runtime/build.gradle.kts index 6f8a370af..0123162fb 100644 --- a/edc-tests/runtime/build.gradle.kts +++ b/edc-tests/runtime/build.gradle.kts @@ -15,7 +15,7 @@ plugins { `java-library` id("application") - id("com.github.johnrengelman.shadow") version "8.0.0" + id("com.github.johnrengelman.shadow") version "8.1.1" } From c0aa1d4a24b28a4ee6c9e0156da35029ae10cd08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Apr 2023 06:21:18 +0000 Subject: [PATCH 84/92] chore(deps): bump io.freefair.lombok from 6.6.2 to 8.0.1 (#238) --- build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle.kts b/build.gradle.kts index b494219b2..1f545aea5 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -5,7 +5,7 @@ plugins { `java-library` `maven-publish` `jacoco-report-aggregation` - id("io.freefair.lombok") version "6.6.2" + id("io.freefair.lombok") version "8.0.1" id("com.diffplug.spotless") version "6.18.0" id("com.github.johnrengelman.shadow") version "8.1.1" id("com.bmuschko.docker-remote-api") version "9.3.1" From 0b9c11cfe595793a3468afe36a3327beaf333c0f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Apr 2023 05:08:43 +0000 Subject: [PATCH 85/92] chore(deps): bump org.flywaydb:flyway-core from 9.15.2 to 9.16.3 (#242) --- edc-extensions/postgresql-migration/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-extensions/postgresql-migration/build.gradle.kts b/edc-extensions/postgresql-migration/build.gradle.kts index 8d7b1fa05..cb04877c0 100644 --- a/edc-extensions/postgresql-migration/build.gradle.kts +++ b/edc-extensions/postgresql-migration/build.gradle.kts @@ -11,5 +11,5 @@ dependencies { implementation(edc.sql.assetindex) implementation(edc.sql.core) - 
implementation("org.flywaydb:flyway-core:9.15.2") + implementation("org.flywaydb:flyway-core:9.16.3") } From d268bf029d61c84b9643f0dd3a75514115f03fb9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Apr 2023 07:08:57 +0200 Subject: [PATCH 86/92] chore(deps): bump com.google.code.gson:gson from 2.10 to 2.10.1 (#243) Bumps [com.google.code.gson:gson](https://github.com/google/gson) from 2.10 to 2.10.1. - [Release notes](https://github.com/google/gson/releases) - [Changelog](https://github.com/google/gson/blob/master/CHANGELOG.md) - [Commits](https://github.com/google/gson/compare/gson-parent-2.10...gson-parent-2.10.1) --- updated-dependencies: - dependency-name: com.google.code.gson:gson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- edc-tests/cucumber/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edc-tests/cucumber/build.gradle.kts b/edc-tests/cucumber/build.gradle.kts index f2e40439d..2628ce71e 100644 --- a/edc-tests/cucumber/build.gradle.kts +++ b/edc-tests/cucumber/build.gradle.kts @@ -16,7 +16,7 @@ dependencies { implementation(project(":edc-extensions:transferprocess-sftp-provisioner")) - testImplementation("com.google.code.gson:gson:2.10") + testImplementation("com.google.code.gson:gson:2.10.1") testImplementation("org.apache.httpcomponents:httpclient:4.5.14") testImplementation("org.junit.platform:junit-platform-suite:1.9.2") testImplementation("io.cucumber:cucumber-java:7.11.2") From f30e78a78120068f65791902693df0331c91bc0c Mon Sep 17 00:00:00 2001 From: Garrett Smith <42892027+gcs14@users.noreply.github.com> Date: Wed, 19 Apr 2023 00:22:28 -0500 Subject: [PATCH 87/92] refactor: update GitHub output command to current version (#233) * refactor GitHub output command to current version * Remove curly braces from output statement --- .github/workflows/build.yaml | 8 ++++---- .github/workflows/draft-new-release.yaml | 2 +- .github/workflows/helm-lint.yaml | 2 +- .github/workflows/publish-new-release.yml | 2 +- .github/workflows/trivy.yml | 2 +- .github/workflows/veracode.yaml | 4 ++-- .github/workflows/verify.yaml | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 2c2dda9c2..a701ae362 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -60,10 +60,10 @@ jobs: - name: Check whether secrets exist id: secret-presence run: | - [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "::set-output name=SONAR_TOKEN::true" - [ ! -z "${{ secrets.GPG_PRIVATE_KEY }}" ] && echo "::set-output name=GPG_PRIVATE_KEY::true" - [ ! -z "${{ secrets.GPG_PASSPHRASE }}" ] && echo "::set-output name=GPG_PASSPHRASE::true" - [ ! -z "${{ secrets.DOCKER_HUB_TOKEN }}" ] && echo "::set-output name=DOCKER_HUB_TOKEN::true" + [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "SONAR_TOKEN=true" >> $GITHUB_OUTPUT + [ ! -z "${{ secrets.GPG_PRIVATE_KEY }}" ] && echo "GPG_PRIVATE_KEY=true" >> $GITHUB_OUTPUT + [ ! -z "${{ secrets.GPG_PASSPHRASE }}" ] && echo "GPG_PASSPHRASE=true" >> $GITHUB_OUTPUT + [ ! 
-z "${{ secrets.DOCKER_HUB_TOKEN }}" ] && echo "DOCKER_HUB_TOKEN=true" >> $GITHUB_OUTPUT exit 0 build-extensions: diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index 248f61bc4..22222ba8a 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -68,7 +68,7 @@ jobs: git add CHANGELOG.md gradle.properties $(find charts -name Chart.yaml) $(find charts -name README.md) git commit --message "Prepare release ${{ github.event.inputs.version }}" - echo "::set-output name=commit::$(git rev-parse HEAD)" + echo "commit=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT - name: Push new branch run: git push origin release/${{ github.event.inputs.version }} diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml index ae94c84a7..7640d0a38 100644 --- a/.github/workflows/helm-lint.yaml +++ b/.github/workflows/helm-lint.yaml @@ -52,7 +52,7 @@ jobs: run: | changed=$(ct list-changed --config ct.yaml --target-branch main) if [[ -n "$changed" ]]; then - echo "::set-output name=changed::true" + echo "changed=true" >> $GITHUB_OUTPUT fi - name: chart-testing (lint) diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index 373c892e7..06ee58b61 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -37,7 +37,7 @@ jobs: name: Output release version id: release-version run: | - echo "::set-output name=RELEASE_VERSION::${{ env.RELEASE_VERSION }}" + echo "RELEASE_VERSION=${{ env.RELEASE_VERSION }}" >> $GITHUB_OUTPUT # Release: Maven Artifacts maven-release: diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index c315e8a07..819fec089 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -27,7 +27,7 @@ jobs: - name: Resolve git 7-chars sha id: git-sha7 run: | - echo "::set-output name=SHA7::${GITHUB_SHA::7}" + echo "SHA7=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT trivy-analyze-config: runs-on: ubuntu-latest diff --git a/.github/workflows/veracode.yaml b/.github/workflows/veracode.yaml index bba9df1b5..c3e7cb7a1 100644 --- a/.github/workflows/veracode.yaml +++ b/.github/workflows/veracode.yaml @@ -16,8 +16,8 @@ jobs: - name: Check whether secrets exist id: secret-presence run: | - [ ! -z "${{ secrets.ORG_VERACODE_API_ID }}" ] && echo "::set-output name=ORG_VERACODE_API_ID::true" - [ ! -z "${{ secrets.ORG_VERACODE_API_KEY }}" ] && echo "::set-output name=ORG_VERACODE_API_KEY::true" + [ ! -z "${{ secrets.ORG_VERACODE_API_ID }}" ] && echo "ORG_VERACODE_API_ID=true" >> $GITHUB_OUTPUT + [ ! -z "${{ secrets.ORG_VERACODE_API_KEY }}" ] && echo "ORG_VERACODE_API_KEY=true" >> $GITHUB_OUTPUT exit 0 verify-formatting: diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index d9dda3844..a16da4681 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -52,7 +52,7 @@ jobs: - name: Check whether secrets exist id: secret-presence run: | - [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "::set-output name=SONAR_TOKEN::true" + [ ! 
-z "${{ secrets.SONAR_TOKEN }}" ] && echo "SONAR_TOKEN=true" >> $GITHUB_OUTPUT exit 0 verify-formatting: From 31bfed751c10b8df5063ed7b06720c2f1cb0bf87 Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger <43503240+paullatzelsperger@users.noreply.github.com> Date: Wed, 19 Apr 2023 08:57:06 +0200 Subject: [PATCH 88/92] fix: only run trivy when docker images were actually built (#240) * fix: run trivy only if image exists * update checks --- .github/workflows/trivy.yml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 819fec089..67ab0fb12 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -74,8 +74,17 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3.3.0 + + ## This step will fail if the docker images is not found + - name: "Check if image exists" + id: imageCheck + run: | + docker manifest inspect tractusx/${{ matrix.image }}:sha-${{ needs.git-sha7.outputs.value }} + continue-on-error: true + + ## the next two steps will only execute if the image exists check was successful - name: Run Trivy vulnerability scanner - if: always() + if: success() && steps.imageCheck.outcome != 'failure' uses: aquasecurity/trivy-action@master with: image-ref: "tractusx/${{ matrix.image }}:sha-${{ needs.git-sha7.outputs.value }}" @@ -85,7 +94,7 @@ jobs: severity: "CRITICAL,HIGH" timeout: "10m0s" - name: Upload Trivy scan results to GitHub Security tab - if: always() + if: success() && steps.imageCheck.outcome != 'failure' uses: github/codeql-action/upload-sarif@v2 with: sarif_file: "trivy-results-${{ matrix.image }}.sarif" From 6521dc37230c2fbc2a347f60feca481d443ee610 Mon Sep 17 00:00:00 2001 From: "Tuncay Tunc (ZF Friedrichshafen AG)" <100704677+tuncaytunc-zf@users.noreply.github.com> Date: Wed, 19 Apr 2023 13:02:21 +0200 Subject: [PATCH 89/92] refactor: Extract the setup-java action into a re-usable action (#246) * Extract the checkout and setup-java action into a re-usable action * Commit actions. 
* fix action * remove checkout extraction --- .../actions/publish-docker-image/action.yml | 10 +-- .../actions/run-deployment-test/action.yml | 10 +-- .github/actions/setup-java/action.yml | 32 ++++++++++ .github/workflows/build.yaml | 26 ++------ .github/workflows/business-tests.yaml | 8 +-- .github/workflows/deployment-test.yaml | 3 +- .github/workflows/draft-new-release.yaml | 7 +-- .github/workflows/helm-chart-release.yaml | 3 +- .github/workflows/helm-lint.yaml | 1 - .github/workflows/publish-docker.yaml | 6 +- .github/workflows/publish-new-release.yml | 17 +---- .github/workflows/trivy.yml | 6 +- .github/workflows/veracode.yaml | 30 ++------- .github/workflows/verify.yaml | 62 ++++--------------- 14 files changed, 71 insertions(+), 150 deletions(-) create mode 100644 .github/actions/setup-java/action.yml diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index 206e13d4c..2f8a8c522 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -41,8 +41,7 @@ inputs: runs: using: "composite" steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 ##################### # Login to DockerHub @@ -56,12 +55,7 @@ runs: ##################### # Build JAR file ##################### - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Build Controlplane shell: bash run: |- diff --git a/.github/actions/run-deployment-test/action.yml b/.github/actions/run-deployment-test/action.yml index ed720b4be..9f4b40d58 100644 --- a/.github/actions/run-deployment-test/action.yml +++ b/.github/actions/run-deployment-test/action.yml @@ -42,8 +42,7 @@ inputs: runs: using: "composite" steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - name: Cache ContainerD Image Layers uses: actions/cache@v3 @@ -51,12 +50,7 @@ runs: path: /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs key: ${{ runner.os }}-io.containerd.snapshotter.v1.overlayfs - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '11' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Build docker images shell: bash diff --git a/.github/actions/setup-java/action.yml b/.github/actions/setup-java/action.yml new file mode 100644 index 000000000..ed03fafb3 --- /dev/null +++ b/.github/actions/setup-java/action.yml @@ -0,0 +1,32 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +--- +name: "Setup JDK 17" +description: "Setup JDK 17" +runs: + using: "composite" + steps: + - name: Setup JDK 17 + uses: actions/setup-java@v3.11.0 + with: + java-version: '17' + distribution: 'temurin' + cache: 'gradle' \ No newline at end of file diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index a701ae362..0713b0857 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -71,14 +71,8 @@ jobs: needs: [ secret-presence ] steps: # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: actions/checkout@v3.3.0 + - uses: ./.github/actions/setup-java # Build - name: Build Extensions run: |- @@ -104,8 +98,7 @@ jobs: permissions: contents: write steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 - uses: ./.github/actions/publish-docker-image with: rootDir: edc-controlplane/${{ matrix.name }} @@ -128,8 +121,7 @@ jobs: permissions: contents: write steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 - uses: ./.github/actions/publish-docker-image with: rootDir: edc-dataplane/${{ matrix.name }} @@ -149,15 +141,9 @@ jobs: needs.secret-presence.outputs.GPG_PASSPHRASE && needs.secret-presence.outputs.GPG_PRIVATE_KEY && github.event_name != 'pull_request' && github.ref != 'refs/heads/releases' steps: # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Import GPG Key uses: crazy-max/ghaction-import-gpg@v5 with: diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index 39caaadb1..a4726a443 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -50,15 +50,9 @@ jobs: ### Set-Up ### ############## - - name: Checkout uses: actions/checkout@v3.3.0 - - name: Set-Up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + uses: ./.github/actions/setup-java - name: Cache ContainerD Image Layers uses: actions/cache@v3 diff --git a/.github/workflows/deployment-test.yaml b/.github/workflows/deployment-test.yaml index 7d38b24ac..8e75ae31e 100644 --- a/.github/workflows/deployment-test.yaml +++ b/.github/workflows/deployment-test.yaml @@ -47,8 +47,7 @@ jobs: deployment-test-memory: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - uses: ./.github/actions/run-deployment-test name: "Run deployment test using KinD and Helm" with: diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index 22222ba8a..98e3c956a 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -33,12 +33,7 @@ jobs: git config user.name "GitHub actions" git config user.email noreply@github.com - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + uses: ./.github/actions/setup-java - name: Bump version in gradle.properties run: |- diff --git a/.github/workflows/helm-chart-release.yaml b/.github/workflows/helm-chart-release.yaml index 
bd5e55302..f19c841b9 100644 --- a/.github/workflows/helm-chart-release.yaml +++ b/.github/workflows/helm-chart-release.yaml @@ -38,8 +38,7 @@ jobs: steps: # fetch-depth: 0 is required to determine differences in chart(s) - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 with: fetch-depth: 0 diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml index 7640d0a38..0b5a70f1f 100644 --- a/.github/workflows/helm-lint.yaml +++ b/.github/workflows/helm-lint.yaml @@ -26,7 +26,6 @@ jobs: ### Set-Up ### ############## - - name: Checkout uses: actions/checkout@v3.3.0 with: fetch-depth: 0 diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index 794d15061..24aaf2ff4 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -50,8 +50,7 @@ jobs: contents: write packages: write steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 - uses: ./.github/actions/publish-docker-image with: rootDir: edc-controlplane/${{ matrix.name }} @@ -74,8 +73,7 @@ jobs: contents: write packages: write steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 - uses: ./.github/actions/publish-docker-image with: rootDir: edc-dataplane/${{ matrix.name }} diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index 06ee58b61..b7de21257 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -54,15 +54,9 @@ jobs: run: | echo "RELEASE_VERSION=${{ needs.release-version.outputs.RELEASE_VERSION }}" >> $GITHUB_ENV - - name: Checkout uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + uses: ./.github/actions/setup-java - name: Import GPG Key uses: crazy-max/ghaction-import-gpg@v5 @@ -96,7 +90,6 @@ jobs: run: | echo "RELEASE_VERSION=${{ needs.release-version.outputs.RELEASE_VERSION }}" >> $GITHUB_ENV - - name: Checkout uses: actions/checkout@v3.3.0 with: fetch-depth: 0 @@ -144,7 +137,6 @@ jobs: run: | echo "RELEASE_VERSION=${{ needs.release-version.outputs.RELEASE_VERSION }}" >> $GITHUB_ENV - - name: Checkout uses: actions/checkout@v3.3.0 with: # 0 to fetch the full history due to upcoming merge of releases into main branch @@ -177,12 +169,7 @@ jobs: draft: false prerelease: false - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + uses: ./.github/actions/setup-java - name: Merge releases back into main and set new snapshot version if: github.event.pull_request.base.ref == 'releases' diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 67ab0fb12..2fe44c399 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -36,8 +36,7 @@ jobs: contents: read security-events: write steps: - - name: Checkout repository - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - name: Run Trivy vulnerability scanner in repo mode uses: aquasecurity/trivy-action@master with: @@ -72,8 +71,7 @@ jobs: - edc-dataplane-azure-vault - edc-dataplane-hashicorp-vault steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 ## This step will fail if the docker images is not found - name: "Check if image exists" diff --git a/.github/workflows/veracode.yaml b/.github/workflows/veracode.yaml index 
c3e7cb7a1..486c53096 100644 --- a/.github/workflows/veracode.yaml +++ b/.github/workflows/veracode.yaml @@ -23,16 +23,10 @@ jobs: verify-formatting: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 with: fetch-depth: 0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Verify proper formatting run: ./gradlew spotlessCheck @@ -51,14 +45,8 @@ jobs: - edc-controlplane-postgresql-hashicorp-vault steps: # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: actions/checkout@v3.3.0 + - uses: ./.github/actions/setup-java # Build - name: Build Controlplane run: |- @@ -95,14 +83,8 @@ jobs: - edc-dataplane-hashicorp-vault steps: # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: actions/checkout@v3.3.0 + - uses: ./.github/actions/setup-java # Build - name: Build Dataplane run: |- diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index a16da4681..2cd0432f8 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -58,15 +58,9 @@ jobs: verify-formatting: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Verify proper formatting run: ./gradlew spotlessCheck @@ -78,7 +72,7 @@ jobs: markdown-lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 - name: Install mardkdownlint run: npm install -g markdownlint-cli2 @@ -91,15 +85,9 @@ jobs: runs-on: ubuntu-latest needs: [ verify-formatting ] steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Run Unit tests run: ./gradlew test @@ -108,15 +96,9 @@ jobs: runs-on: ubuntu-latest needs: [ verify-formatting ] steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Run Integration tests run: ./gradlew test -DincludeTags="ComponentTest" @@ -125,15 +107,9 @@ jobs: runs-on: ubuntu-latest needs: [ verify-formatting ] steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Run API tests run: ./gradlew test -DincludeTags="ApiTest" @@ -142,15 +118,9 @@ jobs: runs-on: ubuntu-latest needs: [ verify-formatting ] steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 
'gradle' + - uses: ./.github/actions/setup-java - name: Run E2E tests run: ./gradlew :edc-tests:runtime:build test -DincludeTags="EndToEndTest" @@ -162,16 +132,10 @@ jobs: runs-on: ubuntu-latest steps: # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 with: fetch-depth: 0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Cache SonarCloud packages uses: actions/cache@v3 with: From 04985e0476515561df7b61412fc99c1422c6cccf Mon Sep 17 00:00:00 2001 From: Enrico Risa Date: Wed, 19 Apr 2023 14:39:29 +0200 Subject: [PATCH 90/92] feat(BusinessPartnerValidation): adds logging if it's enabled on contract agreement validation (#245) * feat(BusinessPartnerValidation): adds logging if it's enabled on contract agreement validation * feat(BusinessPartnerValidation): adds logging on tests * feat(BusinessPartnerValidation): enabled by default on charts config * pr remarks --- .github/workflows/business-tests.yaml | 2 + .../templates/deployment-runtime.yaml | 6 + charts/tractusx-connector-memory/values.yaml | 3 + .../templates/deployment-controlplane.yaml | 6 + charts/tractusx-connector/values.yaml | 3 + .../build.gradle.kts | 2 +- .../BusinessPartnerValidationExtension.java | 126 +++++---- .../AbstractBusinessPartnerValidation.java | 231 +++++++++-------- .../BusinessPartnerDutyFunction.java | 20 +- .../BusinessPartnerPermissionFunction.java | 22 +- .../BusinessPartnerProhibitionFunction.java | 22 +- ...usinessPartnerValidationExtensionTest.java | 23 ++ ...AbstractBusinessPartnerValidationTest.java | 239 ++++++++++-------- .../edc/lifecycle/MultiRuntimeTest.java | 2 + .../tractusx/edc/lifecycle/Participant.java | 4 + .../tests/HttpConsumerPullWithProxyTest.java | 6 +- settings.gradle.kts | 36 +-- 17 files changed, 439 insertions(+), 314 deletions(-) diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index a4726a443..cbf0f3767 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -179,6 +179,7 @@ jobs: --set dataplane.image.repository=docker.io/library/edc-dataplane-hashicorp-vault \ --set controlplane.debug.enabled=true \ --set controlplane.suspendOnStart=false \ + --set controlplane.businesspartnervalidation.log.agreement.validation=true \ --set postgresql.enabled=true \ --set postgresql.username=user \ --set postgresql.password=password \ @@ -212,6 +213,7 @@ jobs: --set dataplane.image.repository=docker.io/library/edc-dataplane-hashicorp-vault \ --set controlplane.debug.enabled=true \ --set controlplane.suspendOnStart=false \ + --set controlplane.businesspartnervalidation.log.agreement.validation=true \ --set postgresql.enabled=true \ --set postgresql.username=user \ --set postgresql.password=password \ diff --git a/charts/tractusx-connector-memory/templates/deployment-runtime.yaml b/charts/tractusx-connector-memory/templates/deployment-runtime.yaml index 04386678c..3e7bd89e3 100644 --- a/charts/tractusx-connector-memory/templates/deployment-runtime.yaml +++ b/charts/tractusx-connector-memory/templates/deployment-runtime.yaml @@ -253,6 +253,12 @@ spec: value: "0" - name: "EDC_CP_ADAPTER_REUSE_CONTRACT_AGREEMENT" value: "0" + + ########################### + ## BUSINESS PARTNER NUMBER VALIDATION EXTENSION ## + ########################### + - name: "TRACTUSX_BUSINESSPARTNERVALIDATION_LOG_AGREEMENT_VALIDATION" + value: {{ 
.Values.runtime.businessPartnerValidation.log.agreementValidation | quote }} ###################################### ## Additional environment variables ## diff --git a/charts/tractusx-connector-memory/values.yaml b/charts/tractusx-connector-memory/values.yaml index 66fa1b7fe..83ce92818 100644 --- a/charts/tractusx-connector-memory/values.yaml +++ b/charts/tractusx-connector-memory/values.yaml @@ -121,6 +121,9 @@ runtime: public: port: 8086 path: /api/public + businessPartnerValidation: + log: + agreementValidation: true service: # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. type: ClusterIP diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 6eded494c..daab957e4 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -331,6 +331,12 @@ spec: - name: "EDC_CP_ADAPTER_REUSE_CONTRACT_AGREEMENT" value: "0" + ########################### + ## BUSINESS PARTNER NUMBER VALIDATION EXTENSION ## + ########################### + - name: "TRACTUSX_BUSINESSPARTNERVALIDATION_LOG_AGREEMENT_VALIDATION" + value: {{ .Values.controlplane.businessPartnerValidation.log.agreementValidation | quote }} + ###################################### ## Additional environment variables ## ###################################### diff --git a/charts/tractusx-connector/values.yaml b/charts/tractusx-connector/values.yaml index aebd45481..21acfc20b 100644 --- a/charts/tractusx-connector/values.yaml +++ b/charts/tractusx-connector/values.yaml @@ -122,6 +122,9 @@ controlplane: path: /observability # -- allow or disallow insecure access, i.e. access without authentication insecure: true + businessPartnerValidation: + log: + agreementValidation: true service: # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. 
type: ClusterIP diff --git a/edc-extensions/business-partner-validation/build.gradle.kts b/edc-extensions/business-partner-validation/build.gradle.kts index 53cb11e31..198886d9a 100644 --- a/edc-extensions/business-partner-validation/build.gradle.kts +++ b/edc-extensions/business-partner-validation/build.gradle.kts @@ -1,4 +1,3 @@ - plugins { `java-library` `maven-publish` @@ -7,5 +6,6 @@ plugins { dependencies { api(edc.spi.core) implementation(edc.spi.policy) + implementation(edc.spi.contract) implementation(edc.spi.policyengine) } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtension.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtension.java index ee076406f..d88293a72 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtension.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtension.java @@ -26,6 +26,7 @@ import org.eclipse.edc.policy.model.Permission; import org.eclipse.edc.policy.model.Prohibition; import org.eclipse.edc.runtime.metamodel.annotation.Inject; +import org.eclipse.edc.runtime.metamodel.annotation.Setting; import org.eclipse.edc.spi.monitor.Monitor; import org.eclipse.edc.spi.system.ServiceExtension; import org.eclipse.edc.spi.system.ServiceExtensionContext; @@ -37,60 +38,73 @@ public class BusinessPartnerValidationExtension implements ServiceExtension { - /** - * The key for business partner numbers constraints. Must be used as left operand when declaring - * constraints. - * - *

<p>Example: - * - * <pre>

-   * {
-   *     "constraint": {
-   *         "leftOperand": "BusinessPartnerNumber",
-   *         "operator": "EQ",
-   *         "rightOperand": "BPNLCDQ90000X42KU"
-   *     }
-   * }
-   * </pre>
- */ - public static final String BUSINESS_PARTNER_CONSTRAINT_KEY = "BusinessPartnerNumber"; - - public BusinessPartnerValidationExtension() {} - - public BusinessPartnerValidationExtension( - final RuleBindingRegistry ruleBindingRegistry, final PolicyEngine policyEngine) { - this.ruleBindingRegistry = ruleBindingRegistry; - this.policyEngine = policyEngine; - } - - @Inject private RuleBindingRegistry ruleBindingRegistry; - - @Inject private PolicyEngine policyEngine; - - @Override - public String name() { - return "Business Partner Validation Extension"; - } - - @Override - public void initialize(ServiceExtensionContext context) { - - final Monitor monitor = context.getMonitor(); - - final BusinessPartnerDutyFunction dutyFunction = new BusinessPartnerDutyFunction(monitor); - final BusinessPartnerPermissionFunction permissionFunction = - new BusinessPartnerPermissionFunction(monitor); - final BusinessPartnerProhibitionFunction prohibitionFunction = - new BusinessPartnerProhibitionFunction(monitor); - - ruleBindingRegistry.bind("USE", ALL_SCOPES); - ruleBindingRegistry.bind(BUSINESS_PARTNER_CONSTRAINT_KEY, ALL_SCOPES); - - policyEngine.registerFunction( - ALL_SCOPES, Duty.class, BUSINESS_PARTNER_CONSTRAINT_KEY, dutyFunction); - policyEngine.registerFunction( - ALL_SCOPES, Permission.class, BUSINESS_PARTNER_CONSTRAINT_KEY, permissionFunction); - policyEngine.registerFunction( - ALL_SCOPES, Prohibition.class, BUSINESS_PARTNER_CONSTRAINT_KEY, prohibitionFunction); - } + /** + * The key for business partner numbers constraints. Must be used as left operand when declaring + * constraints. + * + *

<p>Example: + * + * <pre>

+     * {
+     *     "constraint": {
+     *         "leftOperand": "BusinessPartnerNumber",
+     *         "operator": "EQ",
+     *         "rightOperand": "BPNLCDQ90000X42KU"
+     *     }
+     * }
+     * </pre>
+ */ + public static final String BUSINESS_PARTNER_CONSTRAINT_KEY = "BusinessPartnerNumber"; + + public static final String DEFAULT_LOG_AGREEMENT_EVALUATION = "true"; + + + @Setting(value = "Enable logging when evaluating the business partner constraints in the agreement validation", type = "boolean", defaultValue = DEFAULT_LOG_AGREEMENT_EVALUATION) + public static final String BUSINESS_PARTNER_VALIDATION_LOG_AGREEMENT_VALIDATION = "tractusx.businesspartnervalidation.log.agreement.validation"; + @Inject + private RuleBindingRegistry ruleBindingRegistry; + @Inject + private PolicyEngine policyEngine; + + public BusinessPartnerValidationExtension() { + } + + public BusinessPartnerValidationExtension( + final RuleBindingRegistry ruleBindingRegistry, final PolicyEngine policyEngine) { + this.ruleBindingRegistry = ruleBindingRegistry; + this.policyEngine = policyEngine; + } + + @Override + public String name() { + return "Business Partner Validation Extension"; + } + + @Override + public void initialize(ServiceExtensionContext context) { + + final Monitor monitor = context.getMonitor(); + + var logAgreementEvaluation = logAgreementEvaluationSetting(context); + + final BusinessPartnerDutyFunction dutyFunction = new BusinessPartnerDutyFunction(monitor, logAgreementEvaluation); + final BusinessPartnerPermissionFunction permissionFunction = + new BusinessPartnerPermissionFunction(monitor, logAgreementEvaluation); + final BusinessPartnerProhibitionFunction prohibitionFunction = + new BusinessPartnerProhibitionFunction(monitor, logAgreementEvaluation); + + ruleBindingRegistry.bind("USE", ALL_SCOPES); + ruleBindingRegistry.bind(BUSINESS_PARTNER_CONSTRAINT_KEY, ALL_SCOPES); + + policyEngine.registerFunction( + ALL_SCOPES, Duty.class, BUSINESS_PARTNER_CONSTRAINT_KEY, dutyFunction); + policyEngine.registerFunction( + ALL_SCOPES, Permission.class, BUSINESS_PARTNER_CONSTRAINT_KEY, permissionFunction); + policyEngine.registerFunction( + ALL_SCOPES, Prohibition.class, BUSINESS_PARTNER_CONSTRAINT_KEY, prohibitionFunction); + } + + private Boolean logAgreementEvaluationSetting(ServiceExtensionContext context) { + return Boolean.parseBoolean(context.getSetting(BUSINESS_PARTNER_VALIDATION_LOG_AGREEMENT_VALIDATION, DEFAULT_LOG_AGREEMENT_EVALUATION)); + } } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidation.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidation.java index 55cb0d52b..ecb5b81ef 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidation.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidation.java @@ -20,132 +20,147 @@ package org.eclipse.tractusx.edc.validation.businesspartner.functions; -import java.util.Map; -import java.util.Objects; +import org.eclipse.edc.connector.contract.spi.types.agreement.ContractAgreement; import org.eclipse.edc.policy.engine.spi.PolicyContext; import org.eclipse.edc.policy.model.Operator; import org.eclipse.edc.spi.agent.ParticipantAgent; import org.eclipse.edc.spi.monitor.Monitor; +import java.util.Map; +import java.util.Objects; + +import static java.lang.String.format; + /** * Abstract class for BusinessPartnerNumber validation. 
This class may be inherited from the EDC * policy enforcing functions for duties, permissions and prohibitions. */ public abstract class AbstractBusinessPartnerValidation { - // Developer Note: - // Problems reported to the policy context are not logged. Therefore, everything - // that is reported to the policy context should be logged, too. - - private static final String FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING = - "Failing evaluation because of invalid BusinessPartnerNumber constraint. For operator 'EQ' right value must be of type 'String'. Unsupported type: '%s'"; - private static final String FAIL_EVALUATION_BECAUSE_UNSUPPORTED_OPERATOR = - "Failing evaluation because of invalid BusinessPartnerNumber constraint. As operator only 'EQ' is supported. Unsupported operator: '%s'"; - - private final Monitor monitor; - - protected AbstractBusinessPartnerValidation(Monitor monitor) { - this.monitor = Objects.requireNonNull(monitor); - } - - /** - * Name of the claim that contains the Business Partner Number. - * - *
<p>
Please note: At the time of writing (April 2022) the business partner - * number is part of the 'referringConnector' claim in the IDS DAT token. This will probably - * change for the next release. - */ - private static final String REFERRING_CONNECTOR_CLAIM = "referringConnector"; - - /** - * Evaluation funtion to decide whether a claim belongs to a specific business partner. - * - * @param operator operator of the constraint - * @param rightValue right value fo the constraint, that contains the business partner number - * (e.g. BPNLCDQ90000X42KU) - * @param policyContext context of the policy with claims - * @return true if claims are from the constrained business partner - */ - protected boolean evaluate( - final Operator operator, final Object rightValue, final PolicyContext policyContext) { - - if (policyContext.hasProblems() && !policyContext.getProblems().isEmpty()) { - String problems = String.join(", ", policyContext.getProblems()); - String message = - String.format( - "BusinessPartnerNumberValidation: Rejecting PolicyContext with problems. Problems: %s", - problems); - monitor.debug(message); - return false; + // Developer Note: + // Problems reported to the policy context are not logged. Therefore, everything + // that is reported to the policy context should be logged, too. + + private static final String FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING = + "Failing evaluation because of invalid BusinessPartnerNumber constraint. For operator 'EQ' right value must be of type 'String'. Unsupported type: '%s'"; + private static final String FAIL_EVALUATION_BECAUSE_UNSUPPORTED_OPERATOR = + "Failing evaluation because of invalid BusinessPartnerNumber constraint. As operator only 'EQ' is supported. Unsupported operator: '%s'"; + /** + * Name of the claim that contains the Business Partner Number. + * + *
<p>
Please note: At the time of writing (April 2022) the business partner + * number is part of the 'referringConnector' claim in the IDS DAT token. This will probably + * change for the next release. + */ + private static final String REFERRING_CONNECTOR_CLAIM = "referringConnector"; + private final Monitor monitor; + private final boolean logAgreementEvaluation; + + protected AbstractBusinessPartnerValidation(Monitor monitor, boolean logAgreementEvaluation) { + this.monitor = Objects.requireNonNull(monitor); + this.logAgreementEvaluation = logAgreementEvaluation; } - final ParticipantAgent participantAgent = policyContext.getParticipantAgent(); - final Map claims = participantAgent.getClaims(); - - if (!claims.containsKey(REFERRING_CONNECTOR_CLAIM)) { - return false; + /** + * At the time of writing (11. April 2022) the business partner number is part of the + * 'referringConnector' claim, which contains a connector URL. As the CX projects are not further + * aligned about the URL formatting, the enforcement can only be done by checking whether the URL + * _contains_ the number. As this introduces some insecurities when validation business partner + * numbers, this should be addresses in the long term. + * + * @param referringConnectorClaim describing URL with business partner number + * @param businessPartnerNumber of the constraint + * @return true if claim contains the business partner number + */ + private static boolean isCorrectBusinessPartner( + String referringConnectorClaim, String businessPartnerNumber) { + return referringConnectorClaim.contains(businessPartnerNumber); } - Object referringConnectorClaimObject = claims.get(REFERRING_CONNECTOR_CLAIM); - String referringConnectorClaim = null; - - if (referringConnectorClaimObject instanceof String) { - referringConnectorClaim = (String) referringConnectorClaimObject; + public boolean isLogAgreementEvaluation() { + return logAgreementEvaluation; } - if (referringConnectorClaim == null || referringConnectorClaim.isEmpty()) { - return false; + /** + * Evaluation funtion to decide whether a claim belongs to a specific business partner. + * + * @param operator operator of the constraint + * @param rightValue right value fo the constraint, that contains the business partner number + * (e.g. BPNLCDQ90000X42KU) + * @param policyContext context of the policy with claims + * @return true if claims are from the constrained business partner + */ + protected boolean evaluate( + final Operator operator, final Object rightValue, final PolicyContext policyContext) { + + if (policyContext.hasProblems() && !policyContext.getProblems().isEmpty()) { + String problems = String.join(", ", policyContext.getProblems()); + String message = + format( + "BusinessPartnerNumberValidation: Rejecting PolicyContext with problems. 
Problems: %s", + problems); + monitor.debug(message); + return false; + } + + final ParticipantAgent participantAgent = policyContext.getParticipantAgent(); + final Map claims = participantAgent.getClaims(); + + if (!claims.containsKey(REFERRING_CONNECTOR_CLAIM)) { + return false; + } + + Object referringConnectorClaimObject = claims.get(REFERRING_CONNECTOR_CLAIM); + String referringConnectorClaim = null; + + if (referringConnectorClaimObject instanceof String) { + referringConnectorClaim = (String) referringConnectorClaimObject; + } + + if (referringConnectorClaim == null || referringConnectorClaim.isEmpty()) { + return false; + } + + if (operator == Operator.EQ) { + return isBusinessPartnerNumber(referringConnectorClaim, rightValue, policyContext); + } else { + final String message = format(FAIL_EVALUATION_BECAUSE_UNSUPPORTED_OPERATOR, operator); + monitor.warning(message); + policyContext.reportProblem(message); + return false; + } } - if (operator == Operator.EQ) { - return isBusinessPartnerNumber(referringConnectorClaim, rightValue, policyContext); - } else { - final String message = String.format(FAIL_EVALUATION_BECAUSE_UNSUPPORTED_OPERATOR, operator); - monitor.warning(message); - policyContext.reportProblem(message); - return false; - } - } - - /** - * @param referringConnectorClaim of the participant - * @param businessPartnerNumber object - * @return true if object is string and successfully evaluated against the claim - */ - private boolean isBusinessPartnerNumber( - String referringConnectorClaim, Object businessPartnerNumber, PolicyContext policyContext) { - if (businessPartnerNumber == null) { - final String message = String.format(FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING, "null"); - monitor.warning(message); - policyContext.reportProblem(message); - return false; + /** + * @param referringConnectorClaim of the participant + * @param businessPartnerNumber object + * @return true if object is string and successfully evaluated against the claim + */ + private boolean isBusinessPartnerNumber( + String referringConnectorClaim, Object businessPartnerNumber, PolicyContext policyContext) { + if (businessPartnerNumber == null) { + final String message = format(FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING, "null"); + monitor.warning(message); + policyContext.reportProblem(message); + return false; + } + if (!(businessPartnerNumber instanceof String)) { + final String message = + format( + FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING, + businessPartnerNumber.getClass().getName()); + monitor.warning(message); + policyContext.reportProblem(message); + return false; + } + + var businessPartnerNumberStr = (String) businessPartnerNumber; + var agreement = policyContext.getContextData(ContractAgreement.class); + var isCorrectBusinessPartner = isCorrectBusinessPartner(referringConnectorClaim, businessPartnerNumberStr); + + if (agreement != null && logAgreementEvaluation) { + monitor.info(format("Evaluated policy access for referringConnectorClaim: %s and contract id: %s with result: %s", referringConnectorClaim, agreement.getId(), isCorrectBusinessPartner)); + } + return isCorrectBusinessPartner; } - if (!(businessPartnerNumber instanceof String)) { - final String message = - String.format( - FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING, - businessPartnerNumber.getClass().getName()); - monitor.warning(message); - policyContext.reportProblem(message); - return false; - } - - return isCorrectBusinessPartner(referringConnectorClaim, (String) businessPartnerNumber); - } - - /** - * At 
the time of writing (11. April 2022) the business partner number is part of the - * 'referringConnector' claim, which contains a connector URL. As the CX projects are not further - * aligned about the URL formatting, the enforcement can only be done by checking whether the URL - * _contains_ the number. As this introduces some insecurities when validation business partner - * numbers, this should be addresses in the long term. - * - * @param referringConnectorClaim describing URL with business partner number - * @param businessPartnerNumber of the constraint - * @return true if claim contains the business partner number - */ - private static boolean isCorrectBusinessPartner( - String referringConnectorClaim, String businessPartnerNumber) { - return referringConnectorClaim.contains(businessPartnerNumber); - } } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerDutyFunction.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerDutyFunction.java index f53ba3cbc..061d7fd7d 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerDutyFunction.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerDutyFunction.java @@ -26,16 +26,18 @@ import org.eclipse.edc.policy.model.Operator; import org.eclipse.edc.spi.monitor.Monitor; -/** AtomicConstraintFunction to validate business partner numbers for edc duties. */ +/** + * AtomicConstraintFunction to validate business partner numbers for edc duties. + */ public class BusinessPartnerDutyFunction extends AbstractBusinessPartnerValidation - implements AtomicConstraintFunction { + implements AtomicConstraintFunction { - public BusinessPartnerDutyFunction(Monitor monitor) { - super(monitor); - } + public BusinessPartnerDutyFunction(Monitor monitor, boolean shouldLogOnAgreementEvaluation) { + super(monitor, shouldLogOnAgreementEvaluation); + } - @Override - public boolean evaluate(Operator operator, Object rightValue, Duty rule, PolicyContext context) { - return evaluate(operator, rightValue, context); - } + @Override + public boolean evaluate(Operator operator, Object rightValue, Duty rule, PolicyContext context) { + return evaluate(operator, rightValue, context); + } } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerPermissionFunction.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerPermissionFunction.java index 07bda765e..b6713c477 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerPermissionFunction.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerPermissionFunction.java @@ -26,17 +26,19 @@ import org.eclipse.edc.policy.model.Permission; import org.eclipse.edc.spi.monitor.Monitor; -/** AtomicConstraintFunction to validate business partner numbers for edc permissions. */ +/** + * AtomicConstraintFunction to validate business partner numbers for edc permissions. 
+ */ public class BusinessPartnerPermissionFunction extends AbstractBusinessPartnerValidation - implements AtomicConstraintFunction { + implements AtomicConstraintFunction { - public BusinessPartnerPermissionFunction(Monitor monitor) { - super(monitor); - } + public BusinessPartnerPermissionFunction(Monitor monitor, boolean shouldLogOnAgreementEvaluation) { + super(monitor, shouldLogOnAgreementEvaluation); + } - @Override - public boolean evaluate( - Operator operator, Object rightValue, Permission rule, PolicyContext context) { - return evaluate(operator, rightValue, context); - } + @Override + public boolean evaluate( + Operator operator, Object rightValue, Permission rule, PolicyContext context) { + return evaluate(operator, rightValue, context); + } } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerProhibitionFunction.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerProhibitionFunction.java index f3cddf9fe..79e318741 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerProhibitionFunction.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerProhibitionFunction.java @@ -26,17 +26,19 @@ import org.eclipse.edc.policy.model.Prohibition; import org.eclipse.edc.spi.monitor.Monitor; -/** AtomicConstraintFunction to validate business partner numbers for edc prohibitions. */ +/** + * AtomicConstraintFunction to validate business partner numbers for edc prohibitions. + */ public class BusinessPartnerProhibitionFunction extends AbstractBusinessPartnerValidation - implements AtomicConstraintFunction { + implements AtomicConstraintFunction { - public BusinessPartnerProhibitionFunction(Monitor monitor) { - super(monitor); - } + public BusinessPartnerProhibitionFunction(Monitor monitor, boolean shouldLogOnAgreementEvaluation) { + super(monitor, shouldLogOnAgreementEvaluation); + } - @Override - public boolean evaluate( - Operator operator, Object rightValue, Prohibition rule, PolicyContext context) { - return evaluate(operator, rightValue, context); - } + @Override + public boolean evaluate( + Operator operator, Object rightValue, Prohibition rule, PolicyContext context) { + return evaluate(operator, rightValue, context); + } } diff --git a/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtensionTest.java b/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtensionTest.java index 0240dc9ef..dcea3be41 100644 --- a/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtensionTest.java +++ b/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtensionTest.java @@ -27,10 +27,13 @@ import org.eclipse.edc.policy.model.Prohibition; import org.eclipse.edc.spi.monitor.Monitor; import org.eclipse.edc.spi.system.ServiceExtensionContext; +import org.eclipse.tractusx.edc.validation.businesspartner.functions.BusinessPartnerPermissionFunction; import org.junit.jupiter.api.BeforeEach; import 
org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; @@ -105,4 +108,24 @@ void testRegisterProhibitionFunction() { eq(BusinessPartnerValidationExtension.BUSINESS_PARTNER_CONSTRAINT_KEY), any()); } + + @Test + void testLogConfiguration() { + + when(serviceExtensionContext.getSetting(BusinessPartnerValidationExtension.BUSINESS_PARTNER_VALIDATION_LOG_AGREEMENT_VALIDATION, "true")).thenReturn("false"); + + var captor = ArgumentCaptor.forClass(BusinessPartnerPermissionFunction.class); + // invoke + extension.initialize(serviceExtensionContext); + + // verify + verify(policyEngine) + .registerFunction( + anyString(), + eq(Permission.class), + eq(BusinessPartnerValidationExtension.BUSINESS_PARTNER_CONSTRAINT_KEY), + captor.capture()); + + assertThat(captor.getValue().isLogAgreementEvaluation()).isFalse(); + } } diff --git a/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidationTest.java b/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidationTest.java index e8909c04e..2bc0738b0 100644 --- a/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidationTest.java +++ b/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidationTest.java @@ -20,10 +20,10 @@ package org.eclipse.tractusx.edc.validation.businesspartner.functions; -import java.util.Collections; -import java.util.List; +import org.eclipse.edc.connector.contract.spi.types.agreement.ContractAgreement; import org.eclipse.edc.policy.engine.spi.PolicyContext; import org.eclipse.edc.policy.model.Operator; +import org.eclipse.edc.policy.model.Policy; import org.eclipse.edc.spi.agent.ParticipantAgent; import org.eclipse.edc.spi.monitor.Monitor; import org.junit.jupiter.api.Assertions; @@ -31,143 +31,180 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; +import java.util.Collections; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.eq; + class AbstractBusinessPartnerValidationTest { - private AbstractBusinessPartnerValidation validation; + private AbstractBusinessPartnerValidation validation; + + // mocks + private Monitor monitor; + private PolicyContext policyContext; + private ParticipantAgent participantAgent; + + @BeforeEach + void BeforeEach() { + this.monitor = Mockito.mock(Monitor.class); + this.policyContext = Mockito.mock(PolicyContext.class); + this.participantAgent = Mockito.mock(ParticipantAgent.class); + + Mockito.when(policyContext.getParticipantAgent()).thenReturn(participantAgent); + + validation = new AbstractBusinessPartnerValidation(monitor, true) { + }; + } + + @ParameterizedTest + @EnumSource(Operator.class) + void testFailsOnUnsupportedOperations(Operator operator) { - // mocks - private Monitor monitor; - private PolicyContext policyContext; - private ParticipantAgent 
participantAgent; + if (operator == Operator.EQ) { // only allowed operator + return; + } - @BeforeEach - void BeforeEach() { - this.monitor = Mockito.mock(Monitor.class); - this.policyContext = Mockito.mock(PolicyContext.class); - this.participantAgent = Mockito.mock(ParticipantAgent.class); + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("yes"); - Mockito.when(policyContext.getParticipantAgent()).thenReturn(participantAgent); + // invoke & assert + Assertions.assertFalse(validation.evaluate(operator, "foo", policyContext)); + } + + @Test + void testFailsOnUnsupportedRightValue() { + + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("yes"); + + // invoke & assert + Assertions.assertFalse(validation.evaluate(Operator.EQ, 1, policyContext)); + } + + @Test + void testValidationFailsWhenClaimMissing() { - validation = new AbstractBusinessPartnerValidation(monitor) {}; - } + // prepare + prepareContextProblems(null); - @ParameterizedTest - @EnumSource(Operator.class) - void testFailsOnUnsupportedOperations(Operator operator) { + // invoke + final boolean isValid = validation.evaluate(Operator.EQ, "foo", policyContext); - if (operator == Operator.EQ) { // only allowed operator - return; + // assert + Assertions.assertFalse(isValid); } - // prepare - prepareContextProblems(null); - prepareBusinessPartnerClaim("yes"); + @Test + void testValidationSucceedsWhenClaimContainsValue() { - // invoke & assert - Assertions.assertFalse(validation.evaluate(operator, "foo", policyContext)); - } + // prepare + prepareContextProblems(null); - @Test - void testFailsOnUnsupportedRightValue() { + // prepare equals + prepareBusinessPartnerClaim("foo"); + final boolean isEqualsTrue = validation.evaluate(Operator.EQ, "foo", policyContext); - // prepare - prepareContextProblems(null); - prepareBusinessPartnerClaim("yes"); + // prepare contains + prepareBusinessPartnerClaim("foobar"); + final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); - // invoke & assert - Assertions.assertFalse(validation.evaluate(Operator.EQ, 1, policyContext)); - } + // assert + Assertions.assertTrue(isEqualsTrue); + Assertions.assertTrue(isContainedTrue); + } - @Test - void testValidationFailsWhenClaimMissing() { + @Test + void testValidationWhenParticipantHasProblems() { - // prepare - prepareContextProblems(null); + // prepare + prepareContextProblems(Collections.singletonList("big problem")); + prepareBusinessPartnerClaim("foo"); - // invoke - final boolean isValid = validation.evaluate(Operator.EQ, "foo", policyContext); + // invoke + final boolean isValid = validation.evaluate(Operator.EQ, "foo", policyContext); - // assert - Assertions.assertFalse(isValid); - } + // Mockito.verify(monitor.debug(Mockito.anyString()); + Assertions.assertFalse(isValid); + } - @Test - void testValidationSucceedsWhenClaimContainsValue() { + @Test + void testValidationWhenSingleParticipantIsValid() { - // prepare - prepareContextProblems(null); + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("foo"); - // prepare equals - prepareBusinessPartnerClaim("foo"); - final boolean isEqualsTrue = validation.evaluate(Operator.EQ, "foo", policyContext); + // invoke + final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); - // prepare contains - prepareBusinessPartnerClaim("foobar"); - final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); + // Mockito.verify(monitor.debug(Mockito.anyString()); 
+ Assertions.assertTrue(isContainedTrue); + } - // assert - Assertions.assertTrue(isEqualsTrue); - Assertions.assertTrue(isContainedTrue); - } + @Test + void testValidationWhenSingleParticipantIsValidWithAgreement() { - @Test - void testValidationWhenParticipantHasProblems() { + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("foo"); - // prepare - prepareContextProblems(Collections.singletonList("big problem")); - prepareBusinessPartnerClaim("foo"); + var captor = ArgumentCaptor.forClass(String.class); - // invoke - final boolean isValid = validation.evaluate(Operator.EQ, "foo", policyContext); + var agreement = ContractAgreement.Builder.newInstance() + .id("agreementId") + .providerAgentId("provider") + .consumerAgentId("consumer") + .assetId("assetId") + .policy(Policy.Builder.newInstance().build()) + .build(); - // Mockito.verify(monitor.debug(Mockito.anyString()); - Assertions.assertFalse(isValid); - } + Mockito.when(policyContext.getContextData(eq(ContractAgreement.class))).thenReturn(agreement); - @Test - void testValidationWhenSingleParticipantIsValid() { + // invoke + final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); - // prepare - prepareContextProblems(null); - prepareBusinessPartnerClaim("foo"); + Assertions.assertTrue(isContainedTrue); - // invoke - final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); + Mockito.verify(monitor).info(captor.capture()); - // Mockito.verify(monitor.debug(Mockito.anyString()); - Assertions.assertTrue(isContainedTrue); - } + assertThat(captor.getValue()).contains(agreement.getId()).contains("foo"); + } - // In the past it was possible to use the 'IN' constraint with multiple BPNs as - // a list. This is no longer supported. - // The EDC must now always decline this kind of BPN format. - @Test - void testValidationForMultipleParticipants() { + // In the past it was possible to use the 'IN' constraint with multiple BPNs as + // a list. This is no longer supported. + // The EDC must now always decline this kind of BPN format. 
+ @Test + void testValidationForMultipleParticipants() { - // prepare - prepareContextProblems(null); - prepareBusinessPartnerClaim("foo"); + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("foo"); - // invoke & verify - Assertions.assertFalse(validation.evaluate(Operator.IN, List.of("foo", "bar"), policyContext)); - Assertions.assertFalse(validation.evaluate(Operator.IN, List.of(1, "foo"), policyContext)); - Assertions.assertFalse(validation.evaluate(Operator.IN, List.of("bar", "bar"), policyContext)); - } + // invoke & verify + Assertions.assertFalse(validation.evaluate(Operator.IN, List.of("foo", "bar"), policyContext)); + Assertions.assertFalse(validation.evaluate(Operator.IN, List.of(1, "foo"), policyContext)); + Assertions.assertFalse(validation.evaluate(Operator.IN, List.of("bar", "bar"), policyContext)); + } - private void prepareContextProblems(List problems) { - Mockito.when(policyContext.getProblems()).thenReturn(problems); + private void prepareContextProblems(List problems) { + Mockito.when(policyContext.getProblems()).thenReturn(problems); - if (problems == null || problems.isEmpty()) { - Mockito.when(policyContext.hasProblems()).thenReturn(false); - } else { - Mockito.when(policyContext.hasProblems()).thenReturn(true); + if (problems == null || problems.isEmpty()) { + Mockito.when(policyContext.hasProblems()).thenReturn(false); + } else { + Mockito.when(policyContext.hasProblems()).thenReturn(true); + } } - } - private void prepareBusinessPartnerClaim(String businessPartnerNumber) { - Mockito.when(participantAgent.getClaims()) - .thenReturn(Collections.singletonMap("referringConnector", businessPartnerNumber)); - } + private void prepareBusinessPartnerClaim(String businessPartnerNumber) { + Mockito.when(participantAgent.getClaims()) + .thenReturn(Collections.singletonMap("referringConnector", businessPartnerNumber)); + } } diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java index a28a2610d..5bf2a2417 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java @@ -69,6 +69,7 @@ public class MultiRuntimeTest { put("edc.dataplane.selector.httpplane.destinationtypes", "HttpProxy"); put("edc.dataplane.selector.httpplane.properties", "{\"publicApiUrl\":\"http://localhost:" + SOKRATES_PUBLIC_API_PORT + "/api/public\"}"); put("edc.receiver.http.dynamic.endpoint", "http://localhost:" + SOKRATES_CONNECTOR_PORT + "/api/consumer/datareference"); + put("tractusx.businesspartnervalidation.log.agreement.validation", "true"); } }); @@ -98,6 +99,7 @@ public class MultiRuntimeTest { put("edc.dataplane.selector.httpplane.sourcetypes", "HttpData"); put("edc.dataplane.selector.httpplane.destinationtypes", "HttpProxy"); put("edc.dataplane.selector.httpplane.properties", "{\"publicApiUrl\":\"http://localhost:" + PLATO_PUBLIC_API_PORT + "/api/public\"}"); + put("tractusx.businesspartnervalidation.log.agreement.validation", "true"); } }); } diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java index dd43d4f90..bd1546111 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java +++ 
b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java @@ -26,6 +26,7 @@ import org.eclipse.edc.connector.api.management.transferprocess.model.TransferRequestDto; import org.eclipse.edc.connector.policy.spi.PolicyDefinition; import org.eclipse.edc.junit.extensions.EdcRuntimeExtension; +import org.eclipse.edc.policy.model.PolicyRegistrationTypes; import org.eclipse.edc.spi.asset.AssetSelectorExpression; import org.eclipse.edc.spi.iam.IdentityService; import org.eclipse.edc.spi.system.ServiceExtension; @@ -76,6 +77,9 @@ public Participant(String moduleName, String runtimeName, Map pr this.bpn = runtimeName + "-BPN"; this.backend = properties.get("edc.receiver.http.dynamic.endpoint"); this.registerServiceMock(IdentityService.class, new MockDapsService(getBpn())); + + typeManager.registerTypes(PolicyRegistrationTypes.TYPES.toArray(Class[]::new)); + } @Override diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java index 78f3d2013..1f93ae5e7 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java @@ -38,7 +38,7 @@ import static org.awaitility.Awaitility.await; import static org.awaitility.pollinterval.FibonacciPollInterval.fibonacci; import static org.eclipse.edc.connector.transfer.dataplane.spi.TransferDataPlaneConstants.HTTP_PROXY; -import static org.eclipse.tractusx.edc.policy.PolicyHelperFunctions.noConstraintPolicy; +import static org.eclipse.tractusx.edc.policy.PolicyHelperFunctions.businessPartnerNumberPolicy; @EndToEndTest public class HttpConsumerPullWithProxyTest extends MultiRuntimeTest { @@ -61,8 +61,8 @@ void transferData_privateBackend() throws IOException, InterruptedException { .authKey(authCodeHeaderName) .authCode(authCode) .build()); - plato.createPolicy(noConstraintPolicy("policy-1")); - plato.createPolicy(noConstraintPolicy("policy-2")); + plato.createPolicy(businessPartnerNumberPolicy("policy-1", sokrates.getBpn())); + plato.createPolicy(businessPartnerNumberPolicy("policy-2", sokrates.getBpn())); plato.createContractDefinition(assetId, "def-1", "policy-1", "policy-2", ONE_WEEK); var negotiationId = sokrates.negotiateContract(plato, assetId); diff --git a/settings.gradle.kts b/settings.gradle.kts index e22d4aacc..a4158ef8c 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -58,7 +58,7 @@ dependencyResolutionManagement { } // create version catalog for all EDC modules create("edc") { - version("edc", "0.0.1-20230220-SNAPSHOT") + version("edc", "0.0.1-20230220.patch1") library("spi-catalog", "org.eclipse.edc", "catalog-spi").versionRef("edc") library("spi-auth", "org.eclipse.edc", "auth-spi").versionRef("edc") library("spi-transfer", "org.eclipse.edc", "transfer-spi").versionRef("edc") @@ -137,34 +137,38 @@ dependencyResolutionManagement { library("micrometer-jersey", "org.eclipse.edc", "jersey-micrometer").versionRef("edc") library("micrometer-jetty", "org.eclipse.edc", "jetty-micrometer").versionRef("edc") library("monitor-jdklogger", "org.eclipse.edc", "monitor-jdk-logger").versionRef("edc") - library("transfer.dynamicreceiver", "org.eclipse.edc", "transfer-pull-http-dynamic-receiver").versionRef("edc") + library( + "transfer.dynamicreceiver", + "org.eclipse.edc", + "transfer-pull-http-dynamic-receiver" + 
).versionRef("edc") library("transfer.receiver", "org.eclipse.edc", "transfer-pull-http-receiver").versionRef("edc") bundle( - "connector", - listOf("boot", "core-connector", "core-jersey", "core-controlplane", "api-observability") + "connector", + listOf("boot", "core-connector", "core-jersey", "core-controlplane", "api-observability") ) bundle( - "dpf", - listOf("dpf-transfer", "dpf-selector-core", "dpf-selector-client", "spi-dataplane-selector") + "dpf", + listOf("dpf-transfer", "dpf-selector-core", "dpf-selector-client", "spi-dataplane-selector") ) bundle( - "sqlstores", - listOf( - "sql-assetindex", - "sql-contract-definition", - "sql-contract-negotiation", - "sql-transferprocess", - "sql-policydef" - ) + "sqlstores", + listOf( + "sql-assetindex", + "sql-contract-definition", + "sql-contract-negotiation", + "sql-transferprocess", + "sql-policydef" + ) ) bundle( - "monitoring", - listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty") + "monitoring", + listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty") // listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty", "monitor-jdklogger") ) } From 9f9c81e36d5af0ad8098141e8962d390528f5b4c Mon Sep 17 00:00:00 2001 From: Paul Latzelsperger Date: Wed, 19 Apr 2023 17:06:03 +0200 Subject: [PATCH 91/92] release-fix: use correct value --- .github/workflows/publish-new-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index b7de21257..b918c121d 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -68,7 +68,7 @@ jobs: run: | echo "Publishing Version $(grep -e "version" gradle.properties | cut -f2 -d"=") to Github Packages" ./gradlew publishAllPublicationsToGithubPackagesRepository - env: + with: REPO: ${{ github.repository }} GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} From d791569c639e54314b2ed015995c8402f702c3d2 Mon Sep 17 00:00:00 2001 From: GitHub actions Date: Wed, 19 Apr 2023 15:09:08 +0000 Subject: [PATCH 92/92] Prepare release 0.3.3 --- CHANGELOG.md | 312 ++++++++--------- charts/tractusx-connector-memory/Chart.yaml | 4 +- charts/tractusx-connector-memory/README.md | 352 +++++++------------- charts/tractusx-connector/Chart.yaml | 4 +- charts/tractusx-connector/README.md | 31 +- gradle.properties | 2 +- 6 files changed, 307 insertions(+), 398 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e84846211..dadff38f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,11 +7,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.3] - 2023-04-19 + ## [0.3.2] - 2023-03-30 ### Fixed -- Fixed mutually-exclusive config values for Azure KeyVault +- Fixed mutually-exclusive config values for Azure KeyVault ## [0.3.1] - 2023-03-27 @@ -19,7 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed -- Support unauthenticated access to the ObservabilityAPI (#126) +- Support unauthenticated access to the ObservabilityAPI (#126) ### Fixed @@ -30,144 +32,144 @@ corresponding [documentation](/docs/migration/Version_0.1.x_0.3.x.md). 
### Added -- Add contract id to data source http call (#732) -- Support also support releases in ci pipeline -- Introduce typed object for oauth2 provisioning -- Add documentation -- Add test case -- Add client to omejdn -- add hydra deployment -- Configure dynamically HTTP Receiver callback endpoints. (#685) -- cp-adapter : code review, rollbacke name change (#664) -- Feature/cp adapter task 355 356 357 (#621) -- Add Validity Mapping in ContractDefinitionStepDefs class -- Add feature and create SendAnOfferwithoutConstraints method in class negotiationSteps -- Add validity attribute in class ContractDefinition -- Add Validity Mapping in ContractDefinitionStepDefs class -- Add feature and create SendAnOfferwithoutConstraints method in class negotiationSteps -- Add validity attribute in class ContractDefinition -- Local TXDC Setup Documentation (#618) -- Feature: Sftp Provisioner and Client (#554) +- Add contract id to data source http call (#732) +- Support also support releases in ci pipeline +- Introduce typed object for oauth2 provisioning +- Add documentation +- Add test case +- Add client to omejdn +- add hydra deployment +- Configure dynamically HTTP Receiver callback endpoints. (#685) +- cp-adapter : code review, rollbacke name change (#664) +- Feature/cp adapter task 355 356 357 (#621) +- Add Validity Mapping in ContractDefinitionStepDefs class +- Add feature and create SendAnOfferwithoutConstraints method in class negotiationSteps +- Add validity attribute in class ContractDefinition +- Add Validity Mapping in ContractDefinitionStepDefs class +- Add feature and create SendAnOfferwithoutConstraints method in class negotiationSteps +- Add validity attribute in class ContractDefinition +- Local TXDC Setup Documentation (#618) +- Feature: Sftp Provisioner and Client (#554) ### Changed -- Support horizontal edc scaling in cp adapter extension (#678) -- Use upstream jackson version (#741) -- Replace provision-oauth2 with data-plane-http-oauth2 -- docs: Update sample documentation (#671) -- chore: Disable build ci pipeline if just docu was updated (#705) -- Increase trivy timeout -- Remove not useful anymore custom-jsonld extension (#683) -- update setup docu (#654) -- remove trailing slash (#652) -- update alpine from 3.17.0 to 3.17.1 for controlplane-memory-hashicorp-vault (#665) -- Feature/set charts deprecated (#628) -- update setup docu (#627) -- Feature/update txdc deployment downward capabilities (#625) -- remove git submodule (#619) -- Feature/update postman (#624) -- update control plane docu (#623) -- update postgresql version in Chart.yaml supporting-infrastructure (#622) -- update link to edc logo in README.md (#612) -- update description of supporting infrastructure deployment (#616) +- Support horizontal edc scaling in cp adapter extension (#678) +- Use upstream jackson version (#741) +- Replace provision-oauth2 with data-plane-http-oauth2 +- docs: Update sample documentation (#671) +- chore: Disable build ci pipeline if just docu was updated (#705) +- Increase trivy timeout +- Remove not useful anymore custom-jsonld extension (#683) +- update setup docu (#654) +- remove trailing slash (#652) +- update alpine from 3.17.0 to 3.17.1 for controlplane-memory-hashicorp-vault (#665) +- Feature/set charts deprecated (#628) +- update setup docu (#627) +- Feature/update txdc deployment downward capabilities (#625) +- remove git submodule (#619) +- Feature/update postman (#624) +- update control plane docu (#623) +- update postgresql version in Chart.yaml supporting-infrastructure 
(#622) +- update link to edc logo in README.md (#612) +- update description of supporting infrastructure deployment (#616) ### Fixed -- bugfix: Fix slow AES encryption (#746) -- Fix typo in tractusx-connector values.yaml comment -- Fix not working docu link in README.md -- Fix typo in control-plane adapter README +- bugfix: Fix slow AES encryption (#746) +- Fix typo in tractusx-connector values.yaml comment +- Fix not working docu link in README.md +- Fix typo in control-plane adapter README ### Dependency updates -- Bump EDC to 20220220 (#767) -- Bump alpine (#749) -- Bump alpine (#750) -- Bump alpine (#752) -- Bump alpine in /edc-controlplane/edc-runtime-memory/src/main/docker (#753) -- Bump maven-deploy-plugin from 3.0.0 to 3.1.0 (#735) -- Bump actions/setup-java from 3.9.0 to 3.10.0 (#730) -- Bump s3 from 2.19.33 to 2.20.0 -- Bump s3 from 2.19.27 to 2.19.33 -- Bump jaxb-runtime from 4.0.1 to 4.0.2 -- Bump spotless-maven-plugin from 2.31.0 to 2.32.0 -- Bump postgresql from 42.5.1 to 42.5.3 -- Bump nimbus-jose-jwt from 9.30 to 9.30.1 -- Bump lombok from 1.18.24 to 1.18.26 -- Bump flyway-core from 9.12.0 to 9.14.1 -- Bump jackson-bom from 2.14.0-rc2 to 2.14.2 -- Bump cucumber.version from 7.11.0 to 7.11.1 -- Bump azure-sdk-bom from 1.2.8 to 1.2.9 -- Bump mockito-bom from 5.0.0 to 5.1.1 -- Bump edc version to 0.0.1-20230131-SNAPSHOT -- Bump s3 from 2.19.18 to 2.19.27 -- Bump docker/build-push-action from 3 to 4 -- Bump nimbus-jose-jwt from 9.29 to 9.30 -- Bump spotless-maven-plugin from 2.30.0 to 2.31.0 -- Bump nimbus-jose-jwt from 9.28 to 9.29 -- Bump mockito-bom from 4.11.0 to 5.0.0 -- Bump edc version to 0.0.1-20230125-SNAPSHOT -- Bump flyway-core from 9.11.0 to 9.12.0 -- Bump s3 from 2.19.15 to 2.19.18 (#684) -- Bump mikefarah/yq from 4.30.6 to 4.30.8 (#682) -- Bump spotless-maven-plugin from 2.29.0 to 2.30.0 -- Bump edc version to 0.0.1-20230115-SNAPSHOT -- Bump cucumber.version from 7.10.1 to 7.11.0 (#672) -- Bump maven-dependency-plugin from 3.4.0 to 3.5.0 (#669) -- Bump s3 from 2.19.11 to 2.19.15 (#668) -- Bump maven-surefire-plugin from 3.0.0-M7 to 3.0.0-M8 (#670) -- Bump edc version to 0.0.1-20230109-SNAPSHOT (#666) -- Bump alpine in /edc-controlplane/edc-runtime-memory/src/main/docker (#659) -- Bump alpine in /edc-dataplane/edc-dataplane-azure-vault/src/main/docker (#660) -- Bump alpine (#658) -- Bump alpine (#661) -- Bump alpine (#662) -- Bump azure/setup-kubectl from 3.1 to 3.2 (#655) -- Bump junit-bom from 5.9.1 to 5.9.2 (#657) -- Bump s3 from 2.19.2 to 2.19.11 (#648) -- Bump actions/checkout from 3.2.0 to 3.3.0 (#647) -- Bump flyway-core from 9.10.2 to 9.11.0 (#646) -- Bump spotless-maven-plugin from 2.28.0 to 2.29.0 (#641) -- Bump mockito-bom from 4.10.0 to 4.11.0 (#637) -- Bump flyway-core from 9.10.1 to 9.10.2 (#632) -- Bump s3 from 2.19.1 to 2.19.2 (#631) -- Bump s3 from 2.18.41 to 2.19.1 (#626) -- Bump mikefarah/yq from 4.30.5 to 4.30.6 (#613) -- Bump cucumber.version from 7.10.0 to 7.10.1 (#614) -- Bump s3 from 2.18.40 to 2.18.41 (#615) -- Bump azure/setup-helm from 3.4 to 3.5 (#596) -- Bump actions/checkout from 3.1.0 to 3.2.0 (#598) -- Bump mockito-bom from 4.9.0 to 4.10.0 (#607) -- Bump s3 from 2.18.39 to 2.18.40 (#609) -- Bump flyway-core from 9.10.0 to 9.10.1 (#610) -- Bump actions/setup-java from 3.8.0 to 3.9.0 (#605) -- Bump s3 from 2.18.35 to 2.18.39 (#606) +- Bump EDC to 20220220 (#767) +- Bump alpine (#749) +- Bump alpine (#750) +- Bump alpine (#752) +- Bump alpine in /edc-controlplane/edc-runtime-memory/src/main/docker (#753) +- Bump maven-deploy-plugin from 
3.0.0 to 3.1.0 (#735) +- Bump actions/setup-java from 3.9.0 to 3.10.0 (#730) +- Bump s3 from 2.19.33 to 2.20.0 +- Bump s3 from 2.19.27 to 2.19.33 +- Bump jaxb-runtime from 4.0.1 to 4.0.2 +- Bump spotless-maven-plugin from 2.31.0 to 2.32.0 +- Bump postgresql from 42.5.1 to 42.5.3 +- Bump nimbus-jose-jwt from 9.30 to 9.30.1 +- Bump lombok from 1.18.24 to 1.18.26 +- Bump flyway-core from 9.12.0 to 9.14.1 +- Bump jackson-bom from 2.14.0-rc2 to 2.14.2 +- Bump cucumber.version from 7.11.0 to 7.11.1 +- Bump azure-sdk-bom from 1.2.8 to 1.2.9 +- Bump mockito-bom from 5.0.0 to 5.1.1 +- Bump edc version to 0.0.1-20230131-SNAPSHOT +- Bump s3 from 2.19.18 to 2.19.27 +- Bump docker/build-push-action from 3 to 4 +- Bump nimbus-jose-jwt from 9.29 to 9.30 +- Bump spotless-maven-plugin from 2.30.0 to 2.31.0 +- Bump nimbus-jose-jwt from 9.28 to 9.29 +- Bump mockito-bom from 4.11.0 to 5.0.0 +- Bump edc version to 0.0.1-20230125-SNAPSHOT +- Bump flyway-core from 9.11.0 to 9.12.0 +- Bump s3 from 2.19.15 to 2.19.18 (#684) +- Bump mikefarah/yq from 4.30.6 to 4.30.8 (#682) +- Bump spotless-maven-plugin from 2.29.0 to 2.30.0 +- Bump edc version to 0.0.1-20230115-SNAPSHOT +- Bump cucumber.version from 7.10.1 to 7.11.0 (#672) +- Bump maven-dependency-plugin from 3.4.0 to 3.5.0 (#669) +- Bump s3 from 2.19.11 to 2.19.15 (#668) +- Bump maven-surefire-plugin from 3.0.0-M7 to 3.0.0-M8 (#670) +- Bump edc version to 0.0.1-20230109-SNAPSHOT (#666) +- Bump alpine in /edc-controlplane/edc-runtime-memory/src/main/docker (#659) +- Bump alpine in /edc-dataplane/edc-dataplane-azure-vault/src/main/docker (#660) +- Bump alpine (#658) +- Bump alpine (#661) +- Bump alpine (#662) +- Bump azure/setup-kubectl from 3.1 to 3.2 (#655) +- Bump junit-bom from 5.9.1 to 5.9.2 (#657) +- Bump s3 from 2.19.2 to 2.19.11 (#648) +- Bump actions/checkout from 3.2.0 to 3.3.0 (#647) +- Bump flyway-core from 9.10.2 to 9.11.0 (#646) +- Bump spotless-maven-plugin from 2.28.0 to 2.29.0 (#641) +- Bump mockito-bom from 4.10.0 to 4.11.0 (#637) +- Bump flyway-core from 9.10.1 to 9.10.2 (#632) +- Bump s3 from 2.19.1 to 2.19.2 (#631) +- Bump s3 from 2.18.41 to 2.19.1 (#626) +- Bump mikefarah/yq from 4.30.5 to 4.30.6 (#613) +- Bump cucumber.version from 7.10.0 to 7.10.1 (#614) +- Bump s3 from 2.18.40 to 2.18.41 (#615) +- Bump azure/setup-helm from 3.4 to 3.5 (#596) +- Bump actions/checkout from 3.1.0 to 3.2.0 (#598) +- Bump mockito-bom from 4.9.0 to 4.10.0 (#607) +- Bump s3 from 2.18.39 to 2.18.40 (#609) +- Bump flyway-core from 9.10.0 to 9.10.1 (#610) +- Bump actions/setup-java from 3.8.0 to 3.9.0 (#605) +- Bump s3 from 2.18.35 to 2.18.39 (#606) ## [0.1.6] - 2023-02-20 ### Fixed -- SQL leakage issue -- Catalog pagination +- SQL leakage issue +- Catalog pagination ## [0.1.5] - 2023-02-13 ### Fixed -- Use patched EDC version: 0.0.1-20220922.2-SNAPSHOT to fix catalog pagination bug -- Data Encryption extension: fixed usage of a blocking algorithm +- Use patched EDC version: 0.0.1-20220922.2-SNAPSHOT to fix catalog pagination bug +- Data Encryption extension: fixed usage of a blocking algorithm ## [0.1.2] - 2022-09-30 ### Added -- Introduced DEPENDENCIES file +- Introduced DEPENDENCIES file ### Changed -- Moved helm charts from `deployment/helm` to `charts` -- Replaced distroless image with alpine in all docker images -- Update EDC commit to `740c100ac162bc41b1968c232ad81f7d739aefa9` +- Moved helm charts from `deployment/helm` to `charts` +- Replaced distroless image with alpine in all docker images +- Update EDC commit to `740c100ac162bc41b1968c232ad81f7d739aefa9` 
## [0.1.1] - 2022-09-04 @@ -176,17 +178,17 @@ connector. [documentation](/docs/migration/Version_0.1.0_0.1.1.md). ### Added -- Control-Plane Extension ([cx-oauth2](/edc-extensions/cx-oauth2/README.md)) +- Control-Plane Extension ([cx-oauth2](/edc-extensions/cx-oauth2/README.md)) ### Changed -- Introduced git submodule to import EDC dependencies (instead of snapshot- or milestone artifact) -- Helm Charts: TLS secret name is now configurable +- Introduced git submodule to import EDC dependencies (instead of snapshot- or milestone artifact) +- Helm Charts: TLS secret name is now configurable ### Fixed -- Connectors with Azure Vault extension are now starting - again [link](https://github.com/eclipse-edc/Connector/issues/1892) +- Connectors with Azure Vault extension are now starting + again [link](https://github.com/eclipse-edc/Connector/issues/1892) ## [0.1.0] - 2022-08-19 @@ -195,74 +197,74 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). ### Added -- Control-Plane - extension ([data-plane-selector-client](https://github.com/eclipse-edc/Connector/tree/v0.0.1-milestone-5/extensions/data-plane-selector/selector-client)) - - run the EDC with multiple data planes at once -- Control-Plane extension ([dataplane-selector-configuration](edc-extensions/dataplane-selector-configuration)) - - add data plane instances to the control plane by configuration -- Data-Plane - extension ([s3-data-plane](https://github.com/eclipse-edc/Connector/tree/main/extensions/aws/data-plane-s3)) - - transfer from and to AWS S3 buckets -- Control-Plane extension ([data-encryption](edc-extensions/data-encryption)) - - Data-Plane authentication attribute transmitted during data-plane-transfer can be encrypted symmetrically (AES) +- Control-Plane + extension ([data-plane-selector-client](https://github.com/eclipse-edc/Connector/tree/v0.0.1-milestone-5/extensions/data-plane-selector/selector-client)) + - run the EDC with multiple data planes at once +- Control-Plane extension ([dataplane-selector-configuration](edc-extensions/dataplane-selector-configuration)) + - add data plane instances to the control plane by configuration +- Data-Plane + extension ([s3-data-plane](https://github.com/eclipse-edc/Connector/tree/main/extensions/aws/data-plane-s3)) + - transfer from and to AWS S3 buckets +- Control-Plane extension ([data-encryption](edc-extensions/data-encryption)) + - Data-Plane authentication attribute transmitted during data-plane-transfer can be encrypted symmetrically (AES) ### Changed -- Update setting name (`edc.dataplane.token.validation.endpoint` -> `edc.dataplane.token.validation.endpoint`) -- EDC has been updated to - version [0.0.1-20220818-SNAPSHOT](https://oss.sonatype.org/#nexus-search;gav~org.eclipse.dataspaceconnector~~0.0.1-20220818-SNAPSHOT~~) - - implications to the behavior of the connector have been covered in - the [corresponding migration guide](docs/migration/Version_0.0.x_0.1.x.md) +- Update setting name (`edc.dataplane.token.validation.endpoint` -> `edc.dataplane.token.validation.endpoint`) +- EDC has been updated to + version [0.0.1-20220818-SNAPSHOT](https://oss.sonatype.org/#nexus-search;gav~org.eclipse.dataspaceconnector~~0.0.1-20220818-SNAPSHOT~~) - + implications to the behavior of the connector have been covered in + the [corresponding migration guide](docs/migration/Version_0.0.x_0.1.x.md) ### Fixed -- Contract-Offer-Receiving-Connectors must also pass the ContractPolicy of the ContractDefinition before receiving - 
offers([issue](https://github.com/eclipse-edc/Connector/issues/1331)) -- Deletion of Asset becomes impossible when Contract Negotiation - exists([issue](https://github.com/eclipse-edc/Connector/issues/1403)) -- Deletion of Policy becomes impossible when Contract Definition - exists([issue](https://github.com/eclipse-edc/Connector/issues/1410)) +- Contract-Offer-Receiving-Connectors must also pass the ContractPolicy of the ContractDefinition before receiving + offers([issue](https://github.com/eclipse-edc/Connector/issues/1331)) +- Deletion of Asset becomes impossible when Contract Negotiation + exists([issue](https://github.com/eclipse-edc/Connector/issues/1403)) +- Deletion of Policy becomes impossible when Contract Definition + exists([issue](https://github.com/eclipse-edc/Connector/issues/1410)) ## [0.0.6] - 2022-07-29 ### Fixed -- Fixes [release 0.0.5](https://github.com/eclipse-tractusx/tractusx-edc/releases/tag/0.0.5), which introduced classpath - issues due to usage of [net.jodah:failsafe:2.4.3](https://search.maven.org/artifact/net.jodah/failsafe/2.4.3/jar) - library +- Fixes [release 0.0.5](https://github.com/eclipse-tractusx/tractusx-edc/releases/tag/0.0.5), which introduced classpath + issues due to usage of [net.jodah:failsafe:2.4.3](https://search.maven.org/artifact/net.jodah/failsafe/2.4.3/jar) + library ## [0.0.5] - 2022-07-28 ### Added -- EDC Health Checks for HashiCorp Vault +- EDC Health Checks for HashiCorp Vault ### Changed -- BusinessPartnerNumber constraint supports List structure -- Helm: Confidential EDC settings can be set using k8s secrets -- HashiCorp Vault API path configurable +- BusinessPartnerNumber constraint supports List structure +- Helm: Confidential EDC settings can be set using k8s secrets +- HashiCorp Vault API path configurable ## [0.0.4] - 2022-06-27 ### Added -- HashiCorp Vault Extension -- Control Plane with HashiCorp Vault and PostgreSQL support +- HashiCorp Vault Extension +- Control Plane with HashiCorp Vault and PostgreSQL support ### Changed -- Release Workflow now publishes EDC Extensions as Maven Artifacts +- Release Workflow now publishes EDC Extensions as Maven Artifacts ### Fixed -- [#1515](https://github.com/eclipse-edc/Connector/issues/1515) SQL: Connector sends out 50 - contract offers max. +- [#1515](https://github.com/eclipse-edc/Connector/issues/1515) SQL: Connector sends out 50 + contract offers max. ### Removed -- CosmosDB Control Plane -- Control API Extension for all Control Planes +- CosmosDB Control Plane +- Control API Extension for all Control Planes ## [0.0.3] - 2022-05-23 @@ -270,7 +272,9 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). ## [0.0.1] - 2022-05-13 -[Unreleased]: https://github.com/catenax-ng/tx-tractusx-edc/compare/0.3.2...HEAD +[Unreleased]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.3.3...HEAD + +[0.3.3]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.3.2...0.3.3 [0.3.2]: https://github.com/catenax-ng/tx-tractusx-edc/compare/0.3.1...0.3.2 diff --git a/charts/tractusx-connector-memory/Chart.yaml b/charts/tractusx-connector-memory/Chart.yaml index 42b139a55..cb0a06b72 100644 --- a/charts/tractusx-connector-memory/Chart.yaml +++ b/charts/tractusx-connector-memory/Chart.yaml @@ -34,12 +34,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.2 +version: 0.3.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.3.2" +appVersion: "0.3.3" home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector-memory sources: - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector-memory diff --git a/charts/tractusx-connector-memory/README.md b/charts/tractusx-connector-memory/README.md index 1e37bc286..798342502 100644 --- a/charts/tractusx-connector-memory/README.md +++ b/charts/tractusx-connector-memory/README.md @@ -1,241 +1,147 @@ -# tractusx-connector +# tractusx-connector-memory -![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.2](https://img.shields.io/badge/AppVersion-0.3.2-informational?style=flat-square) +![Version: 0.3.3](https://img.shields.io/badge/Version-0.3.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.3](https://img.shields.io/badge/AppVersion-0.3.3-informational?style=flat-square) -A Helm chart for Tractus-X Eclipse Data Space Connector +A Helm chart for Tractus-X Eclipse Data Space Connector based on memory -**Homepage:** +**Homepage:** ## TL;DR ```shell helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/tractusx-connector --version 0.3.2 +helm install my-release tractusx-edc/tractusx-connector --version 0.3.3 ``` ## Source Code -* +* ## Values -| Key | Type | Default | Description | -|---------------------------------------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| backendService.httpProxyTokenReceiverUrl | string | `""` | | -| runtime.affinity | object | `{}` | | -| runtime.autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | -| runtime.autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | -| runtime.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | -| runtime.autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | -| runtime.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | -| runtime.debug.enabled | bool | `false` | | -| 
runtime.debug.port | int | `1044` | | -| runtime.debug.suspendOnStart | bool | `false` | | -| runtime.endpoints | object | `{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"metrics":{"path":"/metrics","port":9090},"observability":{"insecure":true,"path":"/observability","port":8085},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane | -| runtime.endpoints.control | object | `{"path":"/control","port":8083}` | control api, used for internal control calls. can be added to the internal ingress, but should probably not | -| runtime.endpoints.control.path | string | `"/control"` | path for incoming api calls | -| runtime.endpoints.control.port | int | `8083` | port for incoming api calls | -| runtime.endpoints.data | object | `{"authKey":"","path":"/data","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing | -| runtime.endpoints.data.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header | -| runtime.endpoints.data.path | string | `"/data"` | path for incoming api calls | -| runtime.endpoints.data.port | int | `8081` | port for incoming api calls | -| runtime.endpoints.default | object | `{"path":"/api","port":8080}` | default api for health checks, should not be added to any ingress | -| runtime.endpoints.default.path | string | `"/api"` | path for incoming api calls | -| runtime.endpoints.default.port | int | `8080` | port for incoming api calls | -| runtime.endpoints.ids | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing | -| runtime.endpoints.ids.path | string | `"/api/v1/ids"` | path for incoming api calls | -| runtime.endpoints.ids.port | int | `8084` | port for incoming api calls | -| runtime.endpoints.metrics | object | `{"path":"/metrics","port":9090}` | metrics api, used for application metrics, must not be internet facing | -| runtime.endpoints.metrics.path | string | `"/metrics"` | path for incoming api calls | -| runtime.endpoints.metrics.port | int | `9090` | port for incoming api calls | -| runtime.endpoints.observability | object | `{"insecure":true,"path":"/observability","port":8085}` | observability api with unsecured access, must not be internet facing | -| runtime.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. 
access without authentication | -| runtime.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints | -| runtime.endpoints.observability.port | int | `8085` | port for incoming API calls | -| runtime.endpoints.validation | object | `{"path":"/validation","port":8082}` | validation api, only used by the data plane and should not be added to any ingress | -| runtime.endpoints.validation.path | string | `"/validation"` | path for incoming api calls | -| runtime.endpoints.validation.port | int | `8082` | port for incoming api calls | -| runtime.env | object | `{}` | | -| runtime.envConfigMapNames | list | `[]` | | -| runtime.envSecretNames | list | `[]` | | -| runtime.envValueFrom | object | `{}` | | -| runtime.image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| runtime.image.repository | string | `""` | Which derivate of the control plane to use. when left empty the deployment will select the correct image automatically | -| runtime.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | -| runtime.ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | -| runtime.ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| runtime.ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| runtime.ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| runtime.ingresses[0].enabled | bool | `false` | | -| runtime.ingresses[0].endpoints | list | `["ids"]` | EDC endpoints exposed by this ingress resource | -| runtime.ingresses[0].hostname | string | `"edc-control.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| runtime.ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | -| runtime.ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| runtime.ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | -| runtime.ingresses[1].annotations | object | `{}` | Additional ingress annotations to add | -| runtime.ingresses[1].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| runtime.ingresses[1].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| runtime.ingresses[1].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| runtime.ingresses[1].enabled | bool | `false` | | -| runtime.ingresses[1].endpoints | list | `["data","control"]` | EDC endpoints exposed by this ingress resource | -| runtime.ingresses[1].hostname | string | `"edc-control.intranet"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| runtime.ingresses[1].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) 
applied to the ingress resource | -| runtime.ingresses[1].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| runtime.ingresses[1].tls.secretName | string | `""` | If present overwrites the default secret name | -| runtime.initContainers | list | `[]` | | -| runtime.internationalDataSpaces.catalogId | string | `"TXDC-Catalog"` | | -| runtime.internationalDataSpaces.curator | string | `""` | | -| runtime.internationalDataSpaces.description | string | `"Tractus-X Eclipse IDS Data Space Connector"` | | -| runtime.internationalDataSpaces.id | string | `"TXDC"` | | -| runtime.internationalDataSpaces.maintainer | string | `""` | | -| runtime.internationalDataSpaces.title | string | `""` | | -| runtime.livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| runtime.livenessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | -| runtime.livenessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first liveness check | -| runtime.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | -| runtime.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | -| runtime.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| runtime.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | -| runtime.nodeSelector | object | `{}` | | -| runtime.opentelemetry | string | `"otel.javaagent.enabled=false\notel.javaagent.debug=false"` | configuration of the [Open Telemetry Agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) to collect and expose metrics | -| runtime.podAnnotations | object | `{}` | additional annotations for the pod | -| runtime.podLabels | object | `{}` | additional labels for the pod | -| runtime.podSecurityContext | object | `{"fsGroup":10001,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment | -| runtime.podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | -| runtime.podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | -| runtime.podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | -| runtime.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | -| runtime.readinessProbe.enabled | bool | `true` | Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| 
runtime.readinessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | -| runtime.readinessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first readiness check | -| runtime.readinessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a readiness check every 10 seconds | -| runtime.readinessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | -| runtime.readinessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| runtime.replicaCount | int | `1` | | -| runtime.resources | object | `{}` | [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container | -| runtime.securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID | -| runtime.securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | -| runtime.securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | -| runtime.securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | -| runtime.securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | -| runtime.securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | -| runtime.service.annotations | object | `{}` | | -| runtime.service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | -| runtime.tolerations | list | `[]` | | -| runtime.url.ids | string | `""` | Explicitly declared url for reaching the ids api (e.g. 
if ingresses not used) | -| runtime.volumeMounts | list | `[]` | declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container | -| runtime.volumes | list | `[]` | [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories | -| customLabels | object | `{}` | | -| daps.clientId | string | `""` | | -| daps.paths.jwks | string | `"/jwks.json"` | | -| daps.paths.token | string | `"/token"` | | -| daps.url | string | `""` | | -| dataplane.affinity | object | `{}` | | -| dataplane.autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | -| dataplane.autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | -| dataplane.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | -| dataplane.autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | -| dataplane.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | -| dataplane.aws.accessKeyId | string | `""` | | -| dataplane.aws.endpointOverride | string | `""` | | -| dataplane.aws.secretAccessKey | string | `""` | | -| dataplane.debug.enabled | bool | `false` | | -| dataplane.debug.port | int | `1044` | | -| dataplane.debug.suspendOnStart | bool | `false` | | -| dataplane.endpoints.control.path | string | `"/api/dataplane/control"` | | -| dataplane.endpoints.control.port | int | `8083` | | -| dataplane.endpoints.default.path | string | `"/api"` | | -| dataplane.endpoints.default.port | int | `8080` | | -| dataplane.endpoints.metrics.path | string | `"/metrics"` | | -| dataplane.endpoints.metrics.port | int | `9090` | | -| dataplane.endpoints.public.path | string | `"/api/public"` | | -| dataplane.endpoints.public.port | int | `8081` | | -| dataplane.endpoints.validation.path | string | `"/validation"` | | -| dataplane.endpoints.validation.port | int | `8082` | | -| dataplane.env | object | `{}` | | -| dataplane.envConfigMapNames | list | `[]` | | -| dataplane.envSecretNames | list | `[]` | | -| dataplane.envValueFrom | object | `{}` | | -| dataplane.image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| dataplane.image.repository | string | `""` | Which derivate of the data plane to use. 
when left empty the deployment will select the correct image automatically | -| dataplane.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | -| dataplane.ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | -| dataplane.ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| dataplane.ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| dataplane.ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| dataplane.ingresses[0].enabled | bool | `false` | | -| dataplane.ingresses[0].endpoints | list | `["public"]` | EDC endpoints exposed by this ingress resource | -| dataplane.ingresses[0].hostname | string | `"edc-data.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| dataplane.ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | -| dataplane.ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| dataplane.ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | -| dataplane.initContainers | list | `[]` | | -| dataplane.livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| dataplane.livenessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | -| dataplane.livenessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first liveness check | -| dataplane.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | -| dataplane.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | -| dataplane.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| dataplane.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | -| dataplane.nodeSelector | object | `{}` | | -| dataplane.opentelemetry | string | `"otel.javaagent.enabled=false\notel.javaagent.debug=false"` | configuration of the [Open Telemetry Agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) to collect and expose metrics | -| dataplane.podAnnotations | object | `{}` | additional annotations for the pod | -| dataplane.podLabels | object | `{}` | additional labels for the pod | -| dataplane.podSecurityContext | object | `{"fsGroup":10001,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | The [pod security 
context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment | -| dataplane.podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | -| dataplane.podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | -| dataplane.podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | -| dataplane.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | -| dataplane.readinessProbe.enabled | bool | `true` | Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| dataplane.readinessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | -| dataplane.readinessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first readiness check | -| dataplane.readinessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | -| dataplane.readinessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | -| dataplane.readinessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| dataplane.replicaCount | int | `1` | | -| dataplane.resources | object | `{}` | [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container | -| dataplane.securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID | -| dataplane.securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | -| dataplane.securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | -| dataplane.securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | -| dataplane.securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | -| dataplane.securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | -| dataplane.service.port | int | `80` | | -| dataplane.service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | -| dataplane.tolerations | list | `[]` | | -| dataplane.url.public | string | `""` | Explicitly declared url for reaching the public api (e.g. 
if ingresses not used) | -| dataplane.volumeMounts | list | `[]` | declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container | -| dataplane.volumes | list | `[]` | [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories | -| fullnameOverride | string | `""` | | -| imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | -| nameOverride | string | `""` | | -| postgresql.enabled | bool | `false` | | -| postgresql.jdbcUrl | string | `""` | | -| postgresql.password | string | `""` | | -| postgresql.username | string | `""` | | -| serviceAccount.annotations | object | `{}` | | -| serviceAccount.create | bool | `true` | | -| serviceAccount.imagePullSecrets | list | `[]` | Existing image pull secret bound to the service account to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | -| serviceAccount.name | string | `""` | | -| vault.azure.certificate | string | `nil` | | -| vault.azure.client | string | `""` | | -| vault.azure.enabled | bool | `false` | | -| vault.azure.name | string | `""` | | -| vault.azure.secret | string | `nil` | | -| vault.azure.tenant | string | `""` | | -| vault.hashicorp.enabled | bool | `false` | | -| vault.hashicorp.healthCheck.enabled | bool | `true` | | -| vault.hashicorp.healthCheck.standbyOk | bool | `true` | | -| vault.hashicorp.paths.health | string | `"/v1/sys/health"` | | -| vault.hashicorp.paths.secret | string | `"/v1/secret"` | | -| vault.hashicorp.timeout | int | `30` | | -| vault.hashicorp.token | string | `""` | | -| vault.hashicorp.url | string | `""` | | -| vault.secretNames.dapsPrivateKey | string | `"daps-private-key"` | | -| vault.secretNames.dapsPublicKey | string | `"daps-public-key"` | | -| vault.secretNames.transferProxyTokenEncryptionAesKey | string | `"transfer-proxy-token-encryption-aes-key"` | | -| vault.secretNames.transferProxyTokenSignerPrivateKey | string | `"transfer-proxy-token-signer-private-key"` | | -| vault.secretNames.transferProxyTokenSignerPublicKey | string | `"transfer-proxy-token-signer-public-key"` | | +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| backendService.httpProxyTokenReceiverUrl | string | `""` | | +| customLabels | object | `{}` | | +| daps.clientId | string | `""` | | +| daps.paths.jwks | string | `"/jwks.json"` | | +| daps.paths.token | string | `"/token"` | | +| daps.url | string | `""` | | +| fullnameOverride | string | `""` | | +| imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | +| nameOverride | string | `""` | | +| runtime.affinity | object | `{}` | | +| runtime.autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | +| runtime.autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | +| runtime.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | +| runtime.autoscaling.targetCPUUtilizationPercentage | int | `80` | 
targetAverageUtilization of cpu provided to a pod | +| runtime.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | +| runtime.businessPartnerValidation.log.agreementValidation | bool | `true` | | +| runtime.debug.enabled | bool | `false` | | +| runtime.debug.port | int | `1044` | | +| runtime.debug.suspendOnStart | bool | `false` | | +| runtime.endpoints | object | `{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"observability":{"insecure":true,"path":"/observability","port":8085},"public":{"path":"/api/public","port":8086},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane | +| runtime.endpoints.control | object | `{"path":"/control","port":8083}` | control api, used for internal control calls. can be added to the internal ingress, but should probably not | +| runtime.endpoints.control.path | string | `"/control"` | path for incoming api calls | +| runtime.endpoints.control.port | int | `8083` | port for incoming api calls | +| runtime.endpoints.data | object | `{"authKey":"","path":"/data","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing | +| runtime.endpoints.data.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header | +| runtime.endpoints.data.path | string | `"/data"` | path for incoming api calls | +| runtime.endpoints.data.port | int | `8081` | port for incoming api calls | +| runtime.endpoints.default | object | `{"path":"/api","port":8080}` | default api for health checks, should not be added to any ingress | +| runtime.endpoints.default.path | string | `"/api"` | path for incoming api calls | +| runtime.endpoints.default.port | int | `8080` | port for incoming api calls | +| runtime.endpoints.ids | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing | +| runtime.endpoints.ids.path | string | `"/api/v1/ids"` | path for incoming api calls | +| runtime.endpoints.ids.port | int | `8084` | port for incoming api calls | +| runtime.endpoints.observability | object | `{"insecure":true,"path":"/observability","port":8085}` | observability api with unsecured access, must not be internet facing | +| runtime.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. 
access without authentication | +| runtime.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints | +| runtime.endpoints.observability.port | int | `8085` | port for incoming API calls | +| runtime.endpoints.validation | object | `{"path":"/validation","port":8082}` | validation api, only used by the data plane and should not be added to any ingress | +| runtime.endpoints.validation.path | string | `"/validation"` | path for incoming api calls | +| runtime.endpoints.validation.port | int | `8082` | port for incoming api calls | +| runtime.env | object | `{}` | | +| runtime.envConfigMapNames | list | `[]` | | +| runtime.envSecretNames | list | `[]` | | +| runtime.envValueFrom | object | `{}` | | +| runtime.image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | +| runtime.image.repository | string | `""` | | +| runtime.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | +| runtime.ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | +| runtime.ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | +| runtime.ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | +| runtime.ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | +| runtime.ingresses[0].enabled | bool | `false` | | +| runtime.ingresses[0].endpoints | list | `["ids"]` | EDC endpoints exposed by this ingress resource | +| runtime.ingresses[0].hostname | string | `"edc-control.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | +| runtime.ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | +| runtime.ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | +| runtime.ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | +| runtime.ingresses[1].annotations | object | `{}` | Additional ingress annotations to add | +| runtime.ingresses[1].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | +| runtime.ingresses[1].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | +| runtime.ingresses[1].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | +| runtime.ingresses[1].enabled | bool | `false` | | +| runtime.ingresses[1].endpoints | list | `["data","control"]` | EDC endpoints exposed by this ingress resource | +| runtime.ingresses[1].hostname | string | `"edc-control.intranet"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | +| runtime.ingresses[1].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | +| runtime.ingresses[1].tls.enabled | bool | `false` | Enables TLS on the ingress resource 
| +| runtime.ingresses[1].tls.secretName | string | `""` | If present overwrites the default secret name | +| runtime.initContainers | list | `[]` | | +| runtime.internationalDataSpaces.catalogId | string | `"TXDC-Catalog"` | | +| runtime.internationalDataSpaces.curator | string | `""` | | +| runtime.internationalDataSpaces.description | string | `"Tractus-X Eclipse IDS Data Space Connector"` | | +| runtime.internationalDataSpaces.id | string | `"TXDC"` | | +| runtime.internationalDataSpaces.maintainer | string | `""` | | +| runtime.internationalDataSpaces.title | string | `""` | | +| runtime.livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | +| runtime.livenessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | +| runtime.livenessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first liveness check | +| runtime.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | +| runtime.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | +| runtime.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | +| runtime.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | +| runtime.nodeSelector | object | `{}` | | +| runtime.podAnnotations | object | `{}` | additional annotations for the pod | +| runtime.podLabels | object | `{}` | additional labels for the pod | +| runtime.podSecurityContext | object | `{"fsGroup":10001,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment | +| runtime.podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | +| runtime.podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | +| runtime.podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | +| runtime.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | +| runtime.readinessProbe.enabled | bool | `true` | Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | +| runtime.readinessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | +| runtime.readinessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first readiness check | +| runtime.readinessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a readiness check every 10 seconds | +| 
runtime.readinessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | +| runtime.readinessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | +| runtime.replicaCount | int | `1` | | +| runtime.resources | object | `{}` | [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container | +| runtime.securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID | +| runtime.securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | +| runtime.securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | +| runtime.securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | +| runtime.securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | +| runtime.securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | +| runtime.service.annotations | object | `{}` | | +| runtime.service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | +| runtime.tolerations | list | `[]` | | +| runtime.url.ids | string | `""` | Explicitly declared url for reaching the ids api (e.g. if ingresses not used) | +| runtime.url.public | string | `""` | | +| runtime.url.readiness | string | `""` | | +| runtime.volumeMounts | list | `[]` | declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container | +| runtime.volumes | list | `[]` | [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.imagePullSecrets | list | `[]` | Existing image pull secret bound to the service account to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | +| serviceAccount.name | string | `""` | | +| vault.secretNames.dapsPrivateKey | string | `"daps-private-key"` | | +| vault.secretNames.dapsPublicKey | string | `"daps-public-key"` | | +| vault.secretNames.transferProxyTokenEncryptionAesKey | string | `"transfer-proxy-token-encryption-aes-key"` | | +| vault.secretNames.transferProxyTokenSignerPrivateKey | string | `"transfer-proxy-token-signer-private-key"` | | +| vault.secretNames.transferProxyTokenSignerPublicKey | string | `"transfer-proxy-token-signer-public-key"` | | +| vault.secrets | string | `""` | | ---------------------------------------------- Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0) diff --git a/charts/tractusx-connector/Chart.yaml b/charts/tractusx-connector/Chart.yaml index f9e4322c6..696e94396 100644 --- a/charts/tractusx-connector/Chart.yaml +++ b/charts/tractusx-connector/Chart.yaml @@ -36,12 +36,12 @@ type: application # This is the chart version. 
This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.2
+version: 0.3.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "0.3.2"
+appVersion: "0.3.3"
home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector
sources:
- https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector
diff --git a/charts/tractusx-connector/README.md b/charts/tractusx-connector/README.md
index af53087c9..12c45b649 100644
--- a/charts/tractusx-connector/README.md
+++ b/charts/tractusx-connector/README.md
@@ -1,6 +1,6 @@
# tractusx-connector
-![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.2](https://img.shields.io/badge/AppVersion-0.3.2-informational?style=flat-square)
+![Version: 0.3.3](https://img.shields.io/badge/Version-0.3.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.3](https://img.shields.io/badge/AppVersion-0.3.3-informational?style=flat-square)
A Helm chart for Tractus-X Eclipse Data Space Connector
@@ -10,7 +10,7 @@ A Helm chart for Tractus-X Eclipse Data Space Connector
```shell
helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev
-helm install my-release tractusx-edc/tractusx-connector --version 0.3.2
+helm install my-release tractusx-edc/tractusx-connector --version 0.3.3
```
## Source Code
@@ -28,23 +28,21 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.2
| controlplane.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds |
| controlplane.autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod |
| controlplane.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod |
+| controlplane.businessPartnerValidation.log.agreementValidation | bool | `true` | |
| controlplane.debug.enabled | bool | `false` | |
| controlplane.debug.port | int | `1044` | |
| controlplane.debug.suspendOnStart | bool | `false` | |
-| controlplane.endpoints | object | `{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"metrics":{"path":"/metrics","port":9090},"observability":{"insecure":true,"path":"/observability","port":8085},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane |
+| controlplane.endpoints | object | `{"control":{"path":"/control","port":8083},"default":{"path":"/api","port":8080},"management":{"authKey":"","path":"/management","port":8081},"metrics":{"path":"/metrics","port":9090},"observability":{"insecure":true,"path":"/observability","port":8085},"protocol":{"path":"/api/v1/ids","port":8084}}` | endpoints of the control plane |
| controlplane.endpoints.control | object | `{"path":"/control","port":8083}` | control api, used for internal control calls. can be added to the internal ingress, but should probably not |
| controlplane.endpoints.control.path | string | `"/control"` | path for incoming api calls |
| controlplane.endpoints.control.port | int | `8083` | port for incoming api calls |
-| controlplane.endpoints.data | object | `{"authKey":"","path":"/data","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing |
-| controlplane.endpoints.data.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header |
-| controlplane.endpoints.data.path | string | `"/data"` | path for incoming api calls |
-| controlplane.endpoints.data.port | int | `8081` | port for incoming api calls |
| controlplane.endpoints.default | object | `{"path":"/api","port":8080}` | default api for health checks, should not be added to any ingress |
| controlplane.endpoints.default.path | string | `"/api"` | path for incoming api calls |
| controlplane.endpoints.default.port | int | `8080` | port for incoming api calls |
-| controlplane.endpoints.ids | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing |
-| controlplane.endpoints.ids.path | string | `"/api/v1/ids"` | path for incoming api calls |
-| controlplane.endpoints.ids.port | int | `8084` | port for incoming api calls |
+| controlplane.endpoints.management | object | `{"authKey":"","path":"/management","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing |
+| controlplane.endpoints.management.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header |
+| controlplane.endpoints.management.path | string | `"/management"` | path for incoming api calls |
+| controlplane.endpoints.management.port | int | `8081` | port for incoming api calls |
| controlplane.endpoints.metrics | object | `{"path":"/metrics","port":9090}` | metrics api, used for application metrics, must not be internet facing |
| controlplane.endpoints.metrics.path | string | `"/metrics"` | path for incoming api calls |
| controlplane.endpoints.metrics.port | int | `9090` | port for incoming api calls |
@@ -52,9 +50,9 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.2
| controlplane.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. access without authentication |
| controlplane.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints |
| controlplane.endpoints.observability.port | int | `8085` | port for incoming API calls |
-| controlplane.endpoints.validation | object | `{"path":"/validation","port":8082}` | validation api, only used by the data plane and should not be added to any ingress |
-| controlplane.endpoints.validation.path | string | `"/validation"` | path for incoming api calls |
-| controlplane.endpoints.validation.port | int | `8082` | port for incoming api calls |
+| controlplane.endpoints.protocol | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing |
+| controlplane.endpoints.protocol.path | string | `"/api/v1/ids"` | path for incoming api calls |
+| controlplane.endpoints.protocol.port | int | `8084` | port for incoming api calls |
| controlplane.env | object | `{}` | |
| controlplane.envConfigMapNames | list | `[]` | |
| controlplane.envSecretNames | list | `[]` | |
@@ -77,7 +75,7 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.2
| controlplane.ingresses[1].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer |
| controlplane.ingresses[1].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use |
| controlplane.ingresses[1].enabled | bool | `false` | |
-| controlplane.ingresses[1].endpoints | list | `["data","control"]` | EDC endpoints exposed by this ingress resource |
+| controlplane.ingresses[1].endpoints | list | `["management","control"]` | EDC endpoints exposed by this ingress resource |
| controlplane.ingresses[1].hostname | string | `"edc-control.intranet"` | The hostname to be used to precisely map incoming traffic onto the underlying network service |
| controlplane.ingresses[1].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource |
| controlplane.ingresses[1].tls.enabled | bool | `false` | Enables TLS on the ingress resource |
@@ -148,10 +146,11 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.2
| dataplane.endpoints.default.port | int | `8080` | |
| dataplane.endpoints.metrics.path | string | `"/metrics"` | |
| dataplane.endpoints.metrics.port | int | `9090` | |
+| dataplane.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. access without authentication |
+| dataplane.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints |
+| dataplane.endpoints.observability.port | int | `8085` | port for incoming API calls |
| dataplane.endpoints.public.path | string | `"/api/public"` | |
| dataplane.endpoints.public.port | int | `8081` | |
-| dataplane.endpoints.validation.path | string | `"/validation"` | |
-| dataplane.endpoints.validation.port | int | `8082` | |
| dataplane.env | object | `{}` | |
| dataplane.envConfigMapNames | list | `[]` | |
| dataplane.envSecretNames | list | `[]` | |
diff --git a/gradle.properties b/gradle.properties
index 66b31427b..73befaa64 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,5 +1,5 @@
groupId=org.eclipse.tractusx.edc
-version=0.3.3-SNAPSHOT
+version=0.3.3
javaVersion=11
# configure the build:
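The endpoint renames documented in the README diff above (the former `data` endpoint becomes `management`, `ids` becomes `protocol`, and the dataplane gains an `observability` endpoint) can be expressed as a small values override. The following is a minimal sketch, not a chart default: the key names come from the table above, while the `authKey` value is an illustrative placeholder.

```yaml
# Hypothetical values.yaml excerpt for tractusx-connector 0.3.3.
# Key names follow the README table above; the authKey value is a placeholder.
controlplane:
  endpoints:
    management:                # formerly 'data'
      path: /management
      port: 8081
      authKey: password        # clients must send this value in the 'X-Api-Key' header
    protocol:                  # formerly 'ids'
      path: /api/v1/ids
      port: 8084
dataplane:
  endpoints:
    observability:             # new in this chart version
      path: /observability
      port: 8085
      insecure: true           # exposes /health, /readiness and /liveness without auth
```

Ingress wiring follows the same rename: per the table above, `controlplane.ingresses[1].endpoints` now lists `management` instead of `data`. An override like this would be applied with `helm install my-release tractusx-edc/tractusx-connector --version 0.3.3 -f values.yaml`.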