
Commit

Merge branch 'master' into pause-failed-e2e-test
pablochacin authored Jul 9, 2020
2 parents a4c7b2d + f1d8360 commit b592317
Showing 20 changed files with 234 additions and 115 deletions.
10 changes: 9 additions & 1 deletion ci/infra/aws/aws.tf
@@ -19,11 +19,19 @@ locals {
}

provider "aws" {
profile = "default"
profile = "default"
}

resource "aws_key_pair" "kube" {
key_name = "${var.stack_name}-keypair"
public_key = element(var.authorized_keys, 0)

tags = merge(
local.basic_tags,
{
"Name" = "${var.stack_name}-keypair"
"Class" = "KeyPair"
},
)
}
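
The tag additions in this file (and in load-balancer.tf below) follow one pattern: resource-specific Name and Class tags are merged on top of the shared local.basic_tags map. A minimal sketch of how that resolves, assuming basic_tags is the stack-wide tag map defined in this file's locals block (its real keys are not shown in this hunk):

    # Illustrative only: the actual basic_tags may carry different keys.
    locals {
      basic_tags = {
        "Environment" = var.stack_name
      }
    }

Terraform's merge() lets the right-most map win on duplicate keys, so the key pair above ends up with the shared tags plus its own Name and Class.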

28 changes: 14 additions & 14 deletions ci/infra/aws/iam_policies.tf
@@ -7,16 +7,16 @@ locals {
}

resource "aws_iam_instance_profile" "master" {
name = local.aws_iam_instance_profile_master_terraform
role = aws_iam_role.master[count.index].name
name = local.aws_iam_instance_profile_master_terraform
role = aws_iam_role.master[count.index].name
count = length(var.iam_profile_master) == 0 ? 1 : 0
}

resource "aws_iam_role" "master" {
name = local.aws_iam_instance_profile_master_terraform
name = local.aws_iam_instance_profile_master_terraform
description = "IAM role needed by CPI on master nodes"
path = "/"
count = length(var.iam_profile_master) == 0 ? 1 : 0
path = "/"
count = length(var.iam_profile_master) == 0 ? 1 : 0

assume_role_policy = <<EOF
{
@@ -36,8 +36,8 @@ EOF
}

resource "aws_iam_role_policy" "master" {
name = local.aws_iam_instance_profile_master_terraform
role = aws_iam_role.master[count.index].id
name = local.aws_iam_instance_profile_master_terraform
role = aws_iam_role.master[count.index].id
count = length(var.iam_profile_master) == 0 ? 1 : 0

policy = <<EOF
@@ -112,16 +112,16 @@ EOF
}

resource "aws_iam_instance_profile" "worker" {
name = local.aws_iam_instance_profile_worker_terraform
role = aws_iam_role.worker[count.index].name
name = local.aws_iam_instance_profile_worker_terraform
role = aws_iam_role.worker[count.index].name
count = length(var.iam_profile_worker) == 0 ? 1 : 0
}

resource "aws_iam_role" "worker" {
name = local.aws_iam_instance_profile_worker_terraform
name = local.aws_iam_instance_profile_worker_terraform
description = "IAM role needed by CPI on worker nodes"
path = "/"
count = length(var.iam_profile_worker) == 0 ? 1 : 0
path = "/"
count = length(var.iam_profile_worker) == 0 ? 1 : 0

assume_role_policy = <<EOF
{


resource "aws_iam_role_policy" "worker" {
name = local.aws_iam_instance_profile_worker_terraform
role = aws_iam_role.worker[count.index].id
name = local.aws_iam_instance_profile_worker_terraform
role = aws_iam_role.worker[count.index].id
count = length(var.iam_profile_worker) == 0 ? 1 : 0

policy = <<EOF
8 changes: 8 additions & 0 deletions ci/infra/aws/load-balancer.tf
@@ -6,6 +6,14 @@ resource "aws_elb" "kube_api" {
name = "${var.stack_name}-elb"
subnets = [aws_subnet.public.id]

tags = merge(
local.basic_tags,
{
"Name" = "${var.stack_name}-elb"
"Class" = "ElasticLoadBalancer"
},
)

security_groups = [
aws_security_group.elb.id,
aws_security_group.egress.id,
2 changes: 1 addition & 1 deletion ci/infra/aws/network.tf
@@ -11,7 +11,7 @@ resource "aws_vpc" "platform" {
)
}

// list of az which can be access from the current region
# list of az which can be access from the current region
data "aws_availability_zones" "az" {
state = "available"
}
2 changes: 2 additions & 0 deletions ci/infra/testrunner/platforms/terraform.py
@@ -52,6 +52,8 @@ def _provision_platform(self, masters=-1, workers=-1):
if self.conf.terraform.plugin_dir:
logger.info(f"Installing plugins from {self.conf.terraform.plugin_dir}")
init_cmd += f" -plugin-dir={self.conf.terraform.plugin_dir}"
else:
init_cmd += f" -get-plugins=false"
self._run_terraform_command(init_cmd)

self._run_terraform_command("version")
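
The apparent intent is that terraform init no longer fetches providers over the network on its own: plugins are either loaded from the configured directory or automatic plugin installation is disabled. Assuming the command prefix built earlier in this method is a plain init, the two resulting invocations look roughly like this (the plugin path is only an example):

    terraform init -plugin-dir=/var/lib/terraform/plugins   # plugin_dir set in the testrunner config
    terraform init -get-plugins=false                        # no plugin_dir configured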
2 changes: 1 addition & 1 deletion ci/infra/vmware/terraform.tfvars.json.ci.example
@@ -4,7 +4,7 @@
"vsphere_datastore_cluster": null,
"vsphere_network": "VM Network",
"vsphere_resource_pool": "CaaSP_CI",
"template_name": "SLES15-SP2-RC2-up200525-guestinfo",
"template_name": "SLES15-SP2-GMC-up200615-guestinfo",
"firmware": "bios",
"stack_name": "caasp-jenkins-v5",
"masters": 1,
18 changes: 9 additions & 9 deletions ci/jenkins/pipelines/prs/skuba-test.Jenkinsfile
@@ -108,6 +108,7 @@ pipeline {
environment {
SKUBA_BINPATH = '/home/jenkins/go/bin/skuba'
VMWARE_ENV_FILE = credentials('vmware-env')
OPENSTACK_OPENRC = credentials('ecp-openrc')
GITHUB_TOKEN = credentials('github-token')
PLATFORM = "${platform}"
TERRAFORM_STACK_NAME = "${BUILD_NUMBER}-${JOB_NAME.replaceAll("/","-")}".take(70)
@@ -217,18 +218,17 @@
}
post {
always { script {
// collect artifacts only if pr-test stage was executed.
// FIXME: this will break if we add an stage after skuba-test
if (pr_context == 'jenkins/skuba-test'){
archiveArtifacts(artifacts: "ci/infra/${PLATFORM}/terraform.tfstate", allowEmptyArchive: true)
archiveArtifacts(artifacts: "ci/infra/${PLATFORM}/terraform.tfvars.json", allowEmptyArchive: true)
archiveArtifacts(artifacts: 'testrunner.log', allowEmptyArchive: true)
archiveArtifacts(artifacts: 'ci/infra/testrunner/*.xml', allowEmptyArchive: true)
archiveArtifacts(artifacts: "ci/infra/${PLATFORM}/terraform.tfstate", allowEmptyArchive: true)
archiveArtifacts(artifacts: "ci/infra/${PLATFORM}/terraform.tfvars.json", allowEmptyArchive: true)
archiveArtifacts(artifacts: 'testrunner.log', allowEmptyArchive: true)
archiveArtifacts(artifacts: 'ci/infra/testrunner/*.xml', allowEmptyArchive: true)
// only attempt to collect logs if platform was provisioned
if (fileExists("tfout.json")) {
archiveArtifacts(artifacts: 'tfout.json', allowEmptyArchive: true)
sh(script: "make --keep-going -f ci/Makefile gather_logs", label: 'Gather Logs')
archiveArtifacts(artifacts: 'platform_logs/**/*', allowEmptyArchive: true)
junit('ci/infra/testrunner/*.xml')
}
} }
}}
cleanup {
sh(script: "make --keep-going -f ci/Makefile cleanup", label: 'Cleanup')
dir("${WORKSPACE}@tmp") {
12 changes: 8 additions & 4 deletions ci/jenkins/pipelines/skuba-e2e-test.Jenkinsfile
@@ -90,13 +90,17 @@ pipeline {
}

post {
always {
always { script {
archiveArtifacts(artifacts: "ci/infra/${PLATFORM}/terraform.tfstate", allowEmptyArchive: true)
archiveArtifacts(artifacts: "ci/infra/${PLATFORM}/terraform.tfvars.json", allowEmptyArchive: true)
archiveArtifacts(artifacts: 'testrunner.log', allowEmptyArchive: true)
sh(script: "make --keep-going -f ci/Makefile gather_logs", label: 'Gather Logs')
archiveArtifacts(artifacts: 'platform_logs/**/*', allowEmptyArchive: true)
}
// only attempt to collect logs if platform was provisioned
if (fileExists("tfout.json")) {
archiveArtifacts(artifacts: 'tfout.json', allowEmptyArchive: true)
sh(script: "make --keep-going -f ci/Makefile gather_logs", label: 'Gather Logs')
archiveArtifacts(artifacts: 'platform_logs/**/*', allowEmptyArchive: true)
}
}}
failure{ script{
if (env.RETAIN_CLUSTER) {
def retention_period= env.RETENTION_PERIOD?env.RETENTION_PERIOD:24
1 change: 0 additions & 1 deletion ci/packaging/suse/skuba_spec_template
@@ -60,7 +60,6 @@ Summary: Utility to automatically refresh and update a skuba cluster
Group: System/Management
Requires: python3-setuptools
Requires: zypper >= 1.14.15
Requires: kubernetes-client
Requires: lsof
BuildArch: noarch
%{?systemd_requires}
20 changes: 15 additions & 5 deletions docs/man/skuba-node-bootstrap.1.md
@@ -6,6 +6,7 @@ bootstrap - Bootstraps the first master node of the cluster
# SYNOPSIS
**bootstrap**
[**--help**|**-h**] [**--target**|**-t**] [**--user**|**-u**]
[**--bastion] [**--bastion-user**] [**--bastion-port**]
[**--sudo**|**-s**] [**--port**|**-p**] [**--ignore-preflight-errors**]
*bootstrap* *<node-name>* *-t <fqdn>* [-hsp] [-u user] [-p port]

@@ -22,13 +23,22 @@ the first node of a cluster
IP or host name of the node to connect to using SSH

**--user, -u**
User identity used to connect to target (default=root)

**--sudo, -s**
Run remote command via sudo (defaults to ssh connection user identity)
User identity used to connect to target (required)

**--port, -p**
Port to connect to using SSH

**--sudo, -s**
Run remote command via sudo (defaults to ssh connection user identity)

**--ignore-preflight-errors**
A list of checks whose errors will be shown as warnings. Value 'all' ignores errors from all checks.
A list of checks whose errors will be shown as warnings. Value 'all' ignores errors from all checks.

**--bastion**
IP or FQDN of the bastion to connect to the other nodes using SSH

**--bastion-user**
User identity used to connect to the bastion using SSH (defaults to target user)

**--bastion-port**
Port to connect to the bastion using SSH (default 22)
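
Combined with the existing connection flags, bootstrapping the first master through a bastion host would look something like the following; host names, the user and the node name are placeholders, not values taken from this change:

    skuba node bootstrap --user sles --sudo --target 10.0.1.10 \
        --bastion bastion.example.com --bastion-user sles my-master-0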
22 changes: 16 additions & 6 deletions docs/man/skuba-node-join.1.md
@@ -6,6 +6,7 @@ join - join a node to a cluster
# SYNOPSIS
**join**
[**--help**|**-h**] [**--target**|**-t**] [**--user**|**-u**] [**--role**|**-r**]
[**--bastion] [**--bastion-user**] [**--bastion-port**]
[**--sudo**|**-s**] [**--port**|**-p**] [**--ignore-preflight-errors**]
*join* *<node-name>* *-t <fqdn>* [-hsp] [-r master] [-u user] [-p port]

@@ -21,16 +22,25 @@ join - join a node to a cluster
IP or host name of the node to connect to using SSH

**--user, -u**
User identity used to connect to target (default=root)
User identity used to connect to target (required)

**--role, -r**
(required) Role that this node will have in the cluster (master|worker)
**--port, -p**
Port to connect to using SSH

**--sudo, -s**
Run remote command via sudo (defaults to ssh connection user identity)

**--port, -p**
Port to connect to using SSH
**--role, -r**
(required) Role that this node will have in the cluster (master|worker)

**--ignore-preflight-errors**
A list of checks whose errors will be shown as warnings. Value 'all' ignores errors from all checks.
A list of checks whose errors will be shown as warnings. Value 'all' ignores errors from all checks.

**--bastion**
IP or FQDN of the bastion to connect to the other nodes using SSH

**--bastion-user**
User identity used to connect to the bastion using SSH (defaults to target user)

**--bastion-port**
Port to connect to the bastion using SSH (default 22)
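
The join page gains the same bastion options; joining a worker through a bastion would look roughly like this (again with placeholder names):

    skuba node join --role worker --user sles --sudo --target 10.0.1.20 \
        --bastion bastion.example.com my-worker-0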
18 changes: 14 additions & 4 deletions docs/man/skuba-node-upgrade-apply.1.md
@@ -7,6 +7,7 @@ apply - Applies the upgrade plan for the given node
# SYNOPSIS
**apply**
[**--help**|**-h**] [**--port**|**-p**] [**--sudo**|**-s**] [**--target**|**-t**]
[**--bastion] [**--bastion-user**] [**--bastion-port**]
[**--user**|**-u**]
*apply* *-t <fqdn>* [-hs] [-u user] [-p port]

@@ -18,14 +19,23 @@ apply - Applies the upgrade plan for the given node
**--help, -h**
Print usage statement.

**--target, -t**
IP or host name of the node to connect to using SSH

**--user, -u**
User identity used to connect to target (required)

**--port, -p**
Port to connect to using SSH

**--sudo, -s**
Run remote command via sudo (defaults to ssh connection user identity)

**--target, -t**
IP or host name of the node to connect to using SSH
**--bastion**
IP or FQDN of the bastion to connect to the other nodes using SSH

**--user, -u**
User identity used to connect to target (default=root)
**--bastion-user**
User identity used to connect to the bastion using SSH (defaults to target user)

**--bastion-port**
Port to connect to the bastion using SSH (default 22)
24 changes: 12 additions & 12 deletions internal/pkg/skuba/addons/cilium_test.go
@@ -36,13 +36,13 @@ func TestGetCiliumInitImage(t *testing.T) {
}{
{
name: "get cilium init image without revision",
imageTag: "1.5.3",
want: img.ImageRepository + "/cilium-init:1.5.3",
imageTag: "1.7.5",
want: img.ImageRepository + "/cilium-init:1.7.5",
},
{
name: "get cilium init image with revision",
imageTag: "1.5.3-rev2",
want: img.ImageRepository + "/cilium-init:1.5.3-rev2",
imageTag: "1.7.5-rev2",
want: img.ImageRepository + "/cilium-init:1.7.5-rev2",
},
}
for _, tt := range tests {
@@ -63,13 +63,13 @@ func TestGetCiliumOperatorImage(t *testing.T) {
}{
{
name: "get cilium operator image without revision",
imageTag: "1.5.3",
want: img.ImageRepository + "/cilium-operator:1.5.3",
imageTag: "1.7.5",
want: img.ImageRepository + "/cilium-operator:1.7.5",
},
{
name: "get cilium operator image with revision",
imageTag: "1.5.3-rev2",
want: img.ImageRepository + "/cilium-operator:1.5.3-rev2",
imageTag: "1.7.5-rev2",
want: img.ImageRepository + "/cilium-operator:1.7.5-rev2",
},
}
for _, tt := range tests {
@@ -90,13 +90,13 @@ func TestGetCiliumImage(t *testing.T) {
}{
{
name: "get cilium image without revision",
imageTag: "1.5.3",
want: img.ImageRepository + "/cilium:1.5.3",
imageTag: "1.7.5",
want: img.ImageRepository + "/cilium:1.7.5",
},
{
name: "get cilium image with revision",
imageTag: "1.5.3-rev2",
want: img.ImageRepository + "/cilium:1.5.3-rev2",
imageTag: "1.7.5-rev2",
want: img.ImageRepository + "/cilium:1.7.5-rev2",
},
}
for _, tt := range tests {
15 changes: 11 additions & 4 deletions internal/pkg/skuba/deployments/ssh/kubernetes.go
@@ -113,9 +113,16 @@ func kubernetesUpgradeStageOne(t *Target, data interface{}) error {
// 1.17 is the last version included in CaaSP4. It's the tipping
// point where we changed our packaging.
// On 1.17 we can't remove kubernetes-1.17-kubeadm, because it doesn't exist.
// Removing kubeadm keeps kubelet alive.
// The rest needs to be removed on the next stage.
pkgs = append(pkgs, "-patterns-caasp-Node-1.17", "-\"kubernetes-kubeadm<1.18\"", "-caasp-config")
// We are removing kubeadm while keeping kubelet alive to its version 1.17.
// For the initial migration we need to update crio kubeadm
// to 1.18 in stage1, due to conflict resolution: the caasp4
// cri-o-kubeadm-criconfig requires kubernetes-kubeadm which is
// not provided anymore (when we remove kubernetes-kubeadm, and
// because we don't want to have the same provides: on the new
// package to avoid upgrade during zypper migration).
// we need to remove cri-o in stage2 else 1.17 kubelet could
// complain about cri-runtime being absent.
pkgs = append(pkgs, "-patterns-caasp-Node-1.17", "-\"kubernetes-kubeadm<1.18\"", "-caasp-config", "-cri-o-kubeadm-criconfig")
} else {
pkgs = append(pkgs, fmt.Sprintf("-kubernetes-%s-kubeadm", currentV))
}
@@ -144,7 +151,7 @@ func kubernetesUpgradeStageTwo(t *Target, data interface{}) error {
pkgs = append(pkgs, "-\"kubernetes-kubelet<1.18\"")
pkgs = append(pkgs, "-kubernetes-common")
pkgs = append(pkgs, "-\"kubernetes-client<1.18\"")
pkgs = append(pkgs, "-cri-o*")
pkgs = append(pkgs, "-\"cri-o<1.18\"")
} else {
pkgs = append(pkgs, fmt.Sprintf("-kubernetes-%s-*", currentV))
pkgs = append(pkgs, fmt.Sprintf("-cri-o-%s*", currentV))
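
Condensing the comments above, the 1.17 to 1.18 path now splits the removals as follows; this is a restatement of the code in this hunk, not additional behavior:

    // Stage one: kubelet 1.17 keeps running, so the runtime itself stays, but
    // cri-o-kubeadm-criconfig has to go now because it requires the old
    // kubernetes-kubeadm package that is removed here.
    //   -patterns-caasp-Node-1.17  -"kubernetes-kubeadm<1.18"  -caasp-config  -cri-o-kubeadm-criconfig
    // Stage two: kubelet is replaced, so the pre-1.18 runtime and clients can go.
    //   -"kubernetes-kubelet<1.18"  -kubernetes-common  -"kubernetes-client<1.18"  -"cri-o<1.18"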
