From bb6d33fcf28c9ce89d2f7228032da50c59a85cfe Mon Sep 17 00:00:00 2001 From: Michael Weibel Date: Wed, 25 Mar 2026 20:55:02 +0100 Subject: [PATCH] test: add e2e test infrastructure using CAPI test framework MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce end-to-end tests for the cloudscale infrastructure provider using the Cluster API e2e test framework (Ginkgo-based). This sets up the full test infrastructure and implements the following specs: - Lifecycle (single CP): Basic smoke test — creates a 1 CP + 1 worker cluster and validates all cloudscale resources (network, subnet, LB, servers) are provisioned correctly. Runs nightly. - Lifecycle (HA): 3 CP + 2 worker cluster with server groups for anti-affinity. Runs weekly. - Cluster upgrade: In-place K8s version upgrade (v1.34 → v1.35) with rolling CP and worker nodes. Conformance skipped. Runs weekly. - Self-hosted: Tests clusterctl move (pivot) from bootstrap kind cluster into the workload cluster. Runs weekly. - MD remediation: MachineHealthCheck-driven replacement of unhealthy worker machines. Runs weekly. - K8s conformance: Full conformance suite runs biweekly, fast variant (skip Serial) runs weekly. KCP remediation was intentionally left out — it requires VMs to call back to the management cluster API (via wait-signal.sh), which is not possible with a local Kind cluster. Infrastructure additions: - GitHub Actions workflows: manual (test-e2e.yml), nightly, weekly, biweekly schedules with concurrency control - Kustomize-based cluster template generation (base + overlays for HA, upgrades, md-remediation) - Cilium CNI and cloudscale CCM manifest generation scripts - SSH-based log collector for VM diagnostics on failure - cloudscale API resource leak detection (pre/post-test snapshots) Also upgrades k8s dependencies to v0.35.2 and cluster-api to v1.13.0-beta.0 to resolve incompatibilities with the e2e test framework. 
--- .github/actions/e2e-setup/action.yml | 25 + .github/workflows/cleanup-e2e-images.yml | 25 + .github/workflows/e2e-biweekly.yml | 42 + .github/workflows/e2e-nightly.yml | 42 + .github/workflows/e2e-weekly.yml | 62 + .github/workflows/test-e2e.yml | 79 +- .github/workflows/zizmor.yml | 26 + .gitignore | 2 + Makefile | 230 ++- README.md | 41 +- config/manager/kustomization.yaml | 10 +- go.mod | 107 +- go.sum | 241 ++- hack/clean-e2e-images.sh | 61 + hack/generate-e2e-ccm.sh | 81 ++ hack/generate-e2e-cni.sh | 72 + internal/cloudscale/services.go | 2 +- templates/cluster-template.yaml | 1 - test/e2e/cloudscale_helpers.go | 126 ++ test/e2e/config/cloudscale.yaml | 127 ++ .../infrastructure-cloudscale/bases/ccm.yaml | 200 +++ .../bases/cluster.yaml | 95 ++ .../infrastructure-cloudscale/bases/cni.yaml | 1292 +++++++++++++++++ .../infrastructure-cloudscale/bases/md.yaml | 62 + .../bases/mt-cp-upgrade-to.yaml | 11 + .../bases/mt-worker-upgrade-to.yaml | 11 + .../cluster-template-ha/kustomization.yaml | 9 + .../cluster-template-ha/server-groups.yaml | 22 + .../kustomization.yaml | 10 + .../cluster-template-md-remediation/md.yaml | 13 + .../cluster-template-md-remediation/mhc.yaml | 23 + .../kustomization.yaml | 11 + .../machine-image.yaml | 20 + .../cluster-template/kustomization.yaml | 7 + .../main/metadata.yaml | 11 + test/e2e/data/kubetest/conformance-fast.yaml | 8 + test/e2e/data/kubetest/conformance.yaml | 7 + test/e2e/data/shared/v1beta2/metadata.yaml | 7 + test/e2e/e2e_suite_test.go | 224 ++- test/e2e/e2e_test.go | 487 ++----- test/e2e/helpers.go | 80 + test/e2e/log_collector.go | 203 +++ test/utils/utils.go | 226 --- tilt-provider.yaml | 2 +- 44 files changed, 3612 insertions(+), 831 deletions(-) create mode 100644 .github/actions/e2e-setup/action.yml create mode 100644 .github/workflows/cleanup-e2e-images.yml create mode 100644 .github/workflows/e2e-biweekly.yml create mode 100644 .github/workflows/e2e-nightly.yml create mode 100644 
.github/workflows/e2e-weekly.yml create mode 100644 .github/workflows/zizmor.yml create mode 100755 hack/clean-e2e-images.sh create mode 100755 hack/generate-e2e-ccm.sh create mode 100755 hack/generate-e2e-cni.sh create mode 100644 test/e2e/cloudscale_helpers.go create mode 100644 test/e2e/config/cloudscale.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/bases/ccm.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/bases/cluster.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/bases/cni.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/bases/md.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/bases/mt-cp-upgrade-to.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/bases/mt-worker-upgrade-to.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/cluster-template-ha/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/cluster-template-ha/server-groups.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/md.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/mhc.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/cluster-template-upgrades/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/cluster-template-upgrades/machine-image.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/cluster-template/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-cloudscale/main/metadata.yaml create mode 100644 test/e2e/data/kubetest/conformance-fast.yaml create mode 100644 test/e2e/data/kubetest/conformance.yaml create mode 100644 test/e2e/data/shared/v1beta2/metadata.yaml create mode 100644 test/e2e/helpers.go create mode 100644 test/e2e/log_collector.go delete mode 100644 test/utils/utils.go diff --git 
a/.github/actions/e2e-setup/action.yml b/.github/actions/e2e-setup/action.yml new file mode 100644 index 0000000..a2a2b25 --- /dev/null +++ b/.github/actions/e2e-setup/action.yml @@ -0,0 +1,25 @@ +name: Setup E2E environment +description: Sets up Go, tool cache, and SSH for e2e tests + +runs: + using: composite + steps: + - name: Setup Go + uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5.6.0 + with: + go-version-file: go.mod + + - name: Cache tool binaries + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + with: + path: bin/ + key: e2e-tools-${{ runner.os }}-${{ hashFiles('Makefile') }} + + - name: Setup SSH for log collection + shell: bash + run: | # zizmor: ignore[github-env] + ssh-keygen -t ed25519 -f /tmp/e2e-ssh-key -N "" -q + eval $(ssh-agent -s) + ssh-add /tmp/e2e-ssh-key + echo "SSH_AUTH_SOCK=${SSH_AUTH_SOCK}" >> "$GITHUB_ENV" + echo "CLOUDSCALE_SSH_PUBLIC_KEY=$(cat /tmp/e2e-ssh-key.pub)" >> "$GITHUB_ENV" diff --git a/.github/workflows/cleanup-e2e-images.yml b/.github/workflows/cleanup-e2e-images.yml new file mode 100644 index 0000000..2e8bd78 --- /dev/null +++ b/.github/workflows/cleanup-e2e-images.yml @@ -0,0 +1,25 @@ +name: Cleanup E2E Images + +permissions: + contents: read + +on: + schedule: + - cron: "0 4 * * 0" # Weekly, Sunday 4 AM UTC + workflow_dispatch: + +jobs: + cleanup: + name: Delete old e2e image tags + runs-on: ubuntu-latest + environment: e2e + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + persist-credentials: false + + - name: Clean up e2e-* tags from capcs-staging + env: + QUAY_E2E_TOKEN: ${{ secrets.QUAY_E2E_TOKEN }} + run: make clean-e2e-images diff --git a/.github/workflows/e2e-biweekly.yml b/.github/workflows/e2e-biweekly.yml new file mode 100644 index 0000000..efae073 --- /dev/null +++ b/.github/workflows/e2e-biweekly.yml @@ -0,0 +1,42 @@ +name: E2E Tests (Biweekly Conformance) + +permissions: + contents: read + +on: + 
schedule: + - cron: "0 3 1,15 * *" # 3 AM UTC on 1st and 15th of each month + workflow_dispatch: + +concurrency: + group: e2e-tests + cancel-in-progress: false + +jobs: + e2e-conformance: + name: Full K8s Conformance + runs-on: ubuntu-latest + environment: e2e + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + fetch-depth: 0 + persist-credentials: false + + - name: Setup e2e environment + uses: ./.github/actions/e2e-setup + + - name: Run full conformance e2e tests + env: + CLOUDSCALE_API_TOKEN: ${{ secrets.CLOUDSCALE_API_TOKEN }} + TAG: e2e-conformance-${{ github.sha }} + run: make test-e2e-conformance + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: e2e-artifacts-conformance-${{ github.run_id }} + path: _artifacts/ + retention-days: 30 diff --git a/.github/workflows/e2e-nightly.yml b/.github/workflows/e2e-nightly.yml new file mode 100644 index 0000000..114546f --- /dev/null +++ b/.github/workflows/e2e-nightly.yml @@ -0,0 +1,42 @@ +name: E2E Tests (Nightly) + +permissions: + contents: read + +on: + schedule: + - cron: "0 2 * * *" # 2 AM UTC daily + workflow_dispatch: + +concurrency: + group: e2e-tests + cancel-in-progress: false + +jobs: + e2e-lifecycle: + name: Nightly Lifecycle Tests + runs-on: ubuntu-latest + environment: e2e + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + fetch-depth: 0 + persist-credentials: false + + - name: Setup e2e environment + uses: ./.github/actions/e2e-setup + + - name: Run lifecycle e2e tests + env: + CLOUDSCALE_API_TOKEN: ${{ secrets.CLOUDSCALE_API_TOKEN }} + TAG: e2e-nightly-${{ github.sha }} + run: make test-e2e-lifecycle + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: e2e-artifacts-nightly-${{ github.run_id }} + 
path: _artifacts/ + retention-days: 7 diff --git a/.github/workflows/e2e-weekly.yml b/.github/workflows/e2e-weekly.yml new file mode 100644 index 0000000..4c9e20e --- /dev/null +++ b/.github/workflows/e2e-weekly.yml @@ -0,0 +1,62 @@ +name: E2E Tests (Weekly) + +permissions: + contents: read + +on: + schedule: + - cron: "0 3 * * 0" # 3 AM UTC Sunday + workflow_dispatch: + +concurrency: + group: e2e-tests + cancel-in-progress: false + +jobs: + e2e-weekly: + name: Weekly Test Suite + runs-on: ubuntu-latest + environment: e2e + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + fetch-depth: 0 + persist-credentials: false + + - name: Setup e2e environment + uses: ./.github/actions/e2e-setup + + - name: Login to quay.io + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 + with: + registry: quay.io + username: ${{ secrets.QUAY_E2E_USERNAME }} + password: ${{ secrets.QUAY_E2E_PASSWORD }} + + - name: Run weekly e2e tests + env: + CLOUDSCALE_API_TOKEN: ${{ secrets.CLOUDSCALE_API_TOKEN }} + TAG: e2e-weekly-${{ github.sha }} + run: | + make test-e2e \ + GINKGO_LABEL_FILTER="ha || upgrade || self-hosted || kcp-remediation || conformance" \ + KUBETEST_CONFIGURATION=./data/kubetest/conformance-fast.yaml + + - name: Clean up e2e image + if: always() + env: + QUAY_E2E_TOKEN: ${{ secrets.QUAY_E2E_TOKEN }} + TAG: e2e-weekly-${{ github.sha }} + run: | + curl -s -X DELETE \ + -H "Authorization: Bearer ${QUAY_E2E_TOKEN}" \ + "https://quay.io/api/v1/repository/cloudscalech/capcs-staging/tag/${TAG}" + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: e2e-artifacts-weekly-${{ github.run_id }} + path: _artifacts/ + retention-days: 30 diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index f68f7ca..dcad644 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml 
@@ -1,45 +1,78 @@ -name: E2E Tests +name: E2E Tests (Manual) permissions: contents: read on: workflow_dispatch: - - # TODO: Re-enable automatic triggers once e2e tests are working - # push: - # branches: [main] + inputs: + test_target: + description: 'Make target to run' + required: true + default: 'test-e2e-lifecycle' + type: choice + options: + - test-e2e-lifecycle + - test-e2e + - test-e2e-ha + - test-e2e-upgrade + - test-e2e-self-hosted + - test-e2e-md-remediation + - test-e2e-conformance + - test-e2e-conformance-fast concurrency: - group: e2e-tests-${{ github.ref }} - cancel-in-progress: true + group: e2e-tests + cancel-in-progress: false jobs: test-e2e: - name: Run on Ubuntu + name: ${{ github.event.inputs.test_target }} runs-on: ubuntu-latest + environment: e2e steps: - - name: Clone the code + - name: Checkout uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: fetch-depth: 0 persist-credentials: false - - name: Setup Go - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5.6.0 - with: - go-version-file: go.mod + - name: Setup e2e environment + uses: ./.github/actions/e2e-setup - - name: Install the latest version of kind - run: | - curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH) - chmod +x ./kind - sudo mv ./kind /usr/local/bin/kind + - name: Login to quay.io + if: >- + github.event.inputs.test_target == 'test-e2e-self-hosted' || + github.event.inputs.test_target == 'test-e2e' + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 + with: + registry: quay.io + username: ${{ secrets.QUAY_E2E_USERNAME }} + password: ${{ secrets.QUAY_E2E_PASSWORD }} - - name: Verify kind installation - run: kind version + - name: Run e2e tests + env: + CLOUDSCALE_API_TOKEN: ${{ secrets.CLOUDSCALE_API_TOKEN }} + TAG: e2e-manual-${{ github.sha }} + TEST_TARGET: ${{ github.event.inputs.test_target }} + run: make $TEST_TARGET - - name: Running Test e2e + - name: Clean up e2e image + 
if: >- + github.event.inputs.test_target == 'test-e2e-self-hosted' || + github.event.inputs.test_target == 'test-e2e' + env: + QUAY_E2E_TOKEN: ${{ secrets.QUAY_E2E_TOKEN }} + TAG: e2e-manual-${{ github.sha }} run: | - go mod tidy - make test-e2e + curl -s -X DELETE \ + -H "Authorization: Bearer ${QUAY_E2E_TOKEN}" \ + "https://quay.io/api/v1/repository/cloudscalech/capcs-staging/tag/${TAG}" + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: e2e-artifacts-manual-${{ github.run_id }} + path: _artifacts/ + retention-days: 14 diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml new file mode 100644 index 0000000..d8895ac --- /dev/null +++ b/.github/workflows/zizmor.yml @@ -0,0 +1,26 @@ +name: Workflow Security Lint + +permissions: + actions: read + contents: read + security-events: write + +on: + push: + branches: [main] + paths: ['.github/**'] + pull_request: + paths: ['.github/**'] + +jobs: + zizmor: + name: zizmor + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + persist-credentials: false + + - name: Run zizmor + uses: zizmorcore/zizmor-action@71321a20a9ded102f6e9ce5718a2fcec2c4f70d8 # v0.5.2 diff --git a/.gitignore b/.gitignore index 47fd391..ee746f3 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,5 @@ go.work # e2e _artifacts/ +test/e2e/config/*.generated.yaml +test/e2e/data/infrastructure-cloudscale/main/cluster-template*.yaml diff --git a/Makefile b/Makefile index 6e8dea3..f567997 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,9 @@ # Image URL to use all building/pushing image targets -IMG ?= controller:latest +TAG ?= dev +IMG ?= quay.io/cloudscalech/capcs-staging:$(TAG) + +# E2E image configuration +E2E_TAG ?= e2e-$(shell git rev-parse --short HEAD) # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env 
GOBIN)) @@ -61,35 +65,6 @@ vet: ## Run go vet against code. test: manifests generate fmt vet setup-envtest ## Run tests. KUBEBUILDER_ASSETS="$(shell "$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out -# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'. -# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. -# CertManager is installed by default; skip with: -# - CERT_MANAGER_INSTALL_SKIP=true -KIND_CLUSTER ?= cluster-api-provider-cloudscale-test-e2e - -.PHONY: setup-test-e2e -setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist - @command -v $(KIND) >/dev/null 2>&1 || { \ - echo "Kind is not installed. Please install Kind manually."; \ - exit 1; \ - } - @case "$$($(KIND) get clusters)" in \ - *"$(KIND_CLUSTER)"*) \ - echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ - *) \ - echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ - $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ - esac - -.PHONY: test-e2e -test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. 
- KIND=$(KIND) KIND_CLUSTER=$(KIND_CLUSTER) go test -tags=e2e ./test/e2e/ -v -ginkgo.v - $(MAKE) cleanup-test-e2e - -.PHONY: cleanup-test-e2e -cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests - @$(KIND) delete cluster --name $(KIND_CLUSTER) - .PHONY: lint lint: golangci-lint ## Run golangci-lint linter "$(GOLANGCI_LINT)" run @@ -102,6 +77,178 @@ lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes lint-config: golangci-lint ## Verify golangci-lint linter configuration "$(GOLANGCI_LINT)" config verify +##@ Dependencies + +## Location to install dependencies to +LOCALBIN := $(shell pwd)/bin +$(LOCALBIN): + mkdir -p "$(LOCALBIN)" + +## Tool Binaries +KUBECTL ?= kubectl +KIND ?= kind +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint +GINKGO ?= $(LOCALBIN)/ginkgo + +##@ E2E Testing + +E2E_CONF_FILE_SOURCE ?= $(shell pwd)/test/e2e/config/cloudscale.yaml +E2E_CONF_FILE ?= $(shell pwd)/test/e2e/config/cloudscale.generated.yaml +E2E_ARTIFACTS_FOLDER ?= $(shell pwd)/_artifacts +E2E_TEMPLATES := test/e2e/data/infrastructure-cloudscale +GINKGO_TIMEOUT ?= 2h +GINKGO_NODES ?= 1 +SKIP_RESOURCE_CLEANUP ?= false +USE_EXISTING_CLUSTER ?= false +KUBETEST_CONFIGURATION ?= ./data/kubetest/conformance.yaml +GINKGO_LABEL_FILTER ?= + +# Cilium CNI configuration +CILIUM_VERSION ?= 1.19.2 + +# CCM configuration +CCM_VERSION ?= 1.3.0 + +.PHONY: ginkgo +ginkgo: $(GINKGO) ## Download ginkgo locally if necessary. 
+$(GINKGO): $(LOCALBIN) + $(call go-install-tool,$(GINKGO),github.com/onsi/ginkgo/v2/ginkgo,$(shell go list -m -f '{{.Version}}' github.com/onsi/ginkgo/v2)) + +.PHONY: generate-e2e-cni +generate-e2e-cni: ## Regenerate Cilium CNI manifest from Helm chart + @CILIUM_VERSION=$(CILIUM_VERSION) hack/generate-e2e-cni.sh + +.PHONY: generate-e2e-ccm +generate-e2e-ccm: ## Regenerate cloudscale CCM manifest + @CCM_VERSION=$(CCM_VERSION) hack/generate-e2e-ccm.sh + +.PHONY: generate-e2e-templates +generate-e2e-templates: $(KUSTOMIZE) generate-e2e-cni generate-e2e-ccm ## Generate e2e cluster templates using kustomize overlays + @mkdir -p $(E2E_TEMPLATES)/main + @echo "Generating cluster-template.yaml..." + @"$(KUSTOMIZE)" build --load-restrictor LoadRestrictionsNone $(E2E_TEMPLATES)/cluster-template > $(E2E_TEMPLATES)/main/cluster-template.yaml + @echo "Generating cluster-template-ha.yaml..." + @"$(KUSTOMIZE)" build --load-restrictor LoadRestrictionsNone $(E2E_TEMPLATES)/cluster-template-ha > $(E2E_TEMPLATES)/main/cluster-template-ha.yaml + @echo "Generating cluster-template-upgrades.yaml..." + @"$(KUSTOMIZE)" build --load-restrictor LoadRestrictionsNone $(E2E_TEMPLATES)/cluster-template-upgrades > $(E2E_TEMPLATES)/main/cluster-template-upgrades.yaml + @echo "Generating cluster-template-md-remediation.yaml..." + @"$(KUSTOMIZE)" build --load-restrictor LoadRestrictionsNone $(E2E_TEMPLATES)/cluster-template-md-remediation > $(E2E_TEMPLATES)/main/cluster-template-md-remediation.yaml + @echo "Templates generated successfully." 
+ +.PHONY: generate-e2e-config +generate-e2e-config: ## Generate e2e config from template by resolving environment variables + TAG=$(TAG) IMG=$(IMG) KUBETEST_CONFIGURATION=$(KUBETEST_CONFIGURATION) envsubst < $(E2E_CONF_FILE_SOURCE) > $(E2E_CONF_FILE) + +.PHONY: test-e2e +test-e2e: TAG = $(E2E_TAG) +test-e2e: $(GINKGO) generate-e2e-templates generate-e2e-config docker-build docker-push ## Run all e2e tests + $(GINKGO) -v --trace --tags=e2e \ + --nodes=$(GINKGO_NODES) \ + --timeout=$(GINKGO_TIMEOUT) \ + $(if $(GINKGO_LABEL_FILTER),--label-filter="$(GINKGO_LABEL_FILTER)") \ + --output-dir="$(E2E_ARTIFACTS_FOLDER)" --junit-report="junit.e2e_suite.xml" \ + ./test/e2e -- \ + -e2e.config=$(E2E_CONF_FILE) \ + -e2e.artifacts-folder=$(E2E_ARTIFACTS_FOLDER) \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + +.PHONY: test-e2e-lifecycle +test-e2e-lifecycle: $(GINKGO) generate-e2e-templates generate-e2e-config docker-build ## Run lifecycle e2e tests only (single control-plane) + $(GINKGO) -v --trace --tags=e2e \ + --nodes=$(GINKGO_NODES) \ + --label-filter="lifecycle && !ha" \ + --timeout=60m \ + --output-dir="$(E2E_ARTIFACTS_FOLDER)" --junit-report="junit.e2e_lifecycle.xml" \ + ./test/e2e -- \ + -e2e.config=$(E2E_CONF_FILE) \ + -e2e.artifacts-folder=$(E2E_ARTIFACTS_FOLDER) \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + +.PHONY: test-e2e-ha +test-e2e-ha: $(GINKGO) generate-e2e-templates generate-e2e-config docker-build ## Run HA e2e tests only (3 control-plane nodes) + $(GINKGO) -v --trace --tags=e2e \ + --nodes=$(GINKGO_NODES) \ + --label-filter="ha" \ + --timeout=90m \ + --output-dir="$(E2E_ARTIFACTS_FOLDER)" --junit-report="junit.e2e_ha.xml" \ + ./test/e2e -- \ + -e2e.config=$(E2E_CONF_FILE) \ + -e2e.artifacts-folder=$(E2E_ARTIFACTS_FOLDER) \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + 
+.PHONY: test-e2e-upgrade +test-e2e-upgrade: $(GINKGO) generate-e2e-templates generate-e2e-config docker-build ## Run cluster upgrade e2e tests + $(GINKGO) -v --trace --tags=e2e \ + --nodes=$(GINKGO_NODES) \ + --label-filter="upgrade" \ + --timeout=90m \ + --output-dir="$(E2E_ARTIFACTS_FOLDER)" --junit-report="junit.e2e_upgrade.xml" \ + ./test/e2e -- \ + -e2e.config=$(E2E_CONF_FILE) \ + -e2e.artifacts-folder=$(E2E_ARTIFACTS_FOLDER) \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + +.PHONY: test-e2e-self-hosted +test-e2e-self-hosted: TAG = $(E2E_TAG) +test-e2e-self-hosted: $(GINKGO) generate-e2e-templates generate-e2e-config docker-build docker-push ## Run self-hosted e2e tests + $(GINKGO) -v --trace --tags=e2e \ + --nodes=$(GINKGO_NODES) \ + --label-filter="self-hosted" \ + --timeout=90m \ + --output-dir="$(E2E_ARTIFACTS_FOLDER)" --junit-report="junit.e2e_self_hosted.xml" \ + ./test/e2e -- \ + -e2e.config=$(E2E_CONF_FILE) \ + -e2e.artifacts-folder=$(E2E_ARTIFACTS_FOLDER) \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + +.PHONY: test-e2e-md-remediation +test-e2e-md-remediation: $(GINKGO) generate-e2e-templates generate-e2e-config docker-build ## Run MD remediation e2e tests + $(GINKGO) -v --trace --tags=e2e \ + --nodes=$(GINKGO_NODES) \ + --label-filter="md-remediation" \ + --timeout=90m \ + --output-dir="$(E2E_ARTIFACTS_FOLDER)" --junit-report="junit.e2e_md_remediation.xml" \ + ./test/e2e -- \ + -e2e.config=$(E2E_CONF_FILE) \ + -e2e.artifacts-folder=$(E2E_ARTIFACTS_FOLDER) \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + +.PHONY: test-e2e-conformance +test-e2e-conformance: $(GINKGO) generate-e2e-templates generate-e2e-config docker-build ## Run K8s conformance e2e tests + $(GINKGO) -v --trace --tags=e2e \ + --nodes=$(GINKGO_NODES) \ + --label-filter="conformance" \ + 
--timeout=150m \ + --output-dir="$(E2E_ARTIFACTS_FOLDER)" --junit-report="junit.e2e_conformance.xml" \ + ./test/e2e -- \ + -e2e.config=$(E2E_CONF_FILE) \ + -e2e.artifacts-folder=$(E2E_ARTIFACTS_FOLDER) \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + +.PHONY: test-e2e-conformance-fast +test-e2e-conformance-fast: KUBETEST_CONFIGURATION = ./data/kubetest/conformance-fast.yaml +test-e2e-conformance-fast: $(GINKGO) generate-e2e-templates generate-e2e-config docker-build ## Run K8s conformance e2e tests (fast, skip Serial) + $(GINKGO) -v --trace --tags=e2e \ + --nodes=$(GINKGO_NODES) \ + --label-filter="conformance" \ + --timeout=90m \ + --output-dir="$(E2E_ARTIFACTS_FOLDER)" --junit-report="junit.e2e_conformance_fast.xml" \ + ./test/e2e -- \ + -e2e.config=$(E2E_CONF_FILE) \ + -e2e.artifacts-folder=$(E2E_ARTIFACTS_FOLDER) \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + ##@ Build .PHONY: build @@ -117,19 +264,23 @@ run: manifests generate fmt vet ## Run a controller from your host. # More info: https://docs.docker.com/develop/develop-images/build_enhancements/ .PHONY: docker-build docker-build: ## Build docker image with the manager. - $(CONTAINER_TOOL) build -t ${IMG} . + $(CONTAINER_TOOL) build --platform linux/amd64 -t ${IMG} . .PHONY: docker-push docker-push: ## Push docker image with the manager. $(CONTAINER_TOOL) push ${IMG} +.PHONY: clean-e2e-images +clean-e2e-images: ## Delete e2e-* tags older than 7 days from capcs-staging (requires QUAY_E2E_TOKEN) + @./hack/clean-e2e-images.sh + # PLATFORMS defines the target platforms for the manager image be built to provide support to multiple # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: # - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ # - have enabled BuildKit. 
More info: https://docs.docker.com/develop/develop-images/build_enhancements/ # - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) # To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. -PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +PLATFORMS ?= linux/amd64 .PHONY: docker-buildx docker-buildx: ## Build and push docker image for the manager for cross-platform support # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile @@ -171,21 +322,6 @@ deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. "$(KUSTOMIZE)" build config/default | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f - -##@ Dependencies - -## Location to install dependencies to -LOCALBIN ?= $(shell pwd)/bin -$(LOCALBIN): - mkdir -p "$(LOCALBIN)" - -## Tool Binaries -KUBECTL ?= kubectl -KIND ?= kind -KUSTOMIZE ?= $(LOCALBIN)/kustomize -CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen -ENVTEST ?= $(LOCALBIN)/setup-envtest -GOLANGCI_LINT = $(LOCALBIN)/golangci-lint - ## Tool Versions KUSTOMIZE_VERSION ?= v5.8.1 CONTROLLER_TOOLS_VERSION ?= v0.20.1 diff --git a/README.md b/README.md index c35573c..5202f1d 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # Cluster API Provider for cloudscale.ch -Kubernetes [Cluster API](https://cluster-api.sigs.k8s.io/) infrastructure provider for [cloudscale.ch](https://www.cloudscale.ch). +Kubernetes [Cluster API](https://cluster-api.sigs.k8s.io/) infrastructure provider +for [cloudscale.ch](https://www.cloudscale.ch). 
**Status**: early development @@ -37,13 +38,47 @@ make generate make test-e2e ``` +### E2E Tests + +E2E tests are built on the [CAPI e2e test framework](https://pkg.go.dev/sigs.k8s.io/cluster-api/test/e2e) +(Ginkgo-based) and provision real clusters on cloudscale.ch. Tests use Ginkgo labels for +filtering and are split into suites of increasing cost, scheduled accordingly: + +| Suite | Label | Description | ~Duration | Schedule | Make target | +|--------------------|------------------|------------------------------------------------------------------------------------------|-----------|----------|-----------------------------| +| Lifecycle | `lifecycle` | 1 CP + 1 worker: create, validate cloudscale resources, delete | < 5 min | Nightly | `test-e2e-lifecycle` | +| HA lifecycle | `ha` | 3 CP + 2 workers with anti-affinity server groups | < 10 min | Weekly | `test-e2e-ha` | +| Cluster upgrade | `upgrade` | Rolling K8s version upgrade (v1.34 → v1.35) | < 10 min | Weekly | `test-e2e-upgrade` | +| Self-hosted | `self-hosted` | clusterctl move (pivot) to workload cluster. Requires container image in public registry | < 15 min | Weekly | `test-e2e-self-hosted` | +| MD remediation | `md-remediation` | MachineHealthCheck auto-replacement of unhealthy workers | < 10 min | Weekly | `test-e2e-md-remediation` | +| Conformance (fast) | `conformance` | K8s conformance, skip Serial tests | < 60 min | Weekly | `test-e2e-conformance-fast` | +| Conformance (full) | `conformance` | Full K8s conformance including Serial tests | < 120 min | Biweekly | `test-e2e-conformance` | + +Durations are approximate from a real CI run; conformance varies with cluster size. + +**Why this split?** The single-CP lifecycle test is the cheapest smoke test and runs +nightly to catch regressions early. HA, upgrade, self-hosted, and remediation tests are more +resource-intensive and run weekly. Full K8s conformance is the most expensive and runs biweekly +(1st + 15th of month). 
All suites can be triggered manually via the `test-e2e.yml` workflow +dispatch. E2E tests share a concurrency group so only one suite runs at a time. + +Any run involving the self-hosted spec requires the container image to be published to our registry. The self-hosted +spec moves the management cluster to the first workload cluster. That workload cluster doesn't have access to the +locally +built images and therefore needs a published container image. + +For PRs, no e2e test is automatically run. It is advised to run them locally before submitting, as well as for a +reviewer +to run them locally and/or manually triggering the workflow **after** reviewing the code is safe. + ### Tilt -The easiest way to work on this provider is by using the +The easiest way to work on this provider is by using the [Tilt setup](https://cluster-api.sigs.k8s.io/developer/core/tilt.html) of Cluster-API. Refer to the linked documentation on how to set up your local tilt. This requires cloning -[Cluster-API core](https://github.com/kubernetes-sigs/cluster-api) to your host. The necessary commands need to be executed in the +[Cluster-API core](https://github.com/kubernetes-sigs/cluster-api) to your host. The necessary commands need to be +executed in the Cluster-API core repository (**not** in this repository). 
An example `tilt-settings.yaml`, which should also be placed in the Cluster-API core repository, is provided here: diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 28c75b7..415692f 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,5 +1,9 @@ -resources: - - manager.yaml - - credentials.yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization +resources: +- manager.yaml +- credentials.yaml +images: +- name: controller + newName: quay.io/cloudscalech/capcs-staging + newTag: dev diff --git a/go.mod b/go.mod index 01498ab..96c1b7c 100644 --- a/go.mod +++ b/go.mod @@ -5,30 +5,49 @@ go 1.25.3 require ( github.com/cloudscale-ch/cloudscale-go-sdk/v8 v8.0.0 github.com/go-logr/logr v1.4.3 - github.com/onsi/ginkgo/v2 v2.27.2 - github.com/onsi/gomega v1.38.2 + github.com/onsi/ginkgo/v2 v2.28.1 + github.com/onsi/gomega v1.39.1 + golang.org/x/crypto v0.49.0 golang.org/x/oauth2 v0.36.0 - golang.org/x/sync v0.19.0 - k8s.io/api v0.35.0 - k8s.io/apimachinery v0.35.0 - k8s.io/client-go v0.35.0 + golang.org/x/sync v0.20.0 + k8s.io/api v0.35.2 + k8s.io/apiextensions-apiserver v0.35.2 + k8s.io/apimachinery v0.35.2 + k8s.io/client-go v0.35.2 + k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 - sigs.k8s.io/cluster-api v1.12.3 - sigs.k8s.io/controller-runtime v0.23.1 + sigs.k8s.io/cluster-api v1.13.0-beta.0 + sigs.k8s.io/cluster-api/test v1.13.0-beta.0 + sigs.k8s.io/controller-runtime v0.23.3 ) require ( + al.essio.dev/pkg/shellescape v1.5.1 // indirect cel.dev/expr v0.25.1 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect + github.com/Microsoft/go-winio v0.5.0 // indirect + github.com/adrg/xdg v0.5.3 // indirect 
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v28.5.2+incompatible // indirect + github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect @@ -38,68 +57,96 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.26.0 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect + github.com/google/go-github/v82 v82.0.0 // indirect + github.com/google/go-querystring v1.2.0 // indirect + github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect + github.com/huandu/xstrings v1.5.0 // 
indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect + github.com/olekukonko/errors v1.1.0 // indirect + github.com/olekukonko/ll v0.1.1 // indirect + github.com/olekukonko/tablewriter v1.0.9 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect - github.com/spf13/cobra v1.10.1 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/pflag v1.0.10 // 
indirect + github.com/spf13/viper v1.21.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/valyala/fastjson v1.6.10 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect go.opentelemetry.io/otel v1.40.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect go.opentelemetry.io/otel/sdk v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect - go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.48.0 // indirect - golang.org/x/sys v0.40.0 // indirect - golang.org/x/term v0.38.0 // indirect - golang.org/x/text v0.32.0 // indirect + golang.org/x/mod v0.33.0 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/sys v0.42.0 // indirect + golang.org/x/term v0.41.0 // indirect + golang.org/x/text v0.35.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.39.0 // indirect + golang.org/x/tools v0.42.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect google.golang.org/grpc v1.79.3 // indirect - google.golang.org/protobuf v1.36.10 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.35.0 // indirect - k8s.io/apiserver v0.35.0 // indirect - k8s.io/component-base v0.35.0 // indirect - k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/apiserver v0.35.2 // indirect + k8s.io/cluster-bootstrap v0.35.2 // indirect + k8s.io/component-base v0.35.2 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/kind v0.31.0 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 188239f..b6aaff9 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,13 @@ +al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= +al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm 
v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -10,24 +16,37 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= +github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.3 
h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudscale-ch/cloudscale-go-sdk/v8 v8.0.0 h1:XP3thdgotNVpPF27568RYHt9kqosVm8eJznJ+X4PJIk= github.com/cloudscale-ch/cloudscale-go-sdk/v8 v8.0.0/go.mod h1:H4qxiHJof+IdwvaV26ZcmNR39EyggnKIcDfLYcYnBCI= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= -github.com/coredns/corefile-migration v1.0.30 h1:ljZNPGgna+4yKv81gfkvkgLEWdtz0NjBR1glaiPI140= -github.com/coredns/corefile-migration v1.0.30/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= +github.com/coredns/corefile-migration v1.0.31 h1:f7WGhY8M2Jn8P2dVO0p7wSQ1QKsMARl6WEyUjCb/V38= +github.com/coredns/corefile-migration v1.0.31/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd/v22 v22.5.0 
h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -36,14 +55,26 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0= +github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= @@ -71,10 +102,14 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= @@ -83,17 +118,27 @@ github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-github/v82 v82.0.0 h1:OH09ESON2QwKCUVMYmMcVu1IFKFoaZHwqYaUtr/MVfk= +github.com/google/go-github/v82 v82.0.0/go.mod h1:hQ6Xo0VKfL8RZ7z1hSfB4fvISg0QqHOqe9BP0qo+WvM= +github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0= +github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= -github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc= +github.com/google/pprof 
v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -119,26 +164,56 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= -github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= -github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= -github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 h1:zrbMGy9YXpIeTnGj4EljqMiZsIcE09mmF8XsD5AYOJc= +github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6/go.mod h1:rEKTHC9roVVicUIfZK7DYrdIoM0EOr8mK1Hj5s3JjH0= +github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM= +github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y= +github.com/olekukonko/ll v0.1.1 h1:9Dfeed5/Mgaxb9lHRAftLK9pVfYETvHn+If6lywVhJc= +github.com/olekukonko/ll v0.1.1/go.mod h1:2dJo+hYZcJMLMbKwHEWvxCUbAOLc/CXWS9noET22Mdo= +github.com/olekukonko/tablewriter v1.0.9 h1:XGwRsYLC2bY7bNd93Dk51bcPZksWZmLYuaTHR0FqfL8= +github.com/olekukonko/tablewriter v1.0.9/go.mod h1:5c+EBPeSqvXnLLgkm9isDdzR3wjfBkHR9Nhfp3NWrzo= +github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI= +github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE= +github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= 
+github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -151,18 +226,32 @@ github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9Z github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
+github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/stoewer/go-strcase v1.3.0 
h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -170,12 +259,15 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -184,18 +276,30 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= 
+github.com/valyala/fastjson v1.6.10 h1:/yjJg8jaVQdYR3arGxPE2X5z89xrlhS0eGXdv+ADTh4= +github.com/valyala/fastjson v1.6.10/go.mod h1:e6FubmQouUNP73jtMLmcbxS6ydWIpOfhz34TSfO3JaE= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +go.etcd.io/etcd/api/v3 v3.6.8 h1:gqb1VN92TAI6G2FiBvWcqKtHiIjr4SU2GdXxTwyexbM= +go.etcd.io/etcd/api/v3 v3.6.8/go.mod h1:qyQj1HZPUV3B5cbAL8scG62+fyz5dSxxu0w8pn28N6Q= +go.etcd.io/etcd/client/pkg/v3 v3.6.8 h1:Qs/5C0LNFiqXxYf2GU8MVjYUEXJ6sZaYOz0zEqQgy50= +go.etcd.io/etcd/client/pkg/v3 v3.6.8/go.mod h1:GsiTRUZE2318PggZkAo6sWb6l8JLVrnckTNfbG8PWtw= +go.etcd.io/etcd/client/v3 v3.6.8 h1:B3G76t1UykqAOrbio7s/EPatixQDkQBevN8/mwiplrY= +go.etcd.io/etcd/client/v3 v3.6.8/go.mod h1:MVG4BpSIuumPi+ELF7wYtySETmoTWBHVcDoHdVupwt8= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0 h1:XmiuHzgJt067+a6kwyAzkhXooYVv3/TOw9cM2VfJgUM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0/go.mod h1:KDgtbWKTQs4bM+VPUr6WlL9m/WXcmkCcBlIzqxPGzmI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= @@ -204,8 +308,8 @@ go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4A go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod 
h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -216,40 +320,43 @@ go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.36.0 
h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= -golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= -golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= -golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= 
+golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= -google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod 
h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -260,20 +367,22 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= -k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= -k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= -k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= -k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= -k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= -k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4= -k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds= -k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= -k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= -k8s.io/cluster-bootstrap v0.34.2 h1:oKckPeunVCns37BntcsxaOesDul32yzGd3DFLjW2fc8= -k8s.io/cluster-bootstrap v0.34.2/go.mod h1:f21byPR7X5nt12ivZi+J3pb4sG4SH6VySX8KAAJA8BY= -k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94= -k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod 
h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +k8s.io/api v0.35.2 h1:tW7mWc2RpxW7HS4CoRXhtYHSzme1PN1UjGHJ1bdrtdw= +k8s.io/api v0.35.2/go.mod h1:7AJfqGoAZcwSFhOjcGM7WV05QxMMgUaChNfLTXDRE60= +k8s.io/apiextensions-apiserver v0.35.2 h1:iyStXHoJZsUXPh/nFAsjC29rjJWdSgUmG1XpApE29c0= +k8s.io/apiextensions-apiserver v0.35.2/go.mod h1:OdyGvcO1FtMDWQ+rRh/Ei3b6X3g2+ZDHd0MSRGeS8rU= +k8s.io/apimachinery v0.35.2 h1:NqsM/mmZA7sHW02JZ9RTtk3wInRgbVxL8MPfzSANAK8= +k8s.io/apimachinery v0.35.2/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apiserver v0.35.2 h1:rb52v0CZGEL0FkhjS+I6jHflAp7fZ4MIaKcEHX7wmDk= +k8s.io/apiserver v0.35.2/go.mod h1:CROJUAu0tfjZLyYgSeBsBan2T7LUJGh0ucWwTCSSk7g= +k8s.io/client-go v0.35.2 h1:YUfPefdGJA4aljDdayAXkc98DnPkIetMl4PrKX97W9o= +k8s.io/client-go v0.35.2/go.mod h1:4QqEwh4oQpeK8AaefZ0jwTFJw/9kIjdQi0jpKeYvz7g= +k8s.io/cluster-bootstrap v0.35.2 h1:6qGTBbRabtK3NU42fVdeKsWQy5yT7RPyqn5RmXTlGXk= +k8s.io/cluster-bootstrap v0.35.2/go.mod h1:dWypJ4l++6TDt+zJOr3aog2cOZ4kWDAnkcDdsc1vW8Q= +k8s.io/component-base v0.35.2 h1:btgR+qNrpWuRSuvWSnQYsZy88yf5gVwemvz0yw79pGc= +k8s.io/component-base v0.35.2/go.mod h1:B1iBJjooe6xIJYUucAxb26RwhAjzx0gHnqO9htWIX+0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= @@ -282,15 +391,19 @@ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzk k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/cluster-api v1.12.3 h1:cuOl3fWXhlXFuQcyIH4C8i3ns8rLhtcnK+x00MVdKBs= -sigs.k8s.io/cluster-api v1.12.3/go.mod 
h1:EAiTJtf/8M5eBetPwumi6t8DJJ55Ln6Fkvh2OAa7PD4= -sigs.k8s.io/controller-runtime v0.23.1 h1:TjJSM80Nf43Mg21+RCy3J70aj/W6KyvDtOlpKf+PupE= -sigs.k8s.io/controller-runtime v0.23.1/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= +sigs.k8s.io/cluster-api v1.13.0-beta.0 h1:RmnhP+XWKABSQcW86WVGNIFg8bUPUKvEKfD9ucKLmNQ= +sigs.k8s.io/cluster-api v1.13.0-beta.0/go.mod h1:gNQrTS/VtkwT7ItKf5+eUj6uiTjzb5fwF8ksVKHA2Gk= +sigs.k8s.io/cluster-api/test v1.13.0-beta.0 h1:kOMfJG9NgVAS7a675NnwAsr1JxH1yGq9IB+o+AWKbF8= +sigs.k8s.io/cluster-api/test v1.13.0-beta.0/go.mod h1:ESHqwOD5qkZbwguMeGRVALCzdVVjf4dGyoJnLZQgL0s= +sigs.k8s.io/controller-runtime v0.23.3 h1:VjB/vhoPoA9l1kEKZHBMnQF33tdCLQKJtydy4iqwZ80= +sigs.k8s.io/controller-runtime v0.23.3/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kind v0.31.0 h1:UcT4nzm+YM7YEbqiAKECk+b6dsvc/HRZZu9U0FolL1g= +sigs.k8s.io/kind v0.31.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= -sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2 h1:kwVWMx5yS1CrnFWA/2QHyRVJ8jM6dBA80uLmm0wJkk8= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/clean-e2e-images.sh b/hack/clean-e2e-images.sh new file mode 100755 index 
0000000..d5e9a83 --- /dev/null +++ b/hack/clean-e2e-images.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# Delete e2e-* image tags older than 7 days from capcs-staging on quay.io. +# Requires QUAY_E2E_TOKEN environment variable (OAuth token with repo:admin scope). +# Set DRY_RUN=true to list tags without deleting them. + +set -euo pipefail + +REPO="cloudscalech/capcs-staging" +API="https://quay.io/api/v1/repository/${REPO}/tag/" +MAX_AGE_DAYS="${MAX_AGE_DAYS:-7}" +DRY_RUN="${DRY_RUN:-false}" + +if [[ -z "${QUAY_E2E_TOKEN:-}" ]]; then + echo "Error: QUAY_E2E_TOKEN environment variable is required" >&2 + exit 1 +fi + +cutoff=$(date -u -v-${MAX_AGE_DAYS}d +%s 2>/dev/null || date -u -d "${MAX_AGE_DAYS} days ago" +%s) + +if [[ "${DRY_RUN}" == "true" ]]; then + echo "DRY RUN: will list tags without deleting them" +fi +echo "Listing e2e-* tags older than ${MAX_AGE_DAYS} days..." + +page=1 +deleted=0 +while true; do + response=$(curl -s -H "Authorization: Bearer ${QUAY_E2E_TOKEN}" \ + "${API}?filter_tag_name=like:e2e-%25&limit=100&page=${page}") + + tags=$(echo "${response}" | jq -r '.tags // [] | .[] | select(.end_ts == null) | "\(.name) \(.start_ts)"') + + if [[ -z "${tags}" ]]; then + break + fi + + while IFS=' ' read -r name start_ts; do + if [[ "${start_ts}" -lt "${cutoff}" ]]; then + created=$(date -u -r "${start_ts}" 2>/dev/null || date -u -d "@${start_ts}") + if [[ "${DRY_RUN}" == "true" ]]; then + echo "Would delete tag: ${name} (created ${created})" + else + echo "Deleting tag: ${name} (created ${created})" + curl -s -X DELETE -H "Authorization: Bearer ${QUAY_E2E_TOKEN}" "${API}${name}" > /dev/null + fi + deleted=$((deleted + 1)) + fi + done <<< "${tags}" + + has_more=$(echo "${response}" | jq -r '.has_additional') + if [[ "${has_more}" != "true" ]]; then + break + fi + page=$((page + 1)) +done + +if [[ "${DRY_RUN}" == "true" ]]; then + echo "Would delete ${deleted} e2e tag(s)." +else + echo "Deleted ${deleted} e2e tag(s)." 
+fi diff --git a/hack/generate-e2e-ccm.sh b/hack/generate-e2e-ccm.sh new file mode 100755 index 0000000..33f0863 --- /dev/null +++ b/hack/generate-e2e-ccm.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash +# Generate cloudscale CCM manifest for e2e ClusterResourceSet. +# Accepts CCM_VERSION env var (default: 1.3.0). +# Skips generation if bases/ccm.yaml already contains the matching version. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +CCM_VERSION="${CCM_VERSION:-1.3.0}" +E2E_TEMPLATES="${REPO_ROOT}/test/e2e/data/infrastructure-cloudscale" +CCM_FILE="${E2E_TEMPLATES}/bases/ccm.yaml" + +# Check if the file already contains the matching version +if [ -f "${CCM_FILE}" ] && grep -q "quay.io/cloudscalech/cloudscale-cloud-controller-manager:${CCM_VERSION}" "${CCM_FILE}"; then + echo "CCM manifest already at version ${CCM_VERSION}, skipping." + exit 0 +fi + +echo "Generating CCM manifest (version ${CCM_VERSION})..." + +CCM_YAML=$(curl -LsS "https://github.com/cloudscale-ch/cloudscale-cloud-controller-manager/releases/download/${CCM_VERSION}/config.yml") + +mkdir -p "$(dirname "${CCM_FILE}")" + +cat > "${CCM_FILE}" <<'OUTER' +# Cloud Controller Manager resources for ClusterResourceSet +# Automatically deployed to workload clusters with label: ccm=cloudscale +# Regenerate with: make generate-e2e-ccm +# Code generated by generate-e2e-ccm.sh. DO NOT EDIT. 
+OUTER + +cat >> "${CCM_FILE}" </dev/null || true +helm repo update cilium + +CILIUM_YAML=$(helm template cilium cilium/cilium --version "${CILIUM_VERSION}" --namespace kube-system \ + --set ipam.mode=kubernetes \ + --set routingMode=tunnel \ + --set tunnelProtocol=vxlan \ + --set kubeProxyReplacement=true \ + --set hubble.enabled=false \ + --set hubble.relay.enabled=false \ + --set hubble.ui.enabled=false \ + --set operator.replicas=1 \ + --set envoy.enabled=false \ + --set l7Proxy=false 2>/dev/null | sed '/^#/d; /^[[:space:]]*#/d; s/\${BIN_PATH}/\$\${BIN_PATH}/g' | cat -s) + +mkdir -p "$(dirname "${CNI_FILE}")" + +cat > "${CNI_FILE}" < 0 { + return fmt.Errorf("infrastructure resource leaks detected:\n%s", strings.Join(leaks, "\n")) + } + return nil +} diff --git a/test/e2e/config/cloudscale.yaml b/test/e2e/config/cloudscale.yaml new file mode 100644 index 0000000..68e2992 --- /dev/null +++ b/test/e2e/config/cloudscale.yaml @@ -0,0 +1,127 @@ +# E2EConfig for cluster-api-provider-cloudscale e2e tests +# Used by CAPI test framework (sigs.k8s.io/cluster-api/test/framework) + +managementClusterName: capcs-e2e + +images: + - name: ${IMG} + loadBehavior: mustLoad + +providers: + - name: cluster-api + type: CoreProvider + versions: + - name: v1.12.4 + value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.12.4/core-components.yaml + type: url + contract: v1beta2 + files: + - sourcePath: "../data/shared/v1beta2/metadata.yaml" + + - name: kubeadm + type: BootstrapProvider + versions: + - name: v1.12.4 + value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.12.4/bootstrap-components.yaml + type: url + contract: v1beta2 + files: + - sourcePath: "../data/shared/v1beta2/metadata.yaml" + + - name: kubeadm + type: ControlPlaneProvider + versions: + - name: v1.12.4 + value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.12.4/control-plane-components.yaml + type: url + contract: v1beta2 + files: + - sourcePath: 
"../data/shared/v1beta2/metadata.yaml" + + - name: cloudscale + type: InfrastructureProvider + versions: + - name: v1.13.99 # dev version, matches CAPI convention + value: ../../../config/default + type: kustomize + contract: v1beta2 + replacements: + - old: "image: quay.io/cloudscalech/capcs-staging:.*" + new: "image: ${IMG}" + files: + - sourcePath: "../data/infrastructure-cloudscale/main/cluster-template.yaml" + - sourcePath: "../data/infrastructure-cloudscale/main/cluster-template-ha.yaml" + - sourcePath: "../data/infrastructure-cloudscale/main/cluster-template-upgrades.yaml" + - sourcePath: "../data/infrastructure-cloudscale/main/cluster-template-md-remediation.yaml" + - sourcePath: "../data/infrastructure-cloudscale/main/metadata.yaml" + targetName: metadata.yaml + +variables: + # Kubernetes versions + KUBERNETES_VERSION: "v1.35.2" + KUBERNETES_VERSION_UPGRADE_FROM: "v1.34.3" + KUBERNETES_VERSION_UPGRADE_TO: "v1.35.2" + + # cloudscale.ch configuration + CLOUDSCALE_REGION: "lpg" + CLOUDSCALE_CONTROL_PLANE_MACHINE_FLAVOR: "flex-4-2" + CLOUDSCALE_WORKER_MACHINE_FLAVOR: "flex-4-2" + CLOUDSCALE_MACHINE_IMAGE: "custom:ubuntu-2404-kube-v1.35.2" + CLOUDSCALE_MACHINE_IMAGE_UPGRADE_FROM: "custom:ubuntu-2404-kube-v1.34.3" + # CLOUDSCALE_SSH_PUBLIC_KEY: Set via environment variable + CLOUDSCALE_ROOT_VOLUME_SIZE: "20" + CLOUDSCALE_NETWORK_CIDR: "10.100.0.0/24" + + # Machine template names for upgrade tests + CONTROL_PLANE_MACHINE_TEMPLATE_UPGRADE_TO: "k8s-upgrade-control-plane" + WORKERS_MACHINE_TEMPLATE_UPGRADE_TO: "k8s-upgrade-worker" + + # Cluster configuration + CONTROL_PLANE_MACHINE_COUNT: "1" + WORKER_MACHINE_COUNT: "1" + + # Feature flags + EXP_CLUSTER_RESOURCE_SET: "true" + + # Conformance testing + KUBETEST_CONFIGURATION: "${KUBETEST_CONFIGURATION}" + KUBETEST_NUMBER_OF_NODES: "2" + KUBETEST_GINKGO_NODES: "1" + +intervals: + # Controller startup + default/wait-controllers: ["5m", "10s"] + + # Cluster provisioning + default/wait-cluster: ["15m", "30s"] + 
default/wait-control-plane: ["20m", "30s"] + default/wait-worker-nodes: ["15m", "30s"] + + # Machine operations + default/wait-machine-status: ["10m", "30s"] + default/wait-machine-remediation: ["15m", "30s"] + + # Upgrades + default/wait-machine-upgrade: ["30m", "30s"] + default/wait-control-plane-upgrade: ["35m", "30s"] + + # Upgrade spec (spec name: k8s-upgrade-and-conformance) + k8s-upgrade-and-conformance/wait-cluster: ["15m", "30s"] + k8s-upgrade-and-conformance/wait-control-plane: ["25m", "30s"] + k8s-upgrade-and-conformance/wait-worker-nodes: ["15m", "30s"] + k8s-upgrade-and-conformance/wait-control-plane-upgrade: ["35m", "30s"] + k8s-upgrade-and-conformance/wait-machine-upgrade: ["30m", "30s"] + + # MD remediation + md-remediation/wait-cluster: ["15m", "30s"] + md-remediation/wait-control-plane: ["20m", "30s"] + md-remediation/wait-worker-nodes: ["15m", "30s"] + md-remediation/wait-machine-remediation: ["15m", "30s"] + + # K8s conformance + k8s-conformance/wait-cluster: ["15m", "30s"] + k8s-conformance/wait-control-plane: ["20m", "30s"] + k8s-conformance/wait-worker-nodes: ["15m", "30s"] + + # Deletion + default/wait-delete-cluster: ["15m", "30s"] diff --git a/test/e2e/data/infrastructure-cloudscale/bases/ccm.yaml b/test/e2e/data/infrastructure-cloudscale/bases/ccm.yaml new file mode 100644 index 0000000..ccbd6d1 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/bases/ccm.yaml @@ -0,0 +1,200 @@ +# Cloud Controller Manager resources for ClusterResourceSet +# Automatically deployed to workload clusters with label: ccm=cloudscale +# Regenerate with: make generate-e2e-ccm +# Code generated by generate-e2e-ccm.sh. DO NOT EDIT. 
+# CCM version: 1.3.0 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "${CLUSTER_NAME}-ccm" + namespace: "${NAMESPACE}" +data: + ccm.yaml: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cloudscale-cloud-controller-manager + namespace: kube-system + labels: + k8s-app: cloudscale-cloud-controller-manager + spec: + selector: + matchLabels: + k8s-app: cloudscale-cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + k8s-app: 
cloudscale-cloud-controller-manager + spec: + serviceAccountName: cloud-controller-manager + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: "node-role.kubernetes.io/control-plane" + effect: NoSchedule + - key: "CriticalAddonsOnly" + operator: "Exists" + - key: "node.cloudprovider.kubernetes.io/uninitialized" + value: "true" + effect: "NoSchedule" + - key: "node.kubernetes.io/not-ready" + effect: "NoSchedule" + hostNetwork: true + containers: + - name: cloudscale-cloud-controller-manager + image: quay.io/cloudscalech/cloudscale-cloud-controller-manager:1.3.0 + imagePullPolicy: IfNotPresent + command: + - "cloudscale-cloud-controller-manager" + - "--allow-untagged-cloud" + - "--v=3" + - "--concurrent-service-syncs=2" + env: + - name: CLOUDSCALE_API_URL + value: https://api.cloudscale.ch/ + - name: CLOUDSCALE_ACCESS_TOKEN + valueFrom: + secretKeyRef: + name: cloudscale + key: access-token +--- +apiVersion: v1 +kind: Secret +metadata: + name: "${CLUSTER_NAME}-ccm-credentials" + namespace: "${NAMESPACE}" +type: addons.cluster.x-k8s.io/resource-set +stringData: + secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: cloudscale + namespace: kube-system + type: Opaque + stringData: + access-token: "${CLOUDSCALE_API_TOKEN}" +--- +apiVersion: addons.cluster.x-k8s.io/v1beta2 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-ccm" + namespace: "${NAMESPACE}" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + ccm: cloudscale + resources: + - name: "${CLUSTER_NAME}-ccm-credentials" + kind: Secret + - name: "${CLUSTER_NAME}-ccm" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-cloudscale/bases/cluster.yaml b/test/e2e/data/infrastructure-cloudscale/bases/cluster.yaml new file mode 100644 index 0000000..6363822 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/bases/cluster.yaml @@ -0,0 +1,95 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: "${CLUSTER_NAME}-credentials" + 
namespace: "${NAMESPACE}" + labels: + clusterctl.cluster.x-k8s.io/move: "" +type: Opaque +stringData: + token: "${CLOUDSCALE_API_TOKEN}" +--- +apiVersion: cluster.x-k8s.io/v1beta2 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + labels: + ccm: cloudscale + cni: cilium +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + services: + cidrBlocks: ["10.96.0.0/12"] + serviceDomain: "cluster.local" + infrastructureRef: + apiGroup: infrastructure.cluster.x-k8s.io + kind: CloudscaleCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiGroup: controlplane.cluster.x-k8s.io + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleCluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + region: "${CLOUDSCALE_REGION}" + network: + cidr: "${CLOUDSCALE_NETWORK_CIDR}" + gatewayAddress: "" # disable gateway, use public interface for internet access + controlPlaneLoadBalancer: + enabled: true + credentialsRef: + name: "${CLUSTER_NAME}-credentials" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +kind: KubeadmControlPlane +metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: "${KUBERNETES_VERSION}" + machineTemplate: + spec: + infrastructureRef: + apiGroup: infrastructure.cluster.x-k8s.io + kind: CloudscaleMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + - name: cloud-provider + value: external + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + - name: cloud-provider + value: external + users: + - name: capi + groups: "adm, sudo" + shell: "/bin/bash" + sudo: "ALL=(ALL) NOPASSWD:ALL" + sshAuthorizedKeys: + - "${CLOUDSCALE_SSH_PUBLIC_KEY}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleMachineTemplate 
+metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" +spec: + template: + spec: + flavor: "${CLOUDSCALE_CONTROL_PLANE_MACHINE_FLAVOR}" + image: "${CLOUDSCALE_MACHINE_IMAGE}" + rootVolumeSize: ${CLOUDSCALE_ROOT_VOLUME_SIZE} diff --git a/test/e2e/data/infrastructure-cloudscale/bases/cni.yaml b/test/e2e/data/infrastructure-cloudscale/bases/cni.yaml new file mode 100644 index 0000000..cb996cc --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/bases/cni.yaml @@ -0,0 +1,1292 @@ +# Cilium CNI resources for ClusterResourceSet +# Automatically deployed to workload clusters with label: cni=cilium +# Regenerate with: make generate-e2e-cni +# Code generated by generate-e2e-cni.sh. DO NOT EDIT. +# Cilium version: 1.19.2 + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "${CLUSTER_NAME}-cni" + namespace: "${NAMESPACE}" +data: + cilium.yaml: | + --- + apiVersion: v1 + kind: Namespace + metadata: + name: "cilium-secrets" + labels: + app.kubernetes.io/part-of: cilium + annotations: + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: "cilium" + namespace: kube-system + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: "cilium-operator" + namespace: kube-system + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: cilium-config + namespace: kube-system + data: + + identity-allocation-mode: crd + + identity-heartbeat-timeout: "30m0s" + identity-gc-interval: "15m0s" + cilium-endpoint-gc-interval: "5m0s" + nodes-gc-interval: "5m0s" + + debug: "false" + debug-verbose: "" + metrics-sampling-interval: "5m" + enable-policy: "default" + policy-cidr-match-mode: "" + proxy-prometheus-port: "9964" + operator-prometheus-serve-addr: ":9963" + enable-metrics: "true" + enable-policy-secrets-sync: "true" + policy-secrets-only-from-secrets-namespace: "true" + policy-secrets-namespace: "cilium-secrets" + + enable-ipv4: "true" + + enable-ipv6: "false" + custom-cni-conf: "false" + enable-bpf-clock-probe: "false" + 
monitor-aggregation: medium + + monitor-aggregation-interval: "5s" + + monitor-aggregation-flags: all + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-policy-stats-map-max: "65536" + bpf-lb-map-max: "65536" + bpf-lb-external-clusterip: "false" + bpf-lb-source-range-all-types: "false" + bpf-lb-algorithm-annotation: "false" + bpf-lb-mode-annotation: "false" + + bpf-distributed-lru: "false" + bpf-events-drop-enabled: "true" + bpf-events-policy-verdict-enabled: "true" + bpf-events-trace-enabled: "true" + + preallocate-bpf-maps: "false" + + cluster-name: "default" + cluster-id: "0" + + routing-mode: "tunnel" + tunnel-protocol: "vxlan" + tunnel-source-port-range: "0-0" + service-no-backend-response: "reject" + policy-deny-response: "none" + + enable-l7-proxy: "false" + enable-ipv4-masquerade: "true" + enable-ipv4-big-tcp: "false" + enable-ipv6-big-tcp: "false" + enable-ipv6-masquerade: "true" + enable-tunnel-big-tcp: "false" + enable-tcx: "true" + datapath-mode: "veth" + enable-masquerade-to-route-source: "false" + + enable-xt-socket-fallback: "true" + install-no-conntrack-iptables-rules: "false" + iptables-random-fully: "false" + + auto-direct-node-routes: "false" + direct-routing-skip-unreachable: "false" + + kube-proxy-replacement: "true" + kube-proxy-replacement-healthz-bind-address: "" + enable-no-service-endpoints-routable: "true" + bpf-lb-sock: "false" + nodeport-addresses: "" + enable-health-check-nodeport: "true" + enable-health-check-loadbalancer-ip: "false" + node-port-bind-protection: "true" + enable-auto-protect-node-port-range: "true" + bpf-lb-acceleration: "disabled" + enable-service-topology: "false" + enable-l2-neigh-discovery: "false" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + enable-k8s-networkpolicy: "true" + enable-endpoint-lockdown-on-policy-overflow: "false" + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist + cni-exclusive: "true" + cni-log-file: 
"/var/run/cilium/cilium-cni.log" + enable-endpoint-health-checking: "true" + enable-health-checking: "true" + health-check-icmp-failure-threshold: "3" + enable-well-known-identities: "false" + enable-node-selector-labels: "false" + synchronize-k8s-nodes: "true" + operator-api-serve-addr: "127.0.0.1:9234" + + enable-hubble: "false" + ipam: "kubernetes" + ipam-cilium-node-update-rate: "15s" + + default-lb-service-ipam: "lbipam" + egress-gateway-reconciliation-trigger-interval: "1s" + enable-vtep: "false" + vtep-endpoint: "" + vtep-cidr: "" + vtep-mask: "" + vtep-mac: "" + + packetization-layer-pmtud-mode: "blackhole" + procfs: "/host/proc" + bpf-root: "/sys/fs/bpf" + cgroup-root: "/run/cilium/cgroupv2" + + identity-management-mode: "agent" + enable-sctp: "false" + remove-cilium-node-taints: "true" + set-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + unmanaged-pod-watcher-interval: "15s" + dnsproxy-enable-transparent-mode: "true" + dnsproxy-socket-linger-timeout: "10" + tofqdns-dns-reject-response-code: "refused" + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: "0s" + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-proxy-response-max-delay: "100ms" + tofqdns-preallocate-identities: "true" + agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" + + mesh-auth-enabled: "false" + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + mesh-auth-gc-interval: "5m0s" + + proxy-xff-num-trusted-hops-ingress: "0" + proxy-xff-num-trusted-hops-egress: "0" + proxy-connect-timeout: "2" + proxy-initial-fetch-timeout: "30" + proxy-max-active-downstream-connections: "50000" + proxy-max-requests-per-connection: "0" + proxy-max-connection-duration-seconds: "0" + proxy-idle-timeout-seconds: "60" + proxy-max-concurrent-retries: "128" + proxy-use-original-source-address: "true" + proxy-cluster-max-connections: "1024" + proxy-cluster-max-requests: 
"1024" + http-retry-count: "3" + http-stream-idle-timeout: "300" + + external-envoy-proxy: "false" + envoy-base-id: "0" + envoy-access-log-buffer-size: "4096" + envoy-keep-cap-netbindservice: "false" + max-connected-clusters: "255" + clustermesh-cache-ttl: "0s" + clustermesh-enable-endpoint-sync: "false" + clustermesh-enable-mcs-api: "false" + clustermesh-mcs-api-install-crds: "true" + policy-default-local-cluster: "true" + + nat-map-stats-entries: "32" + nat-map-stats-interval: "30s" + enable-lb-ipam: "true" + enable-non-default-deny-policies: "true" + enable-source-ip-verification: "true" + + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium + rules: + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + - get + - apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + verbs: + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create + - apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update + - apiGroups: + - cilium.io + resources: + 
- ciliumendpoints + verbs: + - delete + - get + - apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update + - apiGroups: + - cilium.io + resources: + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status + verbs: + - patch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - delete + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cilium-config + verbs: + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - update + - patch + - apiGroups: + - "" + resources: + - namespaces + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumclusterwidenetworkpolicies + verbs: + - create + - update + - deletecollection + - patch + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + verbs: + - patch + - update + - apiGroups: + - cilium.io + resources: + - ciliumendpoints + - ciliumidentities + verbs: + - delete + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update + - apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - create + - update + - get + - list + - watch + - delete + - apiGroups: + - cilium.io + resources: + - ciliumnodes/status + verbs: + - update + - 
apiGroups: + - cilium.io + resources: + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumbgppeerconfigs + - ciliumbgpadvertisements + - ciliumbgpnodeconfigs + verbs: + - create + - update + - get + - list + - watch + - delete + - patch + - apiGroups: + - cilium.io + resources: + - ciliumbgpclusterconfigs/status + - ciliumbgppeerconfigs/status + verbs: + - update + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - update + resourceNames: + - ciliumloadbalancerippools.cilium.io + - ciliumbgpclusterconfigs.cilium.io + - ciliumbgppeerconfigs.cilium.io + - ciliumbgpadvertisements.cilium.io + - ciliumbgpnodeconfigs.cilium.io + - ciliumbgpnodeconfigoverrides.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io + - ciliumgatewayclassconfigs.cilium.io + - apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumpodippools + - ciliumbgpclusterconfigs + - ciliumbgpnodeconfigoverrides + - ciliumbgppeerconfigs + verbs: + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create + - apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools/status + verbs: + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - apiGroups: + - cilium.io + resources: + - ciliumendpointslices + verbs: + - deletecollection + --- + 
apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium + subjects: + - kind: ServiceAccount + name: "cilium" + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator + subjects: + - kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: cilium-config-agent + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium + rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: cilium-tlsinterception-secrets + namespace: "cilium-secrets" + labels: + app.kubernetes.io/part-of: cilium + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: cilium-operator-tlsinterception-secrets + namespace: "cilium-secrets" + labels: + app.kubernetes.io/part-of: cilium + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - update + - patch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: cilium-operator-ztunnel + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium + rules: + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - delete + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: cilium-config-agent + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium + roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent + subjects: + - kind: ServiceAccount + name: "cilium" + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: cilium-tlsinterception-secrets + namespace: "cilium-secrets" + labels: + app.kubernetes.io/part-of: cilium + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-tlsinterception-secrets + subjects: + - kind: ServiceAccount + name: "cilium" + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: cilium-operator-tlsinterception-secrets + namespace: "cilium-secrets" + labels: + app.kubernetes.io/part-of: cilium + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-operator-tlsinterception-secrets + subjects: + - kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: cilium-operator-ztunnel + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-operator-ztunnel + subjects: + - kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: cilium + namespace: kube-system + labels: + k8s-app: cilium + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-agent + spec: + selector: + matchLabels: + k8s-app: cilium + updateStrategy: + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: cilium-agent + labels: + k8s-app: cilium + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium + spec: + securityContext: + appArmorProfile: + type: Unconfined + seccompProfile: + type: Unconfined + containers: + - name: cilium-agent + image: 
"quay.io/cilium/cilium:v1.19.2@sha256:7bc7e0be845cae0a70241e622cd03c3b169001c9383dd84329c59ca86a8b1341" + imagePullPolicy: IfNotPresent + command: + - cilium-agent + args: + - --config-dir=/tmp/cilium/config-map + startupProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: health + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 300 + periodSeconds: 2 + successThreshold: 1 + initialDelaySeconds: 5 + livenessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: health + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + - name: "require-k8s-connectivity" + value: "false" + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 10 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: health + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: '1' + - name: KUBE_CLIENT_BACKOFF_BASE + value: "1" + - name: KUBE_CLIENT_BACKOFF_DURATION + value: "120" + lifecycle: + postStart: + exec: + command: + - "bash" + - "-c" + - | + set -o errexit + set -o pipefail + set -o nounset + + if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]]; + then + echo 'Deleting iptables rules created by the AWS CNI VPC plugin' + iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore + fi + echo 'Done!' 
+ + preStop: + exec: + command: + - /cni-uninstall.sh + ports: + - name: health + containerPort: 9879 + hostPort: 9879 + protocol: TCP + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - SYSLOG + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - name: bpf-maps + mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + - name: cilium-run + mountPath: /var/run/cilium + - name: cilium-netns + mountPath: /var/run/cilium/netns + mountPropagation: HostToContainer + - name: etc-cni-netd + mountPath: /host/etc/cni/net.d + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh + readOnly: true + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + - name: tmp + mountPath: /tmp + + initContainers: + - name: config + image: "quay.io/cilium/cilium:v1.19.2@sha256:7bc7e0be845cae0a70241e622cd03c3b169001c9383dd84329c59ca86a8b1341" + imagePullPolicy: IfNotPresent + command: + - cilium-dbg + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: tmp + mountPath: /tmp + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + capabilities: + add: + - NET_ADMIN + drop: + - ALL + - name: mount-cgroup + image: "quay.io/cilium/cilium:v1.19.2@sha256:7bc7e0be845cae0a70241e622cd03c3b169001c9383dd84329c59ca86a8b1341" + imagePullPolicy: IfNotPresent + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh 
+ - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "$${BIN_PATH}/cilium-mount" $${CGROUP_ROOT}; + rm /hostbin/cilium-mount + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + - name: apply-sysctl-overwrites + image: "quay.io/cilium/cilium:v1.19.2@sha256:7bc7e0be845cae0a70241e622cd03c3b169001c9383dd84329c59ca86a8b1341" + imagePullPolicy: IfNotPresent + env: + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "$${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + - name: mount-bpf-fs + image: "quay.io/cilium/cilium:v1.19.2@sha256:7bc7e0be845cae0a70241e622cd03c3b169001c9383dd84329c59ca86a8b1341" + imagePullPolicy: IfNotPresent + args: + - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' + command: + - /bin/bash + - -c + - -- + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + - name: clean-cilium-state + image: "quay.io/cilium/cilium:v1.19.2@sha256:7bc7e0be845cae0a70241e622cd03c3b169001c9383dd84329c59ca86a8b1341" + imagePullPolicy: IfNotPresent + command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: 
clean-cilium-state + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-bpf-state + optional: true + - name: WRITE_CNI_CONF_WHEN_READY + valueFrom: + configMapKeyRef: + name: cilium-config + key: write-cni-conf-when-ready + optional: true + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + - name: cilium-cgroup + mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + - name: cilium-run + mountPath: /var/run/cilium # wait-for-kube-proxy + - name: install-cni-binaries + image: "quay.io/cilium/cilium:v1.19.2@sha256:7bc7e0be845cae0a70241e622cd03c3b169001c9383dd84329c59ca86a8b1341" + imagePullPolicy: IfNotPresent + command: + - "/install-plugin.sh" + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 100m + memory: 10Mi + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - name: cni-path + mountPath: /host/opt/cni/bin # .Values.cni.install + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccountName: "cilium" + automountServiceAccountToken: true + terminationGracePeriodSeconds: 1 + hostNetwork: true + + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux + tolerations: + - operator: Exists + volumes: + - name: tmp + emptyDir: {} + - name: cilium-run + hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + - name: cilium-netns + hostPath: + path: /var/run/netns + type: DirectoryOrCreate + - name: bpf-maps + hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + - 
name: hostproc + hostPath: + path: /proc + type: Directory + - name: cilium-cgroup + hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + - name: cni-path + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + - name: etc-cni-netd + hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + - name: lib-modules + hostPath: + path: /lib/modules + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: clustermesh-secrets + projected: + defaultMode: 0400 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + name: clustermesh-apiserver-remote-cert + optional: true + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + - secret: + name: clustermesh-apiserver-local-cert + optional: true + items: + - key: tls.key + path: local-etcd-client.key + - key: tls.crt + path: local-etcd-client.crt + - key: ca.crt + path: local-etcd-client-ca.crt + - name: host-proc-sys-net + hostPath: + path: /proc/sys/net + type: Directory + - name: host-proc-sys-kernel + hostPath: + path: /proc/sys/kernel + type: Directory + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator + spec: + replicas: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 100% + type: RollingUpdate + template: + metadata: + annotations: + prometheus.io/port: "9963" + prometheus.io/scrape: "true" + labels: + io.cilium/app: operator + name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: cilium-operator + image: 
"quay.io/cilium/operator-generic:v1.19.2@sha256:e363f4f634c2a66a36e01618734ea17e7b541b949b9a5632f9c180ab16de23f0" + imagePullPolicy: IfNotPresent + command: + - cilium-operator-generic + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + ports: + - name: health + containerPort: 9234 + hostPort: 9234 + - name: prometheus + containerPort: 9963 + hostPort: 9963 + protocol: TCP + livenessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + readinessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 5 + volumeMounts: + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccountName: "cilium-operator" + automountServiceAccountToken: true + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.cilium.io/agent-not-ready + 
operator: Exists + + volumes: + - name: cilium-config-path + configMap: + name: cilium-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta2 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-cni" + namespace: "${NAMESPACE}" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: cilium + resources: + - name: "${CLUSTER_NAME}-cni" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-cloudscale/bases/md.yaml b/test/e2e/data/infrastructure-cloudscale/bases/md.yaml new file mode 100644 index 0000000..c68291e --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/bases/md.yaml @@ -0,0 +1,62 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta2 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-md-0" + apiGroup: bootstrap.cluster.x-k8s.io + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-md-0" + apiGroup: infrastructure.cluster.x-k8s.io + kind: CloudscaleMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + flavor: "${CLOUDSCALE_WORKER_MACHINE_FLAVOR}" + image: "${CLOUDSCALE_MACHINE_IMAGE}" + rootVolumeSize: ${CLOUDSCALE_ROOT_VOLUME_SIZE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + - name: cloud-provider + value: external + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + - name: cloud-provider + value: external + users: + - name: capi + groups: "adm, sudo" + shell: "/bin/bash" 
+ sudo: "ALL=(ALL) NOPASSWD:ALL" + sshAuthorizedKeys: + - "${CLOUDSCALE_SSH_PUBLIC_KEY}" \ No newline at end of file diff --git a/test/e2e/data/infrastructure-cloudscale/bases/mt-cp-upgrade-to.yaml b/test/e2e/data/infrastructure-cloudscale/bases/mt-cp-upgrade-to.yaml new file mode 100644 index 0000000..cecc1f8 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/bases/mt-cp-upgrade-to.yaml @@ -0,0 +1,11 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleMachineTemplate +metadata: + name: "k8s-upgrade-control-plane" + namespace: "${NAMESPACE}" +spec: + template: + spec: + flavor: "${CLOUDSCALE_CONTROL_PLANE_MACHINE_FLAVOR}" + image: "${CLOUDSCALE_MACHINE_IMAGE}" + rootVolumeSize: ${CLOUDSCALE_ROOT_VOLUME_SIZE} diff --git a/test/e2e/data/infrastructure-cloudscale/bases/mt-worker-upgrade-to.yaml b/test/e2e/data/infrastructure-cloudscale/bases/mt-worker-upgrade-to.yaml new file mode 100644 index 0000000..fc74768 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/bases/mt-worker-upgrade-to.yaml @@ -0,0 +1,11 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleMachineTemplate +metadata: + name: "k8s-upgrade-worker" + namespace: "${NAMESPACE}" +spec: + template: + spec: + flavor: "${CLOUDSCALE_WORKER_MACHINE_FLAVOR}" + image: "${CLOUDSCALE_MACHINE_IMAGE}" + rootVolumeSize: ${CLOUDSCALE_ROOT_VOLUME_SIZE} diff --git a/test/e2e/data/infrastructure-cloudscale/cluster-template-ha/kustomization.yaml b/test/e2e/data/infrastructure-cloudscale/cluster-template-ha/kustomization.yaml new file mode 100644 index 0000000..edaf868 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/cluster-template-ha/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../bases/cluster.yaml + - ../bases/md.yaml + - ../bases/ccm.yaml + - ../bases/cni.yaml +patches: + - path: server-groups.yaml diff --git 
a/test/e2e/data/infrastructure-cloudscale/cluster-template-ha/server-groups.yaml b/test/e2e/data/infrastructure-cloudscale/cluster-template-ha/server-groups.yaml new file mode 100644 index 0000000..7f45fe7 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/cluster-template-ha/server-groups.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" +spec: + template: + spec: + serverGroup: + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + serverGroup: + name: "${CLUSTER_NAME}-md-0" diff --git a/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/kustomization.yaml b/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/kustomization.yaml new file mode 100644 index 0000000..bfb394f --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/kustomization.yaml @@ -0,0 +1,10 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../bases/cluster.yaml + - ../bases/md.yaml + - ../bases/ccm.yaml + - ../bases/cni.yaml + - mhc.yaml +patches: + - path: md.yaml diff --git a/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/md.yaml b/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/md.yaml new file mode 100644 index 0000000..f5c8d81 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/md.yaml @@ -0,0 +1,13 @@ +# Kustomize strategic merge patch for MachineDeployment. +# Adds the e2e.remediation.label to worker machine templates so they are +# selected by the MachineHealthCheck defined in mhc.yaml. 
+apiVersion: cluster.x-k8s.io/v1beta2 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + template: + metadata: + labels: + "e2e.remediation.label": "" diff --git a/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/mhc.yaml b/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/mhc.yaml new file mode 100644 index 0000000..b5d716a --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/cluster-template-md-remediation/mhc.yaml @@ -0,0 +1,23 @@ +--- +# MachineHealthCheck targeting worker machines with label e2e.remediation.label="". +# Triggers remediation after 10s when the e2e.remediation.condition is False. +# Based on upstream CAPI Docker reference: +# sigs.k8s.io/cluster-api/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml +apiVersion: cluster.x-k8s.io/v1beta2 +kind: MachineHealthCheck +metadata: + name: "${CLUSTER_NAME}-mhc-0" + namespace: "${NAMESPACE}" +spec: + clusterName: "${CLUSTER_NAME}" + selector: + matchLabels: + e2e.remediation.label: "" + checks: + unhealthyNodeConditions: + - type: e2e.remediation.condition + status: "False" + timeoutSeconds: 10 + remediation: + triggerIf: + unhealthyLessThanOrEqualTo: 100% diff --git a/test/e2e/data/infrastructure-cloudscale/cluster-template-upgrades/kustomization.yaml b/test/e2e/data/infrastructure-cloudscale/cluster-template-upgrades/kustomization.yaml new file mode 100644 index 0000000..532d535 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/cluster-template-upgrades/kustomization.yaml @@ -0,0 +1,11 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../bases/cluster.yaml + - ../bases/md.yaml + - ../bases/ccm.yaml + - ../bases/cni.yaml + - ../bases/mt-cp-upgrade-to.yaml + - ../bases/mt-worker-upgrade-to.yaml +patches: + - path: machine-image.yaml diff --git 
a/test/e2e/data/infrastructure-cloudscale/cluster-template-upgrades/machine-image.yaml b/test/e2e/data/infrastructure-cloudscale/cluster-template-upgrades/machine-image.yaml new file mode 100644 index 0000000..00416ed --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/cluster-template-upgrades/machine-image.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" + namespace: "${NAMESPACE}" +spec: + template: + spec: + image: "${CLOUDSCALE_MACHINE_IMAGE_UPGRADE_FROM}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: CloudscaleMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + image: "${CLOUDSCALE_MACHINE_IMAGE_UPGRADE_FROM}" diff --git a/test/e2e/data/infrastructure-cloudscale/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-cloudscale/cluster-template/kustomization.yaml new file mode 100644 index 0000000..68bde08 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/cluster-template/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../bases/cluster.yaml + - ../bases/md.yaml + - ../bases/ccm.yaml + - ../bases/cni.yaml diff --git a/test/e2e/data/infrastructure-cloudscale/main/metadata.yaml b/test/e2e/data/infrastructure-cloudscale/main/metadata.yaml new file mode 100644 index 0000000..05cf434 --- /dev/null +++ b/test/e2e/data/infrastructure-cloudscale/main/metadata.yaml @@ -0,0 +1,11 @@ +# Provider metadata for cluster-api-provider-cloudscale +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 0 + minor: 1 + contract: v1beta2 + # Dev version for local testing (matches CAPI convention) + - major: 1 + minor: 13 + contract: v1beta2 \ No newline at end of file diff --git a/test/e2e/data/kubetest/conformance-fast.yaml 
b/test/e2e/data/kubetest/conformance-fast.yaml new file mode 100644 index 0000000..dad0bc2 --- /dev/null +++ b/test/e2e/data/kubetest/conformance-fast.yaml @@ -0,0 +1,8 @@ +ginkgo.focus: \[Conformance\] +ginkgo.skip: \[Serial\]|HostPort.*\[Conformance\].* +disable-log-dump: true +ginkgo.slow-spec-threshold: 120s +ginkgo.flake-attempts: 3 +ginkgo.trace: true +ginkgo.v: true +system-pods-startup-timeout: 5m diff --git a/test/e2e/data/kubetest/conformance.yaml b/test/e2e/data/kubetest/conformance.yaml new file mode 100644 index 0000000..41087d7 --- /dev/null +++ b/test/e2e/data/kubetest/conformance.yaml @@ -0,0 +1,7 @@ +ginkgo.focus: \[Conformance\] +disable-log-dump: true +ginkgo.slow-spec-threshold: 120s +ginkgo.flake-attempts: 3 +ginkgo.trace: true +ginkgo.v: true +system-pods-startup-timeout: 5m diff --git a/test/e2e/data/shared/v1beta2/metadata.yaml b/test/e2e/data/shared/v1beta2/metadata.yaml new file mode 100644 index 0000000..0029be7 --- /dev/null +++ b/test/e2e/data/shared/v1beta2/metadata.yaml @@ -0,0 +1,7 @@ +# Shared metadata for v1beta2 contract +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 1 + minor: 12 + contract: v1beta2 \ No newline at end of file diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 7a71dae..d8a39a6 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -1,5 +1,4 @@ //go:build e2e -// +build e2e /* Copyright 2026 cloudscale.ch. @@ -20,82 +19,199 @@ limitations under the License. package e2e import ( - "fmt" + "context" + "flag" "os" - "os/exec" + "path/filepath" "testing" + "github.com/cloudscale-ch/cloudscale-go-sdk/v8" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - - "github.com/cloudscale-ch/cluster-api-provider-cloudscale/test/utils" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/bootstrap" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + + infrav1beta2 "github.com/cloudscale-ch/cluster-api-provider-cloudscale/api/v1beta2" ) var ( - // managerImage is the manager image to be built and loaded for testing. - managerImage = "example.com/cluster-api-provider-cloudscale:v0.0.1" - // shouldCleanupCertManager tracks whether CertManager was installed by this suite. - shouldCleanupCertManager = false + // Test suite configuration + ctx = context.Background() + e2eConfig *clusterctl.E2EConfig + clusterctlConfigPath string + bootstrapClusterProvider bootstrap.ClusterProvider + bootstrapClusterProxy framework.ClusterProxy + + // cloudscale API client and resource snapshot for leak detection + cloudscaleClient *cloudscale.Client + preTestSnapshot *resourceSnapshot + + // Command line flags + configPath string + artifactFolder string + skipCleanup bool + useExistingCluster bool + + // Scheme for the test + scheme = runtime.NewScheme() ) -// TestE2E runs the e2e test suite to validate the solution in an isolated environment. -// The default setup requires Kind and CertManager. 
-// -// To skip CertManager installation, set: CERT_MANAGER_INSTALL_SKIP=true +func init() { + flag.StringVar(&configPath, "e2e.config", "", "Path to the e2e config file") + flag.StringVar(&artifactFolder, "e2e.artifacts-folder", "", "Folder where test artifacts should be stored") + flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "If true, the resource cleanup after tests will be skipped") + flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "If true, use an existing cluster for e2e tests") + + // Register schemes + _ = clientgoscheme.AddToScheme(scheme) // Standard k8s types (apps/v1, core/v1, etc.) + _ = clusterv1.AddToScheme(scheme) + _ = infrav1beta2.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + _ = clusterctlv1.AddToScheme(scheme) +} + func TestE2E(t *testing.T) { RegisterFailHandler(Fail) - _, _ = fmt.Fprintf(GinkgoWriter, "Starting cluster-api-provider-cloudscale e2e test suite\n") - RunSpecs(t, "e2e suite") + ctrl.SetLogger(klog.Background()) + + RunSpecs(t, "cluster-api-provider-cloudscale e2e suite") } -var _ = BeforeSuite(func() { - By("building the manager image") - cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", managerImage)) - _, err := utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager image") +var _ = SynchronizedBeforeSuite(func() []byte { + // This runs only on the first Ginkgo node + Expect(configPath).To(BeAnExistingFile(), "E2E config file is required: --e2e.config=") - // TODO(user): If you want to change the e2e test vendor from Kind, - // ensure the image is built and available, then remove the following block. 
- By("loading the manager image on Kind") - err = utils.LoadImageToKindClusterWithName(managerImage) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager image into Kind") + By("Loading e2e config") + e2eConfig = clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ + ConfigPath: configPath, + }) + Expect(e2eConfig).NotTo(BeNil(), "Failed to load e2e config") - setupCertManager() -}) + By("Validating required environment variables") + apiToken := os.Getenv("CLOUDSCALE_API_TOKEN") + Expect(apiToken).NotTo(BeEmpty(), "CLOUDSCALE_API_TOKEN environment variable is required") -var _ = AfterSuite(func() { - teardownCertManager() -}) + // Add secrets to e2eConfig variables so CreateRepository includes them in clusterctl.yaml + e2eConfig.Variables["CLOUDSCALE_API_TOKEN"] = apiToken -// setupCertManager installs CertManager if needed for webhook tests. -// Skips installation if CERT_MANAGER_INSTALL_SKIP=true or if already present. -func setupCertManager() { - if os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true" { - _, _ = fmt.Fprintf(GinkgoWriter, "Skipping CertManager installation (CERT_MANAGER_INSTALL_SKIP=true)\n") - return - } + sshKey := os.Getenv("CLOUDSCALE_SSH_PUBLIC_KEY") + Expect(sshKey).NotTo(BeEmpty(), "CLOUDSCALE_SSH_PUBLIC_KEY environment variable is required") + e2eConfig.Variables["CLOUDSCALE_SSH_PUBLIC_KEY"] = sshKey + + By("Taking pre-test snapshot of cloudscale infrastructure resources") + cloudscaleClient = newCloudscaleClient(apiToken) + var err error + preTestSnapshot, err = takeResourceSnapshot(ctx, cloudscaleClient) + Expect(err).NotTo(HaveOccurred(), "Failed to snapshot cloudscale resources") - By("checking if CertManager is already installed") - if utils.IsCertManagerCRDsInstalled() { - _, _ = fmt.Fprintf(GinkgoWriter, "CertManager is already installed. 
Skipping installation.\n") - return + By("Setting up artifacts folder") + if artifactFolder == "" { + artifactFolder = filepath.Join(os.TempDir(), "capcs-e2e-artifacts") + } + Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed()) + + By("Creating a clusterctl local repository") + clusterctlConfigPath = clusterctl.CreateRepository(ctx, clusterctl.CreateRepositoryInput{ + E2EConfig: e2eConfig, + RepositoryFolder: filepath.Join(artifactFolder, "repository"), + }) + + By("Setting up bootstrap cluster") + bootstrapClusterProvider, bootstrapClusterProxy = setupBootstrapCluster(e2eConfig, scheme, useExistingCluster) + + By("Initializing management cluster with providers") + clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, + clusterctl.InitManagementClusterAndWatchControllerLogsInput{ + ClusterProxy: bootstrapClusterProxy, + ClusterctlConfigPath: clusterctlConfigPath, + InfrastructureProviders: e2eConfig.InfrastructureProviders(), + // CoreProvider, BootstrapProviders, ControlPlaneProviders use defaults (cluster-api, kubeadm, kubeadm) + // If providers are already installed (use-existing-cluster), init is skipped automatically + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + }, + e2eConfig.GetIntervals("", "wait-controllers")...) + + return []byte(bootstrapClusterProxy.GetKubeconfigPath()) +}, func(data []byte) { + // This runs on all Ginkgo nodes + Expect(configPath).To(BeAnExistingFile(), "E2E config file is required") + + e2eConfig = clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ + ConfigPath: configPath, + }) + Expect(e2eConfig).NotTo(BeNil()) + + if artifactFolder == "" { + artifactFolder = filepath.Join(os.TempDir(), "capcs-e2e-artifacts") } - // Mark for cleanup before installation to handle interruptions and partial installs. 
- shouldCleanupCertManager = true + kubeconfigPath := string(data) + Expect(kubeconfigPath).ToNot(BeEmpty(), "Kubeconfig path was not passed from the first node") + bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, scheme, + framework.WithMachineLogCollector(CloudscaleLogCollector{}), + ) +}) - By("installing CertManager") - Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager") -} +var _ = SynchronizedAfterSuite(func() { + // This runs on all Ginkgo nodes - nothing to do here +}, func() { + // This runs only on the first Ginkgo node + if !skipCleanup && cloudscaleClient != nil && preTestSnapshot != nil { + By("Checking for leaked cloudscale infrastructure resources") + Expect(checkForLeakedResources(ctx, cloudscaleClient, preTestSnapshot)).To(Succeed(), + "Infrastructure resources leaked during test run") + } + + By("Tearing down the management cluster") + if !skipCleanup && bootstrapClusterProvider != nil { + bootstrapClusterProvider.Dispose(ctx) + } +}) -// teardownCertManager uninstalls CertManager if it was installed by setupCertManager. -// This ensures we only remove what we installed. 
-func teardownCertManager() { - if !shouldCleanupCertManager { - _, _ = fmt.Fprintf(GinkgoWriter, "Skipping CertManager cleanup (not installed by this suite)\n") - return +// setupBootstrapCluster creates or uses an existing bootstrap cluster +func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme, useExisting bool) (bootstrap.ClusterProvider, framework.ClusterProxy) { + var clusterProvider bootstrap.ClusterProvider + var clusterProxy framework.ClusterProxy + + if useExisting { + By("Using existing cluster") + kubeconfigPath := os.Getenv("KUBECONFIG") + if kubeconfigPath == "" { + kubeconfigPath = filepath.Join(os.Getenv("HOME"), ".kube", "config") + } + clusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, scheme, + framework.WithMachineLogCollector(CloudscaleLogCollector{}), + ) + } else { + By("Creating a Kind bootstrap cluster") + clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, + bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{ + Name: config.ManagementClusterName, + RequiresDockerSock: true, + Images: config.Images, + }) + Expect(clusterProvider).NotTo(BeNil(), "Failed to create Kind cluster") + + kubeconfigPath := clusterProvider.GetKubeconfigPath() + Expect(kubeconfigPath).To(BeAnExistingFile(), "Kubeconfig should exist") + + clusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, scheme, + framework.WithMachineLogCollector(CloudscaleLogCollector{}), + ) } - By("uninstalling CertManager") - utils.UninstallCertManager() + return clusterProvider, clusterProxy } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index abdcf18..9082085 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -1,5 +1,4 @@ //go:build e2e -// +build e2e /* Copyright 2026 cloudscale.ch. @@ -20,394 +19,126 @@ limitations under the License. package e2e import ( - "encoding/json" - "fmt" - "os" - "os/exec" - "path/filepath" - "time" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/cloudscale-ch/cluster-api-provider-cloudscale/test/utils" + "k8s.io/utils/ptr" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" ) -// namespace where the project is deployed in -const namespace = "cluster-api-provider-cloudscale-system" - -// serviceAccountName created for the project -const serviceAccountName = "cluster-api-provider-cloudscale-controller-manager" - -// metricsServiceName is the name of the metrics service of the project -const metricsServiceName = "cluster-api-provider-cloudscale-controller-manager-metrics-service" - -// metricsRoleBindingName is the name of the RBAC that will be created to allow get the metrics data -const metricsRoleBindingName = "cluster-api-provider-cloudscale-metrics-binding" - -var _ = Describe("Manager", Ordered, func() { - var controllerPodName string - - // Before running the tests, set up the environment by creating the namespace, - // enforce the restricted security policy to the namespace, installing CRDs, - // and deploying the controller. 
- BeforeAll(func() { - By("creating manager namespace") - cmd := exec.Command("kubectl", "create", "ns", namespace) - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to create namespace") - - By("labeling the namespace to enforce the restricted security policy") - cmd = exec.Command("kubectl", "label", "--overwrite", "ns", namespace, - "pod-security.kubernetes.io/enforce=restricted") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to label namespace with restricted policy") - - By("installing CRDs") - cmd = exec.Command("make", "install") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs") - - By("deploying the controller-manager") - cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", managerImage)) - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager") - }) - - // After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs, - // and deleting the namespace. - AfterAll(func() { - By("cleaning up the curl pod for metrics") - cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace) - _, _ = utils.Run(cmd) - - By("undeploying the controller-manager") - cmd = exec.Command("make", "undeploy") - _, _ = utils.Run(cmd) - - By("uninstalling CRDs") - cmd = exec.Command("make", "uninstall") - _, _ = utils.Run(cmd) - - By("removing manager namespace") - cmd = exec.Command("kubectl", "delete", "ns", namespace) - _, _ = utils.Run(cmd) - }) - - // After each test, check for failures and collect logs, events, - // and pod descriptions for debugging. 
- AfterEach(func() { - specReport := CurrentSpecReport() - if specReport.Failed() { - By("Fetching controller manager pod logs") - cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) - controllerLogs, err := utils.Run(cmd) - if err == nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs) - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err) +// Workload cluster lifecycle tests verify basic cluster provisioning on cloudscale. +// PostMachinesProvisioned validates that all expected cloudscale resources (network, +// subnet, LB, servers) are present. The HA variant adds server groups for anti-affinity. +// +// QuickStartSpec exercises the full create-wait-validate-delete lifecycle without the +// overhead of upgrades or pivots. +var _ = Describe("Workload cluster lifecycle", Label("lifecycle"), func() { + Context("With single control-plane node", func() { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: ptr.To("cloudscale"), + Flavor: ptr.To(""), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), + PostMachinesProvisioned: validateCloudscaleResources, } - - By("Fetching Kubernetes events") - cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp") - eventsOutput, err := utils.Run(cmd) - if err == nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput) - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err) - } - - By("Fetching curl-metrics logs") - cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) - metricsOutput, err := utils.Run(cmd) - if err == nil { - _, _ = 
fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput) - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err) - } - - By("Fetching controller manager pod description") - cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace) - podDescription, err := utils.Run(cmd) - if err == nil { - fmt.Println("Pod description:\n", podDescription) - } else { - fmt.Println("Failed to describe controller pod") - } - } - }) - - SetDefaultEventuallyTimeout(2 * time.Minute) - SetDefaultEventuallyPollingInterval(time.Second) - - Context("Manager", func() { - It("should run successfully", func() { - By("validating that the controller-manager pod is running as expected") - verifyControllerUp := func(g Gomega) { - // Get the name of the controller-manager pod - cmd := exec.Command("kubectl", "get", - "pods", "-l", "control-plane=controller-manager", - "-o", "go-template={{ range .items }}"+ - "{{ if not .metadata.deletionTimestamp }}"+ - "{{ .metadata.name }}"+ - "{{ \"\\n\" }}{{ end }}{{ end }}", - "-n", namespace, - ) - - podOutput, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information") - podNames := utils.GetNonEmptyLines(podOutput) - g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running") - controllerPodName = podNames[0] - g.Expect(controllerPodName).To(ContainSubstring("controller-manager")) - - // Validate the pod's status - cmd = exec.Command("kubectl", "get", - "pods", controllerPodName, "-o", "jsonpath={.status.phase}", - "-n", namespace, - ) - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status") - } - Eventually(verifyControllerUp).Should(Succeed()) - }) - - It("should ensure the metrics endpoint is serving metrics", func() { - By("creating a ClusterRoleBinding for the service account to allow access to metrics") - cmd := 
exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName, - "--clusterrole=cluster-api-provider-cloudscale-metrics-reader", - fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName), - ) - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding") - - By("validating that the metrics service is available") - cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace) - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") - - By("getting the service account token") - token, err := serviceAccountToken() - Expect(err).NotTo(HaveOccurred()) - Expect(token).NotTo(BeEmpty()) - - By("ensuring the controller pod is ready") - verifyControllerPodReady := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "pod", controllerPodName, "-n", namespace, - "-o", "jsonpath={.status.conditions[?(@.type=='Ready')].status}") - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(output).To(Equal("True"), "Controller pod not ready") - } - Eventually(verifyControllerPodReady, 3*time.Minute, time.Second).Should(Succeed()) - - By("verifying that the controller manager is serving the metrics server") - verifyMetricsServerStarted := func(g Gomega) { - cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace) - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(output).To(ContainSubstring("Serving metrics server"), - "Metrics server not yet started") - } - Eventually(verifyMetricsServerStarted, 3*time.Minute, time.Second).Should(Succeed()) - - By("waiting for the webhook service endpoints to be ready") - verifyWebhookEndpointsReady := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "endpointslices.discovery.k8s.io", "-n", namespace, - "-l", "kubernetes.io/service-name=cluster-api-provider-cloudscale-webhook-service", - "-o", "jsonpath={range .items[*]}{range 
.endpoints[*]}{.addresses[*]}{end}{end}") - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred(), "Webhook endpoints should exist") - g.Expect(output).ShouldNot(BeEmpty(), "Webhook endpoints not yet ready") - } - Eventually(verifyWebhookEndpointsReady, 3*time.Minute, time.Second).Should(Succeed()) - - By("verifying the mutating webhook server is ready") - verifyMutatingWebhookReady := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "mutatingwebhookconfigurations.admissionregistration.k8s.io", - "cluster-api-provider-cloudscale-mutating-webhook-configuration", - "-o", "jsonpath={.webhooks[0].clientConfig.caBundle}") - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred(), "MutatingWebhookConfiguration should exist") - g.Expect(output).ShouldNot(BeEmpty(), "Mutating webhook CA bundle not yet injected") - } - Eventually(verifyMutatingWebhookReady, 3*time.Minute, time.Second).Should(Succeed()) - - By("verifying the validating webhook server is ready") - verifyValidatingWebhookReady := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "validatingwebhookconfigurations.admissionregistration.k8s.io", - "cluster-api-provider-cloudscale-validating-webhook-configuration", - "-o", "jsonpath={.webhooks[0].clientConfig.caBundle}") - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred(), "ValidatingWebhookConfiguration should exist") - g.Expect(output).ShouldNot(BeEmpty(), "Validating webhook CA bundle not yet injected") - } - Eventually(verifyValidatingWebhookReady, 3*time.Minute, time.Second).Should(Succeed()) - - By("waiting additional time for webhook server to stabilize") - time.Sleep(5 * time.Second) - - // +kubebuilder:scaffold:e2e-metrics-webhooks-readiness - - By("creating the curl-metrics pod to access the metrics endpoint") - cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never", - "--namespace", namespace, - "--image=curlimages/curl:latest", - "--overrides", - fmt.Sprintf(`{ - "spec": { - 
"containers": [{ - "name": "curl", - "image": "curlimages/curl:latest", - "command": ["/bin/sh", "-c"], - "args": [ - "for i in $(seq 1 30); do curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics && exit 0 || sleep 2; done; exit 1" - ], - "securityContext": { - "readOnlyRootFilesystem": true, - "allowPrivilegeEscalation": false, - "capabilities": { - "drop": ["ALL"] - }, - "runAsNonRoot": true, - "runAsUser": 1000, - "seccompProfile": { - "type": "RuntimeDefault" - } - } - }], - "serviceAccountName": "%s" - } - }`, token, metricsServiceName, namespace, serviceAccountName)) - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") - - By("waiting for the curl-metrics pod to complete.") - verifyCurlUp := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "pods", "curl-metrics", - "-o", "jsonpath={.status.phase}", - "-n", namespace) - output, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status") - } - Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) - - By("getting the metrics by checking curl-metrics logs") - verifyMetricsAvailable := func(g Gomega) { - metricsOutput, err := getMetricsOutput() - g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") - g.Expect(metricsOutput).NotTo(BeEmpty()) - g.Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK")) - } - Eventually(verifyMetricsAvailable, 2*time.Minute).Should(Succeed()) - }) - - It("should provisioned cert-manager", func() { - By("validating that cert-manager has the certificate Secret") - verifyCertManager := func(g Gomega) { - cmd := exec.Command("kubectl", "get", "secrets", "webhook-server-cert", "-n", namespace) - _, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - } - Eventually(verifyCertManager).Should(Succeed()) - }) - - It("should have CA injection for mutating webhooks", func() { - By("checking CA 
injection for mutating webhooks") - verifyCAInjection := func(g Gomega) { - cmd := exec.Command("kubectl", "get", - "mutatingwebhookconfigurations.admissionregistration.k8s.io", - "cluster-api-provider-cloudscale-mutating-webhook-configuration", - "-o", "go-template={{ range .webhooks }}{{ .clientConfig.caBundle }}{{ end }}") - mwhOutput, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(mwhOutput)).To(BeNumerically(">", 10)) - } - Eventually(verifyCAInjection).Should(Succeed()) }) + }) - It("should have CA injection for validating webhooks", func() { - By("checking CA injection for validating webhooks") - verifyCAInjection := func(g Gomega) { - cmd := exec.Command("kubectl", "get", - "validatingwebhookconfigurations.admissionregistration.k8s.io", - "cluster-api-provider-cloudscale-validating-webhook-configuration", - "-o", "go-template={{ range .webhooks }}{{ .clientConfig.caBundle }}{{ end }}") - vwhOutput, err := utils.Run(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(vwhOutput)).To(BeNumerically(">", 10)) + Context("With HA control-plane", Label("ha"), func() { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: ptr.To("cloudscale"), + Flavor: ptr.To("ha"), + ControlPlaneMachineCount: ptr.To[int64](3), + WorkerMachineCount: ptr.To[int64](2), + PostMachinesProvisioned: validateCloudscaleResources, } - Eventually(verifyCAInjection).Should(Succeed()) }) - - // +kubebuilder:scaffold:e2e-webhooks-checks - - // TODO: Customize the e2e test suite with scenarios specific to your project. 
- // Consider applying sample/CR(s) and check their status and/or verifying - // the reconciliation by using the metrics, i.e.: - // metricsOutput, err := getMetricsOutput() - // Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") - // Expect(metricsOutput).To(ContainSubstring( - // fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`, - // strings.ToLower(), - // )) }) }) -// serviceAccountToken returns a token for the specified service account in the given namespace. -// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request -// and parsing the resulting token from the API response. -func serviceAccountToken() (string, error) { - const tokenRequestRawString = `{ - "apiVersion": "authentication.k8s.io/v1", - "kind": "TokenRequest" - }` - - // Temporary file to store the token request - secretName := fmt.Sprintf("%s-token-request", serviceAccountName) - tokenRequestFile := filepath.Join("/tmp", secretName) - err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644)) - if err != nil { - return "", err - } - - var out string - verifyTokenCreation := func(g Gomega) { - // Execute kubectl command to create the token - cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf( - "/api/v1/namespaces/%s/serviceaccounts/%s/token", - namespace, - serviceAccountName, - ), "-f", tokenRequestFile) - - output, err := cmd.CombinedOutput() - g.Expect(err).NotTo(HaveOccurred()) - - // Parse the JSON output to extract the token - var token tokenRequest - err = json.Unmarshal(output, &token) - g.Expect(err).NotTo(HaveOccurred()) - - out = token.Status.Token - } - Eventually(verifyTokenCreation).Should(Succeed()) +// Cluster upgrade tests verify in-place Kubernetes version upgrades by rolling +// control-plane and worker nodes to new machine images. 
Conformance tests are skipped +// (SkipConformanceTests: true) to keep runtime reasonable — the separate conformance +// spec covers that. +var _ = Describe("Cluster upgrade", Label("upgrade"), func() { + capi_e2e.ClusterUpgradeConformanceSpec(ctx, func() capi_e2e.ClusterUpgradeConformanceSpecInput { + return capi_e2e.ClusterUpgradeConformanceSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + SkipConformanceTests: true, + InfrastructureProvider: ptr.To("cloudscale"), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), + } + }) +}) - return out, err -} +// Self-hosted tests verify the pivot workflow: CAPI management components are moved +// from the bootstrap kind cluster into the workload cluster via clusterctl move. +// SkipUpgrade is set to isolate the pivot test from upgrade mechanics. This catches +// regressions in our provider's ability to manage itself after pivot. +var _ = Describe("Self-hosted cluster", Label("self-hosted"), func() { + capi_e2e.SelfHostedSpec(ctx, func() capi_e2e.SelfHostedSpecInput { + return capi_e2e.SelfHostedSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: ptr.To("cloudscale"), + SkipUpgrade: true, + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), + } + }) +}) -// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint. -func getMetricsOutput() (string, error) { - By("getting the curl-metrics logs") - cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) - return utils.Run(cmd) -} +// MD remediation tests verify that unhealthy worker machines are automatically +// replaced via MachineHealthCheck. 
The CAPI MachineDeploymentRemediationSpec +// marks a worker node unhealthy and validates that MHC detects and replaces +// the machine. Runs weekly. +// +// KCP remediation (KCPRemediationSpec) was intentionally left out — it requires +// VMs to call back to the management cluster API (via wait-signal.sh), which is +// not possible when the management cluster is a local Kind cluster. +var _ = Describe("MD remediation", Label("md-remediation"), func() { + capi_e2e.MachineDeploymentRemediationSpec(ctx, func() capi_e2e.MachineDeploymentRemediationSpecInput { + return capi_e2e.MachineDeploymentRemediationSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: ptr.To("cloudscale"), + } + }) +}) -// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response, -// containing only the token field that we need to extract. -type tokenRequest struct { - Status struct { - Token string `json:"token"` - } `json:"status"` -} +// Kubernetes conformance runs the official K8s conformance suite (via kubetest) against +// a provisioned workload cluster. +// This ensures our provider produces clusters that pass the K8s conformance bar. +var _ = Describe("Kubernetes conformance", Label("conformance"), func() { + capi_e2e.K8SConformanceSpec(ctx, func() capi_e2e.K8SConformanceSpecInput { + return capi_e2e.K8SConformanceSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: ptr.To("cloudscale"), + } + }) +}) diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go new file mode 100644 index 0000000..a59a049 --- /dev/null +++ b/test/e2e/helpers.go @@ -0,0 +1,80 @@ +//go:build e2e + +/* +Copyright 2026 cloudscale.ch. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1beta2 "github.com/cloudscale-ch/cluster-api-provider-cloudscale/api/v1beta2" +) + +// validateCloudscaleResources validates that cloudscale-specific resources are properly created. +// This is called by QuickStartSpec after machines are provisioned. 
+func validateCloudscaleResources(proxy framework.ClusterProxy, namespace, clusterName string) { + ctx := context.Background() + c := proxy.GetClient() + + By("Validating CloudscaleCluster resources") + + // Get CloudscaleCluster + cloudscaleCluster := &infrav1beta2.CloudscaleCluster{} + key := client.ObjectKey{Namespace: namespace, Name: clusterName} + Expect(c.Get(ctx, key, cloudscaleCluster)).To(Succeed(), "Failed to get CloudscaleCluster") + + // Validate network resources are created + Expect(cloudscaleCluster.Status.NetworkID).NotTo(BeEmpty(), "NetworkID should be set") + Expect(cloudscaleCluster.Status.SubnetID).NotTo(BeEmpty(), "SubnetID should be set") + // Validate load balancer resources (if enabled - default is true) + if ptr.Deref(cloudscaleCluster.Spec.ControlPlaneLoadBalancer.Enabled, true) { + Expect(cloudscaleCluster.Status.LoadBalancerID).NotTo(BeEmpty(), "LoadBalancerID should be set") + Expect(cloudscaleCluster.Status.LoadBalancerPoolID).NotTo(BeEmpty(), "LoadBalancerPoolID should be set") + Expect(cloudscaleCluster.Status.LoadBalancerListenerID).NotTo(BeEmpty(), "LoadBalancerListenerID should be set") + } + + // Validate provisioned status + Expect(ptr.Deref(cloudscaleCluster.Status.Initialization.Provisioned, false)).To(BeTrue(), "CloudscaleCluster should be provisioned") + + // Validate control plane endpoint + Expect(cloudscaleCluster.Spec.ControlPlaneEndpoint).NotTo(BeNil(), "ControlPlaneEndpoint should be set") + Expect(cloudscaleCluster.Spec.ControlPlaneEndpoint.Host).NotTo(BeEmpty(), "ControlPlaneEndpoint.Host should be set") + Expect(cloudscaleCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(int32(6443)), "ControlPlaneEndpoint.Port should be 6443") + + By("Validating CloudscaleMachine resources") + + // List CloudscaleMachines + machineList := &infrav1beta2.CloudscaleMachineList{} + Expect(c.List(ctx, machineList, client.InNamespace(namespace))).To(Succeed(), "Failed to list CloudscaleMachines") + + for _, machine := range 
machineList.Items { + // Validate each machine has a server ID + Expect(machine.Status.ServerID).NotTo(BeEmpty(), "Machine %s should have ServerID", machine.Name) + + // Validate provisioned status + Expect(ptr.Deref(machine.Status.Initialization.Provisioned, false)).To(BeTrue(), "Machine %s should be provisioned", machine.Name) + + // Validate addresses + Expect(machine.Status.Addresses).NotTo(BeEmpty(), "Machine %s should have addresses", machine.Name) + } +} diff --git a/test/e2e/log_collector.go b/test/e2e/log_collector.go new file mode 100644 index 0000000..e6ee81d --- /dev/null +++ b/test/e2e/log_collector.go @@ -0,0 +1,203 @@ +//go:build e2e + +/* +Copyright 2026 cloudscale.ch. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "net" + "os" + osExec "os/exec" + "path/filepath" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +const sshUser = "capi" + +// CloudscaleLogCollector collects logs from cloudscale VMs via SSH. 
+type CloudscaleLogCollector struct{}
+
+func (c CloudscaleLogCollector) CollectMachineLog(ctx context.Context, _ client.Client, m *clusterv1.Machine, outputPath string) error {
+	logger := log.FromContext(ctx).WithValues("machine", m.Name)
+
+	ip := machineExternalIP(m)
+	if ip == "" {
+		return fmt.Errorf("no external IP found for machine %s", m.Name)
+	}
+
+	sshClient, err := sshDial(ip)
+	if err != nil {
+		return fmt.Errorf("SSH dial to %s (%s): %w", m.Name, ip, err)
+	}
+	defer sshClient.Close()
+
+	if err := os.MkdirAll(outputPath, 0750); err != nil {
+		return fmt.Errorf("create output dir: %w", err)
+	}
+
+	commands := []struct {
+		outputFile string
+		command    string
+	}{
+		{"journal.log", "sudo journalctl --no-pager --output=short-precise"},
+		{"kern.log", "sudo journalctl --no-pager --output=short-precise -k"},
+		{"kubelet.log", "sudo journalctl --no-pager --output=short-precise -u kubelet.service"},
+		{"containerd.log", "sudo journalctl --no-pager --output=short-precise -u containerd.service"},
+		{"cloud-init.log", "sudo cat /var/log/cloud-init.log"},
+		{"cloud-init-output.log", "sudo cat /var/log/cloud-init-output.log"},
+		{"crictl-info.txt", "sudo crictl info"},
+	}
+
+	for _, cmd := range commands {
+		if err := sshRunToFile(sshClient, cmd.command, filepath.Join(outputPath, cmd.outputFile)); err != nil {
+			logger.V(1).Info("Failed to collect log", "file", cmd.outputFile, "error", err)
+		}
+	}
+
+	// Collect /var/log/pods as a tarball and extract locally
+	if err := sshCollectPods(sshClient, filepath.Join(outputPath, "pods")); err != nil {
+		logger.V(1).Info("Failed to collect pod logs", "error", err)
+	}
+
+	return nil
+}
+
+func (c CloudscaleLogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *clusterv1.MachinePool, _ string) error {
+	return nil
+}
+
+func (c CloudscaleLogCollector) CollectInfrastructureLogs(_ context.Context, _ client.Client, _ *clusterv1.Cluster, _ string) error {
+	return nil
+}
+
+// machineExternalIP returns the first ExternalIP address from the Machine status.
+func machineExternalIP(m *clusterv1.Machine) string {
+	for _, addr := range m.Status.Addresses {
+		if addr.Type == clusterv1.MachineExternalIP {
+			return addr.Address
+		}
+	}
+	return ""
+}
+
+// sshDial connects to a host via SSH using the ssh-agent.
+func sshDial(host string) (*ssh.Client, error) {
+	sock := os.Getenv("SSH_AUTH_SOCK")
+	if sock == "" {
+		return nil, fmt.Errorf("SSH_AUTH_SOCK not set; ssh-agent is required for log collection")
+	}
+
+	conn, err := net.Dial("unix", sock)
+	if err != nil {
+		return nil, fmt.Errorf("connect to ssh-agent: %w", err)
+	}
+	defer conn.Close() // agent signing only happens during the handshake; don't leak the socket
+
+	agentClient := agent.NewClient(conn)
+	config := &ssh.ClientConfig{
+		User: sshUser,
+		Auth: []ssh.AuthMethod{
+			ssh.PublicKeysCallback(agentClient.Signers),
+		},
+		HostKeyCallback: ssh.InsecureIgnoreHostKey(), //nolint:gosec // E2E test machines are ephemeral.
+	}
+
+	client, err := ssh.Dial("tcp", net.JoinHostPort(host, "22"), config)
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+// sshRunToFile runs a command over SSH and writes stdout to a local file.
+func sshRunToFile(client *ssh.Client, command, outputFile string) error {
+	session, err := client.NewSession()
+	if err != nil {
+		return err
+	}
+	defer session.Close()
+
+	output, err := session.CombinedOutput(command)
+	if err != nil {
+		// Still write partial output if available
+		if len(output) > 0 {
+			_ = writeFile(outputFile, output)
+		}
+		return err
+	}
+
+	return writeFile(outputFile, output)
+}
+
+// sshCollectPods tars /var/log/pods on the remote and extracts it locally.
+func sshCollectPods(client *ssh.Client, outputDir string) error {
+	session, err := client.NewSession()
+	if err != nil {
+		return err
+	}
+	defer session.Close()
+
+	tarData, err := session.Output("sudo tar -cf - -C /var/log/pods . 2>/dev/null") // stdout only: CombinedOutput would let stderr corrupt the tar stream
+	if err != nil {
+		return fmt.Errorf("tar pods: %w", err)
+	}
+
+	if len(tarData) == 0 {
+		return nil
+	}
+
+	if err := os.MkdirAll(outputDir, 0750); err != nil {
+		return err
+	}
+
+	// Write tar to temp file and extract
+	tmpFile, err := os.CreateTemp("", "pods-*.tar")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(tmpFile.Name())
+
+	if _, err := tmpFile.Write(tarData); err != nil {
+		tmpFile.Close()
+		return err
+	}
+	tmpFile.Close()
+
+	// Extract with local tar; args are passed directly so paths with spaces are safe.
+	cmd := osExec.Command("tar", "-xf", tmpFile.Name(), "-C", outputDir)
+	return cmd.Run()
+}
+
+// writeFile writes data to a file, creating parent directories as needed.
+func writeFile(path string, data []byte) error {
+	if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil {
+		return err
+	}
+	return os.WriteFile(path, data, 0600)
+}
+
+// runLocalCommand runs a shell command on the local machine.
+func runLocalCommand(command string) error {
+	return osExec.Command("sh", "-c", command).Run() //nolint:gosec // E2E test helper with controlled input.
+}
diff --git a/test/utils/utils.go b/test/utils/utils.go
deleted file mode 100644
index cd60dc8..0000000
--- a/test/utils/utils.go
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
-Copyright 2026 cloudscale.ch.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"os"
-	"os/exec"
-	"strings"
-
-	. 
"github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck -) - -const ( - certmanagerVersion = "v1.19.4" - certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml" - - defaultKindBinary = "kind" - defaultKindCluster = "kind" -) - -func warnError(err error) { - _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err) -} - -// Run executes the provided command within this context -func Run(cmd *exec.Cmd) (string, error) { - dir, _ := GetProjectDir() - cmd.Dir = dir - - if err := os.Chdir(cmd.Dir); err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err) - } - - cmd.Env = append(os.Environ(), "GO111MODULE=on") - command := strings.Join(cmd.Args, " ") - _, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command) - output, err := cmd.CombinedOutput() - if err != nil { - return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err) - } - - return string(output), nil -} - -// UninstallCertManager uninstalls the cert manager -func UninstallCertManager() { - url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) - cmd := exec.Command("kubectl", "delete", "-f", url) - if _, err := Run(cmd); err != nil { - warnError(err) - } - - // Delete leftover leases in kube-system (not cleaned by default) - kubeSystemLeases := []string{ - "cert-manager-cainjector-leader-election", - "cert-manager-controller", - } - for _, lease := range kubeSystemLeases { - cmd = exec.Command("kubectl", "delete", "lease", lease, - "-n", "kube-system", "--ignore-not-found", "--force", "--grace-period=0") - if _, err := Run(cmd); err != nil { - warnError(err) - } - } -} - -// InstallCertManager installs the cert manager bundle. 
-func InstallCertManager() error { - url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) - cmd := exec.Command("kubectl", "apply", "-f", url) - if _, err := Run(cmd); err != nil { - return err - } - // Wait for cert-manager-webhook to be ready, which can take time if cert-manager - // was re-installed after uninstalling on a cluster. - cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook", - "--for", "condition=Available", - "--namespace", "cert-manager", - "--timeout", "5m", - ) - - _, err := Run(cmd) - return err -} - -// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed -// by verifying the existence of key CRDs related to Cert Manager. -func IsCertManagerCRDsInstalled() bool { - // List of common Cert Manager CRDs - certManagerCRDs := []string{ - "certificates.cert-manager.io", - "issuers.cert-manager.io", - "clusterissuers.cert-manager.io", - "certificaterequests.cert-manager.io", - "orders.acme.cert-manager.io", - "challenges.acme.cert-manager.io", - } - - // Execute the kubectl command to get all CRDs - cmd := exec.Command("kubectl", "get", "crds") - output, err := Run(cmd) - if err != nil { - return false - } - - // Check if any of the Cert Manager CRDs are present - crdList := GetNonEmptyLines(output) - for _, crd := range certManagerCRDs { - for _, line := range crdList { - if strings.Contains(line, crd) { - return true - } - } - } - - return false -} - -// LoadImageToKindClusterWithName loads a local docker image to the kind cluster -func LoadImageToKindClusterWithName(name string) error { - cluster := defaultKindCluster - if v, ok := os.LookupEnv("KIND_CLUSTER"); ok { - cluster = v - } - kindOptions := []string{"load", "docker-image", name, "--name", cluster} - kindBinary := defaultKindBinary - if v, ok := os.LookupEnv("KIND"); ok { - kindBinary = v - } - cmd := exec.Command(kindBinary, kindOptions...) 
- _, err := Run(cmd) - return err -} - -// GetNonEmptyLines converts given command output string into individual objects -// according to line breakers, and ignores the empty elements in it. -func GetNonEmptyLines(output string) []string { - var res []string - elements := strings.SplitSeq(output, "\n") - for element := range elements { - if element != "" { - res = append(res, element) - } - } - - return res -} - -// GetProjectDir will return the directory where the project is -func GetProjectDir() (string, error) { - wd, err := os.Getwd() - if err != nil { - return wd, fmt.Errorf("failed to get current working directory: %w", err) - } - wd = strings.ReplaceAll(wd, "/test/e2e", "") - return wd, nil -} - -// UncommentCode searches for target in the file and remove the comment prefix -// of the target content. The target content may span multiple lines. -func UncommentCode(filename, target, prefix string) error { - // false positive - // nolint:gosec - content, err := os.ReadFile(filename) - if err != nil { - return fmt.Errorf("failed to read file %q: %w", filename, err) - } - strContent := string(content) - - idx := strings.Index(strContent, target) - if idx < 0 { - return fmt.Errorf("unable to find the code %q to be uncommented", target) - } - - out := new(bytes.Buffer) - _, err = out.Write(content[:idx]) - if err != nil { - return fmt.Errorf("failed to write to output: %w", err) - } - - scanner := bufio.NewScanner(bytes.NewBufferString(target)) - if !scanner.Scan() { - return nil - } - for { - if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil { - return fmt.Errorf("failed to write to output: %w", err) - } - // Avoid writing a newline in case the previous line was the last in target. 
- if !scanner.Scan() { - break - } - if _, err = out.WriteString("\n"); err != nil { - return fmt.Errorf("failed to write to output: %w", err) - } - } - - if _, err = out.Write(content[idx+len(target):]); err != nil { - return fmt.Errorf("failed to write to output: %w", err) - } - - // false positive - // nolint:gosec - if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil { - return fmt.Errorf("failed to write file %q: %w", filename, err) - } - - return nil -} diff --git a/tilt-provider.yaml b/tilt-provider.yaml index 70cca14..60057c2 100644 --- a/tilt-provider.yaml +++ b/tilt-provider.yaml @@ -1,6 +1,6 @@ name: cloudscale config: - image: controller:latest # change to remote image name if desired + image: quay.io/cloudscalech/capcs-staging:latest label: CAPCS live_reload_deps: ["cmd", "go.mod", "go.sum", "api", "internal"] go_main: cmd/main.go \ No newline at end of file