
Added Backup-Restore tests
Signed-off-by: Chandan Pinjani <[email protected]>
cpinjani committed Jan 15, 2025
1 parent 8044bf1 commit d483d97
Showing 30 changed files with 1,080 additions and 202 deletions.
20 changes: 20 additions & 0 deletions .github/workflows/main.yaml
@@ -42,6 +42,9 @@ on:
      proxy:
        description: Install Rancher behind proxy
        type: boolean
      backup_operator_version:
        description: Backup Restore operator version
        type: string

env:
  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
@@ -329,6 +332,19 @@ jobs:
        run: |
          make e2e-sync-import-tests
      - name: Backup/Restore cluster tests
        if: ${{ !cancelled() && steps.prepare-rancher.outcome != 'failure' && contains(inputs.tests_to_run, 'backup_restore') }}
        env:
          RANCHER_HOSTNAME: ${{ env.RANCHER_HOSTNAME }}
          RANCHER_PASSWORD: ${{ env.RANCHER_PASSWORD }}
          RANCHER_VERSION: ${{ inputs.rancher_version }}
          BACKUP_OPERATOR_VERSION: ${{ inputs.backup_operator_version }}
          CATTLE_TEST_CONFIG: ${{ github.workspace }}/cattle-config-provisioning.yaml
          QASE_RUN_ID: ${{ steps.qase.outputs.qase_run_id }}
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
        run: |
          make e2e-backup-restore-tests
      - name: Finalize Qase Run and publish Results
        env:
          QASE_RUN_ID: ${{ steps.qase.outputs.qase_run_id }}
@@ -373,6 +389,10 @@ jobs:
          echo "Tests run: ${{ inputs.tests_to_run }}" >> ${GITHUB_STEP_SUMMARY}
          OPERATOR_HELM_VERSION=$(helm get metadata rancher-${{ inputs.hosted_provider }}-operator -n cattle-system -o json | jq -r .version)
          echo "Installed rancher-${{ inputs.hosted_provider }}-operator chart version: $OPERATOR_HELM_VERSION" >> ${GITHUB_STEP_SUMMARY}
          if [ "${{ inputs.tests_to_run }}" = "backup_restore" ]; then
            BR_OPERATOR_HELM_VERSION=$(helm get metadata rancher-backup -n cattle-resources-system -o json | jq -r .version)
            echo "Installed backup-restore operator chart version: $BR_OPERATOR_HELM_VERSION" >> ${GITHUB_STEP_SUMMARY}
          fi
  delete-runner:
    if: ${{ always() && inputs.destroy_runner == true }}
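For reference, a hypothetical manual dispatch that exercises the new input (the gh CLI call and the placeholder values are illustrative; the input names are the ones defined in this workflow):

    # Hypothetical: dispatch the e2e workflow with the backup/restore suite selected
    gh workflow run main.yaml \
      -f hosted_provider=aks \
      -f rancher_version=<rancher-version> \
      -f tests_to_run=backup_restore \
      -f backup_operator_version=<chart-version>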
3 changes: 3 additions & 0 deletions Makefile
@@ -54,6 +54,9 @@ e2e-k8s-chart-support-import-tests: deps ## Run the 'K8sChartSupportImport' test
e2e-k8s-chart-support-provisioning-tests: deps ## Run the 'K8sChartSupportProvisioning' test suite for a given ${PROVIDER}
	ginkgo ${STANDARD_TEST_OPTIONS} --focus "K8sChartSupportProvisioning" ./hosted/${PROVIDER}/k8s_chart_support

e2e-backup-restore-tests: deps ## Run the 'BackupRestore' test suite for a given ${PROVIDER}
	ginkgo ${STANDARD_TEST_OPTIONS} --focus "BackupRestore" ./hosted/${PROVIDER}/backup_restore

clean-k3s: ## Uninstall k3s cluster
	/usr/local/bin/k3s-killall.sh && /usr/local/bin/k3s-uninstall.sh || true
	sudo rm -r /etc/default/k3s || true
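A usage sketch for the new target (the environment variables mirror the workflow step above; the values are placeholders, and the kubeconfig path assumes the k3s setup used in CI):

    # Hypothetical local run of the suite against AKS
    export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
    export RANCHER_HOSTNAME=<rancher-hostname>
    export RANCHER_PASSWORD=<password>
    export BACKUP_OPERATOR_VERSION=<chart-version>
    PROVIDER=aks make e2e-backup-restore-tests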
100 changes: 100 additions & 0 deletions hosted/aks/backup_restore/backup_restore_suite_test.go
@@ -0,0 +1,100 @@
/*
Copyright © 2023 - 2024 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup_test

import (
	"fmt"
	"os"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	. "github.com/rancher-sandbox/qase-ginkgo"

	"testing"

	"github.com/rancher/hosted-providers-e2e/hosted/aks/helper"
	"github.com/rancher/hosted-providers-e2e/hosted/helpers"
	"github.com/rancher/shepherd/clients/rancher"
	management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
	namegen "github.com/rancher/shepherd/pkg/namegenerator"
)

const (
	increaseBy = 1
)

var (
	testCaseID              int64
	clusterName, backupFile string
	ctx                     helpers.RancherContext
	cluster                 *management.Cluster
	location                = helpers.GetAKSLocation()
	k3sVersion              = os.Getenv("INSTALL_K3S_VERSION")
)

func TestBackupRestore(t *testing.T) {
	RegisterFailHandler(Fail)
	helpers.CommonSynchronizedBeforeSuite()
	ctx = helpers.CommonBeforeSuite()
	RunSpecs(t, "BackupRestore Suite")
}

var _ = ReportBeforeEach(func(report SpecReport) {
	// Reset case ID
	testCaseID = -1
})

var _ = ReportAfterEach(func(report SpecReport) {
	// Add result in Qase if asked
	Qase(testCaseID, report)
})

var _ = BeforeEach(func() {
	clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix)
	k8sVersion, err := helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCredID, location, false)
	Expect(err).NotTo(HaveOccurred())

	GinkgoLogr.Info(fmt.Sprintf("Using K8s version %s for cluster %s", k8sVersion, clusterName))
	cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, location, nil)
	Expect(err).To(BeNil())
	cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
	Expect(err).To(BeNil())
})

var _ = AfterEach(func() {
	if ctx.ClusterCleanup && cluster != nil {
		err := helper.DeleteAKSHostCluster(cluster, ctx.RancherAdminClient)
		Expect(err).To(BeNil())
	} else {
		fmt.Println("Skipping downstream cluster deletion: ", clusterName)
	}
})

func restoreNodesChecks(cluster *management.Cluster, client *rancher.Client, clusterName string) {
	helpers.ClusterIsReadyChecks(cluster, client, clusterName)
	initialNodeCount := *cluster.AKSConfig.NodePools[0].Count

	By("scaling up the nodepool", func() {
		var err error
		cluster, err = helper.ScaleNodePool(cluster, client, initialNodeCount+1, true, true)
		Expect(err).To(BeNil())
	})

	By("adding a nodepool", func() {
		var err error
		cluster, err = helper.AddNodePool(cluster, increaseBy, client, true, true)
		Expect(err).To(BeNil())
	})
}
84 changes: 84 additions & 0 deletions hosted/aks/backup_restore/backup_restore_test.go
@@ -0,0 +1,84 @@
/*
Copyright © 2022 - 2025 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup_test

import (
	"os"
	"os/exec"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/rancher-sandbox/ele-testhelpers/kubectl"
	"github.com/rancher-sandbox/ele-testhelpers/tools"
	"github.com/rancher/hosted-providers-e2e/hosted/helpers"
)

const (
	backupResourceName  = "hp-backup"
	restoreResourceName = "hp-restore"
)

var _ = Describe("BackupRestore", func() {
	// Create kubectl context
	// Default timeout is too small, so New() cannot be used
	k := &kubectl.Kubectl{
		Namespace:    "",
		PollTimeout:  tools.SetTimeout(300 * time.Second),
		PollInterval: 500 * time.Millisecond,
	}

	It("Do a full backup/restore test", func() {
		By("Checking hosted cluster is ready", func() {
			helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)
		})

		By("Performing a backup", func() {
			// Report to Qase
			// testCaseID = 65
			backupFile = helpers.ExecuteBackup(k, backupResourceName)
		})

		By("Performing restore pre-requisites: Uninstalling k3s", func() {
			out, err := exec.Command("k3s-uninstall.sh").CombinedOutput()
			Expect(err).To(Not(HaveOccurred()), string(out))
		})

		By("Performing restore pre-requisites: Getting k3s ready", func() {
			helpers.InstallK3S(k, k3sVersion, "none", "none")
		})

		By("Performing a restore", func() {
			helpers.ExecuteRestore(k, restoreResourceName, backupFile)
		})

		By("Performing post migration installations: Installing CertManager", func() {
			helpers.InstallCertManager(k, "none", "none")
		})

		By("Performing post migration installations: Installing Rancher Manager", func() {
			rancherChannel, rancherVersion, rancherHeadVersion := helpers.GetRancherVersions()
			helpers.InstallRancherManager(k, os.Getenv("RANCHER_HOSTNAME"), rancherChannel, rancherVersion, rancherHeadVersion, "none", "none")
		})

		By("Performing post migration installations: Checking Rancher Deployments", func() {
			helpers.CheckRancherDeployments(k)
		})

		By("Checking hosted cluster is able to be modified", func() {
			restoreNodesChecks(cluster, ctx.RancherAdminClient, clusterName)
		})
	})
})
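The ExecuteBackup and ExecuteRestore helpers are not part of this diff; as orientation only, a minimal sketch of the rancher-backup custom resources such a flow typically applies (the resource names come from the constants above, while the apiVersion and spec fields follow the upstream resources.cattle.io/v1 CRDs and are an assumption here, not code from this commit):

    # Sketch: a Backup CR roughly equivalent to the backup step
    kubectl apply -f - <<EOF
    apiVersion: resources.cattle.io/v1
    kind: Backup
    metadata:
      name: hp-backup
    spec:
      resourceSetName: rancher-resource-set
    EOF

    # Sketch: a Restore CR consuming the file name returned by the backup step
    kubectl apply -f - <<EOF
    apiVersion: resources.cattle.io/v1
    kind: Restore
    metadata:
      name: hp-restore
    spec:
      backupFilename: <backupFile from ExecuteBackup>
    EOF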
@@ -19,7 +19,7 @@ import (
)

var (
	ctx                     helpers.Context
	ctx                     helpers.RancherContext
	clusterName, k8sVersion string
	testCaseID              int64
	location                = helpers.GetAKSLocation()
@@ -24,7 +24,7 @@ import (
)

var (
	ctx                     helpers.Context
	ctx                     helpers.RancherContext
	clusterName, k8sVersion string
	testCaseID              int64
	location                = helpers.GetAKSLocation()
@@ -85,7 +85,7 @@ var _ = ReportAfterEach(func(report SpecReport) {
	Qase(testCaseID, report)
})

func commonchecks(ctx *helpers.Context, cluster *management.Cluster, clusterName, rancherUpgradedVersion, hostname, k8sUpgradedVersion string) {
func commonchecks(ctx *helpers.RancherContext, cluster *management.Cluster, clusterName, rancherUpgradedVersion, hostname, k8sUpgradedVersion string) {
	helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)

	var originalChartVersion string
2 changes: 1 addition & 1 deletion hosted/aks/p0/p0_suite_test.go
@@ -34,7 +34,7 @@ const (
)

var (
	ctx         helpers.Context
	ctx         helpers.RancherContext
	clusterName string
	testCaseID  int64
	location    = helpers.GetAKSLocation()
2 changes: 1 addition & 1 deletion hosted/aks/p1/p1_suite_test.go
@@ -19,7 +19,7 @@ import (
)

var (
	ctx                   helpers.Context
	ctx                   helpers.RancherContext
	clusterName, location string
	testCaseID            int64
)
2 changes: 1 addition & 1 deletion hosted/aks/support_matrix/support_matrix_suite_test.go
@@ -28,7 +28,7 @@ import (
var (
	availableVersionList []string
	testCaseID           int64
	ctx                  helpers.Context
	ctx                  helpers.RancherContext
	location             = helpers.GetAKSLocation()
)

100 changes: 100 additions & 0 deletions hosted/eks/backup_restore/backup_restore_suite_test.go
@@ -0,0 +1,100 @@
/*
Copyright © 2023 - 2024 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup_test

import (
	"fmt"
	"os"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	. "github.com/rancher-sandbox/qase-ginkgo"

	"testing"

	"github.com/rancher/hosted-providers-e2e/hosted/eks/helper"
	"github.com/rancher/hosted-providers-e2e/hosted/helpers"
	"github.com/rancher/shepherd/clients/rancher"
	management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
	namegen "github.com/rancher/shepherd/pkg/namegenerator"
)

const (
	increaseBy = 1
)

var (
	testCaseID              int64
	clusterName, backupFile string
	ctx                     helpers.RancherContext
	cluster                 *management.Cluster
	region                  = helpers.GetEKSRegion()
	k3sVersion              = os.Getenv("INSTALL_K3S_VERSION")
)

func TestBackupRestore(t *testing.T) {
	RegisterFailHandler(Fail)
	helpers.CommonSynchronizedBeforeSuite()
	ctx = helpers.CommonBeforeSuite()
	RunSpecs(t, "BackupRestore Suite")
}

var _ = ReportBeforeEach(func(report SpecReport) {
	// Reset case ID
	testCaseID = -1
})

var _ = ReportAfterEach(func(report SpecReport) {
	// Add result in Qase if asked
	Qase(testCaseID, report)
})

var _ = BeforeEach(func() {
	clusterName = namegen.AppendRandomString(helpers.ClusterNamePrefix)
	k8sVersion, err := helper.GetK8sVersion(ctx.RancherAdminClient, false)
	Expect(err).To(BeNil())

	GinkgoLogr.Info(fmt.Sprintf("Using K8s version %s for cluster %s", k8sVersion, clusterName))
	cluster, err = helper.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, region, nil)
	Expect(err).To(BeNil())
	cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
	Expect(err).To(BeNil())
})

var _ = AfterEach(func() {
	if ctx.ClusterCleanup && cluster != nil {
		err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
		Expect(err).To(BeNil())
	} else {
		fmt.Println("Skipping downstream cluster deletion: ", clusterName)
	}
})

func restoreNodesChecks(cluster *management.Cluster, client *rancher.Client, clusterName string) {
	helpers.ClusterIsReadyChecks(cluster, client, clusterName)
	initialNodeCount := *cluster.EKSConfig.NodeGroups[0].DesiredSize

	By("scaling up the NodeGroup", func() {
		var err error
		cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount+increaseBy, true, true)
		Expect(err).To(BeNil())
	})

	By("adding a NodeGroup", func() {
		var err error
		cluster, err = helper.AddNodeGroup(cluster, increaseBy, client, true, true)
		Expect(err).To(BeNil())
	})
}
