diff --git a/scripts/ci-e2e.sh b/scripts/ci-e2e.sh
index 9ad0021cc5..21a7554e3a 100755
--- a/scripts/ci-e2e.sh
+++ b/scripts/ci-e2e.sh
@@ -71,8 +71,6 @@ source "${M3_DEV_ENV_PATH}/lib/releases.sh"
 source "${M3_DEV_ENV_PATH}/lib/ironic_basic_auth.sh"
 # shellcheck disable=SC1091,SC1090
 source "${M3_DEV_ENV_PATH}/lib/ironic_tls_setup.sh"
-# shellcheck disable=SC1091,SC1090
-source "/tmp/vars_cluster.sh"
 
 # image for live iso testing
 export LIVE_ISO_IMAGE="https://artifactory.nordix.org/artifactory/metal3/images/iso/minimal_linux_live-v2.iso"
diff --git a/test/e2e/basic_integration_test.go b/test/e2e/basic_integration_test.go
index 288e490644..33e8152ddd 100644
--- a/test/e2e/basic_integration_test.go
+++ b/test/e2e/basic_integration_test.go
@@ -4,6 +4,7 @@ import (
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -17,6 +18,20 @@ var _ = Describe("When testing basic cluster creation [basic]", Label("basic"),
 
 		// We need to override clusterctl apply log folder to avoid getting our credentials exposed.
 		clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())
+		FKASDeployLogFolder := filepath.Join(os.TempDir(), "fkas-deploy-logs", bootstrapClusterProxy.GetName())
+		FKASKustomization := e2eConfig.GetVariable("FKAS_RELEASE_LATEST")
+		By(fmt.Sprintf("Installing FKAS from kustomization %s on the bootstrap cluster", FKASKustomization))
+		err := BuildAndApplyKustomization(ctx, &BuildAndApplyKustomizationInput{
+			Kustomization:       FKASKustomization,
+			ClusterProxy:        bootstrapClusterProxy,
+			WaitForDeployment:   true,
+			WatchDeploymentLogs: true,
+			LogPath:             FKASDeployLogFolder,
+			DeploymentName:      "metal3-fake-api-server",
+			DeploymentNamespace: "metal3",
+			WaitIntervals:       e2eConfig.GetIntervals("default", "wait-deployment"),
+		})
+		Expect(err).NotTo(HaveOccurred())
 	})
 
 	It("Should create a workload cluster", func() {
@@ -27,6 +42,13 @@ var _ = Describe("When testing basic cluster creation [basic]", Label("basic"),
 	})
 
 	AfterEach(func() {
+		RemoveDeployment(ctx, func() RemoveDeploymentInput {
+			return RemoveDeploymentInput{
+				ManagementCluster: bootstrapClusterProxy,
+				Namespace:         "metal3",
+				Name:              "metal3-fake-api-server",
+			}
+		})
 		DumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, e2eConfig.GetIntervals, clusterName, clusterctlLogFolder, skipCleanup)
 	})
 })
diff --git a/test/e2e/config/e2e_conf.yaml b/test/e2e/config/e2e_conf.yaml
index 3f909bff74..2286b15e63 100644
--- a/test/e2e/config/e2e_conf.yaml
+++ b/test/e2e/config/e2e_conf.yaml
@@ -212,7 +212,8 @@ variables:
   BMO_RELEASE_0.5: "data/bmo-deployment/overlays/release-0.5"
   BMO_RELEASE_0.6: "data/bmo-deployment/overlays/release-0.6"
   BMO_RELEASE_LATEST: "data/bmo-deployment/overlays/release-latest"
-
+  FKAS_RELEASE_LATEST: "data/fkas-deployment"
+
 intervals:
   default/wait-controllers: ["10m", "10s"]
   default/wait-cluster: ["20m", "30s"] # The second time to check the availibility of the cluster should happen late, so kcp object has time to be created
diff --git a/test/e2e/data/fkas-deployment/deployment.yaml b/test/e2e/data/fkas-deployment/deployment.yaml
new file mode 100644
index 0000000000..aac15eac6a
--- /dev/null
+++ b/test/e2e/data/fkas-deployment/deployment.yaml
@@ -0,0 +1,27 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: metal3-fake-api-server
+  namespace: metal3
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: capim
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: capim
+    spec:
+      hostNetwork: true
+      containers:
+      - image: 192.168.111.1:5000/localimages/api-server
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        name: apiserver
\ No newline at end of file
diff --git a/test/e2e/data/fkas-deployment/kustomization.yaml b/test/e2e/data/fkas-deployment/kustomization.yaml
new file mode 100644
index 0000000000..e79e1381dd
--- /dev/null
+++ b/test/e2e/data/fkas-deployment/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- deployment.yaml
\ No newline at end of file
diff --git a/test/e2e/data/infrastructure-metal3/cluster-template-centos-fake/secrets.yaml b/test/e2e/data/infrastructure-metal3/cluster-template-centos-fake/secrets.yaml
index 61628e986e..f59b69b720 100644
--- a/test/e2e/data/infrastructure-metal3/cluster-template-centos-fake/secrets.yaml
+++ b/test/e2e/data/infrastructure-metal3/cluster-template-centos-fake/secrets.yaml
@@ -8,8 +8,8 @@ metadata:
   namespace: ${namespace}
 type: kubernetes.io/tls
 data:
-  tls.crt: ${caCertEncoded}
-  tls.key: ${caKeyEncoded}
+  tls.crt: ${CA_CERT_ENCODED}
+  tls.key: ${CA_KEY_ENCODED}
 ---
 apiVersion: v1
 kind: Secret
@@ -20,5 +20,5 @@ metadata:
   namespace: ${namespace}
 type: kubernetes.io/tls
 data:
-  tls.crt: ${etcdCertEncoded}
-  tls.key: ${etcdKeyEncoded}
\ No newline at end of file
+  tls.crt: ${ETCD_CERT_ENCODED}
+  tls.key: ${ETCD_KEY_ENCODED}
\ No newline at end of file
diff --git a/test/e2e/pivoting_based_feature_test.go b/test/e2e/pivoting_based_feature_test.go
index c5647aa3f4..fe996ae72c 100644
--- a/test/e2e/pivoting_based_feature_test.go
+++ b/test/e2e/pivoting_based_feature_test.go
@@ -5,6 +5,7 @@ import (
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -199,7 +200,11 @@ func createFakeTargetCluster(k8sVersion string) (framework.ClusterProxy, *cluste
 	caCertEncoded:=b64.StdEncoding.EncodeToString(caCert)
 	etcdKeyEncoded:=b64.StdEncoding.EncodeToString(etcdKey)
 	etcdCertEncoded:=b64.StdEncoding.EncodeToString(etcdCert)
-	cluster_endpoints, err :=http.Get("http://172.22.0.2:3333/register?resource=metal3/test72&caKey="+caKeyEncoded+"&caCert="+caCertEncoded+"&etcdKey="+etcdKeyEncoded+"&etcdCert="+etcdCertEncoded)
+	os.Setenv("CA_KEY_ENCODED", caKeyEncoded)
+	os.Setenv("CA_CERT_ENCODED", caCertEncoded)
+	os.Setenv("ETCD_KEY_ENCODED", etcdKeyEncoded)
+	os.Setenv("ETCD_CERT_ENCODED", etcdCertEncoded)
+	cluster_endpoints, err := http.Get("http://172.22.0.2:3333/register?resource=metal3/test1&caKey=" + caKeyEncoded + "&caCert=" + caCertEncoded + "&etcdKey=" + etcdKeyEncoded + "&etcdCert=" + etcdCertEncoded)
 	check(err)
 	defer cluster_endpoints.Body.Close()
 	body, err := ioutil.ReadAll(cluster_endpoints.Body)
@@ -207,6 +212,8 @@ func createFakeTargetCluster(k8sVersion string) (framework.ClusterProxy, *cluste
 	var response Endpoint
 	json.Unmarshal(body, &response)
 	Logf("CLUSTER_APIENDPOINT_HOST %v CLUSTER_APIENDPOINT_PORT %v", response.Host, response.Port)
+	os.Setenv("CLUSTER_APIENDPOINT_HOST", response.Host)
+	os.Setenv("CLUSTER_APIENDPOINT_PORT", fmt.Sprintf("%v", response.Port))
 	return createTargetCluster(k8sVersion)
 }
 
@@ -236,21 +243,45 @@ func createTargetCluster(k8sVersion string) (framework.ClusterProxy, *clusterctl
 			// get bmh
 			// get m3m
 			// waiting machine
-			By("Waiting for all Machines to be provisioning")
+			By("Waiting for one Machine to be provisioning")
 			WaitForNumMachinesInState(ctx, clusterv1.MachinePhaseProvisioning, WaitForNumInput{
 				Client:    bootstrapClusterProxy.GetClient(),
 				Options:   []client.ListOption{client.InNamespace(namespace)},
-				Replicas:  2,
+				Replicas:  1,
 				Intervals: e2eConfig.GetIntervals(specName, "wait-machine-remediation"),
 			})
+			metal3Machines := infrav1.Metal3MachineList{}
+			metal3Machines_updated := ""
 			bootstrapClusterProxy.GetClient().List(ctx, &metal3Machines, []client.ListOption{client.InNamespace(namespace)}...)
 			for _, m3machine := range metal3Machines.Items {
-				providerID:="metal3://metal3/"+Metal3MachineToBmhName(m3machine)+"/"+m3machine.GetName()
-				machine, _ := Metal3MachineToMachineName(m3machine)
-				resp, err :=http.Get("http://172.22.0.2:3333/updateNode?resource=metal3/test1&nodeName="+machine+"&providerID="+providerID)
+				if m3machine.GetAnnotations()["metal3.io/BareMetalHost"] != "" {
+					providerID := "metal3://metal3/" + Metal3MachineToBmhName(m3machine) + "/" + m3machine.GetName()
+					machine, _ := Metal3MachineToMachineName(m3machine)
+					Logf("http://172.22.0.2:3333/updateNode?resource=metal3/test1&nodeName=" + machine + "&providerID=" + providerID)
+					resp, err := http.Get("http://172.22.0.2:3333/updateNode?resource=metal3/test1&nodeName=" + machine + "&providerID=" + providerID)
+					metal3Machines_updated = m3machine.GetName()
 				Logf("resp : %v err: %v", resp, err)
+				}
+
+			}
+			By("Waiting for the other Machine to be provisioning")
+			WaitForNumMachinesInState(ctx, clusterv1.MachinePhaseProvisioning, WaitForNumInput{
+				Client:    bootstrapClusterProxy.GetClient(),
+				Options:   []client.ListOption{client.InNamespace(namespace)},
+				Replicas:  2,
+				Intervals: e2eConfig.GetIntervals(specName, "wait-machine-remediation"),
+			})
+			for _, m3machine := range metal3Machines.Items {
+				if m3machine.GetName() != metal3Machines_updated {
+					providerID := "metal3://metal3/" + Metal3MachineToBmhName(m3machine) + "/" + m3machine.GetName()
+					machine, _ := Metal3MachineToMachineName(m3machine)
+					Logf("http://172.22.0.2:3333/updateNode?resource=metal3/test1&nodeName=" + machine + "&providerID=" + providerID)
+					resp, err := http.Get("http://172.22.0.2:3333/updateNode?resource=metal3/test1&nodeName=" + machine + "&providerID=" + providerID)
+					Logf("resp : %v err: %v", resp, err)
+				}
+
 			}
 		},
 		WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),