Update stuff from previous controller-runtime upgrade #630

Merged (1 commit) on Apr 18, 2024

2 changes: 1 addition & 1 deletion config/scorecard/patches/basic.config.yaml
@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.6.1
image: quay.io/operator-framework/scorecard-test:v1.34.0
labels:
suite: basic
test: basic-check-spec-test
10 changes: 5 additions & 5 deletions config/scorecard/patches/olm.config.yaml
@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.6.1
image: quay.io/operator-framework/scorecard-test:v1.34.0
labels:
suite: olm
test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.6.1
image: quay.io/operator-framework/scorecard-test:v1.34.0
labels:
suite: olm
test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.6.1
image: quay.io/operator-framework/scorecard-test:v1.34.0
labels:
suite: olm
test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.6.1
image: quay.io/operator-framework/scorecard-test:v1.34.0
labels:
suite: olm
test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.6.1
image: quay.io/operator-framework/scorecard-test:v1.34.0
labels:
suite: olm
test: olm-status-descriptors-test
50 changes: 37 additions & 13 deletions hack/cluster.sh
@@ -1,24 +1,30 @@
#!/bin/sh
set -o errexit

# create registry container unless it already exists
# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5000'
running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)"
if [ "${running}" != 'true' ]; then
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "${reg_port}:5000" --name "${reg_name}" \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
registry:2
fi

# create a cluster with the local registry enabled in containerd
cat <<EOF | kind create cluster --image kindest/node:v1.28.7 --config=-
# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
endpoint = ["http://${reg_name}:${reg_port}"]
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
nodes:
- role: control-plane
- role: worker
@@ -29,11 +35,29 @@ nodes:
- role: worker
EOF

# connect the registry to the cluster network
# (the network may already be connected)
docker network connect "kind" "${reg_name}" || true
# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done

# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi

# Document the local registry
# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
@@ -112,7 +112,7 @@ func (r *CassandraDatacenterReconciler) Reconcile(ctx context.Context, request c
// Owned objects are automatically garbage collected.
// Return and don't requeue
logger.Info("CassandraDatacenter resource not found. Ignoring since object must be deleted.")
return ctrl.Result{}, nil
return ctrl.Result{}, reconcile.TerminalError(err)
}

// Error reading the object
@@ -123,7 +123,7 @@ func (r *CassandraDatacenterReconciler) Reconcile(ctx context.Context, request c
if err := rc.IsValid(rc.Datacenter); err != nil {
logger.Error(err, "CassandraDatacenter resource is invalid")
rc.Recorder.Eventf(rc.Datacenter, "Warning", "ValidationFailed", err.Error())
return ctrl.Result{}, err
return ctrl.Result{}, reconcile.TerminalError(err)
}

// TODO fold this into the quiet period
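For readers following the controller-runtime upgrade: both return paths above now use reconcile.TerminalError, which controller-runtime (v0.15 and later) treats as non-retryable. The error is still reported by the controller, but the request is not put back on the workqueue, so nothing is retried until the object itself changes again. Below is a minimal, self-contained sketch of the pattern, not taken from cass-operator; the ConfigMapReconciler and the required-key check are illustrative assumptions.

// Sketch only, not part of this PR: the reconcile.TerminalError pattern.
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

type ConfigMapReconciler struct {
	client.Client
}

func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	cm := &corev1.ConfigMap{}
	if err := r.Get(ctx, req.NamespacedName, cm); err != nil {
		// This sketch drops NotFound and retries other read errors; the diff
		// above instead wraps the not-found error as terminal, which likewise
		// stops requeueing while keeping the error visible.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	if _, ok := cm.Data["required-key"]; !ok {
		// A bad spec cannot fix itself by retrying, so mark the error
		// terminal: it is recorded, but the request is not requeued.
		err := fmt.Errorf("configmap %s/%s is missing required-key", cm.Namespace, cm.Name)
		return ctrl.Result{}, reconcile.TerminalError(err)
	}

	return ctrl.Result{}, nil
}

Returning the unwrapped error instead would requeue the request with rate-limited backoff, which is only appropriate for failures that can resolve on their own.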
225 changes: 225 additions & 0 deletions pkg/reconciliation/handler_reconcile_test.go
@@ -0,0 +1,225 @@
package reconciliation_test

import (
"context"
"fmt"
"testing"
"time"

api "github.com/k8ssandra/cass-operator/apis/cassandra/v1beta1"
"github.com/k8ssandra/cass-operator/pkg/mocks"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

controllers "github.com/k8ssandra/cass-operator/internal/controllers/cassandra"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func TestReconcile(t *testing.T) {
var (
name = "cluster-example-cluster"
namespace = "default"
size int32 = 2
)
storageSize := resource.MustParse("1Gi")
storageName := "server-data"
storageConfig := api.StorageConfig{
CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{
StorageClassName: &storageName,
AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
Resources: corev1.VolumeResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize},
},
},
}

// Instantiate a CassandraDatacenter
dc := &api.CassandraDatacenter{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: api.CassandraDatacenterSpec{
ManagementApiAuth: api.ManagementApiAuthConfig{
Insecure: &api.ManagementApiAuthInsecureConfig{},
},
Size: size,
ServerType: "dse",
ServerVersion: "6.8.42",
StorageConfig: storageConfig,
ClusterName: "cluster-example",
},
}

// Objects to keep track of
trackObjects := []runtime.Object{
dc,
}

s := scheme.Scheme
s.AddKnownTypes(api.GroupVersion, dc)

fakeClient := fake.NewClientBuilder().WithStatusSubresource(dc).WithRuntimeObjects(trackObjects...).Build()

r := &controllers.CassandraDatacenterReconciler{
Client: fakeClient,
Scheme: s,
Recorder: record.NewFakeRecorder(100),
}

request := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}

result, err := r.Reconcile(context.TODO(), request)
if err != nil {
t.Fatalf("Reconciliation Failure: (%v)", err)
}

if result != (reconcile.Result{Requeue: true, RequeueAfter: 2 * time.Second}) {
t.Error("Reconcile did not return a correct result.")
}
}

func TestReconcile_NotFound(t *testing.T) {
var (
name = "datacenter-example"
namespace = "default"
size int32 = 2
)

storageSize := resource.MustParse("1Gi")
storageName := "server-data"
storageConfig := api.StorageConfig{
CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{
StorageClassName: &storageName,
AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
Resources: corev1.VolumeResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize},
},
},
}

// Instantiate a CassandraDatacenter
dc := &api.CassandraDatacenter{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: api.CassandraDatacenterSpec{
ManagementApiAuth: api.ManagementApiAuthConfig{
Insecure: &api.ManagementApiAuthInsecureConfig{},
},
Size: size,
StorageConfig: storageConfig,
},
}

// Objects to keep track of
trackObjects := []runtime.Object{}

s := scheme.Scheme
s.AddKnownTypes(api.GroupVersion, dc)

fakeClient := fake.NewClientBuilder().WithStatusSubresource(dc).WithRuntimeObjects(trackObjects...).Build()

r := &controllers.CassandraDatacenterReconciler{
Client: fakeClient,
Scheme: s,
}

request := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}

result, err := r.Reconcile(context.TODO(), request)
require.Error(t, err)
require.Equal(t, reconcile.Result{}, result)
}

func TestReconcile_Error(t *testing.T) {
var (
name = "datacenter-example"
namespace = "default"
size int32 = 2
)

storageSize := resource.MustParse("1Gi")
storageName := "server-data"
storageConfig := api.StorageConfig{
CassandraDataVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{
StorageClassName: &storageName,
AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"},
Resources: corev1.VolumeResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{"storage": storageSize},
},
},
}

// Instantiate a CassandraDatacenter
dc := &api.CassandraDatacenter{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: api.CassandraDatacenterSpec{
ManagementApiAuth: api.ManagementApiAuthConfig{
Insecure: &api.ManagementApiAuthInsecureConfig{},
},
Size: size,
StorageConfig: storageConfig,
},
}

// Objects to keep track of

s := scheme.Scheme
s.AddKnownTypes(api.GroupVersion, dc)

mockClient := &mocks.Client{}
mockClient.On("Get",
mock.MatchedBy(
func(ctx context.Context) bool {
return ctx != nil
}),
mock.MatchedBy(
func(key client.ObjectKey) bool {
return key != client.ObjectKey{}
}),
mock.MatchedBy(
func(obj runtime.Object) bool {
return obj != nil
})).
Return(fmt.Errorf("some cryptic error")).
Once()

r := &controllers.CassandraDatacenterReconciler{
Client: mockClient,
Scheme: s,
}

request := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
}

_, err := r.Reconcile(context.TODO(), request)
require.Error(t, err, "Reconciliation should have failed")
}
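
Because the not-found and validation paths now return a terminal error, a test can also assert that the error is the non-requeuing kind rather than just any error. The sketch below is hedged: the test name and setup are illustrative and not part of this PR, it reuses the imports of the file above plus the standard library errors package, and the errors.Is check mirrors how controller-runtime distinguishes terminal errors when deciding whether to requeue.

// Hypothetical follow-up test, not part of this PR. The setup mirrors
// TestReconcile_NotFound: the fake client holds no objects, so the Get inside
// Reconcile returns NotFound and the reconciler wraps it as a terminal error.
func TestReconcile_NotFoundErrorIsTerminal(t *testing.T) {
	s := scheme.Scheme
	s.AddKnownTypes(api.GroupVersion, &api.CassandraDatacenter{})

	r := &controllers.CassandraDatacenterReconciler{
		Client: fake.NewClientBuilder().WithScheme(s).Build(),
		Scheme: s,
	}

	request := reconcile.Request{
		NamespacedName: types.NamespacedName{Name: "does-not-exist", Namespace: "default"},
	}

	result, err := r.Reconcile(context.TODO(), request)
	require.Error(t, err)
	require.Equal(t, reconcile.Result{}, result)
	// errors.Is with reconcile.TerminalError(nil) reports whether the error
	// chain contains a terminal (non-requeuing) error.
	require.True(t, errors.Is(err, reconcile.TerminalError(nil)))
}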