diff --git a/control-plane/cmd/webhook-kafka/main.go b/control-plane/cmd/webhook-kafka/main.go index 6fe3070d4f..e7b421840a 100644 --- a/control-plane/cmd/webhook-kafka/main.go +++ b/control-plane/cmd/webhook-kafka/main.go @@ -37,6 +37,7 @@ import ( eventingcorev1 "knative.dev/eventing/pkg/apis/eventing/v1" "knative.dev/eventing/pkg/apis/feature" + messagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1" @@ -57,6 +58,7 @@ var types = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{ sourcesv1beta1.SchemeGroupVersion.WithKind("KafkaSource"): &sourcesv1beta1.KafkaSource{}, sourcesv1.SchemeGroupVersion.WithKind("KafkaSource"): &sourcesv1.KafkaSource{}, messagingv1beta1.SchemeGroupVersion.WithKind("KafkaChannel"): &messagingv1beta1.KafkaChannel{}, + messagingv1.SchemeGroupVersion.WithKind("KafkaChannel"): &messagingv1.KafkaChannel{}, eventingcorev1.SchemeGroupVersion.WithKind("Broker"): &eventingv1.BrokerStub{}, kafkainternals.SchemeGroupVersion.WithKind("ConsumerGroup"): &kafkainternals.ConsumerGroup{}, kafkainternals.SchemeGroupVersion.WithKind("Consumer"): &kafkainternals.Consumer{}, @@ -168,6 +170,14 @@ func NewConversionController(ctx context.Context, _ configmap.Watcher) *controll sourcesv1.SchemeGroupVersion.Version: &sourcesv1.KafkaSource{}, }, }, + messagingv1.Kind("KafkaChannel"): { + DefinitionName: "kafkachannels.messaging.knative.dev", + HubVersion: messagingv1beta1.SchemeGroupVersion.Version, + Zygotes: map[string]conversion.ConvertibleObject{ + messagingv1beta1.SchemeGroupVersion.Version: &messagingv1beta1.KafkaChannel{}, + messagingv1.SchemeGroupVersion.Version: &messagingv1.KafkaChannel{}, + }, + }, }, // A function that infuses the context passed to ConvertTo/ConvertFrom/SetDefaults with custom metadata. 
ctxFunc, diff --git a/control-plane/config/eventing-kafka-broker/100-channel/100-kafka-channel.yaml b/control-plane/config/eventing-kafka-broker/100-channel/100-kafka-channel.yaml index b9cbd2c763..d12026bf2a 100644 --- a/control-plane/config/eventing-kafka-broker/100-channel/100-kafka-channel.yaml +++ b/control-plane/config/eventing-kafka-broker/100-channel/100-kafka-channel.yaml @@ -319,6 +319,301 @@ spec: - name: Age type: date jsonPath: .metadata.creationTimestamp + - name: v1 + served: true + storage: false + subresources: + status: {} + schema: + openAPIV3Schema: + description: 'KafkaChannel is a resource representing a Channel that is backed by a topic of an Apache Kafka cluster.' + type: object + properties: + spec: + description: Spec defines the desired state of the Channel. + type: object + properties: + numPartitions: + description: NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. + type: integer + format: int32 + default: 1 + replicationFactor: + description: ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. + type: integer + maximum: 32767 + default: 1 + retentionDuration: + description: RetentionDuration is the retention time for events in a Kafka Topic represented as an ISO-8601 Duration. By default it is set to 168 hours, which is the precise form of 7 days. + type: string + delivery: + description: DeliverySpec contains the default delivery spec for each subscription to this Channelable. Each subscription delivery spec, if any, overrides this global delivery spec. + type: object + properties: + backoffDelay: + description: 'BackoffDelay is the delay before retrying. More information on Duration format: - https://www.iso.org/iso-8601-date-and-time-format.html - https://en.wikipedia.org/wiki/ISO_8601 For linear policy, backoff delay is backoffDelay*. For exponential policy, backoff delay is backoffDelay*2^.' 
+ type: string + backoffPolicy: + description: BackoffPolicy is the retry backoff policy (linear, exponential). + type: string + deadLetterSink: + description: DeadLetterSink is the sink receiving event that could not be sent to a destination. + type: object + properties: + ref: + description: Ref points to an Addressable. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ This is optional field, it gets defaulted to the object holding it if left out.' + type: string + uri: + description: URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. + type: string + CACerts: + type: string + audience: + description: Audience is the OIDC audience for the deadLetterSink. + type: string + retry: + description: Retry is the minimum number of retries the sender should attempt when sending an event before moving it to the dead letter sink. + type: integer + format: int32 + x-kubernetes-preserve-unknown-fields: true # This is necessary to enable experimental features in the delivery + subscribers: + description: This is the list of subscriptions for this subscribable. + type: array + items: + type: object + properties: + delivery: + description: DeliverySpec contains options controlling the event delivery + type: object + properties: + backoffDelay: + description: 'BackoffDelay is the delay before retrying. 
More information on Duration format: - https://www.iso.org/iso-8601-date-and-time-format.html - https://en.wikipedia.org/wiki/ISO_8601 For linear policy, backoff delay is backoffDelay*. For exponential policy, backoff delay is backoffDelay*2^.' + type: string + backoffPolicy: + description: BackoffPolicy is the retry backoff policy (linear, exponential). + type: string + deadLetterSink: + description: DeadLetterSink is the sink receiving event that could not be sent to a destination. + type: object + properties: + ref: + description: Ref points to an Addressable. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ This is optional field, it gets defaulted to the object holding it if left out.' + type: string + uri: + description: URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. + type: string + CACerts: + type: string + audience: + description: Audience is the OIDC audience for the deadLetterSink. + type: string + retry: + description: Retry is the minimum number of retries the sender should attempt when sending an event before moving it to the dead letter sink. + type: integer + format: int32 + x-kubernetes-preserve-unknown-fields: true # This is necessary to enable experimental features in the delivery + generation: + description: Generation of the origin of the subscriber with uid:UID. 
+ type: integer + format: int64 + name: + description: The name of the subscription + type: string + replyUri: + description: ReplyURI is the endpoint for the reply + type: string + replyCACerts: + description: replyCACerts is the CA certs to trust for the reply. + type: string + replyAudience: + description: ReplyAudience is the OIDC audience for the replyUri. + type: string + subscriberUri: + description: SubscriberURI is the endpoint for the subscriber + type: string + subscriberCACerts: + description: SubscriberCACerts is the CA certs to trust for the subscriber. + type: string + subscriberAudience: + description: SubscriberAudience is the OIDC audience for the subscriberUri. + type: string + uid: + description: UID is used to understand the origin of the subscriber. + type: string + auth: + description: Auth provides the relevant information for OIDC authentication. + type: object + properties: + serviceAccountName: + description: ServiceAccountName is the name of the generated service account used for this components OIDC authentication. + type: string + status: + description: Status represents the current state of the KafkaChannel. This data may be out of date. + type: object + properties: + address: + type: object + required: + - url + properties: + name: + type: string + url: + type: string + CACerts: + type: string + audience: + type: string + addresses: + description: KafkaChannel is Addressable. It exposes the endpoints as URIs to get events delivered into the Kafka channel. + type: array + items: + type: object + properties: + name: + type: string + url: + type: string + CACerts: + type: string + audience: + type: string + annotations: + description: Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + policies: + description: List of applied EventPolicies + type: array + items: + type: object + properties: + apiVersion: + description: The API version of the applied EventPolicy. This indicates, which version of EventPolicy is supported by the resource. + type: string + name: + description: The name of the applied EventPolicy + type: string + conditions: + description: Conditions the latest available observations of a resource's current state. + type: array + items: + type: object + required: + - type + - status + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition transitioned from one status to another. We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic differences (all other things held constant). + type: string + message: + description: A human readable message indicating details about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + severity: + description: Severity with which to treat failures of this type of condition. When this is not specified, it defaults to Error. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + deadLetterChannel: + description: DeadLetterChannel is a KReference and is set by the channel when it supports native error handling via a channel Failed messages are delivered here. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ This is optional field, it gets defaulted to the object holding it if left out.' + type: string + deadLetterSinkUri: + description: DeadLetterSinkURI is the resolved URI of the dead letter ref if one is specified in the Spec.Delivery. + type: string + deadLetterSinkCACerts: + type: string + deadLetterSinkAudience: + description: OIDC audience of the dead letter sink. + type: string + observedGeneration: + description: ObservedGeneration is the 'Generation' of the Service that was last processed by the controller. + type: integer + format: int64 + subscribers: + description: This is the list of subscription's statuses for this channel. + type: array + items: + type: object + properties: + message: + description: A human readable message indicating details of Ready status. + type: string + observedGeneration: + description: Generation of the origin of the subscriber with uid:UID. + type: integer + format: int64 + ready: + description: Status of the subscriber. + type: string + uid: + description: UID is used to understand the origin of the subscriber. + type: string + auth: + description: Auth provides the relevant information for OIDC authentication. + type: object + properties: + serviceAccountName: + description: ServiceAccountName is the name of the generated service account used for this components OIDC authentication. 
+ type: string + additionalPrinterColumns: + - name: Ready + type: string + jsonPath: ".status.conditions[?(@.type==\"Ready\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Ready\")].reason" + - name: URL + type: string + jsonPath: .status.address.url + - name: Age + type: date + jsonPath: .metadata.creationTimestamp names: kind: KafkaChannel plural: kafkachannels @@ -337,7 +632,7 @@ spec: conversionReviewVersions: ["v1", "v1beta1"] clientConfig: service: - name: kafka-webhook + name: kafka-webhook-eventing namespace: knative-eventing --- diff --git a/control-plane/pkg/apis/messaging/v1/doc.go b/control-plane/pkg/apis/messaging/v1/doc.go new file mode 100644 index 0000000000..fb67a49e04 --- /dev/null +++ b/control-plane/pkg/apis/messaging/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 is the v1 version of the API. +// +k8s:deepcopy-gen=package +// +groupName=messaging.knative.dev +package v1 diff --git a/control-plane/pkg/apis/messaging/v1/kafka_channel_conversion.go b/control-plane/pkg/apis/messaging/v1/kafka_channel_conversion.go new file mode 100644 index 0000000000..b6d864e750 --- /dev/null +++ b/control-plane/pkg/apis/messaging/v1/kafka_channel_conversion.go @@ -0,0 +1,34 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertTo implements apis.Convertible +func (channel *KafkaChannel) ConvertTo(_ context.Context, sink apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertFrom implements apis.Convertible +func (sink *KafkaChannel) ConvertFrom(_ context.Context, channel apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", channel) +} diff --git a/control-plane/pkg/apis/messaging/v1/kafka_channel_defaults.go b/control-plane/pkg/apis/messaging/v1/kafka_channel_defaults.go new file mode 100644 index 0000000000..5933839539 --- /dev/null +++ b/control-plane/pkg/apis/messaging/v1/kafka_channel_defaults.go @@ -0,0 +1,73 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "time" + + "knative.dev/eventing/pkg/apis/messaging" + "knative.dev/pkg/apis" +) + +const ( + // DefaultNumPartitions is the KafkaChannel Spec default for the number of partitions + DefaultNumPartitions = 1 + + // DefaultReplicationFactor is the KafkaChannel Spec default for the replication factor + DefaultReplicationFactor = 1 + + // DefaultRetentionISO8601Duration is the KafkaChannel Spec default for the retention duration as an ISO-8601 string + DefaultRetentionISO8601Duration = "PT168H" // Precise 7 Days + + // DefaultRetentionDuration is the time.Duration equivalent of the DefaultRetentionISO8601Duration + DefaultRetentionDuration = 7 * 24 * time.Hour // Precise 7 Days + + // KafkaTopicConfigRetentionMs is the key in the Sarama TopicDetail ConfigEntries map for retention time (in ms) + KafkaTopicConfigRetentionMs = "retention.ms" +) + +func (kc *KafkaChannel) SetDefaults(ctx context.Context) { + // Set the duck subscription to the stored version of the duck + // we support. Reason for this is that the stored version will + // not get a chance to get modified, but for newer versions + // conversion webhook will be able to take a crack at it and + // can modify it to match the duck shape. 
+ if kc.Annotations == nil { + kc.Annotations = make(map[string]string) + } + + if _, ok := kc.Annotations[messaging.SubscribableDuckVersionAnnotation]; !ok { + kc.Annotations[messaging.SubscribableDuckVersionAnnotation] = "v1" + } + + ctx = apis.WithinParent(ctx, kc.ObjectMeta) + kc.Spec.SetDefaults(ctx) +} + +func (kcs *KafkaChannelSpec) SetDefaults(ctx context.Context) { + if kcs.NumPartitions == 0 { + kcs.NumPartitions = DefaultNumPartitions + } + if kcs.ReplicationFactor == 0 { + kcs.ReplicationFactor = DefaultReplicationFactor + } + if len(kcs.RetentionDuration) <= 0 { + kcs.RetentionDuration = DefaultRetentionISO8601Duration + } + kcs.Delivery.SetDefaults(ctx) +} diff --git a/control-plane/pkg/apis/messaging/v1/kafka_channel_lifecycle.go b/control-plane/pkg/apis/messaging/v1/kafka_channel_lifecycle.go new file mode 100644 index 0000000000..e2be529f36 --- /dev/null +++ b/control-plane/pkg/apis/messaging/v1/kafka_channel_lifecycle.go @@ -0,0 +1,149 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "sync" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +// The consolidated and distributed KafkaChannel implementations require +// differentiated ConditionSets in order to accurately reflect their varied +// runtime architectures. One of the channel specific "Register..." 
functions +// in pkg/channel//apis/messaging/kafka_channel_lifecycle.go should be +// called via an init() in the main() of associated components. +var kc apis.ConditionSet +var channelCondSetLock = sync.RWMutex{} + +// Shared / Common Conditions Used By All Channel Implementations +const ( + + // KafkaChannelConditionReady has status True when all sub-conditions below have been set to True. + KafkaChannelConditionReady = apis.ConditionReady + + // KafkaChannelConditionAddressable has status true when this KafkaChannel meets + // the Addressable contract and has a non-empty URL. + KafkaChannelConditionAddressable apis.ConditionType = "Addressable" + + // KafkaChannelConditionConfigReady has status True when the Kafka configuration to use by the channel + // exists and is valid (i.e. the connection has been established). + KafkaChannelConditionConfigReady apis.ConditionType = "ConfigurationReady" + + // KafkaChannelConditionTopicReady has status True when the Kafka topic to use by the channel exists. + KafkaChannelConditionTopicReady apis.ConditionType = "TopicReady" + + // KafkaChannelConditionChannelServiceReady has status True when the K8S Service representing the channel + // is ready. Because this uses ExternalName, there are no endpoints to check. + KafkaChannelConditionChannelServiceReady apis.ConditionType = "ChannelServiceReady" + + ConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady" +) + +// RegisterAlternateKafkaChannelConditionSet register a different apis.ConditionSet. +func RegisterAlternateKafkaChannelConditionSet(conditionSet apis.ConditionSet) { + channelCondSetLock.Lock() + defer channelCondSetLock.Unlock() + + kc = conditionSet +} + +// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface. 
+func (*KafkaChannel) GetConditionSet() apis.ConditionSet { + channelCondSetLock.RLock() + defer channelCondSetLock.RUnlock() + + return kc +} + +// GetConditionSet retrieves the condition set for this resource. +func (*KafkaChannelStatus) GetConditionSet() apis.ConditionSet { + channelCondSetLock.RLock() + defer channelCondSetLock.RUnlock() + + return kc +} + +// GetCondition returns the condition currently associated with the given type, or nil. +func (kcs *KafkaChannelStatus) GetCondition(t apis.ConditionType) *apis.Condition { + return kcs.GetConditionSet().Manage(kcs).GetCondition(t) +} + +// IsReady returns true if the resource is ready overall. +func (kcs *KafkaChannelStatus) IsReady() bool { + return kcs.GetConditionSet().Manage(kcs).IsHappy() +} + +// InitializeConditions sets relevant unset conditions to Unknown state. +func (kcs *KafkaChannelStatus) InitializeConditions() { + kcs.GetConditionSet().Manage(kcs).InitializeConditions() +} + +// SetAddress sets the address (as part of Addressable contract) and marks the correct condition. +func (kcs *KafkaChannelStatus) SetAddress(addr *duckv1.Addressable) { + if kcs.Address == nil { + kcs.Address = &duckv1.Addressable{} + } + if addr != nil { + kcs.Address = addr + kcs.GetConditionSet().Manage(kcs).MarkTrue(KafkaChannelConditionAddressable) + } else { + kcs.Address.URL = nil + kcs.GetConditionSet().Manage(kcs).MarkFalse(KafkaChannelConditionAddressable, "EmptyURL", "URL is nil") + } +} + +func (kcs *KafkaChannelStatus) MarkConfigTrue() { + kcs.GetConditionSet().Manage(kcs).MarkTrue(KafkaChannelConditionConfigReady) +} + +func (kcs *KafkaChannelStatus) MarkConfigFailed(reason, messageFormat string, messageA ...interface{}) { + kcs.GetConditionSet().Manage(kcs).MarkFalse(KafkaChannelConditionConfigReady, reason, messageFormat, messageA...) 
+} + +func (kcs *KafkaChannelStatus) MarkTopicTrue() { + kcs.GetConditionSet().Manage(kcs).MarkTrue(KafkaChannelConditionTopicReady) +} + +func (kcs *KafkaChannelStatus) MarkTopicFailed(reason, messageFormat string, messageA ...interface{}) { + kcs.GetConditionSet().Manage(kcs).MarkFalse(KafkaChannelConditionTopicReady, reason, messageFormat, messageA...) +} + +func (kcs *KafkaChannelStatus) MarkChannelServiceFailed(reason, messageFormat string, messageA ...interface{}) { + kcs.GetConditionSet().Manage(kcs).MarkFalse(KafkaChannelConditionChannelServiceReady, reason, messageFormat, messageA...) +} + +func (kcs *KafkaChannelStatus) MarkChannelServiceTrue() { + kcs.GetConditionSet().Manage(kcs).MarkTrue(KafkaChannelConditionChannelServiceReady) +} + +func (kcs *KafkaChannelStatus) MarkEventPoliciesTrue() { + kcs.GetConditionSet().Manage(kcs).MarkTrue(ConditionEventPoliciesReady) +} + +func (kcs *KafkaChannelStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) { + kcs.GetConditionSet().Manage(kcs).MarkTrueWithReason(ConditionEventPoliciesReady, reason, messageFormat, messageA...) +} + +func (kcs *KafkaChannelStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) { + kcs.GetConditionSet().Manage(kcs).MarkFalse(ConditionEventPoliciesReady, reason, messageFormat, messageA...) +} + +func (kcs *KafkaChannelStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) { + kcs.GetConditionSet().Manage(kcs).MarkUnknown(ConditionEventPoliciesReady, reason, messageFormat, messageA...) 
+} diff --git a/control-plane/pkg/apis/messaging/v1/kafka_channel_types.go b/control-plane/pkg/apis/messaging/v1/kafka_channel_types.go new file mode 100644 index 0000000000..d8f8c57a5f --- /dev/null +++ b/control-plane/pkg/apis/messaging/v1/kafka_channel_types.go @@ -0,0 +1,119 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + "github.com/rickb777/date/period" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + eventingduck "knative.dev/eventing/pkg/apis/duck/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" +) + +// +genclient +// +genreconciler +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaChannel is a resource representing a Kafka Channel. +type KafkaChannel struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of the Channel. + Spec KafkaChannelSpec `json:"spec,omitempty"` + + // Status represents the current state of the KafkaChannel. This data may be out of + // date. + // +optional + Status KafkaChannelStatus `json:"status,omitempty"` +} + +var ( + // Check that this KafkaChannel can be validated and defaulted. 
+ _ apis.Validatable = (*KafkaChannel)(nil) + _ apis.Defaultable = (*KafkaChannel)(nil) + + _ runtime.Object = (*KafkaChannel)(nil) + + // Check that we can create OwnerReferences to a KafkaChannel. + _ kmeta.OwnerRefable = (*KafkaChannel)(nil) + + // Check that the type conforms to the duck Knative Resource shape. + _ duckv1.KRShaped = (*KafkaChannel)(nil) +) + +// KafkaChannelSpec defines the specification for a KafkaChannel. +type KafkaChannelSpec struct { + // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. + NumPartitions int32 `json:"numPartitions"` + + // ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. + ReplicationFactor int16 `json:"replicationFactor"` + + // RetentionDuration is the duration for which events will be retained in the Kafka Topic. + // By default, it is set to 168 hours, which is the precise form for 7 days. + // More information on Duration format: + // - https://www.iso.org/iso-8601-date-and-time-format.html + // - https://en.wikipedia.org/wiki/ISO_8601 + RetentionDuration string `json:"retentionDuration"` + + // Channel conforms to Duck type Channelable. + eventingduck.ChannelableSpec `json:",inline"` +} + +// ParseRetentionDuration returns the parsed RetentionDuration if valid (ISO-8601 period format, e.g. "PT168H") or an error for invalid content. +// Note - If the optional RetentionDuration field is not present, or is invalid, a Duration of "-1" will be returned. +func (kcs *KafkaChannelSpec) ParseRetentionDuration() (time.Duration, error) { + retentionPeriod, err := period.Parse(kcs.RetentionDuration) + if err != nil { + return time.Duration(-1), err + } + retentionDuration, _ := retentionPeriod.Duration() // Ignore precision flag and accept ISO8601 estimation + return retentionDuration, nil +} + +// KafkaChannelStatus represents the current state of a KafkaChannel. +type KafkaChannelStatus struct { + // Channel conforms to Duck type ChannelableStatus. 
+ eventingduck.ChannelableStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaChannelList is a collection of KafkaChannels. +type KafkaChannelList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []KafkaChannel `json:"items"` +} + +// GetGroupVersionKind returns GroupVersionKind for KafkaChannels +func (kc *KafkaChannel) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("KafkaChannel") +} + +// GetStatus retrieves the duck status for this resource. Implements the KRShaped interface. +func (kc *KafkaChannel) GetStatus() *duckv1.Status { + return &kc.Status.Status +} diff --git a/control-plane/pkg/apis/messaging/v1/kafka_channel_validation.go b/control-plane/pkg/apis/messaging/v1/kafka_channel_validation.go new file mode 100644 index 0000000000..8b5e923497 --- /dev/null +++ b/control-plane/pkg/apis/messaging/v1/kafka_channel_validation.go @@ -0,0 +1,159 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "github.com/google/go-cmp/cmp" + + "github.com/google/go-cmp/cmp/cmpopts" + "knative.dev/eventing/pkg/apis/eventing" + "knative.dev/pkg/apis" + "knative.dev/pkg/kmp" +) + +const eventingControllerSAName = "system:serviceaccount:knative-eventing:eventing-controller" + +func (kc *KafkaChannel) Validate(ctx context.Context) *apis.FieldError { + errs := kc.Spec.Validate(ctx).ViaField("spec") + + // Validate annotations + if kc.Annotations != nil { + if scope, ok := kc.Annotations[eventing.ScopeAnnotationKey]; ok { + if scope != "namespace" && scope != "cluster" { + iv := apis.ErrInvalidValue(scope, "") + iv.Details = "expected either 'cluster' or 'namespace'" + errs = errs.Also(iv.ViaFieldKey("annotations", eventing.ScopeAnnotationKey).ViaField("metadata")) + } + } + } + + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*KafkaChannel) + errs = errs.Also(kc.CheckImmutableFields(ctx, original)) + errs = errs.Also(kc.CheckSubscribersChangeAllowed(ctx, original)) + } + + return errs +} + +func (kcs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + if kcs.NumPartitions <= 0 { + fe := apis.ErrInvalidValue(kcs.NumPartitions, "numPartitions") + errs = errs.Also(fe) + } + + if kcs.ReplicationFactor <= 0 { + fe := apis.ErrInvalidValue(kcs.ReplicationFactor, "replicationFactor") + errs = errs.Also(fe) + } + + retentionDuration, err := kcs.ParseRetentionDuration() + if retentionDuration < 0 || err != nil { + fe := apis.ErrInvalidValue(kcs.RetentionDuration, "retentionDuration") + errs = errs.Also(fe) + } + + for i, subscriber := range kcs.SubscribableSpec.Subscribers { + if subscriber.ReplyURI == nil && subscriber.SubscriberURI == nil { + fe := apis.ErrMissingField("replyURI", "subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe.ViaField(fmt.Sprintf("subscriber[%d]", i)).ViaField("subscribable")) + } + } + return errs +} + 
+func (kc *KafkaChannel) CheckImmutableFields(_ context.Context, original *KafkaChannel) *apis.FieldError { + if original == nil { + return nil + } + + ignoreArguments := []cmp.Option{cmpopts.IgnoreFields(KafkaChannelSpec{}, "ChannelableSpec")} + + // In the specific case of the original RetentionDuration being an empty string, allow it + // as an exception to the immutability requirement. + // + // KafkaChannels created pre-v0.26 will not have a RetentionDuration field (thus an empty + // string), and in v0.26 there is a post-install job that updates this to its proper value. + // This immutability check was added after the post-install job, and without this exception + // it will fail attempting to upgrade those pre-v0.26 channels. + if original.Spec.RetentionDuration == "" && kc.Spec.RetentionDuration != "" { + ignoreArguments = append(ignoreArguments, cmpopts.IgnoreFields(KafkaChannelSpec{}, "RetentionDuration")) + } + + if diff, err := kmp.ShortDiff(original.Spec, kc.Spec, ignoreArguments...); err != nil { + return &apis.FieldError{ + Message: "Failed to diff KafkaChannel", + Paths: []string{"spec"}, + Details: err.Error(), + } + } else if diff != "" { + return &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: diff, + } + } + + return nil +} + +func (kc *KafkaChannel) CheckSubscribersChangeAllowed(ctx context.Context, original *KafkaChannel) *apis.FieldError { + if original == nil { + return nil + } + + if !canChangeChannelSpecAuth(ctx) { + return kc.checkSubsciberSpecAuthChanged(ctx, original) + } + return nil +} + +func (kc *KafkaChannel) checkSubsciberSpecAuthChanged(ctx context.Context, original *KafkaChannel) *apis.FieldError { + if diff, err := kmp.ShortDiff(original.Spec.Subscribers, kc.Spec.Subscribers); err != nil { + return &apis.FieldError{ + Message: "Failed to diff Channel.Spec.Subscribers", + Paths: []string{"spec.subscribers"}, + Details: err.Error(), + } + } else if diff != "" { + user := 
apis.GetUserInfo(ctx) + userName := "" + if user != nil { + userName = user.Username + } + return &apis.FieldError{ + Message: fmt.Sprintf("Channel.Spec.Subscribers changed by user %s which was not the %s service account", userName, eventingControllerSAName), + Paths: []string{"spec.subscribers"}, + Details: diff, + } + } + return nil +} + +func canChangeChannelSpecAuth(ctx context.Context) bool { + user := apis.GetUserInfo(ctx) + if user == nil { + return false + } + return user.Username == eventingControllerSAName +} diff --git a/control-plane/pkg/apis/messaging/v1/register.go b/control-plane/pkg/apis/messaging/v1/register.go new file mode 100644 index 0000000000..9e52c1ce9d --- /dev/null +++ b/control-plane/pkg/apis/messaging/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: messaging.GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KafkaChannel{}, + &KafkaChannelList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/control-plane/pkg/apis/messaging/v1/zz_generated.deepcopy.go b/control-plane/pkg/apis/messaging/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..19d8f94de5 --- /dev/null +++ b/control-plane/pkg/apis/messaging/v1/zz_generated.deepcopy.go @@ -0,0 +1,121 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannel) DeepCopyInto(out *KafkaChannel) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannel. +func (in *KafkaChannel) DeepCopy() *KafkaChannel { + if in == nil { + return nil + } + out := new(KafkaChannel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaChannel) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelList) DeepCopyInto(out *KafkaChannelList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KafkaChannel, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelList. 
+func (in *KafkaChannelList) DeepCopy() *KafkaChannelList { + if in == nil { + return nil + } + out := new(KafkaChannelList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaChannelList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelSpec) DeepCopyInto(out *KafkaChannelSpec) { + *out = *in + in.ChannelableSpec.DeepCopyInto(&out.ChannelableSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelSpec. +func (in *KafkaChannelSpec) DeepCopy() *KafkaChannelSpec { + if in == nil { + return nil + } + out := new(KafkaChannelSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelStatus) DeepCopyInto(out *KafkaChannelStatus) { + *out = *in + in.ChannelableStatus.DeepCopyInto(&out.ChannelableStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelStatus. 
+func (in *KafkaChannelStatus) DeepCopy() *KafkaChannelStatus { + if in == nil { + return nil + } + out := new(KafkaChannelStatus) + in.DeepCopyInto(out) + return out +} diff --git a/control-plane/pkg/apis/messaging/v1beta1/kafka_channel_conversion.go b/control-plane/pkg/apis/messaging/v1beta1/kafka_channel_conversion.go index 3dbdd70342..1f8643ddb6 100644 --- a/control-plane/pkg/apis/messaging/v1beta1/kafka_channel_conversion.go +++ b/control-plane/pkg/apis/messaging/v1beta1/kafka_channel_conversion.go @@ -21,14 +21,46 @@ import ( "fmt" "knative.dev/pkg/apis" + + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" ) // ConvertTo implements apis.Convertible -func (channel *KafkaChannel) ConvertTo(_ context.Context, sink apis.Convertible) error { - return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) +func (channel *KafkaChannel) ConvertTo(_ context.Context, to apis.Convertible) error { + switch sink := to.(type) { + case *v1.KafkaChannel: + channel.ObjectMeta.DeepCopyInto(&sink.ObjectMeta) + sink.Spec = v1.KafkaChannelSpec{ + NumPartitions: channel.Spec.NumPartitions, + ReplicationFactor: channel.Spec.ReplicationFactor, + RetentionDuration: channel.Spec.RetentionDuration, + ChannelableSpec: *channel.Spec.ChannelableSpec.DeepCopy(), + } + sink.Status = v1.KafkaChannelStatus{ + ChannelableStatus: *channel.Status.ChannelableStatus.DeepCopy(), + } + return nil + default: + return fmt.Errorf("unknown version, got: %T", sink) + } } // ConvertFrom implements apis.Convertible -func (sink *KafkaChannel) ConvertFrom(_ context.Context, channel apis.Convertible) error { - return fmt.Errorf("v1beta1 is the highest known version, got: %T", channel) +func (sink *KafkaChannel) ConvertFrom(_ context.Context, from apis.Convertible) error { + switch channel := from.(type) { + case *v1.KafkaChannel: + channel.ObjectMeta.DeepCopyInto(&sink.ObjectMeta) + sink.Spec = KafkaChannelSpec{ + NumPartitions: channel.Spec.NumPartitions, + 
ReplicationFactor: channel.Spec.ReplicationFactor, + RetentionDuration: channel.Spec.RetentionDuration, + ChannelableSpec: *channel.Spec.ChannelableSpec.DeepCopy(), + } + sink.Status = KafkaChannelStatus{ + ChannelableStatus: *channel.Status.ChannelableStatus.DeepCopy(), + } + return nil + default: + return fmt.Errorf("unknown version, got: %T", channel) + } } diff --git a/control-plane/pkg/client/clientset/versioned/clientset.go b/control-plane/pkg/client/clientset/versioned/clientset.go index 2ce9b59834..697d292636 100644 --- a/control-plane/pkg/client/clientset/versioned/clientset.go +++ b/control-plane/pkg/client/clientset/versioned/clientset.go @@ -29,6 +29,7 @@ import ( bindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/bindings/v1beta1" eventingv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/eventing/v1alpha1" internalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/internalskafkaeventing/v1alpha1" + messagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1beta1" sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1beta1" @@ -40,6 +41,7 @@ type Interface interface { BindingsV1beta1() bindingsv1beta1.BindingsV1beta1Interface EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface + MessagingV1() messagingv1.MessagingV1Interface MessagingV1beta1() messagingv1beta1.MessagingV1beta1Interface SourcesV1() sourcesv1.SourcesV1Interface SourcesV1beta1() sourcesv1beta1.SourcesV1beta1Interface @@ -52,6 +54,7 @@ type Clientset 
struct { bindingsV1beta1 *bindingsv1beta1.BindingsV1beta1Client eventingV1alpha1 *eventingv1alpha1.EventingV1alpha1Client internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client + messagingV1 *messagingv1.MessagingV1Client messagingV1beta1 *messagingv1beta1.MessagingV1beta1Client sourcesV1 *sourcesv1.SourcesV1Client sourcesV1beta1 *sourcesv1beta1.SourcesV1beta1Client @@ -77,6 +80,11 @@ func (c *Clientset) InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interfac return c.internalV1alpha1 } +// MessagingV1 retrieves the MessagingV1Client +func (c *Clientset) MessagingV1() messagingv1.MessagingV1Interface { + return c.messagingV1 +} + // MessagingV1beta1 retrieves the MessagingV1beta1Client func (c *Clientset) MessagingV1beta1() messagingv1beta1.MessagingV1beta1Interface { return c.messagingV1beta1 @@ -152,6 +160,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.messagingV1, err = messagingv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.messagingV1beta1, err = messagingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -189,6 +201,7 @@ func New(c rest.Interface) *Clientset { cs.bindingsV1beta1 = bindingsv1beta1.New(c) cs.eventingV1alpha1 = eventingv1alpha1.New(c) cs.internalV1alpha1 = internalv1alpha1.New(c) + cs.messagingV1 = messagingv1.New(c) cs.messagingV1beta1 = messagingv1beta1.New(c) cs.sourcesV1 = sourcesv1.New(c) cs.sourcesV1beta1 = sourcesv1beta1.New(c) diff --git a/control-plane/pkg/client/clientset/versioned/fake/clientset_generated.go b/control-plane/pkg/client/clientset/versioned/fake/clientset_generated.go index 15b43cdf62..03f1ae9521 100644 --- a/control-plane/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/control-plane/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -33,6 +33,8 @@ import ( fakeeventingv1alpha1 
"knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake" internalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/internalskafkaeventing/v1alpha1" fakeinternalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/internalskafkaeventing/v1alpha1/fake" + messagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1" + fakemessagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1beta1" fakemessagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1beta1/fake" sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1" @@ -115,6 +117,11 @@ func (c *Clientset) InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interfac return &fakeinternalv1alpha1.FakeInternalV1alpha1{Fake: &c.Fake} } +// MessagingV1 retrieves the MessagingV1Client +func (c *Clientset) MessagingV1() messagingv1.MessagingV1Interface { + return &fakemessagingv1.FakeMessagingV1{Fake: &c.Fake} +} + // MessagingV1beta1 retrieves the MessagingV1beta1Client func (c *Clientset) MessagingV1beta1() messagingv1beta1.MessagingV1beta1Interface { return &fakemessagingv1beta1.FakeMessagingV1beta1{Fake: &c.Fake} diff --git a/control-plane/pkg/client/clientset/versioned/fake/register.go b/control-plane/pkg/client/clientset/versioned/fake/register.go index 34e19e4050..d867130d07 100644 --- a/control-plane/pkg/client/clientset/versioned/fake/register.go +++ b/control-plane/pkg/client/clientset/versioned/fake/register.go @@ -28,6 +28,7 @@ import ( bindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1beta1" eventingv1alpha1 
"knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1" internalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1" + messagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1beta1" sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1" @@ -41,6 +42,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ bindingsv1beta1.AddToScheme, eventingv1alpha1.AddToScheme, internalv1alpha1.AddToScheme, + messagingv1.AddToScheme, messagingv1beta1.AddToScheme, sourcesv1.AddToScheme, sourcesv1beta1.AddToScheme, diff --git a/control-plane/pkg/client/clientset/versioned/scheme/register.go b/control-plane/pkg/client/clientset/versioned/scheme/register.go index cff3156b9c..793c9f4456 100644 --- a/control-plane/pkg/client/clientset/versioned/scheme/register.go +++ b/control-plane/pkg/client/clientset/versioned/scheme/register.go @@ -28,6 +28,7 @@ import ( bindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1beta1" eventingv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1" internalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1" + messagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1beta1" sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1" @@ -41,6 +42,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ bindingsv1beta1.AddToScheme, eventingv1alpha1.AddToScheme, internalv1alpha1.AddToScheme, + messagingv1.AddToScheme, messagingv1beta1.AddToScheme, 
sourcesv1.AddToScheme, sourcesv1beta1.AddToScheme, diff --git a/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/doc.go b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/doc.go new file mode 100644 index 0000000000..54167ac41c --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/doc.go @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/doc.go b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/doc.go new file mode 100644 index 0000000000..7f850700e3 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/fake_kafkachannel.go b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/fake_kafkachannel.go new file mode 100644 index 0000000000..6b411bff07 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/fake_kafkachannel.go @@ -0,0 +1,147 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" +) + +// FakeKafkaChannels implements KafkaChannelInterface +type FakeKafkaChannels struct { + Fake *FakeMessagingV1 + ns string +} + +var kafkachannelsResource = v1.SchemeGroupVersion.WithResource("kafkachannels") + +var kafkachannelsKind = v1.SchemeGroupVersion.WithKind("KafkaChannel") + +// Get takes name of the kafkaChannel, and returns the corresponding kafkaChannel object, and an error if there is any. +func (c *FakeKafkaChannels) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KafkaChannel, err error) { + emptyResult := &v1.KafkaChannel{} + obj, err := c.Fake. + Invokes(testing.NewGetActionWithOptions(kafkachannelsResource, c.ns, name, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.KafkaChannel), err +} + +// List takes label and field selectors, and returns the list of KafkaChannels that match those selectors. +func (c *FakeKafkaChannels) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KafkaChannelList, err error) { + emptyResult := &v1.KafkaChannelList{} + obj, err := c.Fake. 
+ Invokes(testing.NewListActionWithOptions(kafkachannelsResource, kafkachannelsKind, c.ns, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.KafkaChannelList{ListMeta: obj.(*v1.KafkaChannelList).ListMeta} + for _, item := range obj.(*v1.KafkaChannelList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kafkaChannels. +func (c *FakeKafkaChannels) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchActionWithOptions(kafkachannelsResource, c.ns, opts)) + +} + +// Create takes the representation of a kafkaChannel and creates it. Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *FakeKafkaChannels) Create(ctx context.Context, kafkaChannel *v1.KafkaChannel, opts metav1.CreateOptions) (result *v1.KafkaChannel, err error) { + emptyResult := &v1.KafkaChannel{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(kafkachannelsResource, c.ns, kafkaChannel, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.KafkaChannel), err +} + +// Update takes the representation of a kafkaChannel and updates it. Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *FakeKafkaChannels) Update(ctx context.Context, kafkaChannel *v1.KafkaChannel, opts metav1.UpdateOptions) (result *v1.KafkaChannel, err error) { + emptyResult := &v1.KafkaChannel{} + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateActionWithOptions(kafkachannelsResource, c.ns, kafkaChannel, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.KafkaChannel), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKafkaChannels) UpdateStatus(ctx context.Context, kafkaChannel *v1.KafkaChannel, opts metav1.UpdateOptions) (result *v1.KafkaChannel, err error) { + emptyResult := &v1.KafkaChannel{} + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceActionWithOptions(kafkachannelsResource, "status", c.ns, kafkaChannel, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.KafkaChannel), err +} + +// Delete takes name of the kafkaChannel and deletes it. Returns an error if one occurs. +func (c *FakeKafkaChannels) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(kafkachannelsResource, c.ns, name, opts), &v1.KafkaChannel{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKafkaChannels) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionActionWithOptions(kafkachannelsResource, c.ns, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.KafkaChannelList{}) + return err +} + +// Patch applies the patch and returns the patched kafkaChannel. +func (c *FakeKafkaChannels) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KafkaChannel, err error) { + emptyResult := &v1.KafkaChannel{} + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceActionWithOptions(kafkachannelsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.KafkaChannel), err +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/fake_messaging_client.go b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/fake_messaging_client.go new file mode 100644 index 0000000000..a0d3a71730 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/fake/fake_messaging_client.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1" +) + +type FakeMessagingV1 struct { + *testing.Fake +} + +func (c *FakeMessagingV1) KafkaChannels(namespace string) v1.KafkaChannelInterface { + return &FakeKafkaChannels{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeMessagingV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/generated_expansion.go b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/generated_expansion.go new file mode 100644 index 0000000000..6bba162c28 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type KafkaChannelExpansion interface{} diff --git a/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/kafkachannel.go b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/kafkachannel.go new file mode 100644 index 0000000000..9db9b3dfff --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/kafkachannel.go @@ -0,0 +1,69 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" + scheme "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/scheme" +) + +// KafkaChannelsGetter has a method to return a KafkaChannelInterface. +// A group's client should implement this interface. +type KafkaChannelsGetter interface { + KafkaChannels(namespace string) KafkaChannelInterface +} + +// KafkaChannelInterface has methods to work with KafkaChannel resources. +type KafkaChannelInterface interface { + Create(ctx context.Context, kafkaChannel *v1.KafkaChannel, opts metav1.CreateOptions) (*v1.KafkaChannel, error) + Update(ctx context.Context, kafkaChannel *v1.KafkaChannel, opts metav1.UpdateOptions) (*v1.KafkaChannel, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, kafkaChannel *v1.KafkaChannel, opts metav1.UpdateOptions) (*v1.KafkaChannel, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.KafkaChannel, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.KafkaChannelList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KafkaChannel, err error) + KafkaChannelExpansion +} + +// kafkaChannels implements KafkaChannelInterface +type kafkaChannels struct { + *gentype.ClientWithList[*v1.KafkaChannel, *v1.KafkaChannelList] +} + +// newKafkaChannels returns a KafkaChannels +func newKafkaChannels(c *MessagingV1Client, namespace string) *kafkaChannels { + return &kafkaChannels{ + gentype.NewClientWithList[*v1.KafkaChannel, *v1.KafkaChannelList]( + "kafkachannels", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.KafkaChannel { return &v1.KafkaChannel{} }, + func() *v1.KafkaChannelList { return &v1.KafkaChannelList{} }), + } +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/messaging_client.go b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/messaging_client.go new file mode 100644 index 0000000000..c91d225aa6 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/messaging/v1/messaging_client.go @@ -0,0 +1,107 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" + "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/scheme" +) + +type MessagingV1Interface interface { + RESTClient() rest.Interface + KafkaChannelsGetter +} + +// MessagingV1Client is used to interact with features provided by the messaging.knative.dev group. +type MessagingV1Client struct { + restClient rest.Interface +} + +func (c *MessagingV1Client) KafkaChannels(namespace string) KafkaChannelInterface { + return newKafkaChannels(c, namespace) +} + +// NewForConfig creates a new MessagingV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*MessagingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new MessagingV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MessagingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &MessagingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new MessagingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MessagingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MessagingV1Client for the given RESTClient. +func New(c rest.Interface) *MessagingV1Client { + return &MessagingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *MessagingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/control-plane/pkg/client/informers/externalversions/generic.go b/control-plane/pkg/client/informers/externalversions/generic.go index 61384009ff..4f06e782d7 100644 --- a/control-plane/pkg/client/informers/externalversions/generic.go +++ b/control-plane/pkg/client/informers/externalversions/generic.go @@ -27,6 +27,7 @@ import ( v1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1beta1" v1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1" internalskafkaeventingv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1" + messagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1beta1" sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1" @@ -76,6 +77,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case internalskafkaeventingv1alpha1.SchemeGroupVersion.WithResource("consumergroups"): return &genericInformer{resource: resource.GroupResource(), informer: f.Internal().V1alpha1().ConsumerGroups().Informer()}, nil + // Group=messaging.knative.dev, Version=v1 + case messagingv1.SchemeGroupVersion.WithResource("kafkachannels"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1().KafkaChannels().Informer()}, nil + // Group=messaging.knative.dev, Version=v1beta1 case messagingv1beta1.SchemeGroupVersion.WithResource("kafkachannels"): return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1beta1().KafkaChannels().Informer()}, nil diff --git a/control-plane/pkg/client/informers/externalversions/messaging/interface.go 
b/control-plane/pkg/client/informers/externalversions/messaging/interface.go index 3f0be0d115..949a429dfc 100644 --- a/control-plane/pkg/client/informers/externalversions/messaging/interface.go +++ b/control-plane/pkg/client/informers/externalversions/messaging/interface.go @@ -20,11 +20,14 @@ package messaging import ( internalinterfaces "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/internalinterfaces" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/messaging/v1" v1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/messaging/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -40,6 +43,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/control-plane/pkg/client/informers/externalversions/messaging/v1/interface.go b/control-plane/pkg/client/informers/externalversions/messaging/v1/interface.go new file mode 100644 index 0000000000..1d8b08bcc1 --- /dev/null +++ b/control-plane/pkg/client/informers/externalversions/messaging/v1/interface.go @@ -0,0 +1,45 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // KafkaChannels returns a KafkaChannelInformer. + KafkaChannels() KafkaChannelInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// KafkaChannels returns a KafkaChannelInformer. 
+func (v *version) KafkaChannels() KafkaChannelInformer { + return &kafkaChannelInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/control-plane/pkg/client/informers/externalversions/messaging/v1/kafkachannel.go b/control-plane/pkg/client/informers/externalversions/messaging/v1/kafkachannel.go new file mode 100644 index 0000000000..e5f4e436ab --- /dev/null +++ b/control-plane/pkg/client/informers/externalversions/messaging/v1/kafkachannel.go @@ -0,0 +1,90 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + messagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" + versioned "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/internalinterfaces" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/listers/messaging/v1" +) + +// KafkaChannelInformer provides access to a shared informer and lister for +// KafkaChannels. 
+type KafkaChannelInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.KafkaChannelLister +} + +type kafkaChannelInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKafkaChannelInformer constructs a new informer for KafkaChannel type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewKafkaChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKafkaChannelInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKafkaChannelInformer constructs a new informer for KafkaChannel type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredKafkaChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MessagingV1().KafkaChannels(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MessagingV1().KafkaChannels(namespace).Watch(context.TODO(), options) + }, + }, + &messagingv1.KafkaChannel{}, + resyncPeriod, + indexers, + ) +} + +func (f *kafkaChannelInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKafkaChannelInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kafkaChannelInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&messagingv1.KafkaChannel{}, f.defaultInformer) +} + +func (f *kafkaChannelInformer) Lister() v1.KafkaChannelLister { + return v1.NewKafkaChannelLister(f.Informer().GetIndexer()) +} diff --git a/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/fake/fake.go b/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/fake/fake.go new file mode 100644 index 0000000000..c88fdeaf1b --- /dev/null +++ b/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/fake/fake.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + fake "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/informers/factory/fake" + kafkachannel "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = kafkachannel.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Messaging().V1().KafkaChannels() + return context.WithValue(ctx, kafkachannel.Key{}, inf), inf.Informer() +} diff --git a/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/filtered/fake/fake.go b/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/filtered/fake/fake.go new file mode 100644 index 0000000000..5b5b3eaefd --- /dev/null +++ b/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/filtered/fake/fake.go @@ -0,0 +1,52 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + factoryfiltered "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/informers/factory/filtered" + filtered "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterFilteredInformers(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(factoryfiltered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := factoryfiltered.Get(ctx, selector) + inf := f.Messaging().V1().KafkaChannels() + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} diff --git a/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/filtered/kafkachannel.go b/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/filtered/kafkachannel.go new file mode 100644 index 0000000000..5792694cec --- /dev/null +++ b/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/filtered/kafkachannel.go @@ -0,0 +1,65 @@ +/* + * Copyright 2021 The 
Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by injection-gen. DO NOT EDIT. + +package filtered + +import ( + context "context" + + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/messaging/v1" + filtered "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterFilteredInformers(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct { + Selector string +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := filtered.Get(ctx, selector) + inf := f.Messaging().V1().KafkaChannels() + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context, selector string) v1.KafkaChannelInformer { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/messaging/v1.KafkaChannelInformer with selector %s from context.", selector) + } + return untyped.(v1.KafkaChannelInformer) +} diff --git a/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/kafkachannel.go b/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/kafkachannel.go new file mode 100644 index 0000000000..638b457306 --- /dev/null +++ b/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel/kafkachannel.go @@ -0,0 +1,52 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by injection-gen. DO NOT EDIT. + +package kafkachannel + +import ( + context "context" + + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/messaging/v1" + factory "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. 
+type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Messaging().V1().KafkaChannels() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.KafkaChannelInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/messaging/v1.KafkaChannelInformer from context.") + } + return untyped.(v1.KafkaChannelInformer) +} diff --git a/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/controller.go b/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/controller.go new file mode 100644 index 0000000000..17b02d20f8 --- /dev/null +++ b/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/controller.go @@ -0,0 +1,170 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package kafkachannel + +import ( + context "context" + fmt "fmt" + reflect "reflect" + strings "strings" + + zap "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + record "k8s.io/client-go/tools/record" + versionedscheme "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/scheme" + client "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/client" + kafkachannel "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/informers/messaging/v1/kafkachannel" + kubeclient "knative.dev/pkg/client/injection/kube/client" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" + logkey "knative.dev/pkg/logging/logkey" + reconciler "knative.dev/pkg/reconciler" +) + +const ( + defaultControllerAgentName = "kafkachannel-controller" + defaultFinalizerName = "kafkachannels.messaging.knative.dev" +) + +// NewImpl returns a controller.Impl that handles queuing and feeding work from +// the queue through an implementation of controller.Reconciler, delegating to +// the provided Interface and optional Finalizer methods. OptionsFn is used to return +// controller.ControllerOptions to be used by the internal reconciler. +func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl { + logger := logging.FromContext(ctx) + + // Check the options function input. It should be 0 or 1. 
+ if len(optionsFns) > 1 { + logger.Fatal("Up to one options function is supported, found: ", len(optionsFns)) + } + + kafkachannelInformer := kafkachannel.Get(ctx) + + lister := kafkachannelInformer.Lister() + + var promoteFilterFunc func(obj interface{}) bool + var promoteFunc = func(bkt reconciler.Bucket) {} + + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + + // Signal promotion event + promoteFunc(bkt) + + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } + enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, + Client: client.Get(ctx), + Lister: lister, + reconciler: r, + finalizerName: defaultFinalizerName, + } + + ctrType := reflect.TypeOf(r).Elem() + ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name()) + ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".") + + logger = logger.With( + zap.String(logkey.ControllerType, ctrTypeName), + zap.String(logkey.Kind, "messaging.knative.dev.KafkaChannel"), + ) + + impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger}) + agentName := defaultControllerAgentName + + // Pass impl to the options. Save any optional results. 
+ for _, fn := range optionsFns { + opts := fn(impl) + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + if opts.AgentName != "" { + agentName = opts.AgentName + } + if opts.SkipStatusUpdates { + rec.skipStatusUpdates = true + } + if opts.DemoteFunc != nil { + rec.DemoteFunc = opts.DemoteFunc + } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } + if opts.PromoteFunc != nil { + promoteFunc = opts.PromoteFunc + } + } + + rec.Recorder = createRecorder(ctx, agentName) + + return impl +} + +func createRecorder(ctx context.Context, agentName string) record.EventRecorder { + logger := logging.FromContext(ctx) + + recorder := controller.GetEventRecorder(ctx) + if recorder == nil { + // Create event broadcaster + logger.Debug("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + watches := []watch.Interface{ + eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof), + eventBroadcaster.StartRecordingToSink( + &v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}), + } + recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName}) + go func() { + <-ctx.Done() + for _, w := range watches { + w.Stop() + } + }() + } + + return recorder +} + +func init() { + versionedscheme.AddToScheme(scheme.Scheme) +} diff --git a/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/reconciler.go b/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/reconciler.go new file mode 100644 index 0000000000..e5390dd9be --- /dev/null +++ b/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/reconciler.go @@ -0,0 +1,440 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by injection-gen. DO NOT EDIT. + +package kafkachannel + +import ( + context "context" + json "encoding/json" + fmt "fmt" + + zap "go.uber.org/zap" + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + equality "k8s.io/apimachinery/pkg/api/equality" + errors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + sets "k8s.io/apimachinery/pkg/util/sets" + record "k8s.io/client-go/tools/record" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" + versioned "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned" + messagingv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/listers/messaging/v1" + controller "knative.dev/pkg/controller" + kmp "knative.dev/pkg/kmp" + logging "knative.dev/pkg/logging" + reconciler "knative.dev/pkg/reconciler" +) + +// Interface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1.KafkaChannel. +type Interface interface { + // ReconcileKind implements custom logic to reconcile v1.KafkaChannel. Any changes + // to the object's .Status or .Finalizers will be propagated to the stored + // object. It is recommended that implementors do not call any update calls + // for the Kind inside of ReconcileKind, it is the responsibility of the calling + // controller to propagate those properties. The resource passed to ReconcileKind + // will always have an empty deletion timestamp. 
+ ReconcileKind(ctx context.Context, o *v1.KafkaChannel) reconciler.Event +} + +// Finalizer defines the strongly typed interfaces to be implemented by a +// controller finalizing v1.KafkaChannel. +type Finalizer interface { + // FinalizeKind implements custom logic to finalize v1.KafkaChannel. Any changes + // to the object's .Status or .Finalizers will be ignored. Returning a nil or + // Normal type reconciler.Event will allow the finalizer to be deleted on + // the resource. The resource passed to FinalizeKind will always have a set + // deletion timestamp. + FinalizeKind(ctx context.Context, o *v1.KafkaChannel) reconciler.Event +} + +// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1.KafkaChannel if they want to process resources for which +// they are not the leader. +type ReadOnlyInterface interface { + // ObserveKind implements logic to observe v1.KafkaChannel. + // This method should not write to the API. + ObserveKind(ctx context.Context, o *v1.KafkaChannel) reconciler.Event +} + +type doReconcile func(ctx context.Context, o *v1.KafkaChannel) reconciler.Event + +// reconcilerImpl implements controller.Reconciler for v1.KafkaChannel resources. +type reconcilerImpl struct { + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. + reconciler.LeaderAwareFuncs + + // Client is used to write back status updates. + Client versioned.Interface + + // Listers index properties about resources. + Lister messagingv1.KafkaChannelLister + + // Recorder is an event recorder for recording Event resources to the + // Kubernetes API. + Recorder record.EventRecorder + + // configStore allows for decorating a context with config maps. + // +optional + configStore reconciler.ConfigStore + + // reconciler is the implementation of the business logic of the resource. + reconciler Interface + + // finalizerName is the name of the finalizer to reconcile. 
+ finalizerName string + + // skipStatusUpdates configures whether or not this reconciler automatically updates + // the status of the reconciled resource. + skipStatusUpdates bool +} + +// Check that our Reconciler implements controller.Reconciler. +var _ controller.Reconciler = (*reconcilerImpl)(nil) + +// Check that our generated Reconciler is always LeaderAware. +var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) + +func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister messagingv1.KafkaChannelLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { + // Check the options function input. It should be 0 or 1. + if len(options) > 1 { + logger.Fatal("Up to one options struct is supported, found: ", len(options)) + } + + // Fail fast when users inadvertently implement the other LeaderAware interface. + // For the typed reconcilers, Promote shouldn't take any arguments. + if _, ok := r.(reconciler.LeaderAware); ok { + logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) + } + + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. 
+ enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, + Client: client, + Lister: lister, + Recorder: recorder, + reconciler: r, + finalizerName: defaultFinalizerName, + } + + for _, opts := range options { + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + if opts.SkipStatusUpdates { + rec.skipStatusUpdates = true + } + if opts.DemoteFunc != nil { + rec.DemoteFunc = opts.DemoteFunc + } + } + + return rec +} + +// Reconcile implements controller.Reconciler +func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + // Initialize the reconciler state. This will convert the namespace/name + // string into a distinct namespace and name, determine if this instance of + // the reconciler is the leader, and any additional interfaces implemented + // by the reconciler. Returns an error is the resource key is invalid. + s, err := newState(key, r) + if err != nil { + logger.Error("Invalid resource key: ", key) + return nil + } + + // If we are not the leader, and we don't implement either ReadOnly + // observer interfaces, then take a fast-path out. + if s.isNotLeaderNorObserver() { + return controller.NewSkipKey(key) + } + + // If configStore is set, attach the frozen configuration to the context. + if r.configStore != nil { + ctx = r.configStore.ToContext(ctx) + } + + // Add the recorder to context. + ctx = controller.WithEventRecorder(ctx, r.Recorder) + + // Get the resource with this namespace/name. + + getter := r.Lister.KafkaChannels(s.namespace) + + original, err := getter.Get(s.name) + + if errors.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. 
+ logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy. + resource := original.DeepCopy() + + var reconcileEvent reconciler.Event + + name, do := s.reconcileMethodFor(resource) + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", name)) + switch name { + case reconciler.DoReconcileKind: + // Set and update the finalizer on resource if r.reconciler + // implements Finalizer. + if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { + return fmt.Errorf("failed to set finalizers: %w", err) + } + + if !r.skipStatusUpdates { + reconciler.PreProcessReconcile(ctx, resource) + } + + // Reconcile this copy of the resource and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileEvent = do(ctx, resource) + + if !r.skipStatusUpdates { + reconciler.PostProcessReconcile(ctx, resource, original) + } + + case reconciler.DoFinalizeKind: + // For finalizing reconcilers, if this resource being marked for deletion + // and reconciled cleanly (nil or normal event), remove the finalizer. + reconcileEvent = do(ctx, resource) + + if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { + return fmt.Errorf("failed to clear finalizers: %w", err) + } + + case reconciler.DoObserveKind: + // Observe any changes to this resource, since we are not the leader. + reconcileEvent = do(ctx, resource) + + } + + // Synchronize the status. + switch { + case r.skipStatusUpdates: + // This reconciler implementation is configured to skip resource updates. + // This may mean this reconciler does not observe spec, but reconciles external changes. 
+ case equality.Semantic.DeepEqual(original.Status, resource.Status): + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the injectionInformer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + case !s.isLeader: + // High-availability reconcilers may have many replicas watching the resource, but only + // the elected leader is expected to write modifications. + logger.Warn("Saw status changes when we aren't the leader!") + default: + if err = r.updateStatus(ctx, logger, original, resource); err != nil { + logger.Warnw("Failed to update resource status", zap.Error(err)) + r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for %q: %v", resource.Name, err) + return err + } + } + + // Report the reconciler event, if any. + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) + + // the event was wrapped inside an error, consider the reconciliation as failed + if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { + return reconcileEvent + } + return nil + } + + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. 
+ } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } + return reconcileEvent + } + + return nil +} + +func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1.KafkaChannel, desired *v1.KafkaChannel) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + + getter := r.Client.MessagingV1().KafkaChannels(desired.Namespace) + + existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. + if equality.Semantic.DeepEqual(existing.Status, desired.Status) { + return nil + } + + if logger.Desugar().Core().Enabled(zapcore.DebugLevel) { + if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" { + logger.Debug("Updating status with: ", diff) + } + } + + existing.Status = desired.Status + + updater := r.Client.MessagingV1().KafkaChannels(existing.Namespace) + + _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{}) + return err + }) +} + +// updateFinalizersFiltered will update the Finalizers of the resource. +// TODO: this method could be generic and sync all finalizers. For now it only +// updates defaultFinalizerName or its override. +func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.KafkaChannel, desiredFinalizers sets.Set[string]) (*v1.KafkaChannel, error) { + // Don't modify the informers copy. + existing := resource.DeepCopy() + + var finalizers []string + + // If there's nothing to update, just return. + existingFinalizers := sets.New[string](existing.Finalizers...) 
+ + if desiredFinalizers.Has(r.finalizerName) { + if existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Add the finalizer. + finalizers = append(existing.Finalizers, r.finalizerName) + } else { + if !existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Remove the finalizer. + existingFinalizers.Delete(r.finalizerName) + finalizers = sets.List(existingFinalizers) + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": finalizers, + "resourceVersion": existing.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return resource, err + } + + patcher := r.Client.MessagingV1().KafkaChannels(resource.Namespace) + + resourceName := resource.Name + updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed", + "Failed to update finalizers for %q: %v", resourceName, err) + } else { + r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate", + "Updated %q finalizers", resource.GetName()) + } + return updated, err +} + +func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.KafkaChannel) (*v1.KafkaChannel, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + + finalizers := sets.New[string](resource.Finalizers...) + + // If this resource is not being deleted, mark the finalizer. + if resource.GetDeletionTimestamp().IsZero() { + finalizers.Insert(r.finalizerName) + } + + // Synchronize the finalizers filtered by r.finalizerName. 
+ return r.updateFinalizersFiltered(ctx, resource, finalizers) +} + +func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.KafkaChannel, reconcileEvent reconciler.Event) (*v1.KafkaChannel, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + if resource.GetDeletionTimestamp().IsZero() { + return resource, nil + } + + finalizers := sets.New[string](resource.Finalizers...) + + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + if event.EventType == corev1.EventTypeNormal { + finalizers.Delete(r.finalizerName) + } + } + } else { + finalizers.Delete(r.finalizerName) + } + + // Synchronize the finalizers filtered by r.finalizerName. + return r.updateFinalizersFiltered(ctx, resource, finalizers) +} diff --git a/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/state.go b/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/state.go new file mode 100644 index 0000000000..5d47f8f37b --- /dev/null +++ b/control-plane/pkg/client/injection/reconciler/messaging/v1/kafkachannel/state.go @@ -0,0 +1,97 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package kafkachannel + +import ( + fmt "fmt" + + types "k8s.io/apimachinery/pkg/types" + cache "k8s.io/client-go/tools/cache" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" + reconciler "knative.dev/pkg/reconciler" +) + +// state is used to track the state of a reconciler in a single run. +type state struct { + // key is the original reconciliation key from the queue. + key string + // namespace is the namespace split from the reconciliation key. + namespace string + // name is the name split from the reconciliation key. + name string + // reconciler is the reconciler. + reconciler Interface + // roi is the read only interface cast of the reconciler. + roi ReadOnlyInterface + // isROI (Read Only Interface) the reconciler only observes reconciliation. + isROI bool + // isLeader the instance of the reconciler is the elected leader. + isLeader bool +} + +func newState(key string, r *reconcilerImpl) (*state, error) { + // Convert the namespace/name string into a distinct namespace and name. + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return nil, fmt.Errorf("invalid resource key: %s", key) + } + + roi, isROI := r.reconciler.(ReadOnlyInterface) + + isLeader := r.IsLeaderFor(types.NamespacedName{ + Namespace: namespace, + Name: name, + }) + + return &state{ + key: key, + namespace: namespace, + name: name, + reconciler: r.reconciler, + roi: roi, + isROI: isROI, + isLeader: isLeader, + }, nil +} + +// isNotLeaderNorObserver checks to see if this reconciler with the current +// state is enabled to do any work or not. +// isNotLeaderNorObserver returns true when there is no work possible for the +// reconciler. +func (s *state) isNotLeaderNorObserver() bool { + if !s.isLeader && !s.isROI { + // If we are not the leader, and we don't implement the ReadOnly + // interface, then take a fast-path out. 
+ return true + } + return false +} + +func (s *state) reconcileMethodFor(o *v1.KafkaChannel) (string, doReconcile) { + if o.GetDeletionTimestamp().IsZero() { + if s.isLeader { + return reconciler.DoReconcileKind, s.reconciler.ReconcileKind + } else if s.isROI { + return reconciler.DoObserveKind, s.roi.ObserveKind + } + } else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok { + return reconciler.DoFinalizeKind, fin.FinalizeKind + } + return "unknown", nil +} diff --git a/control-plane/pkg/client/listers/messaging/v1/expansion_generated.go b/control-plane/pkg/client/listers/messaging/v1/expansion_generated.go new file mode 100644 index 0000000000..277b9afe23 --- /dev/null +++ b/control-plane/pkg/client/listers/messaging/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// KafkaChannelListerExpansion allows custom methods to be added to +// KafkaChannelLister. +type KafkaChannelListerExpansion interface{} + +// KafkaChannelNamespaceListerExpansion allows custom methods to be added to +// KafkaChannelNamespaceLister. 
+type KafkaChannelNamespaceListerExpansion interface{} diff --git a/control-plane/pkg/client/listers/messaging/v1/kafkachannel.go b/control-plane/pkg/client/listers/messaging/v1/kafkachannel.go new file mode 100644 index 0000000000..3c2591b2cd --- /dev/null +++ b/control-plane/pkg/client/listers/messaging/v1/kafkachannel.go @@ -0,0 +1,70 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1" +) + +// KafkaChannelLister helps list KafkaChannels. +// All objects returned here must be treated as read-only. +type KafkaChannelLister interface { + // List lists all KafkaChannels in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.KafkaChannel, err error) + // KafkaChannels returns an object that can list and get KafkaChannels. + KafkaChannels(namespace string) KafkaChannelNamespaceLister + KafkaChannelListerExpansion +} + +// kafkaChannelLister implements the KafkaChannelLister interface. +type kafkaChannelLister struct { + listers.ResourceIndexer[*v1.KafkaChannel] +} + +// NewKafkaChannelLister returns a new KafkaChannelLister. 
+func NewKafkaChannelLister(indexer cache.Indexer) KafkaChannelLister { + return &kafkaChannelLister{listers.New[*v1.KafkaChannel](indexer, v1.Resource("kafkachannel"))} +} + +// KafkaChannels returns an object that can list and get KafkaChannels. +func (s *kafkaChannelLister) KafkaChannels(namespace string) KafkaChannelNamespaceLister { + return kafkaChannelNamespaceLister{listers.NewNamespaced[*v1.KafkaChannel](s.ResourceIndexer, namespace)} +} + +// KafkaChannelNamespaceLister helps list and get KafkaChannels. +// All objects returned here must be treated as read-only. +type KafkaChannelNamespaceLister interface { + // List lists all KafkaChannels in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.KafkaChannel, err error) + // Get retrieves the KafkaChannel from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.KafkaChannel, error) + KafkaChannelNamespaceListerExpansion +} + +// kafkaChannelNamespaceLister implements the KafkaChannelNamespaceLister +// interface. 
+type kafkaChannelNamespaceLister struct { + listers.ResourceIndexer[*v1.KafkaChannel] +} diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index e3e1d0a518..ee606d55ef 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -56,7 +56,7 @@ group "Knative Codegen" # Knative Injection "${KNATIVE_CODEGEN_PKG}"/hack/generate-knative.sh "injection" \ knative.dev/eventing-kafka-broker/control-plane/pkg/client knative.dev/eventing-kafka-broker/control-plane/pkg/apis \ - "eventing:v1alpha1 messaging:v1beta1 sources:v1 sources:v1beta1 bindings:v1beta1 internalskafkaeventing:v1alpha1" \ + "eventing:v1alpha1 messaging:v1 messaging:v1beta1 sources:v1 sources:v1beta1 bindings:v1beta1 internalskafkaeventing:v1alpha1" \ --go-header-file "${REPO_ROOT_DIR}"/hack/boilerplate/boilerplate.go.txt "${KNATIVE_CODEGEN_PKG}"/hack/generate-knative.sh "injection" \