From 768ac6947f4796af8349d89b45e42cbdbd283603 Mon Sep 17 00:00:00 2001 From: Artem Bortnikov Date: Wed, 13 Nov 2024 10:51:46 +0200 Subject: [PATCH] wip: controller flow Signed-off-by: Artem Bortnikov --- go.mod | 1 + go.sum | 2 + internal/controller/etcdcluster_controller.go | 16 +- .../controller/etcdcluster_controller_new.go | 377 ++++++++++++++++++ internal/controller/observables.go | 54 ++- site/content/en/docs/v0.4/reference/api.md | 4 +- 6 files changed, 435 insertions(+), 19 deletions(-) create mode 100644 internal/controller/etcdcluster_controller_new.go diff --git a/go.mod b/go.mod index 66839769..a420aa97 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 go.etcd.io/etcd/client/v3 v3.5.14 + golang.org/x/sync v0.7.0 k8s.io/api v0.30.2 k8s.io/apimachinery v0.30.2 k8s.io/client-go v0.30.2 diff --git a/go.sum b/go.sum index d60eb6e0..386a34cb 100644 --- a/go.sum +++ b/go.sum @@ -190,6 +190,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/internal/controller/etcdcluster_controller.go b/internal/controller/etcdcluster_controller.go index 879cfb34..03eaec99 100644 --- a/internal/controller/etcdcluster_controller.go +++ b/internal/controller/etcdcluster_controller.go @@ -102,18 +102,20 @@ func (r *EtcdClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) if client.IgnoreNotFound(err) != nil { return ctrl.Result{}, fmt.Errorf("couldn't get statefulset: %w", err) } - state.stsExists = state.statefulSet.UID != "" + // state.stsExists = state.statefulSet.UID != "" // fetch endpoints clusterClient, singleClients, err := factory.NewEtcdClientSet(ctx, state.instance, r.Client) if err != nil { return ctrl.Result{}, err } - state.endpointsFound = clusterClient != nil && singleClients != nil + // state.endpointsFound = clusterClient != nil && singleClients != nil - if clusterClient != nil { - state.endpoints = clusterClient.Endpoints() - } + // if clusterClient != nil { + // state.endpoints = clusterClient.Endpoints() + // } + state.clusterClient = clusterClient + state.singleClients = singleClients // fetch PVCs state.pvcs, err = factory.PVCs(ctx, state.instance, r.Client) @@ -121,8 +123,8 @@ func (r *EtcdClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } - if !state.endpointsFound { - if !state.stsExists { + if !state.endpointsFound() { + if !state.statefulSetExists() { return r.createClusterFromScratch(ctx, state) // TODO: needs implementing } diff --git a/internal/controller/etcdcluster_controller_new.go b/internal/controller/etcdcluster_controller_new.go new file mode 100644 index 00000000..e081c2e2 --- /dev/null +++ 
b/internal/controller/etcdcluster_controller_new.go @@ -0,0 +1,377 @@ +package controller + +import ( + "context" + "fmt" + "sync" + + etcdaenixiov1alpha1 "github.com/aenix-io/etcd-operator/api/v1alpha1" + "github.com/aenix-io/etcd-operator/internal/controller/factory" + "github.com/aenix-io/etcd-operator/internal/log" + clientv3 "go.etcd.io/etcd/client/v3" + "golang.org/x/sync/errgroup" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type ClusterReconciler struct { + client.Client + Scheme *runtime.Scheme + + ClusterDomain string +} + +// +kubebuilder:rbac:groups=etcd.aenix.io,resources=etcdclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=etcd.aenix.io,resources=etcdclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=etcd.aenix.io,resources=etcdclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete;patch +// +kubebuilder:rbac:groups="",resources=services,verbs=get;create;delete;update;patch;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups="apps",resources=statefulsets,verbs=get;create;delete;update;patch;list;watch +// +kubebuilder:rbac:groups="policy",resources=poddisruptionbudgets,verbs=get;create;delete;update;patch;list;watch +// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;patch;watch +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch + +// Reconcile compares the EtcdCluster CR with the observed cluster state and performs the actions required to bring the current state to the desired one. +func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (reconcile.Result, error) { + log.Info(ctx, "Reconciling object") + cluster := &etcdaenixiov1alpha1.EtcdCluster{} + err := r.Get(ctx, req.NamespacedName, cluster) + if errors.IsNotFound(err) { + log.Info(ctx, "resource not found") + return reconcile.Result{}, nil + } + if err != nil { + return reconcile.Result{}, err + } + state := &observables{instance: cluster} + return r.reconcile(ctx, state) +} + +// reconcile performs reconciliation of the cluster. +func (r *ClusterReconciler) reconcile(ctx context.Context, state *observables) (reconcile.Result, error) { + if !state.instance.DeletionTimestamp.IsZero() { + log.Debug(ctx, "resource is being deleted") + return reconcile.Result{}, nil + } + if err := r.ensureUnconditionalObjects(ctx, state.instance); err != nil { + return reconcile.Result{}, err + } + clusterClient, singleClients, err := r.etcdClientSet(ctx, state.instance) + if err != nil { + return reconcile.Result{}, err + } + + state.clusterClient = clusterClient + state.singleClients = singleClients + + // check whether any endpoints exist.
+ if !state.endpointsFound() { + // no endpoints found: right branch in flowchart + return r.reconcileEndpointsAbsent(ctx, state) + } + // endpoints found: left branch in flowchart + return r.reconcileEndpointsPresent(ctx, state) +} + +// reconcileEndpointsAbsent is called in case there are no endpoints observed. +// It checks whether the statefulset exists and creates it if it does not. +func (r *ClusterReconciler) reconcileEndpointsAbsent(ctx context.Context, state *observables) (reconcile.Result, error) { + err := r.Get(ctx, client.ObjectKeyFromObject(state.instance), &state.statefulSet) + if client.IgnoreNotFound(err) != nil { + return reconcile.Result{}, err + } + + if !state.statefulSetExists() { + return reconcile.Result{}, r.createClusterFromScratch(ctx, state) // todo: not implemented yet + } + if !state.statefulSetPodSpecCorrect() { // todo: not implemented yet + return reconcile.Result{}, r.patchStatefulSetPodSpec(ctx, state) // todo: not implemented yet + } + if !state.statefulSetReady() { // todo: not implemented yet + log.Debug(ctx, "waiting for etcd cluster statefulset to become ready") + return reconcile.Result{}, nil + } + if !state.statefulSetReplicasIsZero() { + log.Error(ctx, fmt.Errorf("invalid statefulset replicas with no endpoints: %d", *state.statefulSet.Spec.Replicas), + "cluster is in invalid state, dropping from reconciliation queue") + return reconcile.Result{}, nil + } + if state.etcdClusterReplicasIsZero() { + return reconcile.Result{}, nil + } + return reconcile.Result{}, r.scaleUpFromZero(ctx, state) // todo: not implemented yet +} + +// reconcileEndpointsPresent is called in case there are endpoints observed. +func (r *ClusterReconciler) reconcileEndpointsPresent(ctx context.Context, state *observables) (reconcile.Result, error) { + memberReached, err := r.collectEtcdStatuses(ctx, state) + if err != nil { + return reconcile.Result{}, err + } + + // checking whether any members are reachable + if !memberReached { + // no members reachable: right branch in flowchart + return r.reconcileMembersUnreachable(ctx, state) + } + // at least one member reachable: left branch in flowchart + return r.reconcileMembersReachable(ctx, state) +} + +// reconcileMembersUnreachable is called in case there are endpoints observed but none of the members are reachable. +func (r *ClusterReconciler) reconcileMembersUnreachable(ctx context.Context, state *observables) (reconcile.Result, error) { + err := r.Get(ctx, client.ObjectKeyFromObject(state.instance), &state.statefulSet) + if client.IgnoreNotFound(err) != nil { + return reconcile.Result{}, err + } + + if !state.statefulSetExists() { + return reconcile.Result{}, r.createOrUpdateStatefulSet(ctx, state) // todo: not implemented yet + } + if !state.statefulSetPodSpecCorrect() { // todo: not implemented yet + return reconcile.Result{}, r.patchStatefulSetPodSpec(ctx, state) // todo: not implemented yet + } + return reconcile.Result{}, nil +} + +// reconcileMembersReachable is called in case there are endpoints observed and some (or all) members are reachable.
+func (r *ClusterReconciler) reconcileMembersReachable(ctx context.Context, state *observables) (reconcile.Result, error) { + state.setClusterID() + if state.inSplitbrain() { + log.Error(ctx, fmt.Errorf("etcd cluster in splitbrain"), "etcd cluster in split-brain, dropping from reconciliation queue") + baseEtcdCluster := state.instance.DeepCopy() + meta.SetStatusCondition( + &state.instance.Status.Conditions, + metav1.Condition{ + Type: etcdaenixiov1alpha1.EtcdConditionError, + Status: metav1.ConditionTrue, + Reason: string(etcdaenixiov1alpha1.EtcdCondTypeSplitbrain), + Message: string(etcdaenixiov1alpha1.EtcdErrorCondSplitbrainMessage), + }, + ) + return reconcile.Result{}, r.Status().Patch(ctx, state.instance, client.MergeFrom(baseEtcdCluster)) + } + if !state.clusterHasQuorum() { + log.Error(ctx, fmt.Errorf("cluster has lost quorum"), "cluster has lost quorum, dropping from reconciliation queue") + return reconcile.Result{}, nil + } + if !state.allMembersAreManaged() { // todo: not implemented yet + log.Error(ctx, fmt.Errorf("not all members are managed"), "not all members are managed, dropping from reconciliation queue") + return reconcile.Result{}, nil + } + if state.hasLearners() { + return reconcile.Result{}, r.promoteLearners(ctx, state) // todo: not implemented yet + } + if !state.allMembersAreHealthy() { // todo: not implemented yet + // todo: enqueue unhealthy member(s) eviction + // then delete pod, pvc & update config map respectively + return reconcile.Result{}, nil + } + if err := r.createOrUpdateClusterStateConfigMap(ctx, state); err != nil { // todo: not implemented yet + return reconcile.Result{}, err + } + if !state.statefulSetPodSpecCorrect() { // todo: not implemented yet + return reconcile.Result{}, r.patchStatefulSetPodSpec(ctx, state) // todo: not implemented yet + } + if !state.podsPresentInMembersList() { // todo: not implemented yet + // todo: delete pod, pvc & update config map respectively + return reconcile.Result{}, nil + } + return r.reconcileReplicas(ctx, state) +} + +// reconcileReplicas is called in case there are endpoints observed and all members are reachable, +// healthy and present in members list. +func (r *ClusterReconciler) reconcileReplicas(ctx context.Context, state *observables) (reconcile.Result, error) { + if *state.instance.Spec.Replicas == 0 && *state.statefulSet.Spec.Replicas == 1 { + return reconcile.Result{}, r.scaleDownToZero(ctx, state) // todo: not implemented yet + } + if *state.instance.Spec.Replicas < *state.statefulSet.Spec.Replicas { + return reconcile.Result{}, r.scaleDown(ctx, state) // todo: not implemented yet + } + if *state.instance.Spec.Replicas > *state.statefulSet.Spec.Replicas { + return reconcile.Result{}, r.scaleUp(ctx, state) // todo: not implemented yet + } + + baseEtcdCluster := state.instance.DeepCopy() + meta.SetStatusCondition( + &state.instance.Status.Conditions, + metav1.Condition{ + Type: etcdaenixiov1alpha1.EtcdConditionReady, + Status: metav1.ConditionTrue, + Reason: string(etcdaenixiov1alpha1.EtcdCondTypeInitComplete), + }, + ) + + return reconcile.Result{}, r.Status().Patch(ctx, state.instance, client.MergeFrom(baseEtcdCluster)) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&etcdaenixiov1alpha1.EtcdCluster{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Owns(&appsv1.StatefulSet{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Service{}). 
+ Owns(&policyv1.PodDisruptionBudget{}). + Complete(r) +} + +// ensureUnconditionalObjects ensures that objects that should always exist are created. +func (r *ClusterReconciler) ensureUnconditionalObjects(ctx context.Context, cluster *etcdaenixiov1alpha1.EtcdCluster) error { + g, ctx := errgroup.WithContext(ctx) + wrapWithMessage := func(err error, msg string) error { + if err != nil { + return fmt.Errorf("%s: %w", msg, err) + } + return nil + } + g.Go(func() error { + return wrapWithMessage(factory.CreateOrUpdateClientService(ctx, cluster, r.Client), "failed to ensure client service") + }) + g.Go(func() error { + return wrapWithMessage(factory.CreateOrUpdateHeadlessService(ctx, cluster, r.Client), "failed to ensure headless service") + }) + g.Go(func() error { + return wrapWithMessage(factory.CreateOrUpdatePdb(ctx, cluster, r.Client), "failed to ensure pod disruption budget") + }) + return g.Wait() +} + +// etcdClientSet returns etcd client set for given cluster. +func (r *ClusterReconciler) etcdClientSet(ctx context.Context, cluster *etcdaenixiov1alpha1.EtcdCluster) (*clientv3.Client, []*clientv3.Client, error) { + cfg, err := r.etcdClusterConfig(ctx, cluster) + if err != nil { + return nil, nil, err + } + if len(cfg.Endpoints) == 0 { + return nil, nil, nil + } + eps := cfg.Endpoints + clusterClient, err := clientv3.New(cfg) + if err != nil { + return nil, nil, fmt.Errorf("error building etcd cluster client: %w", err) + } + membersClients := make([]*clientv3.Client, len(eps)) + for i, ep := range eps { + cfg.Endpoints = []string{ep} + membersClients[i], err = clientv3.New(cfg) + if err != nil { + return nil, nil, fmt.Errorf("error building etcd single-endpoint client for endpoint %s: %w", ep, err) + } + } + return clusterClient, membersClients, nil +} + +// collectEtcdStatuses collects etcd members statuses for given cluster. +func (r *ClusterReconciler) collectEtcdStatuses(ctx context.Context, state *observables) (bool, error) { + state.etcdStatuses = make([]etcdStatus, len(state.singleClients)) + { + var wg sync.WaitGroup + ctx, cancel := context.WithTimeout(ctx, etcdDefaultTimeout) + for i := range state.singleClients { + wg.Add(1) + go func(i int) { + defer wg.Done() + state.etcdStatuses[i].fill(ctx, state.singleClients[i]) + }(i) + } + wg.Wait() + cancel() + } + + memberReached := false + for i := range state.etcdStatuses { + if state.etcdStatuses[i].endpointStatus != nil { + memberReached = true + break + } + } + return memberReached, nil +} + +// etcdClusterConfig returns etcd client config for given cluster. 
+func (r *ClusterReconciler) etcdClusterConfig(ctx context.Context, cluster *etcdaenixiov1alpha1.EtcdCluster) (clientv3.Config, error) { + ep := corev1.Endpoints{} + err := r.Get(ctx, types.NamespacedName{Name: factory.GetHeadlessServiceName(cluster), Namespace: cluster.Namespace}, &ep) + if client.IgnoreNotFound(err) != nil { + return clientv3.Config{}, err + } + if err != nil { + return clientv3.Config{Endpoints: []string{}}, nil + } + + names := map[string]struct{}{} + urls := make([]string, 0, 8) + for _, v := range ep.Subsets { + for _, addr := range v.Addresses { + names[addr.Hostname] = struct{}{} + } + for _, addr := range v.NotReadyAddresses { + names[addr.Hostname] = struct{}{} + } + } + for name := range names { + urls = append(urls, fmt.Sprintf("%s.%s.%s.svc.%s:%s", name, ep.Name, cluster.Namespace, r.ClusterDomain, "2379")) + } + + return clientv3.Config{Endpoints: urls}, nil +} + +// todo: implement this +func (r *ClusterReconciler) createClusterFromScratch(ctx context.Context, state *observables) error { + panic("not implemented") +} + +// todo: implement this +func (r *ClusterReconciler) patchStatefulSetPodSpec(ctx context.Context, state *observables) error { + panic("not implemented") +} + +// todo: implement this +func (r *ClusterReconciler) scaleUp(ctx context.Context, state *observables) error { + panic("not implemented") +} + +// todo: implement this +func (r *ClusterReconciler) scaleUpFromZero(ctx context.Context, state *observables) error { + panic("not implemented") +} + +// todo: implement this +func (r *ClusterReconciler) scaleDown(ctx context.Context, state *observables) error { + panic("not implemented") +} + +// todo: implement this +func (r *ClusterReconciler) scaleDownToZero(ctx context.Context, state *observables) error { + panic("not implemented") +} + +// todo: implement this +func (r *ClusterReconciler) createOrUpdateStatefulSet(ctx context.Context, state *observables) error { + panic("not implemented") +} + +// todo: implement this +func (r *ClusterReconciler) promoteLearners(ctx context.Context, state *observables) error { + panic("not implemented") +} + +// todo: implement this +func (r *ClusterReconciler) createOrUpdateClusterStateConfigMap(ctx context.Context, state *observables) error { + panic("not implemented") +} diff --git a/internal/controller/observables.go b/internal/controller/observables.go index 6c523c35..18cd10e9 100644 --- a/internal/controller/observables.go +++ b/internal/controller/observables.go @@ -27,14 +27,13 @@ type etcdStatus struct { // observables stores observations that the operator can make about // states of objects in kubernetes type observables struct { - instance *v1alpha1.EtcdCluster - statefulSet appsv1.StatefulSet - stsExists bool - endpoints []string //nolint:unused - endpointsFound bool - etcdStatuses []etcdStatus - clusterID uint64 - pvcs []corev1.PersistentVolumeClaim //nolint:unused + instance *v1alpha1.EtcdCluster + statefulSet appsv1.StatefulSet + etcdStatuses []etcdStatus + clusterID uint64 + pvcs []corev1.PersistentVolumeClaim //nolint:unused + clusterClient *clientv3.Client + singleClients []*clientv3.Client } // setClusterID populates the clusterID field based on etcdStatuses @@ -117,8 +116,8 @@ func (o *observables) pvcMaxIndex() (max int) { } func (o *observables) endpointMaxIndex() (max int) { - for i := range o.endpoints { - tokens := strings.Split(o.endpoints[i], ":") + for i := range o.endpoints() { + tokens := strings.Split(o.endpoints()[i], ":") if len(tokens) < 2 { continue } @@ -174,6 +173,18 @@ 
func (o *observables) desiredReplicas() (max int) { return max + 1 } +func (o *observables) statefulSetExists() bool { + return o.statefulSet.UID != "" +} + +func (o *observables) endpoints() []string { + return o.clusterClient.Endpoints() +} + +func (o *observables) endpointsFound() bool { + return o.clusterClient != nil && o.singleClients != nil +} + // TODO: compare the desired sts with what exists func (o *observables) statefulSetPodSpecCorrect() bool { return true @@ -184,6 +195,14 @@ func (o *observables) statefulSetReady() bool { return o.statefulSet.Status.ReadyReplicas == *o.statefulSet.Spec.Replicas } +func (o *observables) statefulSetReplicasIsZero() bool { + return *o.statefulSet.Spec.Replicas == 0 +} + +func (o *observables) etcdClusterReplicasIsZero() bool { + return *o.instance.Spec.Replicas == 0 +} + func (o *observables) clusterHasQuorum() bool { size := len(o.etcdStatuses) membersInQuorum := size @@ -203,3 +222,18 @@ func (o *observables) hasLearners() bool { } return false } + +// TODO: check if the pods are in the member list +func (o *observables) podsPresentInMembersList() bool { + return true +} + +// TODO: check whether all members are healthy +func (o *observables) allMembersAreHealthy() bool { + return true +} + +// TODO: check whether all members are managed +func (o *observables) allMembersAreManaged() bool { + return true +} diff --git a/site/content/en/docs/v0.4/reference/api.md b/site/content/en/docs/v0.4/reference/api.md index 4ce45aa5..7c972d28 100644 --- a/site/content/en/docs/v0.4/reference/api.md +++ b/site/content/en/docs/v0.4/reference/api.md @@ -167,8 +167,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `minAvailable` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30.0/#intorstring-intstr-util)_ | MinAvailable describes minimum ready replicas. If both are empty, controller will implicitly
calculate MaxUnavailable based on number of replicas
Mutually exclusive with MaxUnavailable. | | | -| `maxUnavailable` _[IntOrString](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30.0/#intorstring-intstr-util)_ | MinAvailable describes maximum not ready replicas. If both are empty, controller will implicitly
calculate MaxUnavailable based on number of replicas
Mutually exclusive with MinAvailable | | | +| `minAvailable` _[IntOrString](#intorstring)_ | MinAvailable describes minimum ready replicas. If both are empty, controller will implicitly
calculate MaxUnavailable based on number of replicas
Mutually exclusive with MaxUnavailable. | | | +| `maxUnavailable` _[IntOrString](#intorstring)_ | MaxUnavailable describes maximum not ready replicas. If both are empty, controller will implicitly
calculate MaxUnavailable based on number of replicas
Mutually exclusive with MinAvailable | | | #### PodTemplate
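
Reviewer note, not part of the patch: the branch point in `reconcileEndpointsPresent` hinges on `collectEtcdStatuses` (did any per-endpoint `Status` call succeed?) and on the cluster-ID comparison behind `setClusterID`/`inSplitbrain` (do the reachable members agree on a cluster ID?). Below is a minimal standalone sketch of that probing logic, handy for experimenting against a test cluster. The helper names (`probeEndpoints`, `endpointReport`) and the example endpoints are invented for illustration; only the `go.etcd.io/etcd/client/v3` calls (`clientv3.New`, `Status`) are real API.

```go
// Standalone sketch of the per-endpoint reachability and split-brain probe.
// Hypothetical names: probeEndpoints, endpointReport. Real API: clientv3.New, (*clientv3.Client).Status.
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

type endpointReport struct {
	endpoint string
	status   *clientv3.StatusResponse
	err      error
}

// probeEndpoints dials every endpoint with its own client and records the
// Status response, mirroring the single-endpoint clients built in etcdClientSet.
func probeEndpoints(ctx context.Context, endpoints []string) []endpointReport {
	reports := make([]endpointReport, len(endpoints))
	for i, ep := range endpoints {
		reports[i].endpoint = ep
		cli, err := clientv3.New(clientv3.Config{Endpoints: []string{ep}, DialTimeout: 5 * time.Second})
		if err != nil {
			reports[i].err = err
			continue
		}
		// Status either answers within the context deadline or reports an error;
		// an error here is what the controller treats as "member not reached".
		reports[i].status, reports[i].err = cli.Status(ctx, ep)
		_ = cli.Close()
	}
	return reports
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// Example endpoints only; the operator derives these from the headless service.
	reports := probeEndpoints(ctx, []string{
		"etcd-0.etcd-headless.default.svc.cluster.local:2379",
		"etcd-1.etcd-headless.default.svc.cluster.local:2379",
		"etcd-2.etcd-headless.default.svc.cluster.local:2379",
	})

	var clusterID uint64
	memberReached, splitbrain := false, false
	for _, r := range reports {
		if r.err != nil || r.status == nil {
			continue // unreachable member
		}
		memberReached = true
		switch {
		case clusterID == 0:
			clusterID = r.status.Header.ClusterId
		case r.status.Header.ClusterId != clusterID:
			splitbrain = true // reachable members disagree on the cluster ID
		}
	}
	fmt.Printf("member reached: %v, split-brain suspected: %v\n", memberReached, splitbrain)
}
```

Dialing each endpoint with its own client, rather than one client over all endpoints, is what lets the controller tell "endpoints exist but nothing answers" apart from "members answer but disagree" — exactly the split between `reconcileMembersUnreachable` and the split-brain check in `reconcileMembersReachable`.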