From ce67ee1bcaa0287ea2fda9947d5f43f8515dbee2 Mon Sep 17 00:00:00 2001 From: idanovinda Date: Tue, 19 Nov 2024 14:29:29 +0100 Subject: [PATCH 01/14] test on spilo new image with patroni v4 --- manifests/configmap.yaml | 4 +++- pkg/cluster/majorversionupgrade.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 1c8c8fdfd..cd112c7ca 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -34,7 +34,7 @@ data: default_memory_request: 100Mi # delete_annotation_date_key: delete-date # delete_annotation_name_key: delete-clustername - docker_image: ghcr.io/zalando/spilo-16:3.3-p1 + docker_image: container-registry-test.zalando.net/acid/spilo-cdp-pr1050-16:4.0-p4 # downscaler_annotations: "deployment-time,downscaler/*" enable_admin_role_for_users: "true" enable_crd_registration: "true" @@ -134,6 +134,8 @@ data: pod_management_policy: "ordered_ready" # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role + pod_leader_label_value: master + pod_standby_leader_label_value: standby_leader pod_service_account_definition: "" pod_service_account_name: "postgres-pod" pod_service_account_role_binding_definition: "" diff --git a/pkg/cluster/majorversionupgrade.go b/pkg/cluster/majorversionupgrade.go index ad431acc4..560f8977f 100644 --- a/pkg/cluster/majorversionupgrade.go +++ b/pkg/cluster/majorversionupgrade.go @@ -155,7 +155,7 @@ func (c *Cluster) majorVersionUpgrade() error { c.logger.Infof("identified non running pod, potentially skipping major version upgrade") } - if ps.Role == "master" { + if ps.Role == "master" || ps.Role == "primary" { masterPod = &pods[i] c.currentMajorVersion = ps.ServerVersion } From b59494dd29ba5e99379628d404ee43c4c5e8bfbf Mon Sep 17 00:00:00 2001 From: inovindasari Date: Tue, 19 Nov 2024 15:20:27 +0100 Subject: [PATCH 02/14] only change test image --- e2e/tests/test_e2e.py | 4 ++-- manifests/configmap.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index f89e2fb86..807a070a3 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -12,9 +12,9 @@ from tests.k8s_api import K8s from kubernetes.client.rest import ApiException -SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1" +SPILO_CURRENT = "container-registry-test.zalando.net/acid/spilo-cdp-pr1050-16:4.0-p4" SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2" -SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3" +SPILO_FULL_IMAGE = "container-registry-test.zalando.net/acid/spilo-cdp-pr1050-16:4.0-p4" def to_selector(labels): diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index cd112c7ca..a133c8093 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -34,7 +34,7 @@ data: default_memory_request: 100Mi # delete_annotation_date_key: delete-date # delete_annotation_name_key: delete-clustername - docker_image: container-registry-test.zalando.net/acid/spilo-cdp-pr1050-16:4.0-p4 + docker_image: ghcr.io/zalando/spilo-16:3.3-p1 # downscaler_annotations: "deployment-time,downscaler/*" enable_admin_role_for_users: "true" enable_crd_registration: "true" From 766597980fcf2dea6f5eca2a1ed0d00b4dc90dc5 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Tue, 19 Nov 2024 16:00:03 +0100 Subject: [PATCH 03/14] add e2e test to run in cdp --- delivery.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/delivery.yaml b/delivery.yaml index 7eacd769b..3320a9c29 100644 --- 
a/delivery.yaml +++ b/delivery.yaml @@ -14,7 +14,9 @@ pipeline: - desc: Run unit tests cmd: | make deps mocks test - + - desc: 'Run e2e tests' + cmd: | + make e2e - desc: Build Docker image cmd: | IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"} From 8b79e33b1c4b588f8494f6b335ea01691bff3957 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Mon, 25 Nov 2024 14:49:24 +0100 Subject: [PATCH 04/14] define new variables --- charts/postgres-operator/crds/operatorconfigurations.yaml | 6 ++++++ charts/postgres-operator/values.yaml | 2 ++ docs/reference/operator_parameters.md | 8 ++++++++ e2e/tests/test_e2e.py | 4 ++-- manifests/configmap.yaml | 2 +- manifests/operatorconfiguration.crd.yaml | 6 ++++++ manifests/postgresql-operator-default-configuration.yaml | 2 ++ pkg/apis/acid.zalan.do/v1/crds.go | 6 ++++++ pkg/apis/acid.zalan.do/v1/operator_configuration_type.go | 2 ++ pkg/cluster/k8sres.go | 8 ++++++++ pkg/util/config/config.go | 2 ++ 11 files changed, 45 insertions(+), 3 deletions(-) diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index 0a1e74613..a2436b894 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -327,6 +327,12 @@ spec: pod_role_label: type: string default: "spilo-role" + pod_leader_label_value: + type: string + default: "master" + pod_standby_leader_label_value: + type: string + default: "master" pod_service_account_definition: type: string default: "" diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 472be7443..d88660862 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -194,6 +194,8 @@ configKubernetes: pod_management_policy: "ordered_ready" # label assigned to the Postgres pods (and services/endpoints) pod_role_label: spilo-role + pod_leader_label_value: master + pod_standby_leader_label_value: master # service account definition as JSON/YAML string to be used by postgres cluster pods # pod_service_account_definition: "" diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 4d4d16cdb..a72c776e0 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -437,6 +437,14 @@ configuration they are grouped under the `kubernetes` key. name of the label assigned to the Postgres pods (and services/endpoints) by the operator. The default is `spilo-role`. +* **pod_leader_label_value** + value of the pod label if Postgres role is primary when running on Kubernetes. + The default is 'master'. + +* **pod_standby_leader_label_value** + value of the pod label if Postgres role is standby_leader when running on Kubernetes. + The default is 'master'. + * **cluster_labels** list of `name:value` pairs for additional labels assigned to the cluster objects. The default is `application:spilo`. 
diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 807a070a3..f89e2fb86 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -12,9 +12,9 @@ from tests.k8s_api import K8s from kubernetes.client.rest import ApiException -SPILO_CURRENT = "container-registry-test.zalando.net/acid/spilo-cdp-pr1050-16:4.0-p4" +SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1" SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2" -SPILO_FULL_IMAGE = "container-registry-test.zalando.net/acid/spilo-cdp-pr1050-16:4.0-p4" +SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3" def to_selector(labels): diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index a133c8093..b9ce21084 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -135,7 +135,7 @@ data: # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role pod_leader_label_value: master - pod_standby_leader_label_value: standby_leader + pod_standby_leader_label_value: master pod_service_account_definition: "" pod_service_account_name: "postgres-pod" pod_service_account_role_binding_definition: "" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index a7b1a7280..3f1444f62 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -325,6 +325,12 @@ spec: pod_role_label: type: string default: "spilo-role" + pod_leader_label_value: + type: string + default: "master" + pod_standby_leader_label_value: + type: string + default: "master" pod_service_account_definition: type: string default: "" diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index ecb7a03de..f702b3c83 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -99,6 +99,8 @@ configuration: pod_management_policy: "ordered_ready" # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role + pod_leader_label_value: master + pod_standby_leader_label_value: master # pod_service_account_definition: "" pod_service_account_name: postgres-pod # pod_service_account_role_binding_definition: "" diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index c5c4b2706..27b7057b8 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -1497,6 +1497,12 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "pod_role_label": { Type: "string", }, + "pod_leader_label_value": { + Type: "string", + }, + "pod_standby_leader_label_value": { + Type: "string", + }, "pod_service_account_definition": { Type: "string", }, diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index eb01d450c..65506313e 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -81,6 +81,8 @@ type KubernetesMetaConfiguration struct { InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` InfrastructureRolesDefs []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"` PodRoleLabel string `json:"pod_role_label,omitempty"` + PodLeaderLabelValue string `json:"pod_leader_label_value,omitempty"` + PodStandbyLeaderLabelValue string `json:"pod_standby_leader_label_value,omitempty"` 
ClusterLabels map[string]string `json:"cluster_labels,omitempty"` InheritedLabels []string `json:"inherited_labels,omitempty"` InheritedAnnotations []string `json:"inherited_annotations,omitempty"` diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 4e67dbd94..cbcbb897a 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -950,6 +950,14 @@ func (c *Cluster) generateSpiloPodEnvVars( Name: "KUBERNETES_ROLE_LABEL", Value: c.OpConfig.PodRoleLabel, }, + { + Name: "KUBERNETES_LEADER_LABEL_VALUE", + Value: c.OpConfig.PodLeaderLabelValue, + }, + { + Name: "KUBERNETES_STANDBY_LEADER_LABEL_VALUE", + Value: c.OpConfig.PodStandbyLeaderLabelValue, + } { Name: "PGPASSWORD_SUPERUSER", ValueFrom: &v1.EnvVarSource{ diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 4c7b8db10..452ce9645 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -48,6 +48,8 @@ type Resources struct { DeleteAnnotationDateKey string `name:"delete_annotation_date_key"` DeleteAnnotationNameKey string `name:"delete_annotation_name_key"` PodRoleLabel string `name:"pod_role_label" default:"spilo-role"` + PodLeaderLabelValue string `name:"pod_leader_label_value" default:"master"` + PodStandbyLeaderLabelValue string `name:"pod_standby_leader_label_value" default:"master"` PodToleration map[string]string `name:"toleration" default:""` DefaultCPURequest string `name:"default_cpu_request"` DefaultMemoryRequest string `name:"default_memory_request"` From a2328b9ec89755d84b07581bcc4681753c2704b7 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Mon, 25 Nov 2024 14:56:45 +0100 Subject: [PATCH 05/14] more configurable variables --- pkg/cluster/k8sres.go | 4 ++-- pkg/controller/operator_config.go | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index cbcbb897a..9cb8419ae 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -955,9 +955,9 @@ func (c *Cluster) generateSpiloPodEnvVars( Value: c.OpConfig.PodLeaderLabelValue, }, { - Name: "KUBERNETES_STANDBY_LEADER_LABEL_VALUE", + Name: "KUBERNETES_STANDBY_LEADER_LABEL_VALUE", Value: c.OpConfig.PodStandbyLeaderLabelValue, - } + }, { Name: "PGPASSWORD_SUPERUSER", ValueFrom: &v1.EnvVarSource{ diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 78e752f1d..90583216f 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -110,6 +110,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur } result.PodRoleLabel = util.Coalesce(fromCRD.Kubernetes.PodRoleLabel, "spilo-role") + result.PodLeaderLabelValue = util.Coalesce(fromCRD.Kubernetes.PodLeaderLabelValue, "master") + result.PodStandbyLeaderLabelValue = util.Coalesce(fromCRD.Kubernetes.PodStandbyLeaderLabelValue, "master") result.ClusterLabels = util.CoalesceStrMap(fromCRD.Kubernetes.ClusterLabels, map[string]string{"application": "spilo"}) result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels result.InheritedAnnotations = fromCRD.Kubernetes.InheritedAnnotations From a48b5b6fb6e4aa28194efcd99a3b8731ac10479a Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 27 Nov 2024 15:34:03 +0100 Subject: [PATCH 06/14] convert constant variable to function --- pkg/cluster/cluster.go | 16 ++++-- pkg/cluster/cluster_test.go | 2 + pkg/cluster/connection_pooler.go | 14 ++--- pkg/cluster/connection_pooler_test.go | 73 ++++++++++++------------- pkg/cluster/k8sres.go | 16 +++--- pkg/cluster/k8sres_test.go | 77 
++++++++++++++------------- pkg/cluster/pod.go | 12 ++--- pkg/cluster/resources.go | 14 ++--- pkg/cluster/sync.go | 12 ++--- pkg/cluster/types.go | 2 - pkg/cluster/util.go | 10 ++-- pkg/cluster/util_test.go | 10 ++-- pkg/controller/node.go | 2 +- 13 files changed, 135 insertions(+), 125 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 1a8d6f762..dc1108139 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -175,6 +175,14 @@ func (c *Cluster) clusterNamespace() string { return c.ObjectMeta.Namespace } +func (c *Cluster) masterRole() PostgresRole { + return PostgresRole(c.OpConfig.PodLeaderLabelValue) +} + +func (c *Cluster) replicaRole() PostgresRole { + return PostgresRole("replica") +} + func (c *Cluster) teamName() string { // TODO: check Teams API for the actual name (in case the user passes an integer Id). return c.Spec.TeamID @@ -294,7 +302,7 @@ func (c *Cluster) Create() (err error) { } c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources") - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} { // if kubernetes_use_configmaps is set Patroni will create configmaps // otherwise it will use endpoints @@ -302,7 +310,7 @@ func (c *Cluster) Create() (err error) { if c.Endpoints[role] != nil { return fmt.Errorf("%s endpoint already exists in the cluster", role) } - if role == Master { + if role == c.masterRole() { // replica endpoint will be created by the replica service. Master endpoint needs to be created by us, // since the corresponding master service does not define any selectors. ep, err = c.createEndpoint(role) @@ -1213,7 +1221,7 @@ func (c *Cluster) Delete() error { c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budget: %v", err) } - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} { if !c.patroniKubernetesUseConfigMaps() { if err := c.deleteEndpoint(role); err != nil { anyErrors = true @@ -1238,7 +1246,7 @@ func (c *Cluster) Delete() error { // Delete connection pooler objects anyway, even if it's not mentioned in the // manifest, just to not keep orphaned components in case if something went // wrong - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} { if err := c.deleteConnectionPooler(role); err != nil { anyErrors = true c.logger.Warningf("could not remove connection pooler: %v", err) diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 897ed6c0d..efdcc1ece 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -55,6 +55,7 @@ var cl = New( }, Resources: config.Resources{ DownscalerAnnotations: []string{"downscaler/*"}, + PodLeaderLabelValue: "master", }, ConnectionPooler: config.ConnectionPooler{ User: poolerUserName, @@ -147,6 +148,7 @@ func TestCreate(t *testing.T) { DefaultMemoryRequest: "300Mi", DefaultMemoryLimit: "300Mi", PodRoleLabel: "spilo-role", + PodLeaderLabelValue: "master", ResourceCheckInterval: time.Duration(3), ResourceCheckTimeout: time.Duration(10), }, diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 6cd46f745..e0d97b359 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -49,7 +49,7 @@ type ConnectionPoolerObjects struct { func (c *Cluster) 
connectionPoolerName(role PostgresRole) string { name := fmt.Sprintf("%s-%s", c.Name, constants.ConnectionPoolerResourceSuffix) - if role == Replica { + if role == c.replicaRole() { name = fmt.Sprintf("%s-%s", name, "repl") } return name @@ -537,8 +537,8 @@ func (c *Cluster) generatePoolerServiceAnnotations(role PostgresRole, spec *acid annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue } // -repl suffix will be added by replicaDNSName - clusterNameWithPoolerSuffix := c.connectionPoolerName(Master) - if role == Master { + clusterNameWithPoolerSuffix := c.connectionPoolerName(c.masterRole()) + if role == c.masterRole() { dnsString = c.masterDNSName(clusterNameWithPoolerSuffix) } else { dnsString = c.replicaDNSName(clusterNameWithPoolerSuffix) @@ -557,7 +557,7 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, sp switch role { - case Replica: + case c.replicaRole(): // if the value is explicitly set in a Postgresql manifest, follow this setting if spec.EnableReplicaPoolerLoadBalancer != nil { return *spec.EnableReplicaPoolerLoadBalancer @@ -565,7 +565,7 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, sp // otherwise, follow the operator configuration return c.OpConfig.EnableReplicaPoolerLoadBalancer - case Master: + case c.masterRole(): if spec.EnableMasterPoolerLoadBalancer != nil { return *spec.EnableMasterPoolerLoadBalancer } @@ -877,9 +877,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look logPoolerEssentials(c.logger, oldSpec, newSpec) // Check and perform the sync requirements for each of the roles. - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} { - if role == Master { + if role == c.masterRole() { connectionPoolerNeeded = needMasterConnectionPoolerWorker(&newSpec.Spec) } else { connectionPoolerNeeded = needReplicaConnectionPoolerWorker(&newSpec.Spec) diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index 78d1c2527..db80749ac 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -42,7 +42,7 @@ func boolToPointer(value bool) *bool { } func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" @@ -63,7 +63,7 @@ func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { return fmt.Errorf("Connection pooler resources are empty") } - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} { poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role) @@ -87,14 +87,14 @@ func MasterObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" - poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Master) + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(cluster.masterRole()) - if cluster.ConnectionPooler[Master].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Master].Deployment.Labels, 
poolerLabels) { - return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Deployment.Labels) + if cluster.ConnectionPooler[cluster.masterRole()].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[cluster.masterRole()].Deployment.Labels, poolerLabels) { + return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.masterRole()].Deployment.Labels) } - if cluster.ConnectionPooler[Master].Service == nil || !util.MapContains(cluster.ConnectionPooler[Master].Service.Labels, poolerLabels) { - return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Service.Labels) + if cluster.ConnectionPooler[cluster.masterRole()].Service == nil || !util.MapContains(cluster.ConnectionPooler[cluster.masterRole()].Service.Labels, poolerLabels) { + return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.masterRole()].Service.Labels) } return nil @@ -107,21 +107,21 @@ func ReplicaObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) erro poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" - poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Replica) + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(cluster.replicaRole()) - if cluster.ConnectionPooler[Replica].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Deployment.Labels, poolerLabels) { - return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Deployment.Labels) + if cluster.ConnectionPooler[cluster.replicaRole()].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[cluster.replicaRole()].Deployment.Labels, poolerLabels) { + return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.replicaRole()].Deployment.Labels) } - if cluster.ConnectionPooler[Replica].Service == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Service.Labels, poolerLabels) { - return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Service.Labels) + if cluster.ConnectionPooler[cluster.replicaRole()].Service == nil || !util.MapContains(cluster.ConnectionPooler[cluster.replicaRole()].Service.Labels, poolerLabels) { + return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.replicaRole()].Service.Labels) } return nil } func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { if cluster.ConnectionPooler[role] != nil && (cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) { return fmt.Errorf("Connection pooler was not deleted for role %v", role) @@ -133,8 +133,8 @@ func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler[Master] != nil && - (cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) { + if cluster.ConnectionPooler[cluster.masterRole()] != nil && + (cluster.ConnectionPooler[cluster.masterRole()].Deployment != nil || cluster.ConnectionPooler[cluster.masterRole()].Service != nil) { return 
fmt.Errorf("Connection pooler master was not deleted") } return nil @@ -142,8 +142,8 @@ func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler[Replica] != nil && - (cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) { + if cluster.ConnectionPooler[cluster.replicaRole()] != nil && + (cluster.ConnectionPooler[cluster.replicaRole()].Deployment != nil || cluster.ConnectionPooler[cluster.replicaRole()].Service != nil) { return fmt.Errorf("Connection pooler replica was not deleted") } return nil @@ -323,7 +323,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) { cluster.Name = "acid-fake-cluster" cluster.Namespace = "default" - _, err := cluster.createService(Master) + _, err := cluster.createService(cluster.masterRole()) //PROBLEM1 assert.NoError(t, err) _, err = cluster.createStatefulSet() assert.NoError(t, err) @@ -334,7 +334,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) { t.Errorf("%s: Cannot create connection pooler, %s, %+v", testName, err, reason) } - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { poolerLabels := cluster.labelsSet(false) poolerLabels["application"] = "db-connection-pooler" poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role) @@ -369,7 +369,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) { t.Errorf("%s: Cannot sync connection pooler, %s", testName, err) } - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { err = cluster.deleteConnectionPooler(role) if err != nil { t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) @@ -424,6 +424,7 @@ func TestConnectionPoolerSync(t *testing.T) { DefaultMemoryRequest: "300Mi", DefaultMemoryLimit: "300Mi", PodRoleLabel: "spilo-role", + PodLeaderLabelValue: "master", }, }, }, client, pg, logger, eventRecorder) @@ -431,7 +432,7 @@ func TestConnectionPoolerSync(t *testing.T) { cluster.Name = "acid-fake-cluster" cluster.Namespace = "default" - _, err := cluster.createService(Master) + _, err := cluster.createService(cluster.masterRole()) assert.NoError(t, err) _, err = cluster.createStatefulSet() assert.NoError(t, err) @@ -765,7 +766,7 @@ func TestConnectionPoolerPodSpec(t *testing.T) { check: testEnvs, }, } - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { for _, tt := range tests { podSpec, _ := tt.cluster.generateConnectionPoolerPodTemplate(role) @@ -802,12 +803,12 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { }, } cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ - Master: { + cluster.masterRole(): { Deployment: nil, Service: nil, LookupFunction: true, Name: "", - Role: Master, + Role: cluster.masterRole(), }, } @@ -854,7 +855,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) { }, } for _, tt := range tests { - deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[Master]) + deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[cluster.masterRole()]) if err != tt.expected && err.Error() != tt.expected.Error() { t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", @@ -921,7 +922,7 
@@ func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { labels := deployment.Spec.Selector.MatchLabels - expected := cluster.connectionPoolerLabels(Master, true).MatchLabels + expected := cluster.connectionPoolerLabels(cluster.masterRole(), true).MatchLabels if labels["connection-pooler"] != expected["connection-pooler"] { return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", @@ -1018,20 +1019,20 @@ func TestPoolerTLS(t *testing.T) { // create pooler resources cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} - cluster.ConnectionPooler[Master] = &ConnectionPoolerObjects{ + cluster.ConnectionPooler[cluster.masterRole()] = &ConnectionPoolerObjects{ Deployment: nil, Service: nil, - Name: cluster.connectionPoolerName(Master), + Name: cluster.connectionPoolerName(cluster.masterRole()), ClusterName: clusterName, Namespace: namespace, LookupFunction: false, - Role: Master, + Role: cluster.masterRole(), } - _, err = cluster.syncConnectionPoolerWorker(nil, &pg, Master) + _, err = cluster.syncConnectionPoolerWorker(nil, &pg, cluster.masterRole()) assert.NoError(t, err) - deploy, err := client.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(Master), metav1.GetOptions{}) + deploy, err := client.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(cluster.masterRole()), metav1.GetOptions{}) assert.NoError(t, err) fsGroup := int64(103) @@ -1088,17 +1089,17 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { }, } cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ - Master: { + cluster.masterRole(): { Deployment: nil, Service: nil, LookupFunction: false, - Role: Master, + Role: cluster.masterRole(), }, - Replica: { + cluster.replicaRole(): { Deployment: nil, Service: nil, LookupFunction: false, - Role: Replica, + Role: cluster.replicaRole(), }, } @@ -1138,7 +1139,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { check: testServiceSelector, }, } - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} { for _, tt := range tests { service := tt.cluster.generateConnectionPoolerService(tt.cluster.ConnectionPooler[role]) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 9cb8419ae..b8aa8d7fa 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -77,7 +77,7 @@ func (c *Cluster) statefulSetName() string { func (c *Cluster) serviceName(role PostgresRole) string { name := c.Name switch role { - case Replica: + case c.replicaRole(): name = fmt.Sprintf("%s-%s", name, "repl") case Patroni: name = fmt.Sprintf("%s-%s", name, "config") @@ -1536,7 +1536,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef Spec: appsv1.StatefulSetSpec{ Replicas: &numberOfInstances, Selector: c.labelsSelector(), - ServiceName: c.serviceName(Master), + ServiceName: c.serviceName(c.masterRole()), Template: *podTemplate, VolumeClaimTemplates: []v1.PersistentVolumeClaim{*volumeClaimTemplate}, UpdateStrategy: updateStrategy, @@ -1955,7 +1955,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac switch role { - case Replica: + case c.replicaRole(): // if the value is explicitly set in a Postgresql manifest, follow this setting if spec.EnableReplicaLoadBalancer != nil { @@ -1965,7 +1965,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec 
*ac // otherwise, follow the operator configuration return c.OpConfig.EnableReplicaLoadBalancer - case Master: + case c.masterRole(): if spec.EnableMasterLoadBalancer != nil { return *spec.EnableMasterLoadBalancer @@ -1987,7 +1987,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) // no selector for master, see https://github.com/zalando/postgres-operator/issues/340 // if kubernetes_use_configmaps is set master service needs a selector - if role == Replica || c.patroniKubernetesUseConfigMaps() { + if role == c.replicaRole() || c.patroniKubernetesUseConfigMaps() { serviceSpec.Selector = c.roleLabelsSet(false, role) } @@ -2054,9 +2054,9 @@ func (c *Cluster) getCustomServiceAnnotations(role PostgresRole, spec *acidv1.Po maps.Copy(annotations, spec.ServiceAnnotations) switch role { - case Master: + case c.masterRole(): maps.Copy(annotations, spec.MasterServiceAnnotations) - case Replica: + case c.replicaRole(): maps.Copy(annotations, spec.ReplicaServiceAnnotations) } } @@ -2227,7 +2227,7 @@ func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget { // define label selector and add the master role selector if enabled labels := c.labelsSet(false) if pdbMasterLabelSelector == nil || *c.OpConfig.PDBMasterLabelSelector { - labels[c.OpConfig.PodRoleLabel] = string(Master) + labels[c.OpConfig.PodRoleLabel] = string(c.masterRole()) } return &policyv1.PodDisruptionBudget{ diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index bea229dda..8505b0095 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -576,67 +576,67 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { } expectedSpiloWalPathCompat := []ExpectedValue{ { - envIndex: 12, + envIndex: 14, envVarConstant: "ENABLE_WAL_PATH_COMPAT", envVarValue: "true", }, } expectedValuesS3Bucket := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "WAL_S3_BUCKET", envVarValue: "global-s3-bucket", }, { - envIndex: 16, + envIndex: 18, envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", envVarValue: fmt.Sprintf("/%s", dummyUUID), }, { - envIndex: 17, + envIndex: 19, envVarConstant: "WAL_BUCKET_SCOPE_PREFIX", envVarValue: "", }, } expectedValuesGCPCreds := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "WAL_GS_BUCKET", envVarValue: "global-gs-bucket", }, { - envIndex: 16, + envIndex: 18, envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", envVarValue: fmt.Sprintf("/%s", dummyUUID), }, { - envIndex: 17, + envIndex: 19, envVarConstant: "WAL_BUCKET_SCOPE_PREFIX", envVarValue: "", }, { - envIndex: 18, + envIndex: 20, envVarConstant: "GOOGLE_APPLICATION_CREDENTIALS", envVarValue: "some-path-to-credentials", }, } expectedS3BucketConfigMap := []ExpectedValue{ { - envIndex: 17, + envIndex: 19, envVarConstant: "wal_s3_bucket", envVarValue: "global-s3-bucket-configmap", }, } expectedCustomS3BucketSpec := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "WAL_S3_BUCKET", envVarValue: "custom-s3-bucket", }, } expectedCustomVariableSecret := []ExpectedValue{ { - envIndex: 16, + envIndex: 18, envVarConstant: "custom_variable", envVarValueRef: &v1.EnvVarSource{ SecretKeyRef: &v1.SecretKeySelector{ @@ -650,72 +650,72 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { } expectedCustomVariableConfigMap := []ExpectedValue{ { - envIndex: 16, + envIndex: 18, envVarConstant: "custom_variable", envVarValue: "configmap-test", }, } expectedCustomVariableSpec := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "CUSTOM_VARIABLE", envVarValue: 
"spec-env-test", }, } expectedCloneEnvSpec := []ExpectedValue{ { - envIndex: 16, + envIndex: 18, envVarConstant: "CLONE_WALE_S3_PREFIX", envVarValue: "s3://another-bucket", }, { - envIndex: 19, + envIndex: 21, envVarConstant: "CLONE_WAL_BUCKET_SCOPE_PREFIX", envVarValue: "", }, { - envIndex: 20, + envIndex: 22, envVarConstant: "CLONE_AWS_ENDPOINT", envVarValue: "s3.eu-central-1.amazonaws.com", }, } expectedCloneEnvSpecEnv := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "CLONE_WAL_BUCKET_SCOPE_PREFIX", envVarValue: "test-cluster", }, { - envIndex: 17, + envIndex: 19, envVarConstant: "CLONE_WALE_S3_PREFIX", envVarValue: "s3://another-bucket", }, { - envIndex: 21, + envIndex: 23, envVarConstant: "CLONE_AWS_ENDPOINT", envVarValue: "s3.eu-central-1.amazonaws.com", }, } expectedCloneEnvConfigMap := []ExpectedValue{ { - envIndex: 16, + envIndex: 18, envVarConstant: "CLONE_WAL_S3_BUCKET", envVarValue: "global-s3-bucket", }, { - envIndex: 17, + envIndex: 19, envVarConstant: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", envVarValue: fmt.Sprintf("/%s", dummyUUID), }, { - envIndex: 21, + envIndex: 23, envVarConstant: "clone_aws_endpoint", envVarValue: "s3.eu-west-1.amazonaws.com", }, } expectedCloneEnvSecret := []ExpectedValue{ { - envIndex: 21, + envIndex: 23, envVarConstant: "clone_aws_access_key_id", envVarValueRef: &v1.EnvVarSource{ SecretKeyRef: &v1.SecretKeySelector{ @@ -729,12 +729,12 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) { } expectedStandbyEnvSecret := []ExpectedValue{ { - envIndex: 15, + envIndex: 17, envVarConstant: "STANDBY_WALE_GS_PREFIX", envVarValue: "gs://some/path/", }, { - envIndex: 20, + envIndex: 22, envVarConstant: "standby_google_application_credentials", envVarValueRef: &v1.EnvVarSource{ SecretKeyRef: &v1.SecretKeySelector{ @@ -2389,7 +2389,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With multiple instances", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2406,7 +2406,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With zero instances", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2423,7 +2423,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With PodDisruptionBudget disabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb", 
EnablePodDisruptionBudget: util.False()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2440,7 +2440,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2457,7 +2457,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With PDBMasterLabelSelector disabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2474,7 +2474,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With OwnerReference enabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2550,6 +2550,7 @@ func TestGenerateService(t *testing.T) { DefaultMemoryRequest: "0.7Gi", MaxMemoryRequest: "1.0Gi", DefaultMemoryLimit: "1.3Gi", + PodLeaderLabelValue: "master", }, SidecarImages: map[string]string{ "deprecated-global-sidecar": "image:123", @@ -2576,10 +2577,10 @@ func TestGenerateService(t *testing.T) { }, }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - service := cluster.generateService(Master, &spec) + service := cluster.generateService(cluster.masterRole(), &spec) assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeCluster, service.Spec.ExternalTrafficPolicy) cluster.OpConfig.ExternalTrafficPolicy = "Local" - service = cluster.generateService(Master, &spec) + service = cluster.generateService(cluster.masterRole(), &spec) assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy) } @@ -2605,28 +2606,28 @@ func TestCreateLoadBalancerLogic(t *testing.T) { }{ { subtest: "new format, load balancer is enabled for replica", - role: Replica, + role: cluster.replicaRole(), spec: 
&acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.True()}, opConfig: config.Config{}, result: true, }, { subtest: "new format, load balancer is disabled for replica", - role: Replica, + role: cluster.replicaRole(), spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.False()}, opConfig: config.Config{}, result: false, }, { subtest: "new format, load balancer isn't specified for replica", - role: Replica, + role: cluster.replicaRole(), spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil}, opConfig: config.Config{EnableReplicaLoadBalancer: true}, result: true, }, { subtest: "new format, load balancer isn't specified for replica", - role: Replica, + role: cluster.replicaRole(), spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil}, opConfig: config.Config{EnableReplicaLoadBalancer: false}, result: false, @@ -2690,7 +2691,7 @@ func TestEnableLoadBalancers(t *testing.T) { namespace := "default" clusterNameLabel := "cluster-name" roleLabel := "spilo-role" - roles := []PostgresRole{Master, Replica} + roles := []PostgresRole{cluster.masterRole(), cluster.replicaRole()} sourceRanges := []string{"192.186.1.2/22"} extTrafficPolicy := "Cluster" diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index bd2172c18..9420d6164 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -44,7 +44,7 @@ func (c *Cluster) getRolePods(role PostgresRole) ([]v1.Pod, error) { return nil, fmt.Errorf("could not get list of pods: %v", err) } - if role == Master && len(pods.Items) > 1 { + if role == c.masterRole() && len(pods.Items) > 1 { return nil, fmt.Errorf("too many masters") } @@ -234,7 +234,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { return nil } - if role := PostgresRole(oldMaster.Labels[c.OpConfig.PodRoleLabel]); role != Master { + if role := PostgresRole(oldMaster.Labels[c.OpConfig.PodRoleLabel]); role != c.masterRole() { c.logger.Warningf("no action needed: pod %q is not the master (anymore)", podName) return nil } @@ -312,7 +312,7 @@ func (c *Cluster) MigrateReplicaPod(podName spec.NamespacedName, fromNodeName st return nil } - if role := PostgresRole(replicaPod.Labels[c.OpConfig.PodRoleLabel]); role != Replica { + if role := PostgresRole(replicaPod.Labels[c.OpConfig.PodRoleLabel]); role != c.replicaRole() { return fmt.Errorf("check failed: pod %q is not a replica", podName) } @@ -416,7 +416,7 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp for i, pod := range pods { role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel]) - if role == Master { + if role == c.masterRole() { masterPod = &pods[i] continue } @@ -428,9 +428,9 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp } newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel]) - if newRole == Replica { + if newRole == c.replicaRole() { replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta)) - } else if newRole == Master { + } else if newRole == c.masterRole() { newMasterPod = newPod } } diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 3f47328ee..b7716bbf9 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -132,7 +132,7 @@ func getPodIndex(podName string) (int32, error) { } func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error { - masterPod, err := c.getRolePods(Master) + masterPod, err := c.getRolePods(c.masterRole()) if err != nil { return fmt.Errorf("could not get master pod: %v", err) } @@ -393,7 +393,7 @@ func (c *Cluster) generateEndpointSubsets(role 
PostgresRole) []v1.EndpointSubset result := make([]v1.EndpointSubset, 0) pods, err := c.getRolePods(role) if err != nil { - if role == Master { + if role == c.masterRole() { c.logger.Warningf("could not obtain the address for %s pod: %v", role, err) } else { c.logger.Warningf("could not obtain the addresses for %s pods: %v", role, err) @@ -410,7 +410,7 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset Addresses: endPointAddresses, Ports: []v1.EndpointPort{{Name: "postgresql", Port: 5432, Protocol: "TCP"}}, }) - } else if role == Master { + } else if role == c.masterRole() { c.logger.Warningf("master is not running, generated master endpoint does not contain any addresses") } @@ -682,22 +682,22 @@ func (c *Cluster) deleteLogicalBackupJob() error { // GetServiceMaster returns cluster's kubernetes master Service func (c *Cluster) GetServiceMaster() *v1.Service { - return c.Services[Master] + return c.Services[c.masterRole()] } // GetServiceReplica returns cluster's kubernetes replica Service func (c *Cluster) GetServiceReplica() *v1.Service { - return c.Services[Replica] + return c.Services[c.replicaRole()] } // GetEndpointMaster returns cluster's kubernetes master Endpoint func (c *Cluster) GetEndpointMaster() *v1.Endpoints { - return c.Endpoints[Master] + return c.Endpoints[c.masterRole()] } // GetEndpointReplica returns cluster's kubernetes replica Endpoint func (c *Cluster) GetEndpointReplica() *v1.Endpoints { - return c.Endpoints[Replica] + return c.Endpoints[c.replicaRole()] } // GetStatefulSet returns cluster's kubernetes StatefulSet diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index d1a339001..b69f9d379 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -340,7 +340,7 @@ func (c *Cluster) syncPatroniService() error { } func (c *Cluster) syncServices() error { - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} { c.logger.Debugf("syncing %s service", role) if !c.patroniKubernetesUseConfigMaps() { @@ -545,7 +545,7 @@ func (c *Cluster) syncStatefulSet() error { podsToRecreate = append(podsToRecreate, pod) } else { role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel]) - if role == Master { + if role == c.masterRole() { continue } switchoverCandidates = append(switchoverCandidates, util.NameFromMeta(pod.ObjectMeta)) @@ -616,7 +616,7 @@ func (c *Cluster) syncStatefulSet() error { podsToRecreate = append(podsToRecreate, pod) } else { role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel]) - if role == Master { + if role == c.masterRole() { continue } switchoverCandidates = append(switchoverCandidates, util.NameFromMeta(pod.ObjectMeta)) @@ -726,9 +726,9 @@ func (c *Cluster) restartInstances(pods []v1.Pod, restartWait uint32, restartPri errors := make([]string, 0) remainingPods := make([]*v1.Pod, 0) - skipRole := Master + skipRole := c.masterRole() if restartPrimaryFirst { - skipRole = Replica + skipRole = c.replicaRole() } for i, pod := range pods { @@ -1422,7 +1422,7 @@ func (c *Cluster) syncDatabases() error { if len(createDatabases) > 0 { // trigger creation of pooler objects in new database in syncConnectionPooler if c.ConnectionPooler != nil { - for _, role := range [2]PostgresRole{Master, Replica} { + for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} { c.ConnectionPooler[role].LookupFunction = false } } diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go index 8e9263d49..845d350ef 100644 --- a/pkg/cluster/types.go +++ 
b/pkg/cluster/types.go @@ -15,8 +15,6 @@ type PostgresRole string const ( // spilo roles - Master PostgresRole = "master" - Replica PostgresRole = "replica" Patroni PostgresRole = "config" // roles returned by Patroni cluster endpoint diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index c570fcc3a..6afba2797 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -340,7 +340,7 @@ func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopCh chan struct{}, podRole := PostgresRole(podEvent.CurPod.Labels[c.OpConfig.PodRoleLabel]) if role == nil { - if podRole == Master || podRole == Replica { + if podRole == c.masterRole() || podRole == c.replicaRole() { return podEvent.CurPod, nil } } else if *role == podRole { @@ -399,12 +399,12 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error { } masterListOption := metav1.ListOptions{ LabelSelector: labels.Merge(ls, labels.Set{ - c.OpConfig.PodRoleLabel: string(Master), + c.OpConfig.PodRoleLabel: string(c.masterRole()), }).String(), } replicaListOption := metav1.ListOptions{ LabelSelector: labels.Merge(ls, labels.Set{ - c.OpConfig.PodRoleLabel: string(Replica), + c.OpConfig.PodRoleLabel: string(c.replicaRole()), }).String(), } podsNumber = 1 @@ -515,7 +515,7 @@ func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) la func (c *Cluster) dnsName(role PostgresRole) string { var dnsString, oldDnsString string - if role == Master { + if role == c.masterRole() { dnsString = c.masterDNSName(c.Name) } else { dnsString = c.replicaDNSName(c.Name) @@ -524,7 +524,7 @@ func (c *Cluster) dnsName(role PostgresRole) string { // if cluster name starts with teamID we might need to provide backwards compatibility clusterNameWithoutTeamPrefix, _ := acidv1.ExtractClusterName(c.Name, c.Spec.TeamID) if clusterNameWithoutTeamPrefix != "" { - if role == Master { + if role == c.masterRole() { oldDnsString = c.oldMasterDNSName(clusterNameWithoutTeamPrefix) } else { oldDnsString = c.oldReplicaDNSName(clusterNameWithoutTeamPrefix) diff --git a/pkg/cluster/util_test.go b/pkg/cluster/util_test.go index 2cb755c6c..2b7889de5 100644 --- a/pkg/cluster/util_test.go +++ b/pkg/cluster/util_test.go @@ -161,7 +161,7 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[ } checkPooler := func(annotations map[string]string) error { - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} { deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{}) if err != nil { return err @@ -244,7 +244,7 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[ func createPods(cluster *Cluster) []v1.Pod { podsList := make([]v1.Pod, 0) - for i, role := range []PostgresRole{Master, Replica} { + for i, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} { podsList = append(podsList, v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", clusterName, i), @@ -325,7 +325,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster, if err != nil { return nil, err } - _, err = cluster.createService(Master) + _, err = cluster.createService(cluster.masterRole()) if err != nil { return nil, err } @@ -365,7 +365,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster, } func createPatroniResources(cluster *Cluster) error { - patroniService := cluster.generateService(Replica, 
&pg.Spec) + patroniService := cluster.generateService(cluster.replicaRole(), &pg.Spec) patroniService.ObjectMeta.Name = cluster.serviceName(Patroni) _, err := cluster.KubeClient.Services(namespace).Create(context.TODO(), patroniService, metav1.CreateOptions{}) if err != nil { @@ -479,7 +479,7 @@ func annotateResources(cluster *Cluster) error { } } - for _, role := range []PostgresRole{Master, Replica} { + for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} { deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{}) if err != nil { return err diff --git a/pkg/controller/node.go b/pkg/controller/node.go index 2836b4f7f..6de1b492f 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -108,7 +108,7 @@ func (c *Controller) attemptToMoveMasterPodsOffNode(node *v1.Node) error { podName := util.NameFromMeta(pod.ObjectMeta) role, ok := pod.Labels[c.opConfig.PodRoleLabel] - if !ok || cluster.PostgresRole(role) != cluster.Master { + if !ok || cluster.PostgresRole(role) != cluster.PostgresRole(c.opConfig.PodLeaderLabelValue) { if !ok { c.logger.Warningf("could not move pod %q: pod has no role", podName) } From 910c7ee51659d8b0a96c5ab2a771f6765d43a730 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 27 Nov 2024 15:40:30 +0100 Subject: [PATCH 07/14] remove standby leader configurable variable --- charts/postgres-operator/crds/operatorconfigurations.yaml | 3 --- charts/postgres-operator/values.yaml | 1 - delivery.yaml | 3 --- docs/reference/operator_parameters.md | 4 ---- manifests/configmap.yaml | 1 - manifests/operatorconfiguration.crd.yaml | 3 --- manifests/postgresql-operator-default-configuration.yaml | 1 - pkg/apis/acid.zalan.do/v1/crds.go | 3 --- pkg/apis/acid.zalan.do/v1/operator_configuration_type.go | 1 - pkg/cluster/k8sres.go | 2 +- 10 files changed, 1 insertion(+), 21 deletions(-) diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index a2436b894..d71f1b067 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -330,9 +330,6 @@ spec: pod_leader_label_value: type: string default: "master" - pod_standby_leader_label_value: - type: string - default: "master" pod_service_account_definition: type: string default: "" diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index d88660862..27630843c 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -195,7 +195,6 @@ configKubernetes: # label assigned to the Postgres pods (and services/endpoints) pod_role_label: spilo-role pod_leader_label_value: master - pod_standby_leader_label_value: master # service account definition as JSON/YAML string to be used by postgres cluster pods # pod_service_account_definition: "" diff --git a/delivery.yaml b/delivery.yaml index 3320a9c29..1bd1cbad9 100644 --- a/delivery.yaml +++ b/delivery.yaml @@ -14,9 +14,6 @@ pipeline: - desc: Run unit tests cmd: | make deps mocks test - - desc: 'Run e2e tests' - cmd: | - make e2e - desc: Build Docker image cmd: | IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"} diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index a72c776e0..ae8b164a1 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -441,10 +441,6 @@ configuration they are grouped 
under the `kubernetes` key. value of the pod label if Postgres role is primary when running on Kubernetes. The default is 'master'. -* **pod_standby_leader_label_value** - value of the pod label if Postgres role is standby_leader when running on Kubernetes. - The default is 'master'. - * **cluster_labels** list of `name:value` pairs for additional labels assigned to the cluster objects. The default is `application:spilo`. diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index b9ce21084..70e3328f2 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -135,7 +135,6 @@ data: # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role pod_leader_label_value: master - pod_standby_leader_label_value: master pod_service_account_definition: "" pod_service_account_name: "postgres-pod" pod_service_account_role_binding_definition: "" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 3f1444f62..996729696 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -328,9 +328,6 @@ spec: pod_leader_label_value: type: string default: "master" - pod_standby_leader_label_value: - type: string - default: "master" pod_service_account_definition: type: string default: "" diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index f702b3c83..d67132dab 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -100,7 +100,6 @@ configuration: # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role pod_leader_label_value: master - pod_standby_leader_label_value: master # pod_service_account_definition: "" pod_service_account_name: postgres-pod # pod_service_account_role_binding_definition: "" diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 27b7057b8..01b38d97b 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -1500,9 +1500,6 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "pod_leader_label_value": { Type: "string", }, - "pod_standby_leader_label_value": { - Type: "string", - }, "pod_service_account_definition": { Type: "string", }, diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 65506313e..a0b0ada1b 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -82,7 +82,6 @@ type KubernetesMetaConfiguration struct { InfrastructureRolesDefs []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"` PodRoleLabel string `json:"pod_role_label,omitempty"` PodLeaderLabelValue string `json:"pod_leader_label_value,omitempty"` - PodStandbyLeaderLabelValue string `json:"pod_standby_leader_label_value,omitempty"` ClusterLabels map[string]string `json:"cluster_labels,omitempty"` InheritedLabels []string `json:"inherited_labels,omitempty"` InheritedAnnotations []string `json:"inherited_annotations,omitempty"` diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index b8aa8d7fa..2a19b438d 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -956,7 +956,7 @@ func (c *Cluster) generateSpiloPodEnvVars( }, { Name: "KUBERNETES_STANDBY_LEADER_LABEL_VALUE", - Value: c.OpConfig.PodStandbyLeaderLabelValue, + 
Value: c.OpConfig.PodLeaderLabelValue, }, { Name: "PGPASSWORD_SUPERUSER", From 8ad5dabe8453df08d261444b5033713e03f720db Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 27 Nov 2024 15:43:06 +0100 Subject: [PATCH 08/14] remove unnecessary change --- delivery.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/delivery.yaml b/delivery.yaml index 1bd1cbad9..7eacd769b 100644 --- a/delivery.yaml +++ b/delivery.yaml @@ -14,6 +14,7 @@ pipeline: - desc: Run unit tests cmd: | make deps mocks test + - desc: Build Docker image cmd: | IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"} From b43306657732a9e3ddb122b1535e1d4dd81bea93 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 27 Nov 2024 16:19:48 +0100 Subject: [PATCH 09/14] remove leftover unused variable --- pkg/controller/operator_config.go | 1 - pkg/util/config/config.go | 1 - 2 files changed, 2 deletions(-) diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 90583216f..b77036da9 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -111,7 +111,6 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.PodRoleLabel = util.Coalesce(fromCRD.Kubernetes.PodRoleLabel, "spilo-role") result.PodLeaderLabelValue = util.Coalesce(fromCRD.Kubernetes.PodLeaderLabelValue, "master") - result.PodStandbyLeaderLabelValue = util.Coalesce(fromCRD.Kubernetes.PodStandbyLeaderLabelValue, "master") result.ClusterLabels = util.CoalesceStrMap(fromCRD.Kubernetes.ClusterLabels, map[string]string{"application": "spilo"}) result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels result.InheritedAnnotations = fromCRD.Kubernetes.InheritedAnnotations diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 452ce9645..09a5a9a93 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -49,7 +49,6 @@ type Resources struct { DeleteAnnotationNameKey string `name:"delete_annotation_name_key"` PodRoleLabel string `name:"pod_role_label" default:"spilo-role"` PodLeaderLabelValue string `name:"pod_leader_label_value" default:"master"` - PodStandbyLeaderLabelValue string `name:"pod_standby_leader_label_value" default:"master"` PodToleration map[string]string `name:"toleration" default:""` DefaultCPURequest string `name:"default_cpu_request"` DefaultMemoryRequest string `name:"default_memory_request"` From 1410daa1aae80441079b0550f495b60e0852d8ba Mon Sep 17 00:00:00 2001 From: inovindasari Date: Wed, 27 Nov 2024 16:20:27 +0100 Subject: [PATCH 10/14] unittest with non default label --- pkg/cluster/cluster_test.go | 31 ++++++++++++++------------- pkg/cluster/connection_pooler_test.go | 2 +- pkg/cluster/k8sres_test.go | 16 +++++++------- pkg/cluster/sync_test.go | 2 +- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index efdcc1ece..ff0e1a830 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -35,6 +35,7 @@ const ( adminUserName = "admin" exampleSpiloConfig = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 
md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}` spiloConfigDiff = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}` + leaderLabelValue = "primary" ) var logger = logrus.New().WithField("test", "cluster") @@ -55,7 +56,7 @@ var cl = New( }, Resources: config.Resources{ DownscalerAnnotations: []string{"downscaler/*"}, - PodLeaderLabelValue: "master", + PodLeaderLabelValue: leaderLabelValue, }, ConnectionPooler: config.ConnectionPooler{ User: poolerUserName, @@ -127,7 +128,7 @@ func TestCreate(t *testing.T) { Labels: map[string]string{ "application": "spilo", "cluster-name": clusterName, - "spilo-role": "master", + "spilo-role": leaderLabelValue, }, }, } @@ -148,7 +149,7 @@ func TestCreate(t *testing.T) { DefaultMemoryRequest: "300Mi", DefaultMemoryLimit: "300Mi", PodRoleLabel: "spilo-role", - PodLeaderLabelValue: "master", + PodLeaderLabelValue: leaderLabelValue, ResourceCheckInterval: time.Duration(3), ResourceCheckTimeout: time.Duration(10), }, @@ -665,7 +666,7 @@ func TestServiceAnnotations(t *testing.T) { //MASTER { about: "Master with no annotations and EnableMasterLoadBalancer disabled on spec and OperatorConfig", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerSpec: &disabled, enableMasterLoadBalancerOC: false, enableTeamIdClusterPrefix: false, @@ -675,7 +676,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with no annotations and EnableMasterLoadBalancer enabled on spec", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerSpec: &enabled, enableMasterLoadBalancerOC: false, enableTeamIdClusterPrefix: false, @@ -688,7 +689,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with no annotations and EnableMasterLoadBalancer enabled only on operator config", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerSpec: &disabled, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, @@ -698,7 +699,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with no annotations and EnableMasterLoadBalancer defined only on operator config", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), @@ -710,7 +711,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster annotations and load balancer enabled", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), @@ -723,7 +724,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster annotations and load balancer disabled", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerSpec: &disabled, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, @@ 
-733,7 +734,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with operator annotations and load balancer enabled", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: map[string]string{"foo": "bar"}, @@ -746,7 +747,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with operator annotations override default annotations", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: map[string]string{ @@ -760,7 +761,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster annotations override default annotations", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), @@ -774,7 +775,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster annotations do not override external-dns annotations", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), @@ -788,7 +789,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with cluster name teamId prefix enabled", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: true, serviceAnnotations: make(map[string]string), @@ -800,7 +801,7 @@ func TestServiceAnnotations(t *testing.T) { }, { about: "Master with master service annotations override service annotations", - role: "master", + role: leaderLabelValue, enableMasterLoadBalancerOC: true, enableTeamIdClusterPrefix: false, operatorAnnotations: make(map[string]string), diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index db80749ac..8ef1d94ae 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -424,7 +424,7 @@ func TestConnectionPoolerSync(t *testing.T) { DefaultMemoryRequest: "300Mi", DefaultMemoryLimit: "300Mi", PodRoleLabel: "spilo-role", - PodLeaderLabelValue: "master", + PodLeaderLabelValue: leaderLabelValue, }, }, }, client, pg, logger, eventRecorder) diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 8505b0095..2e5a2973d 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -2359,7 +2359,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { } if !masterLabelSelectorDisabled && !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}}) { + MatchLabels: map[string]string{"spilo-role": leaderLabelValue, "cluster-name": "myapp-database"}}) { return fmt.Errorf("MatchLabels incorrect.") } @@ -2389,7 +2389,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With multiple instances", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-pdb"}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2406,7 +2406,7 @@ func 
TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With zero instances", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-pdb"}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2423,7 +2423,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With PodDisruptionBudget disabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2440,7 +2440,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2457,7 +2457,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With PDBMasterLabelSelector disabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2474,7 +2474,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { { scenario: "With OwnerReference enabled", spec: New( - Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}}, + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: leaderLabelValue, 
EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, @@ -2550,7 +2550,7 @@ func TestGenerateService(t *testing.T) { DefaultMemoryRequest: "0.7Gi", MaxMemoryRequest: "1.0Gi", DefaultMemoryLimit: "1.3Gi", - PodLeaderLabelValue: "master", + PodLeaderLabelValue: leaderLabelValue, }, SidecarImages: map[string]string{ "deprecated-global-sidecar": "image:123", diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index d45a193cb..5e86c8a39 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -556,7 +556,7 @@ func TestSyncStandbyClusterConfiguration(t *testing.T) { podLabels := map[string]string{ "cluster-name": clusterName, "application": applicationLabel, - "spilo-role": "master", + "spilo-role": leaderLabelValue, } mockPod.Labels = podLabels client.PodsGetter.Pods(namespace).Create(context.TODO(), mockPod, metav1.CreateOptions{}) From daec1a40c2918998596260a268d016fb6774dfa0 Mon Sep 17 00:00:00 2001 From: inovindasari Date: Mon, 2 Dec 2024 17:29:05 +0100 Subject: [PATCH 11/14] convert hardcoded master words into a constant variable --- e2e/tests/constants.py | 4 ++ e2e/tests/k8s_api.py | 13 +++--- e2e/tests/test_e2e.py | 57 +++++++++++++-------------- pkg/cluster/connection_pooler_test.go | 2 +- 4 files changed, 39 insertions(+), 37 deletions(-) create mode 100644 e2e/tests/constants.py diff --git a/e2e/tests/constants.py b/e2e/tests/constants.py new file mode 100644 index 000000000..5c3a64d6d --- /dev/null +++ b/e2e/tests/constants.py @@ -0,0 +1,4 @@ +LEADER_LABEL_VALUE = "master" # master or primary +SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1" +SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2" +SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3" diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 1f42ad4bc..61fcd137e 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -6,6 +6,7 @@ from kubernetes import client, config from kubernetes.client.rest import ApiException +from tests.constants import LEADER_LABEL_VALUE def to_selector(labels): return ",".join(["=".join(lbl) for lbl in labels.items()]) @@ -47,7 +48,7 @@ def get_pg_nodes(self, pg_cluster_name, namespace='default'): replica_pod_nodes = [] podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE: master_pod_node = pod.spec.node_name elif pod.metadata.labels.get('spilo-role') == 'replica': replica_pod_nodes.append(pod.spec.node_name) @@ -59,7 +60,7 @@ def get_cluster_nodes(self, cluster_labels='application=spilo,cluster-name=acid- r = [] podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE and pod.status.phase == 'Running': m.append(pod.spec.node_name) elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': r.append(pod.spec.node_name) @@ -351,7 +352,7 @@ def get_cluster_pod(self, role, labels='application=spilo,cluster-name=acid-mini return pods[0] def get_cluster_leader_pod(self, 
labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): - return self.get_cluster_pod('master', labels, namespace) + return self.get_cluster_pod(LEADER_LABEL_VALUE, labels, namespace) def get_cluster_replica_pod(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): return self.get_cluster_pod('replica', labels, namespace) @@ -383,7 +384,7 @@ def get_pg_nodes(self, pg_cluster_labels='cluster-name=acid-minimal-cluster', na replica_pod_nodes = [] podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_labels) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE: master_pod_node = pod.spec.node_name elif pod.metadata.labels.get('spilo-role') == 'replica': replica_pod_nodes.append(pod.spec.node_name) @@ -395,7 +396,7 @@ def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', r = [] podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE and pod.status.phase == 'Running': m.append(pod.spec.node_name) elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': r.append(pod.spec.node_name) @@ -622,7 +623,7 @@ def get_pg_nodes(self): replica_pod_nodes = [] podsList = self.api.core_v1.list_namespaced_pod(self.namespace, label_selector=self.labels) for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE: master_pod_node = pod.spec.node_name elif pod.metadata.labels.get('spilo-role') == 'replica': replica_pod_nodes.append(pod.spec.node_name) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index f89e2fb86..83c52d86d 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -8,13 +8,10 @@ from datetime import datetime, date, timedelta from kubernetes import client - -from tests.k8s_api import K8s from kubernetes.client.rest import ApiException -SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1" -SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2" -SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3" +from tests.k8s_api import K8s +from tests.constants import SPILO_CURRENT, SPILO_FULL_IMAGE, SPILO_LAZY, LEADER_LABEL_VALUE def to_selector(labels): @@ -155,7 +152,7 @@ def setUpClass(cls): result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") print('stdout: {}, stderr: {}'.format(result.stdout, result.stderr)) try: - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -224,7 +221,7 @@ def test_additional_pod_capabilities(self): k8s.update_config(patch_capabilities) # changed security context of postgres container should trigger a rolling update - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(lambda: k8s.get_operator_state(), 
{"0": "idle"}, "Operator does not get in sync") @@ -658,7 +655,7 @@ def test_custom_ssl_certificate(self): "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_tls) # wait for switched over - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_CERTIFICATE_FILE", cluster_label), 2, "TLS env variable SSL_CERTIFICATE_FILE missing in Spilo pods") @@ -861,7 +858,7 @@ def test_enable_load_balancer(self): k8s = self.k8s cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role={}' - self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)), 'ClusterIP', "Expected ClusterIP type initially, found {}") @@ -876,7 +873,7 @@ def test_enable_load_balancer(self): k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) - self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)), 'LoadBalancer', "Expected LoadBalancer service type for master, found {}") @@ -894,7 +891,7 @@ def test_enable_load_balancer(self): k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) - self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format(LEADER_LABEL_VALUE)), 'ClusterIP', "Expected LoadBalancer service type for master, found {}") @@ -1227,7 +1224,7 @@ def get_annotations(): self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 13, "Version should be upgraded from 12 to 13") @@ -1252,8 +1249,8 @@ def get_annotations(): "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 13, "Version should not be upgraded") @@ -1278,7 +1275,7 @@ def get_annotations(): self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + 
cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 15, "Version should be upgraded from 13 to 15") @@ -1304,8 +1301,8 @@ def get_annotations(): "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 15, "Version should not be upgraded because annotation for last upgrade's failure is set") @@ -1315,7 +1312,7 @@ def get_annotations(): self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) fourth_annotations = get_annotations() @@ -1433,7 +1430,7 @@ def test_resource_generation(self): "Operator does not get in sync") # wait for switched over - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) def verify_pod_resources(): @@ -1465,7 +1462,7 @@ def test_multi_namespace_support(self): try: k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml") - k8s.wait_for_pod_start("spilo-role=master", self.test_namespace) + k8s.wait_for_pod_start("spilo-role={}".format(LEADER_LABEL_VALUE), self.test_namespace) k8s.wait_for_pod_start("spilo-role=replica", self.test_namespace) self.assert_master_is_unique(self.test_namespace, "acid-test-cluster") # acid-test-cluster will be deleted in test_owner_references test @@ -1540,7 +1537,7 @@ def test_node_affinity(self): k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # next master will be switched over and pod needs to be replaced as well to finish the rolling update - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label) @@ -1573,7 +1570,7 @@ def test_node_affinity(self): # node affinity change should cause another rolling update and relocation of replica k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) except timeout_decorator.TimeoutError: @@ -1634,7 +1631,7 @@ def test_node_readiness_label(self): k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # next master will be switched over and pod needs to 
be replaced as well to finish the rolling update - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # patch also node where master ran before @@ -1922,7 +1919,7 @@ def test_rolling_update_flag(self): podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label) for pod in podsList.items: # add flag only to the master to make it appear to the operator as a leftover from a rolling update - if pod.metadata.labels.get('spilo-role') == 'master': + if pod.metadata.labels.get('spilo-role') == LEADER_LABEL_VALUE: old_creation_timestamp = pod.metadata.creation_timestamp k8s.patch_pod(flag, pod.metadata.name, pod.metadata.namespace) else: @@ -1933,7 +1930,7 @@ def test_rolling_update_flag(self): k8s.delete_operator_pod() # operator should now recreate the master pod and do a switchover before - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # check if the former replica is now the new master @@ -2004,7 +2001,7 @@ def test_rolling_update_label_timeout(self): self.eventuallyEqual(lambda: k8s.pg_get_status(), "SyncFailed", "Expected SYNC event to fail") # wait for next sync, replica should be running normally by now and be ready for switchover - k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) # check if the former replica is now the new master @@ -2079,7 +2076,7 @@ def test_service_annotations(self): "alice": "bob" } - self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations), "Wrong annotations") + self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role={}".format(LEADER_LABEL_VALUE), annotations), "Wrong annotations") self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations), "Wrong annotations") # clean up @@ -2151,7 +2148,7 @@ def test_standby_cluster(self): try: k8s.create_with_kubectl("manifests/standby-manifest.yaml") - k8s.wait_for_pod_start("spilo-role=master," + cluster_label) + k8s.wait_for_pod_start("spilo-role={},".format(LEADER_LABEL_VALUE) + cluster_label) except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -2455,11 +2452,11 @@ def test_zz_cluster_deletion(self): def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"): ''' - Check that there is a single pod in the k8s cluster with the label "spilo-role=master" + Check that there is a single pod in the k8s cluster with the label "spilo-role=primary" or "spilo-role=master" To be called manually after operations that affect pods ''' k8s = self.k8s - labels = 'spilo-role=master,cluster-name=' + clusterName + labels = 'spilo-role={},cluster-name='.format(LEADER_LABEL_VALUE) + clusterName num_of_master_pods = k8s.count_pods_with_label(labels, namespace) self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods)) diff --git 
a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go
index 8ef1d94ae..8fb364541 100644
--- a/pkg/cluster/connection_pooler_test.go
+++ b/pkg/cluster/connection_pooler_test.go
@@ -323,7 +323,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) {
 	cluster.Name = "acid-fake-cluster"
 	cluster.Namespace = "default"
 
-	_, err := cluster.createService(cluster.masterRole()) //PROBLEM1
+	_, err := cluster.createService(cluster.masterRole())
 	assert.NoError(t, err)
 	_, err = cluster.createStatefulSet()
 	assert.NoError(t, err)

From 2899729c0bf8b4ee3d7fe5d19fe739b4b29debec Mon Sep 17 00:00:00 2001
From: inovindasari
Date: Wed, 4 Dec 2024 17:04:07 +0100
Subject: [PATCH 12/14] should compare label for service and update

---
 e2e/tests/constants.py | 2 +-
 pkg/cluster/cluster.go | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/e2e/tests/constants.py b/e2e/tests/constants.py
index 5c3a64d6d..dea17438e 100644
--- a/e2e/tests/constants.py
+++ b/e2e/tests/constants.py
@@ -1,4 +1,4 @@
-LEADER_LABEL_VALUE = "master" # master or primary
+LEADER_LABEL_VALUE = "master" # value should be the same as in the configmap: pod_leader_label_value
 SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
 SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2"
 SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3"
diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index dc1108139..67548333f 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -837,6 +837,10 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) {
 		}
 	}
 
+	if !reflect.DeepEqual(old.Labels, new.Labels) {
+		return false, "new service's labels do not match the current ones"
+	}
+
 	if !reflect.DeepEqual(old.ObjectMeta.OwnerReferences, new.ObjectMeta.OwnerReferences) {
 		return false, "new service's owner references do not match the current ones"
 	}

From aa896edd36e248c443bc4f4180b60c137fb3ec34 Mon Sep 17 00:00:00 2001
From: inovindasari
Date: Fri, 6 Dec 2024 11:28:30 +0100
Subject: [PATCH 13/14] compare endpoint label

---
 pkg/cluster/sync.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index b69f9d379..6b1b322f3 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -404,8 +404,8 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
 	if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil {
 		desiredEp := c.generateEndpoint(role, ep.Subsets)
 		// if owner references differ we update which would also change annotations
-		if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredEp.ObjectMeta.OwnerReferences) {
-			c.logger.Infof("new %s endpoints's owner references do not match the current ones", role)
+		if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredEp.ObjectMeta.OwnerReferences) || !reflect.DeepEqual(ep.Labels, desiredEp.Labels) {
+			c.logger.Infof("new %s endpoints's owner references or labels do not match the current ones", role)
 			c.setProcessName("updating %v endpoint", role)
 			ep, err = c.KubeClient.Endpoints(c.Namespace).Update(context.TODO(), desiredEp, metav1.UpdateOptions{})
 			if err != nil {

From d1c4da2e67fb5dc10e726cd232392f7655bf54ec Mon Sep 17 00:00:00 2001
From: inovindasari
Date: Fri, 3 Jan 2025 13:55:46 +0100
Subject: [PATCH 14/14] update spilo images

---
 e2e/tests/constants.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/e2e/tests/constants.py b/e2e/tests/constants.py
index dea17438e..96594010d 100644
--- a/e2e/tests/constants.py
+++ b/e2e/tests/constants.py
@@ -1,4 +1,4 @@
 LEADER_LABEL_VALUE = "master" # value should be the same as in the configmap: pod_leader_label_value
-SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
-SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2"
-SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3"
+SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
+SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4"
+SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p2"
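
Note, as an illustrative sketch rather than part of the patches above: together these commits make the leader pod label value configurable through the new pod_leader_label_value option, so that clusters running Spilo 17 / Patroni v4 images, where the leader role is reported as "primary" instead of "master", are still recognized by the operator and the e2e tests. A minimal operator ConfigMap excerpt for such a setup could look as follows; the ConfigMap name and the choice of "primary" are assumptions made only for this example, while the keys pod_role_label and pod_leader_label_value come from the manifests touched in this series:

  apiVersion: v1
  kind: ConfigMap
  metadata:
    name: postgres-operator          # illustrative name, adjust to your deployment
  data:
    # label key that the operator and Spilo put on postgres pods
    pod_role_label: spilo-role
    # label value marking the leader pod; the default stays "master",
    # set to "primary" when the Spilo/Patroni v4 image reports that role value
    pod_leader_label_value: primary

With such a configuration the leader pod could then be selected with, for example, kubectl get pods -l spilo-role=primary,cluster-name=acid-minimal-cluster, mirroring what the e2e helpers do via the LEADER_LABEL_VALUE constant in e2e/tests/constants.py.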