Deploying perconapgcluster 1.4.0 does not create ownerReferences on the pgcluster

Hi,

I deployed “perconalab/percona-postgresql-operator:main-ppg14-postgres”, and the auto-generated PostgresCluster automatically had ownerReferences like this:

  ownerReferences:
    - apiVersion: pg.percona.com/v2beta1
      blockOwnerDeletion: true
      controller: true
      kind: PerconaPGCluster
      name: disa
      uid: 23cfd650-ee7c-4d2b-9f69-00e0de837c17

but when I changed to percona/percona-postgresql-operator:1.4.0-ppg14-postgres-ha, the ownerReferences were not created, and the ArgoCD application did not associate the pgcluster kind with the PerconaPGCluster kind.
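
For ArgoCD to draw that parent/child link, I would expect the generated pgcluster to carry a similar ownerReference pointing back at the PerconaPGCluster. A sketch of what I expected to see (the apiVersion is my assumption for the 1.x CRD, and the uid is just a placeholder):

  ownerReferences:
    - apiVersion: pg.percona.com/v1       # assumed group/version of the 1.x PerconaPGCluster CRD
      blockOwnerDeletion: true
      controller: true
      kind: PerconaPGCluster
      name: disa
      uid: <uid-of-the-perconapgcluster>  # placeholder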

Do you have any information about which configuration option I missed?

I followed this page to deploy v1.4.0: Generic Kubernetes installation - Percona Operator for PostgreSQL

and here is the other Percona deployment:

apiVersion: pg.percona.com/v2beta1
kind: PerconaPGCluster
metadata:
  name: disa
spec:
#  secrets:
#    customTLSSecretName:
#    customReplicationTLSSecret:

#  standby:
#    enabled: true
#    host: "<primary-ip>"
#    port: "<primary-port>"

#  openshift: true

#  users:
#    - name: rhino
#      databases:
#        - zoo
#      options: "SUPERUSER"

#  databaseInitSQL:
#    key: init.sql
#    name: cluster1-init-sql

#  shutdown: true
#  paused: true

#  dataSource:
#    postgresCluster:
#      clusterName: disapsql
#      repoName: repo1
#      options:
#      - --type=time
#      - --target="2021-06-09 14:15:11-04"
#    pgbackrest:
#      stanza: db
#      configuration:
#      - secret:
#          name: pgo-s3-creds
#      global:
#        repo1-path: /pgbackrest/postgres-operator/hippo/repo1
#      repo:
#        name: repo1
#        s3:
#          bucket: "my-bucket"
#          endpoint: "s3.ca-central-1.amazonaws.com"
#          region: "ca-central-1"

  image: perconalab/percona-postgresql-operator:main-ppg14-postgres
  imagePullPolicy: Always
  postgresVersion: 14
  port: 5432

  expose:
#    annotations:
#      my-annotation: value1
#    labels:
#      my-label: value2
    type: LoadBalancer

  instances:
  - name: instance1
    replicas: 1
#    resources:
#      limits:
#        cpu: 2.0
#        memory: 4Gi
#
#    sidecars:
#    - name: testcontainer
#      image: mycontainer1:latest
#    - name: testcontainer2
#      image: mycontainer1:latest
#
#    topologySpreadConstraints:
#      - maxSkew: 1
#        topologyKey: my-node-label
#        whenUnsatisfiable: DoNotSchedule
#        labelSelector:
#          matchLabels:
#            postgres-operator.crunchydata.com/instance-set: instance1
#
#    tolerations:
#    - effect: NoSchedule
#      key: role
#      operator: Equal
#      value: connection-poolers
#
#    priorityClassName: high-priority
#
#    walVolumeClaimSpec:
#       accessModes:
#       - "ReadWriteOnce"
#       resources:
#         requests:
#           storage: 1Gi
#
    dataVolumeClaimSpec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 50Gi
      storageClassName: postgresql-cluster1

#  proxy:
#    pgBouncer:
#      replicas: 3
#      image: perconalab/percona-postgresql-operator:main-ppg14-pgbouncer
#      resources:
#        limits:
#          cpu: 200m
#          memory: 128Mi
#
#      expose:
#        annotations:
#          my-annotation: value1
#        labels:
#          my-label: value2
#        type: LoadBalancer
#
#      affinity:
#        podAntiAffinity:
#          preferredDuringSchedulingIgnoredDuringExecution:
#          - weight: 1
#            podAffinityTerm:
#              labelSelector:
#                matchLabels:
#                  postgres-operator.crunchydata.com/cluster: keycloakdb
#                  postgres-operator.crunchydata.com/role: pgbouncer
#              topologyKey: kubernetes.io/hostname
#
#      tolerations:
#      - effect: NoSchedule
#        key: role
#        operator: Equal
#        value: connection-poolers
#
#      topologySpreadConstraints:
#        - maxSkew: 1
#          topologyKey: my-node-label
#          whenUnsatisfiable: ScheduleAnyway
#          labelSelector:
#            matchLabels:
#              postgres-operator.crunchydata.com/role: pgbouncer
#
#      sidecars:
#      - name: bouncertestcontainer1
#        image: mycontainer1:latest
#
#      customTLSSecret:
#        name: keycloakdb-pgbouncer.tls
#
#      configuration:
#        global:
#          pool_mode: transaction
#
  - name: instance2
    replicas: 1

    dataVolumeClaimSpec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 50Gi
      storageClassName: postgresql-cluster2

  - name: instance3
    replicas: 1

    dataVolumeClaimSpec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 50Gi
      storageClassName: postgresql-cluster3



  backups:
    pgbackrest:
#      metadata:
#        labels:
      image: perconalab/percona-postgresql-operator:main-ppg14-pgbackrest
#      configuration:
#        - secret:
#            name: cluster1-pgbackrest-secrets
#      jobs:
#        priorityClassName: high-priority
#        resources:
#          limits:
#            cpu: 200m
#            memory: 128Mi
#        tolerations:
#        - effect: NoSchedule
#          key: role
#          operator: Equal
#          value: connection-poolers
#
      global:
        repo1-retention-full: "4"
#        repo1-retention-full-type: time
#        repo1-path: /pgbackrest/postgres-operator/cluster1/repo1
#        repo1-cipher-type: aes-256-cbc
#        repo1-s3-uri-style: path
#        repo2-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo2
#        repo3-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo3
#        repo4-path: /pgbackrest/postgres-operator/cluster1-multi-repo/repo4
#      repoHost:
#        priorityClassName: high-priority
#
#        topologySpreadConstraints:
#        - maxSkew: 1
#          topologyKey: my-node-label
#          whenUnsatisfiable: ScheduleAnyway
#          labelSelector:
#            matchLabels:
#              postgres-operator.crunchydata.com/pgbackrest: ""
#        affinity:
#          podAntiAffinity:
#            preferredDuringSchedulingIgnoredDuringExecution:
#            - weight: 1
#              podAffinityTerm:
#                labelSelector:
#                  matchLabels:
#                    postgres-operator.crunchydata.com/cluster: keycloakdb
#                    postgres-operator.crunchydata.com/role: pgbouncer
#                topologyKey: kubernetes.io/hostname
#
      manual:
        repoName: repo1
        options:
         - --type=full
      repos:
      - name: repo1
        schedules:
          full: "0 0 * * 6"
#          differential: "0 1 * * 1-6"
        volume:
          volumeClaimSpec:
            accessModes:
            - ReadWriteMany
            resources:
              requests:
                storage: 20Gi
            storageClassName: postgresql-repo1
#      - name: repo2
#        s3:
#          bucket: "<YOUR_AWS_S3_BUCKET_NAME>"
#          endpoint: "<YOUR_AWS_S3_ENDPOINT>"
#          region: "<YOUR_AWS_S3_REGION>"
#      - name: repo3
#        gcs:
#          bucket: "<YOUR_GCS_BUCKET_NAME>"
#      - name: repo4
#        azure:
#          container: "<YOUR_AZURE_CONTAINER>"
#
#    restore:
#      enabled: true
#      repoName: repo1
#      options:
#       PITR restore in place
#       - --type=time
#       - --target="2021-06-09 14:15:11-04"
#       restore individual databases
#       - --db-include=hippo

  pmm:
    enabled: true
    image: percona/pmm-client:2.32.0
#    imagePullPolicy: IfNotPresent
    secret: disa-pmm-secret
    serverHost: monitoring-service
#  patroni:
#    dynamicConfiguration:
#      postgresql:
#        parameters:
#          max_parallel_workers: 2
#          max_worker_processes: 2
#          shared_buffers: 1GB
#          work_mem: 2MB
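
For reference, the ArgoCD Application that syncs this manifest is nothing special; a minimal sketch with placeholder values (the repo URL, path, and namespaces below are not my real ones):

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: disa-postgres
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://example.com/gitops.git   # placeholder
    targetRevision: main
    path: postgres/disa                       # placeholder
  destination:
    server: https://kubernetes.default.svc
    namespace: postgres                       # placeholder
  syncPolicy:
    automated:
      prune: false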

Here is the ArgoCD application without ownerReferences: