nodeSelector / tolerations validation error in cr.yaml

Percona XtraDB Cluster Operator version: 1.11.0
GKE version: 1.22.8

The operator bundle.yaml is deployed successfully, but applying cr.yaml fails with the validation error shown at the end of this post.

The cr.yaml configuration:

apiVersion: pxc.percona.com/v1-11-0
kind: PerconaXtraDBCluster
metadata:
  name: cluster1
  finalizers:
    - delete-pxc-pods-in-order
#    - delete-proxysql-pvc
#    - delete-pxc-pvc
#  annotations:
#    percona.com/issue-vault-token: "true"
spec:
  crVersion: 1.11.0
  secretsName: my-cluster-secrets
#  vaultSecretName: keyring-secret-vault
#  sslSecretName: my-cluster-ssl
#  sslInternalSecretName: my-cluster-ssl-internal
#  logCollectorSecretName: my-log-collector-secrets
#  initImage: percona/percona-xtradb-cluster-operator:1.11.0
#  enableCRValidationWebhook: true
#  tls:
#    SANs:
#      - pxc-1.example.com
#      - pxc-2.example.com
#      - pxc-3.example.com
#    issuerConf:
#      name: special-selfsigned-issuer
#      kind: ClusterIssuer
#      group: cert-manager.io
  allowUnsafeConfigurations: true
#  pause: false
  updateStrategy: SmartUpdate
  upgradeOptions:
    versionServiceEndpoint: https://check.percona.com
    apply: 8.0-recommended
    schedule: "0 4 * * *"
  pxc:
    size: 1
    image: percona/percona-xtradb-cluster:8.0.27-18.1
    autoRecovery: true
#    expose:
#      enabled: true
#      type: LoadBalancer
#      trafficPolicy: Local
#      loadBalancerSourceRanges:
#        - 10.0.0.0/8
#      annotations:
#        networking.gke.io/load-balancer-type: "Internal"
#    replicationChannels:
#    - name: pxc1_to_pxc2
#      isSource: true
#    - name: pxc2_to_pxc1
#      isSource: false
#      configuration:
#        sourceRetryCount: 3
#        sourceConnectRetry: 60
#      sourcesList:
#      - host: 10.95.251.101
#        port: 3306
#        weight: 100
#    schedulerName: mycustom-scheduler
#    readinessDelaySec: 15
#    livenessDelaySec: 600
#    configuration: |
#      [mysqld]
#      wsrep_debug=CLIENT
#      wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
#      [sst]
#      xbstream-opts=--decompress
#      [xtrabackup]
#      compress=lz4
#      for PXC 5.7
#      [xtrabackup]
#      compress
#    imagePullSecrets:
#      - name: private-registry-credentials
#    priorityClassName: high-priority
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    readinessProbes:
#      initialDelaySeconds: 15
#      timeoutSeconds: 15
#      periodSeconds: 30
#      successThreshold: 1
#      failureThreshold: 5
#    livenessProbes:
#      initialDelaySeconds: 300
#      timeoutSeconds: 5
#      periodSeconds: 10
#      successThreshold: 1
#      failureThreshold: 3
#    containerSecurityContext:
#      privileged: false
#    podSecurityContext:
#      runAsUser: 1001
#      runAsGroup: 1001
#      supplementalGroups: [1001]
#    serviceAccountName: percona-xtradb-cluster-operator-workload
#    imagePullPolicy: Always
#    runtimeClassName: image-rc
#    sidecars:
#    - image: busybox
#      command: ["/bin/sh"]
#      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
#      name: my-sidecar-1
#      resources:
#        requests:
#          memory: 100M
#          cpu: 100m
#        limits:
#          memory: 200M
#          cpu: 200m
#    envVarsSecret: my-env-var-secrets
    nodeSelector:
      mysql: "true"
    tolerations:
      - key: "mysql"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
    resources:
      # requests:
      #   memory: 1G
      #   cpu: 600m
#        ephemeral-storage: 1G
#      limits:
#        memory: 1G
#        cpu: "1"
#        ephemeral-storage: 1G
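    # NOTE: with every child commented out, the resources: key above parses as
    # an explicit null rather than being absent; depending on the CRD schema
    # this can be rejected instead of treated as unset. The same applies to the
    # emptied resources: keys under haproxy, logcollector, and pmm below.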
#    nodeSelector:
#      disktype: ssd
    affinity:
      antiAffinityTopologyKey: "kubernetes.io/hostname"
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#            - matchExpressions:
#              - key: kubernetes.io/e2e-az-name
#                operator: In
#                values:
#                - e2e-az1
#                - e2e-az2
#    tolerations:
#    - key: "node.alpha.kubernetes.io/unreachable"
#      operator: "Exists"
#      effect: "NoExecute"
#      tolerationSeconds: 6000
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    volumeSpec:
#      emptyDir: {}
#      hostPath:
#        path: /data
#        type: Directory
      persistentVolumeClaim:
#        storageClassName: standard
#        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 2G
    gracePeriod: 600
  haproxy:
    enabled: true
    size: 1
    image: percona/percona-xtradb-cluster-operator:1.11.0-haproxy
#    replicasServiceEnabled: false
#    imagePullPolicy: Always
#    schedulerName: mycustom-scheduler
#    readinessDelaySec: 15
#    livenessDelaySec: 600
#    configuration: |
#
#    the actual default configuration file can be found here https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
#
#      global
#        maxconn 2048
#        external-check
#        insecure-fork-wanted
#        stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
#
#      defaults
#        default-server init-addr last,libc,none
#        log global
#        mode tcp
#        retries 10
#        timeout client 28800s
#        timeout connect 100500
#        timeout server 28800s
#
#      frontend galera-in
#        bind *:3309 accept-proxy
#        bind *:3306
#        mode tcp
#        option clitcpka
#        default_backend galera-nodes
#
#      frontend galera-admin-in
#        bind *:33062
#        mode tcp
#        option clitcpka
#        default_backend galera-admin-nodes
#
#      frontend galera-replica-in
#        bind *:3307
#        mode tcp
#        option clitcpka
#        default_backend galera-replica-nodes
#
#      frontend galera-mysqlx-in
#        bind *:33060
#        mode tcp
#        option clitcpka
#        default_backend galera-mysqlx-nodes
#
#      frontend stats
#        bind *:8404
#        mode http
#        option http-use-htx
#        http-request use-service prometheus-exporter if { path /metrics }
#    imagePullSecrets:
#      - name: private-registry-credentials
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    readinessProbes:
#      initialDelaySeconds: 15
#      timeoutSeconds: 1
#      periodSeconds: 5
#      successThreshold: 1
#      failureThreshold: 3
#    livenessProbes:
#      initialDelaySeconds: 60
#      timeoutSeconds: 5
#      periodSeconds: 30
#      successThreshold: 1
#      failureThreshold: 4
#    serviceType: ClusterIP
#    externalTrafficPolicy: Cluster
#    replicasServiceType: ClusterIP
#    replicasExternalTrafficPolicy: Cluster
#    runtimeClassName: image-rc
#    sidecars:
#    - image: busybox
#      command: ["/bin/sh"]
#      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
#      name: my-sidecar-1
#      resources:
#        requests:
#          memory: 100M
#          cpu: 100m
#        limits:
#          memory: 200M
#          cpu: 200m
#    envVarsSecret: my-env-var-secrets
    nodeSelector:
      mysql: "true"
    tolerations:
      - key: "mysql"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
    resources:
      # requests:
      #   memory: 1G
      #   cpu: 600m
#      limits:
#        memory: 1G
#        cpu: 700m
#    priorityClassName: high-priority
#    nodeSelector:
#      disktype: ssd
#    sidecarResources:
#      requests:
#        memory: 1G
#        cpu: 500m
#      limits:
#        memory: 2G
#        cpu: 600m
#    containerSecurityContext:
#      privileged: false
#    podSecurityContext:
#      runAsUser: 1001
#      runAsGroup: 1001
#      supplementalGroups: [1001]
#    serviceAccountName: percona-xtradb-cluster-operator-workload
    affinity:
      antiAffinityTopologyKey: "kubernetes.io/hostname"
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#            - matchExpressions:
#              - key: kubernetes.io/e2e-az-name
#                operator: In
#                values:
#                - e2e-az1
#                - e2e-az2
#    tolerations:
#    - key: "node.alpha.kubernetes.io/unreachable"
#      operator: "Exists"
#      effect: "NoExecute"
#      tolerationSeconds: 6000
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    gracePeriod: 30
#    loadBalancerSourceRanges:
#      - 10.0.0.0/8
#    serviceAnnotations:
#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
#    serviceLabels:
#      rack: rack-23
  logcollector:
    enabled: true
    image: percona/percona-xtradb-cluster-operator:1.11.0-logcollector
#    configuration: |
#      [OUTPUT]
#           Name  es
#           Match *
#           Host  192.168.2.3
#           Port  9200
#           Index my_index
#           Type  my_type
    nodeSelector:
      mysql: "true"
    tolerations:
      - key: "mysql"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
    resources:
      # requests:
      #   memory: 100M
      #   cpu: 200m
  pmm:
    enabled: false
    image: percona/pmm-client:2.28.0
    serverHost: monitoring-service
#    serverUser: admin
#    pxcParams: "--disable-tablestats-limit=2000"
#    proxysqlParams: "--custom-labels=CUSTOM-LABELS"
    # nodeSelector:
    #   mysql: "true"
    # tolerations:
    #   - key: "mysql"
    #     operator: "Equal"
    #     value: "true"
    #     effect: "NoSchedule"
    resources:
      # requests:
      #   memory: 150M
      #   cpu: 300m
  backup:
    image: percona/percona-xtradb-cluster-operator:1.11.0-pxc8.0-backup
#    backoffLimit: 6
#    serviceAccountName: percona-xtradb-cluster-operator
#    imagePullSecrets:
#      - name: private-registry-credentials
    pitr:
      enabled: false
      storageName: STORAGE-NAME-HERE
      timeBetweenUploads: 60
#      resources:
#        requests:
#          memory: 0.1G
#          cpu: 100m
#        limits:
#          memory: 1G
#          cpu: 700m
    storages:
      s3-us-west:
        type: s3
        verifyTLS: true
#        nodeSelector:
#          storage: tape
#          backupWorker: 'True'
#        resources:
#          requests:
#            memory: 1G
#            cpu: 600m
#        affinity:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#              - matchExpressions:
#                - key: backupWorker
#                  operator: In
#                  values:
#                  - 'True'
#        tolerations:
#          - key: "backupWorker"
#            operator: "Equal"
#            value: "True"
#            effect: "NoSchedule"
#        annotations:
#          testName: scheduled-backup
#        labels:
#          backupWorker: 'True'
#        schedulerName: 'default-scheduler'
#        priorityClassName: 'high-priority'
#        containerSecurityContext:
#          privileged: true
#        podSecurityContext:
#          fsGroup: 1001
#          supplementalGroups: [1001, 1002, 1003]
        s3:
          bucket: S3-BACKUP-BUCKET-NAME-HERE
          credentialsSecret: my-cluster-name-backup-s3
          region: us-west-2
      fs-pvc:
        type: filesystem
#        nodeSelector:
#          storage: tape
#          backupWorker: 'True'
    nodeSelector:
      mysql: "true"
    tolerations:
      - key: "mysql"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
#        resources:
#          requests:
#            memory: 1G
#            cpu: 600m
#        affinity:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#              - matchExpressions:
#                - key: backupWorker
#                  operator: In
#                  values:
#                  - 'True'
#        tolerations:
#          - key: "backupWorker"
#            operator: "Equal"
#            value: "True"
#            effect: "NoSchedule"
#        annotations:
#          testName: scheduled-backup
#        labels:
#          backupWorker: 'True'
#        schedulerName: 'default-scheduler'
#        priorityClassName: 'high-priority'
#        containerSecurityContext:
#          privileged: true
#        podSecurityContext:
#          fsGroup: 1001
#          supplementalGroups: [1001, 1002, 1003]
        volume:
          persistentVolumeClaim:
#            storageClassName: standard
            accessModes: [ "ReadWriteOnce" ]
            resources:
              requests:
                storage: 2G
    # schedule:
    #   - name: "sat-night-backup"
    #     schedule: "0 0 * * 6"
    #     keep: 3
    #     storageName: s3-us-west
    #   - name: "daily-backup"
    #     schedule: "0 0 * * *"
    #     keep: 5
    #     storageName: fs-pvc
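
For reference, the commented template above (under s3-us-west and fs-pvc) nests per-storage scheduling fields inside each storage entry, which also keeps volume: a child of fs-pvc rather than of the last toleration. A minimal sketch of that layout, assuming the same mysql node label is intended for the backup pods:

  backup:
    storages:
      fs-pvc:
        type: filesystem
        nodeSelector:
          mysql: "true"
        tolerations:
          - key: "mysql"
            operator: "Equal"
            value: "true"
            effect: "NoSchedule"
        volume:
          persistentVolumeClaim:
            accessModes: [ "ReadWriteOnce" ]
            resources:
              requests:
                storage: 2G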

The validation error: