apiVersion: pxc.percona.com/v1-6-0
kind: PerconaXtraDBCluster
metadata:
  name: sws
  finalizers:
    - delete-pxc-pods-in-order
#    - delete-proxysql-pvc
    - delete-pxc-pvc
#  annotations:
#    percona.com/issue-vault-token: "true"
spec:
  crVersion: 1.6.0
  secretsName: my-cluster-secrets
  vaultSecretName: keyring-secret-vault
  sslSecretName: my-cluster-ssl
  sslInternalSecretName: my-cluster-ssl-internal
#  tls:
#    SANs:
#      - pxc-1.example.com
#      - pxc-2.example.com
#      - pxc-3.example.com
#    issuerConf:
#      name: special-selfsigned-issuer
#      kind: ClusterIssuer
#      group: cert-manager.io
  allowUnsafeConfigurations: false
#  pause: false
  updateStrategy: SmartUpdate
  upgradeOptions:
    versionServiceEndpoint: https://check.percona.com
    apply: Disabled
    schedule: "0 4 * * *"
  pxc:
    size: 3
    image: sws/percona-xtradb-cluster:8.0.20-11.1
#    schedulerName: mycustom-scheduler
    readinessDelaySec: 15
    livenessDelaySec: 300
#    forceUnsafeBootstrap: false
    configuration: |
      [mysqld]
      pxc_strict_mode=DISABLED
      max_connections=250
      wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
      character-set-server = utf8
      innodb_log_buffer_size = 32M
      innodb_log_file_size = 80M
      max_allowed_packet = 8M
      default-authentication-plugin=mysql_native_password
      log-bin-trust-function-creators=1
      [sst]
      xbstream-opts=--decompress
      [xtrabackup]
      compress=lz4
#    imagePullSecrets:
#      - name: private-registry-credentials
#    priorityClassName: high-priority
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    containerSecurityContext:
#      privileged: false
#    podSecurityContext:
#      runAsUser: 1001
#      runAsGroup: 1001
#      supplementalGroups: [1001]
#    serviceAccountName: percona-xtradb-cluster-operator-workload
#    imagePullPolicy: Always
    resources:
      requests:
        memory: 500Mi
        cpu: 600m
        ephemeral-storage: 1Gi
      limits:
        memory: 2500Mi
        cpu: "1"
        ephemeral-storage: 1Gi
    affinity:
      antiAffinityTopologyKey: "kubernetes.io/hostname"
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#              - matchExpressions:
#                  - key: kubernetes.io/e2e-az-name
#                    operator: In
#                    values:
#                      - e2e-az1
#                      - e2e-az2
#    tolerations:
#      - key: "node.alpha.kubernetes.io/unreachable"
#        operator: "Exists"
#        effect: "NoExecute"
#        tolerationSeconds: 6000
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    volumeSpec:
      persistentVolumeClaim:
        storageClassName: thin
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 20Gi
    gracePeriod: 600
  haproxy:
    enabled: true
    size: 3
    image: sws/percona-xtradb-cluster-operator:1.6.0-haproxy
#    imagePullPolicy: Always
#    schedulerName: mycustom-scheduler
#    imagePullSecrets:
#      - name: private-registry-credentials
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
    serviceType: ClusterIP
#    externalTrafficPolicy: Cluster
#    schedulerName: "default"
    resources:
      requests:
        memory: 500Mi
        cpu: 600m
      limits:
        memory: 1G
        cpu: 700m
#    priorityClassName: high-priority
    sidecarResources:
      requests:
        memory: 1G
        cpu: 500m
      limits:
        memory: 2G
        cpu: 600m
#    serviceAccountName: percona-xtradb-cluster-operator-workload
    affinity:
      antiAffinityTopologyKey: "kubernetes.io/hostname"
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#              - matchExpressions:
#                  - key: kubernetes.io/e2e-az-name
#                    operator: In
#                    values:
#                      - e2e-az1
#                      - e2e-az2
#    tolerations:
#      - key: "node.alpha.kubernetes.io/unreachable"
#        operator: "Exists"
#        effect: "NoExecute"
#        tolerationSeconds: 6000
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    gracePeriod: 30
#    loadBalancerSourceRanges:
#      - 10.0.0.0/8
#    serviceAnnotations:
#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
  pmm:
    enabled: false
    image: sws/percona-xtradb-cluster-operator:1.6.0-pmm
    serverHost: monitoring-service
    serverUser: admin
    resources:
      requests:
        memory: 500M
        cpu: 500m
  backup:
    image: sws/percona-xtradb-cluster-operator:1.6.0-pxc8.0-backup
    serviceAccountName: percona-xtradb-cluster-operator
#    imagePullSecrets:
#      - name: private-registry-credentials
    storages:
      fs-pvc:
        type: filesystem
#        nodeSelector:
#          storage: tape
#          backupWorker: 'True'
        resources:
          requests:
            memory: 2G
            cpu: 600m
#        affinity:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#                - matchExpressions:
#                    - key: backupWorker
#                      operator: In
#                      values:
#                        - 'True'
#        tolerations:
#          - key: "backupWorker"
#            operator: "Equal"
#            value: "True"
#            effect: "NoSchedule"
#        annotations:
#          testName: scheduled-backup
#        labels:
#          backupWorker: 'True'
#        schedulerName: 'default-scheduler'
#        priorityClassName: 'high-priority'
#        containerSecurityContext:
#          privileged: true
#        podSecurityContext:
#          fsGroup: 1001
#          supplementalGroups: [1001, 1002, 1003]
        volume:
          persistentVolumeClaim:
            storageClassName: thin
            accessModes: [ "ReadWriteOnce" ]
            resources:
              requests:
                storage: 20Gi
    schedule:
      - name: "sws-weekly"
        schedule: "0 0 * * 6"
        keep: 3
        storageName: fs-pvc
      - name: "sws-daily"
        schedule: "0 0 * * *"
        keep: 5
        storageName: fs-pvc