The cr.yaml
apiVersion: pxc.percona.com/v1-11-0
kind: PerconaXtraDBCluster
metadata:
  name: test-mysql
  finalizers:
#    - delete-pxc-pods-in-order
#    - delete-proxysql-pvc
#    - delete-pxc-pvc
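#  NOTE: all finalizers above are commented out; uncommenting delete-pxc-pvc makes
#  the operator remove the cluster's PersistentVolumeClaims when this custom
#  resource is deleted, and delete-pxc-pods-in-order shuts the PXC Pods down one
#  at a time instead of all at once.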
#  annotations:
#    percona.com/issue-vault-token: "true"
spec:
  crVersion: 1.11.0
  secretsName: test-mysql-secrets
  vaultSecretName: keyring-secret-vault
  sslSecretName: test-mysql-ssl
  sslInternalSecretName: test-mysql-ssl-internal
  logCollectorSecretName: my-log-collector-secrets
#  initImage: percona/percona-xtradb-cluster-operator:1.10.0
#  enableCRValidationWebhook: true
#  tls:
#    SANs:
#      - pxc-1.example.com
#      - pxc-2.example.com
#      - pxc-3.example.com
#    issuerConf:
#      name: special-selfsigned-issue
#      kind: ClusterIssuer
#      group: cert-manager.io
  allowUnsafeConfigurations: true
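#  NOTE: allowUnsafeConfigurations: true tells the operator to accept setups it
#  would normally reject (for example fewer than three PXC nodes or disabled TLS);
#  use it deliberately in production.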
#  pause: false
  updateStrategy: SmartUpdate
  upgradeOptions:
    versionServiceEndpoint: https://check.percona.com
    apply: 8.0-recommended
    schedule: "0 4 * * *"
  pxc:
    size: 3
    image: percona/percona-xtradb-cluster:8.0.23-14.1
    autoRecovery: true
#    expose:
#      enabled: true
#      type: LoadBalancer
#      trafficPolicy: Local
#      loadBalancerSourceRanges:
#        - 10.0.0.0/8
#      annotations:
#        networking.gke.io/load-balancer-type: "Internal"
#    replicationChannels:
#    - name: pxc1_to_pxc2
#      isSource: true
#    - name: pxc2_to_pxc1
#      isSource: false
#      configuration:
#        sourceRetryCount: 3
#        sourceConnectRetry: 60
#        sourcesList:
#        - host: 10.95.251.101
#          port: 3306
#          weight: 100
#    schedulerName: mycustom-scheduler
#    readinessDelaySec: 15
#    livenessDelaySec: 600
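#    NOTE: everything inside the configuration block below is handed to the PXC
#    Pods as a custom my.cnf, overriding the operator's auto-generated defaults.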
    configuration: |
      [mysqld]
      server-id=5432
      log_bin=log_bin
      log-slave-updates=on
      sync_binlog=1
      expire_logs_days=1
      binlog-format=ROW
      binlog_row_image=minimal
      binlog-checksum=CRC32
      binlog_cache_size=128M
      master_info_repository=Table
      relay_log_info_repository=Table
      max_binlog_size=1G
      binlog_rows_query_log_events=ON
      key_buffer_size=16M
      innodb_print_all_deadlocks=ON
      slow_query_log=ON
      innodb_buffer_pool_size=2000M
      pxc_strict_mode=PERMISSIVE
      innodb_buffer_pool_instances=8
      innodb_log_buffer_size=128M
      innodb_log_file_size=1G
      innodb_flush_log_at_trx_commit=2
      innodb_flush_log_at_timeout=1800
      innodb_buffer_pool_dump_at_shutdown=on
      innodb_buffer_pool_load_at_startup=on
      innodb_max_dirty_pages_pct=80
      innodb_max_dirty_pages_pct_lwm=20
      innodb_buffer_pool_dump_pct=75
      innodb_read_io_threads=1
      innodb_write_io_threads=1
      innodb_io_capacity=30000
      innodb_io_capacity_max=60000
      innodb_open_files=5000
      innodb_strict_mode=ON
      innodb_lock_wait_timeout=50
      innodb_stats_persistent_sample_pages=300
      innodb_online_alter_log_max_size=1G
      innodb_purge_threads=1
      innodb_page_cleaners=1
      innodb_adaptive_hash_index=OFF
      table_definition_cache=4000
      thread_cache_size=512
      group_concat_max_len=99999
      lower_case_table_names=0
      wait_timeout=1800
      interactive_timeout=1800
      event_scheduler=ON
      max_connect_errors=99999
      transaction-isolation=READ-COMMITTED
      max_allowed_packet=1G
      sort_buffer_size=4M
      open_files_limit=65535
      innodb_deadlock_detect=ON
      innodb_rollback_on_timeout=ON
      max_connections=1000
      innodb_flush_method=O_DSYNC
      max_heap_table_size=256M
      tmp_table_size=32M
      table_open_cache=4000
      read_buffer_size=4M
      myisam_sort_buffer_size=64M
      read_rnd_buffer_size=4M
      join_buffer_size=4M
      wsrep_slave_threads=8
      sql-mode="ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION"
      audit_log_policy=ALL
      audit_log_format=JSON
      audit_log_file=/var/log/mysql/audit.log
      audit_log_rotate_on_size=2G
      audit_log_rotations=2
      # wsrep_debug=CLIENT
      # wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
      # [sst]
      # xbstream-opts=--decompress
      # [xtrabackup]
      # compress=lz4
      # for PXC 5.7
      # [xtrabackup]
      # compress
#    imagePullSecrets:
#      - name: private-registry-credentials
#    priorityClassName: high-priority
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    readinessProbes:
#      initialDelaySeconds: 15
#      timeoutSeconds: 15
#      periodSeconds: 30
#      successThreshold: 1
#      failureThreshold: 5
#    livenessProbes:
#      initialDelaySeconds: 300
#      timeoutSeconds: 5
#      periodSeconds: 10
#      successThreshold: 1
#      failureThreshold: 3
#    containerSecurityContext:
#      privileged: false
#    podSecurityContext:
#      runAsUser: 1001
#      runAsGroup: 1001
#      supplementalGroups: [1001]
#    serviceAccountName: percona-xtradb-cluster-operator-workload
#    imagePullPolicy: Always
#    runtimeClassName: image-rc
#    sidecars:
#    - image: busybox
#      command: ["/bin/sh"]
#      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
#      name: my-sidecar-1
#      resources:
#        requests:
#          memory: 100M
#          cpu: 100m
#        limits:
#          memory: 200M
#          cpu: 200m
#    envVarsSecret: my-env-var-secrets
    nodeSelector:
      mysql: "true"
    tolerations:
      - key: "mysql"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
    resources:
      requests:
        memory: 3500M
        cpu: 1800m
        ephemeral-storage: 2G
      limits:
        memory: 4000M
        cpu: 1800m
        ephemeral-storage: 2G
#    nodeSelector:
#      disktype: ssd
#    affinity:
#      antiAffinityTopologyKey: "kubernetes.io/hostname"
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#            - matchExpressions:
#              - key: kubernetes.io/e2e-az-name
#                operator: In
#                values:
#                - e2e-az1
#                - e2e-az2
#    tolerations:
#    - key: "node.alpha.kubernetes.io/unreachable"
#      operator: "Exists"
#      effect: "NoExecute"
#      tolerationSeconds: 6000
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    volumeSpec:
#      emptyDir: {}
#      hostPath:
#        path: /data
#        type: Directory
      persistentVolumeClaim:
        storageClassName: premium-rwo
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 100Gi
    gracePeriod: 600
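#    NOTE: gracePeriod is the termination grace period (in seconds) for the PXC
#    Pods; 600s leaves room for mysqld to flush and shut down cleanly.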
  haproxy:
    enabled: true
    size: 3
    image: perconalab/percona-xtradb-cluster-operator:main-haproxy
#    replicasServiceEnabled: false
#    imagePullPolicy: Always
#    schedulerName: mycustom-scheduler
#    configuration: |
#
#    the actual default configuration file can be found here https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
#
#      global
#        maxconn 2048
#        external-check
#        insecure-fork-wanted
#        stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
#
#      defaults
#        default-server init-addr last,libc,none
#        log global
#        mode tcp
#        retries 10
#        timeout client 28800s
#        timeout connect 100500
#        timeout server 28800s
#
#      frontend galera-in
#        bind *:3309 accept-proxy
#        bind *:3306
#        mode tcp
#        option clitcpka
#        default_backend galera-nodes
#
#      frontend galera-admin-in
#        bind *:33062
#        mode tcp
#        option clitcpka
#        default_backend galera-admin-nodes
#
#      frontend galera-replica-in
#        bind *:3307
#        mode tcp
#        option clitcpka
#        default_backend galera-replica-nodes
#
#      frontend galera-mysqlx-in
#        bind *:33060
#        mode tcp
#        option clitcpka
#        default_backend galera-mysqlx-nodes
#
#      frontend stats
#        bind *:8404
#        mode http
#        option http-use-htx
#        http-request use-service prometheus-exporter if { path /metrics }
#    imagePullSecrets:
#      - name: private-registry-credentials
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    readinessProbes:
#      initialDelaySeconds: 15
#      timeoutSeconds: 1
#      periodSeconds: 5
#      successThreshold: 1
#      failureThreshold: 3
#    livenessProbes:
#      initialDelaySeconds: 60
#      timeoutSeconds: 5
#      periodSeconds: 30
#      successThreshold: 1
#      failureThreshold: 4
#    serviceType: ClusterIP
    serviceType: LoadBalancer
    serviceAnnotations:
      cloud.google.com/load-balancer-type: "Internal"
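#    NOTE: serviceType: LoadBalancer together with the annotation above provisions
#    an internal (VPC-only) TCP load balancer when the cluster runs on GKE; on
#    other platforms the annotation is simply ignored.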
#    externalTrafficPolicy: Cluster
#    replicasServiceType: ClusterIP
#    replicasExternalTrafficPolicy: Cluster
#    runtimeClassName: image-rc
#    sidecars:
#    - image: busybox
#      command: ["/bin/sh"]
#      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
#      name: my-sidecar-1
#      resources:
#        requests:
#          memory: 100M
#          cpu: 100m
#        limits:
#          memory: 200M
#          cpu: 200m
#    envVarsSecret: my-env-var-secrets
    nodeSelector:
      mysql: "true"
    tolerations:
      - key: "mysql"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
    resources:
      requests:
        memory: 500M
        cpu: 800m
      limits:
        memory: 800M
        cpu: 1000m
#    priorityClassName: high-priority
#    nodeSelector:
#      disktype: ssd
#    sidecarResources:
#      requests:
#        memory: 1G
#        cpu: 500m
#      limits:
#        memory: 2G
#        cpu: 600m
#    serviceAccountName: percona-xtradb-cluster-operator-workload
#    affinity:
#      antiAffinityTopologyKey: "kubernetes.io/hostname"
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#            - matchExpressions:
#              - key: kubernetes.io/e2e-az-name
#                operator: In
#                values:
#                - e2e-az1
#                - e2e-az2
#    tolerations:
#    - key: "node.alpha.kubernetes.io/unreachable"
#      operator: "Exists"
#      effect: "NoExecute"
#      tolerationSeconds: 6000
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    gracePeriod: 30
#    loadBalancerSourceRanges:
#      - 10.0.0.0/8
#    serviceAnnotations:
#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
#    serviceLabels:
#      rack: rack-23
  logcollector:
    enabled: true
    image: perconalab/percona-xtradb-cluster-operator:main-logcollector
#    configuration: |
#      [OUTPUT]
#        Name  es
#        Match *
#        Host  192.168.2.3
#        Port  9200
#        Index my_index
#        Type  my_type
    nodeSelector:
      mysql: "true"
    tolerations:
      - key: "mysql"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
    resources:
      requests:
        memory: 1G
        cpu: 1000m
      limits:
        memory: 1G
        cpu: 1000m
  pmm:
    enabled: true
    image: percona/pmm-client:2.23.0
    serverHost: monitoring-service
    serverUser: admin
#    pxcParams: "--disable-tablestats-limit=2000"
#    proxysqlParams: "--custom-labels=CUSTOM-LABELS"
    nodeSelector:
      mysql: "true"
    tolerations:
      - key: "mysql"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
    resources:
      requests:
        memory: 1G
        cpu: 1000m
      limits:
        memory: 1G
        cpu: 1000m
  backup:
    image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup
#    serviceAccountName: percona-xtradb-cluster-operator
#    imagePullSecrets:
#      - name: private-registry-credentials
    pitr:
      enabled: false
      storageName: STORAGE-NAME-HERE
      timeBetweenUploads: 60
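#    NOTE: when pitr.enabled is set to true, the operator continuously uploads
#    binlogs to the storage named in storageName (it must match one of the entries
#    under storages below) every timeBetweenUploads seconds, enabling point-in-time
#    recovery; replace the STORAGE-NAME-HERE placeholder before enabling it.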
    storages:
      s3-us-west:
        type: s3
#        nodeSelector:
#          storage: tape
#          backupWorker: 'True'
#        resources:
#          requests:
#            memory: 1G
#            cpu: 600m
#        affinity:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#              - matchExpressions:
#                - key: backupWorker
#                  operator: In
#                  values:
#                  - 'True'
#        tolerations:
#          - key: "backupWorker"
#            operator: "Equal"
#            value: "True"
#            effect: "NoSchedule"
#        annotations:
#          testName: scheduled-backup
#        labels:
#          backupWorker: 'True'
#        schedulerName: 'default-scheduler'
#        priorityClassName: 'high-priority'
#        containerSecurityContext:
#          privileged: true
#        podSecurityContext:
#          fsGroup: 1001
#          supplementalGroups: [1001, 1002, 1003]
        s3:
          bucket: mysql-test/test_backup
          credentialsSecret: test-mysql-backup-gcs
          region: us-central1
          endpointUrl: https://storage.googleapis.com
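#        NOTE: this storage points at Google Cloud Storage through its S3-compatible
#        endpoint; the credentialsSecret is expected to hold AWS_ACCESS_KEY_ID and
#        AWS_SECRET_ACCESS_KEY keys (for GCS these are HMAC interoperability keys).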
      fs-pvc:
        type: filesystem
#        affinity:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#              - matchExpressions:
#                - key: db
#                  operator: In
#                  values:
#                  - "true"
        nodeSelector:
          mysql: "true"
        tolerations:
          - key: "mysql"
            operator: "Equal"
            value: "true"
            effect: "NoSchedule"
        resources:
          requests:
            memory: 1G
            cpu: 1000m
          limits:
            memory: 1G
            cpu: 1000m
#        tolerations:
#          - key: "db"
#            operator: "Equal"
#            value: "true"
#            effect: "NoSchedule"
#        nodeSelector:
#          storage: tape
#          backupWorker: 'True'
#        resources:
#          requests:
#            memory: 1G
#            cpu: 600m
#        affinity:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#              - matchExpressions:
#                - key: backupWorker
#                  operator: In
#                  values:
#                  - 'True'
#        tolerations:
#          - key: "backupWorker"
#            operator: "Equal"
#            value: "True"
#            effect: "NoSchedule"
#        annotations:
#          testName: scheduled-backup
#        labels:
#          backupWorker: 'True'
#        schedulerName: 'default-scheduler'
#        priorityClassName: 'high-priority'
#        containerSecurityContext:
#          privileged: true
#        podSecurityContext:
#          fsGroup: 1001
#          supplementalGroups: [1001, 1002, 1003]
        volume:
          persistentVolumeClaim:
            storageClassName: standard-ssd
            accessModes: [ "ReadWriteOnce" ]
            resources:
              requests:
                storage: 220G
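#        NOTE: with type: filesystem, backups taken to fs-pvc are written to a
#        PersistentVolumeClaim created from the 220G template above instead of
#        being uploaded to object storage.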
    schedule:
      - name: "daily-backup"
        schedule: "0 0 * * *"
        keep: 7
        storageName: s3-us-west
#      - name: "sat-night-backup"
#        schedule: "0 0 * * 6"
#        keep: 5
#        storageName: fs-pvc
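# To apply this custom resource once the Percona XtraDB Cluster Operator is running
# in the target namespace (example command, adjust the namespace to your setup):
#   kubectl apply -f cr.yaml -n <namespace>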