Dear All,
What is the minimum number of worker nodes required for the Percona Operator for MySQL?
Where can I set the desired number of replicas?
I found this information here:
The recommended configuration is to have at least 3 nodes
Does it mean I can’t run the cluster with 2 worker nodes?
Currently I have only 2 worker nodes — could this be the reason some pods are stuck in the Pending state?
root@kubernetes-master-ubuntu:~# k get node
NAME STATUS ROLES AGE VERSION
kubernetes-master-ubuntu Ready control-plane,master 176d v1.23.5
kubernetes-worker1-ubuntu Ready <none> 176d v1.23.5
kubernetes-worker2-ubuntu Ready <none> 176d v1.23.5
root@kubernetes-master-ubuntu:~# k get pod | grep pxc
my-db-pxc-db-haproxy-0 2/2 Running 0 66m
my-db-pxc-db-haproxy-1 2/2 Running 0 64m
my-db-pxc-db-haproxy-2 0/2 Pending 0 64m
my-db-pxc-db-pxc-0 3/3 Running 0 66m
my-db-pxc-db-pxc-1 3/3 Running 0 65m
my-db-pxc-db-pxc-2 0/3 Pending 0 64m
my-op-pxc-operator-78cc9595cc-kz4lg 1/1 Running 0 72m
root@kubernetes-master-ubuntu:~# k describe pod my-db-pxc-db-pxc-2
Name: my-db-pxc-db-pxc-2
Namespace: default
Priority: 0
Node: <none>
Labels: app.kubernetes.io/component=pxc
app.kubernetes.io/instance=my-db-pxc-db
app.kubernetes.io/managed-by=percona-xtradb-cluster-operator
app.kubernetes.io/name=percona-xtradb-cluster
app.kubernetes.io/part-of=percona-xtradb-cluster
controller-revision-hash=my-db-pxc-db-pxc-7588dbc56b
statefulset.kubernetes.io/pod-name=my-db-pxc-db-pxc-2
Annotations: percona.com/configuration-hash: d41d8cd98f00b204e9800998ecf8427e
percona.com/ssl-hash: c0de7d447a6761539fe00660e7fd4575
percona.com/ssl-internal-hash: f4377c36cc9bf18a4d87803ce1085bf8
Status: Pending
IP:
IPs: <none>
Controlled By: StatefulSet/my-db-pxc-db-pxc
Init Containers:
pxc-init:
Image: percona/percona-xtradb-cluster-operator:1.11.0
Port: <none>
Host Port: <none>
Command:
/pxc-init-entrypoint.sh
Requests:
cpu: 600m
memory: 1G
Environment: <none>
Mounts:
/var/lib/mysql from datadir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jpk2n (ro)
Containers:
logs:
Image: percona/percona-xtradb-cluster-operator:1.11.0-logcollector
Port: <none>
Host Port: <none>
Requests:
cpu: 200m
memory: 100M
Environment Variables from:
my-db-pxc-db-log-collector Secret Optional: true
Environment:
LOG_DATA_DIR: /var/lib/mysql
POD_NAMESPASE: default (v1:metadata.namespace)
POD_NAME: my-db-pxc-db-pxc-2 (v1:metadata.name)
Mounts:
/var/lib/mysql from datadir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jpk2n (ro)
logrotate:
Image: percona/percona-xtradb-cluster-operator:1.11.0-logcollector
Port: <none>
Host Port: <none>
Args:
logrotate
Requests:
cpu: 200m
memory: 100M
Environment:
SERVICE_TYPE: mysql
MONITOR_PASSWORD: <set to the key 'monitor' in secret 'internal-my-db-pxc-db'> Optional: false
Mounts:
/var/lib/mysql from datadir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jpk2n (ro)
pxc:
Image: percona/percona-xtradb-cluster:8.0.27-18.1
Ports: 3306/TCP, 4444/TCP, 4567/TCP, 4568/TCP, 33062/TCP, 33060/TCP
Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP
Command:
/var/lib/mysql/pxc-entrypoint.sh
Args:
mysqld
Requests:
cpu: 600m
memory: 1G
Liveness: exec [/var/lib/mysql/liveness-check.sh] delay=300s timeout=5s period=10s #success=1 #failure=3
Readiness: exec [/var/lib/mysql/readiness-check.sh] delay=15s timeout=15s period=30s #success=1 #failure=5
Environment Variables from:
my-db-pxc-db-env-vars-pxc Secret Optional: true
Environment:
PXC_SERVICE: my-db-pxc-db-pxc-unready
MONITOR_HOST: %
MYSQL_ROOT_PASSWORD: <set to the key 'root' in secret 'internal-my-db-pxc-db'> Optional: false
XTRABACKUP_PASSWORD: <set to the key 'xtrabackup' in secret 'internal-my-db-pxc-db'> Optional: false
MONITOR_PASSWORD: <set to the key 'monitor' in secret 'internal-my-db-pxc-db'> Optional: false
LOG_DATA_DIR: /var/lib/mysql
IS_LOGCOLLECTOR: yes
CLUSTER_HASH: 1182983
OPERATOR_ADMIN_PASSWORD: <set to the key 'operator' in secret 'internal-my-db-pxc-db'> Optional: false
LIVENESS_CHECK_TIMEOUT: 5
READINESS_CHECK_TIMEOUT: 15
Mounts:
/etc/my.cnf.d from auto-config (rw)
/etc/mysql/mysql-users-secret from mysql-users-secret-file (rw)
/etc/mysql/ssl from ssl (rw)
/etc/mysql/ssl-internal from ssl-internal (rw)
/etc/mysql/vault-keyring-secret from vault-keyring-secret (rw)
/etc/percona-xtradb-cluster.conf.d from config (rw)
/tmp from tmp (rw)
/var/lib/mysql from datadir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jpk2n (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
datadir:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
SizeLimit: <unset>
tmp:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
SizeLimit: <unset>
config:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: my-db-pxc-db-pxc
Optional: true
ssl-internal:
Type: Secret (a volume populated by a Secret)
SecretName: my-db-pxc-db-ssl-internal
Optional: true
ssl:
Type: Secret (a volume populated by a Secret)
SecretName: my-db-pxc-db-ssl
Optional: false
auto-config:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: auto-my-db-pxc-db-pxc
Optional: true
vault-keyring-secret:
Type: Secret (a volume populated by a Secret)
SecretName: my-db-pxc-db-vault
Optional: true
mysql-users-secret-file:
Type: Secret (a volume populated by a Secret)
SecretName: internal-my-db-pxc-db
Optional: false
kube-api-access-jpk2n:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 9m4s (x60 over 69m) default-scheduler 0/3 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate, 2 Insufficient cpu.
Thanks: Bela