Unable to install Percona Server for MongoDB on Kubernetes

I completed the installation according to the documentation at https://www.percona.com/doc/kubernetes-operator-for-psmongodb/kubernetes.html, but the cluster did not come up healthy.
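
For reference, the steps I ran were roughly the following (reconstructed from that page; the v1.9.0 branch and the percona namespace are choices from my setup):

# sketch of the install steps from the Percona doc; branch and namespace are mine
git clone -b v1.9.0 https://github.com/percona/percona-server-mongodb-operator
cd percona-server-mongodb-operator
kubectl create namespace percona
kubectl apply -f deploy/bundle.yaml -n percona
kubectl apply -f deploy/cr.yaml -n percona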

I found that 3 pods are not ready. I hope someone can help.
Below is the "kubectl describe" output for some of the pods and services.

root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# 
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# kubectl describe pod my-cluster-name-mongos-c979989f9-2wvhg
Name:         my-cluster-name-mongos-c979989f9-2wvhg
Namespace:    percona
Priority:     0
Node:         k8s-node02/10.45.69.42
Start Time:   Thu, 09 Sep 2021 19:12:47 +0800
Labels:       app.kubernetes.io/component=mongos
              app.kubernetes.io/instance=my-cluster-name
              app.kubernetes.io/managed-by=percona-server-mongodb-operator
              app.kubernetes.io/name=percona-server-mongodb
              app.kubernetes.io/part-of=percona-server-mongodb
              pod-template-hash=c979989f9
Annotations:  cni.projectcalico.org/podIP: 172.30.58.204/32
              cni.projectcalico.org/podIPs: 172.30.58.204/32
              percona.com/ssl-hash: e2af018b5012197642cb32d1a895c58b
              percona.com/ssl-internal-hash: 61820a64f09b3134d44f8be88320c62f
Status:       Running
IP:           172.30.58.204
IPs:
  IP:           172.30.58.204
Controlled By:  ReplicaSet/my-cluster-name-mongos-c979989f9
Init Containers:
  mongo-init:
    Container ID:  docker://96181b1d5e6a0571c718f66561a75f7578fe1443535140a8ec3c239471e37a9b
    Image:         percona/percona-server-mongodb-operator:1.9.0
    Image ID:      docker-pullable://percona/percona-server-mongodb-operator@sha256:2daab5999a3a5bc407cc63ce8ae4d18d985f636685273c6678b118d770c3c014
    Port:          <none>
    Host Port:     <none>
    Command:
      /init-entrypoint.sh
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 09 Sep 2021 19:13:18 +0800
      Finished:     Thu, 09 Sep 2021 19:13:18 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     300m
      memory:  500M
    Requests:
      cpu:        300m
      memory:     500M
    Environment:  <none>
    Mounts:
      /data/db from mongod-data (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bls9x (ro)
Containers:
  mongos:
    Container ID:  docker://3d68d94b1517d2434f1612e5dede1471212678c55accf976933208bcd343031f
    Image:         percona/percona-server-mongodb:4.4.6-8
    Image ID:      docker-pullable://percona/percona-server-mongodb@sha256:0afd1ae855cd5090d106ba7462b0c1c73e0da7a180b46811b2f3c2149ecb7d4f
    Port:          27017/TCP
    Host Port:     0/TCP
    Command:
      /data/db/ps-entry.sh
    Args:
      mongos
      --bind_ip_all
      --port=27017
      --sslAllowInvalidCertificates
      --configdb
      cfg/my-cluster-name-cfg-0.my-cluster-name-cfg.percona.svc.cluster.local:27017,my-cluster-name-cfg-1.my-cluster-name-cfg.percona.svc.cluster.local:27017,my-cluster-name-cfg-2.my-cluster-name-cfg.percona.svc.cluster.local:27017
      --relaxPermChecks
      --sslMode=preferSSL
      --clusterAuthMode=x509
    State:          Running
      Started:      Thu, 09 Sep 2021 19:32:06 +0800
    Last State:     Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 09 Sep 2021 19:28:59 +0800
      Finished:     Thu, 09 Sep 2021 19:31:43 +0800
    Ready:          False
    Restart Count:  6
    Limits:
      cpu:     300m
      memory:  500M
    Requests:
      cpu:      300m
      memory:   500M
    Liveness:   exec [/data/db/mongodb-healthcheck k8s liveness --component mongos --ssl --sslInsecure --sslCAFile /etc/mongodb-ssl/ca.crt --sslPEMKeyFile /tmp/tls.pem] delay=60s timeout=5s period=30s #success=1 #failure=4
    Readiness:  exec [/data/db/mongodb-healthcheck k8s readiness --component mongos --ssl --sslInsecure --sslCAFile /etc/mongodb-ssl/ca.crt --sslPEMKeyFile /tmp/tls.pem] delay=10s timeout=2s period=1s #success=1 #failure=3
    Environment Variables from:
      my-cluster-name-secrets         Secret  Optional: false
      internal-my-cluster-name-users  Secret  Optional: false
    Environment:
      MONGODB_PORT:  27017
    Mounts:
      /data/db from mongod-data (rw)
      /etc/mongodb-secrets from my-cluster-name-mongodb-keyfile (ro)
      /etc/mongodb-ssl from ssl (ro)
      /etc/mongodb-ssl-internal from ssl-internal (ro)
      /etc/users-secret from users-secret-file (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bls9x (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  my-cluster-name-mongodb-keyfile:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  my-cluster-name-mongodb-keyfile
    Optional:    false
  ssl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  my-cluster-name-ssl
    Optional:    false
  ssl-internal:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  my-cluster-name-ssl-internal
    Optional:    true
  mongod-data:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
  users-secret-file:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  internal-my-cluster-name-users
    Optional:    false
  default-token-bls9x:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-bls9x
    Optional:    false
QoS Class:       Guaranteed
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age   From               Message
  ----     ------     ----  ----               -------
  Normal   Scheduled  21m   default-scheduler  Successfully assigned percona/my-cluster-name-mongos-c979989f9-2wvhg to k8s-node02
  Normal   Pulling    21m   kubelet            Pulling image "percona/percona-server-mongodb-operator:1.9.0"
  Normal   Pulled     21m   kubelet            Successfully pulled image "percona/percona-server-mongodb-operator:1.9.0"
  Normal   Created    21m   kubelet            Created container mongo-init
  Normal   Started    21m   kubelet            Started container mongo-init
  Normal   Pulling    21m   kubelet            Pulling image "percona/percona-server-mongodb:4.4.6-8"
  Normal   Created    20m   kubelet            Created container mongos
  Normal   Started    20m   kubelet            Started container mongos
  Warning  Unhealthy  20m   kubelet            Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:13:59Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:13:59Z"}
  Warning  Unhealthy  20m  kubelet  Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:14:00Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:14:00Z"}
  Warning  Unhealthy  20m  kubelet  Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:14:01Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:14:01Z"}
  Warning  Unhealthy  20m  kubelet  Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:14:02Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:14:02Z"}
  Warning  Unhealthy  20m  kubelet  Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:14:03Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:14:03Z"}
  Warning  Unhealthy  20m  kubelet  Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:14:04Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:14:04Z"}
  Warning  Unhealthy  20m  kubelet  Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:14:05Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:14:05Z"}
  Warning  Unhealthy  20m  kubelet  Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:14:06Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:14:06Z"}
  Warning  Unhealthy  20m  kubelet  Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:14:07Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:14:07Z"}
  Normal   Pulled     11m (x4 over 20m)     kubelet  Successfully pulled image "percona/percona-server-mongodb:4.4.6-8"
  Warning  Unhealthy  102s (x989 over 20m)  kubelet  (combined from similar events): Readiness probe failed: {"level":"error","msg":"could not connect to localhost:27017. got: dial tcp 127.0.0.1:27017: connect: connection refused","time":"2021-09-09T11:32:49Z"}
{"level":"fatal","msg":"connection error: no reachable servers","time":"2021-09-09T11:32:49Z"}
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# 
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# 
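The mongos readiness probe keeps failing with "connection refused" on 127.0.0.1:27017, which suggests the mongos process itself never comes up. Its container logs can be pulled with something like:

# container name "mongos" and namespace "percona" as shown in the describe output above
kubectl logs my-cluster-name-mongos-c979989f9-2wvhg -c mongos -n percona --tail=100
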
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# kubectl get pods -o wide
NAME                                               READY   STATUS             RESTARTS   AGE   IP               NODE             NOMINATED NODE   READINESS GATES
my-cluster-name-cfg-0                              1/2     CrashLoopBackOff   10         23m   172.30.85.206    k8s-node01       <none>           <none>
my-cluster-name-cfg-1                              2/2     Running            10         22m   172.30.58.236    k8s-node02       <none>           <none>
my-cluster-name-cfg-2                              2/2     Running            9          21m   172.30.219.123   dbaas-master01   <none>           <none>
my-cluster-name-mongos-c979989f9-2wvhg             0/1     Running            7          23m   172.30.58.204    k8s-node02       <none>           <none>
my-cluster-name-mongos-c979989f9-brc8q             0/1     Running            7          23m   172.30.85.207    k8s-node01       <none>           <none>
my-cluster-name-mongos-c979989f9-mc7s6             0/1     Running            1          23m   172.30.219.121   dbaas-master01   <none>           <none>
my-cluster-name-rs0-0                              2/2     Running            11         23m   172.30.58.233    k8s-node02       <none>           <none>
my-cluster-name-rs0-1                              1/2     CrashLoopBackOff   10         22m   172.30.85.200    k8s-node01       <none>           <none>
my-cluster-name-rs0-2                              2/2     Running            9          21m   172.30.219.72    dbaas-master01   <none>           <none>
percona-server-mongodb-operator-754cf45d87-v85vl   1/1     Running            0          39m   172.30.85.203    k8s-node01       <none>           <none>
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# 
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# 
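Since my-cluster-name-cfg-0 and my-cluster-name-rs0-1 are in CrashLoopBackOff, the logs of the previous (crashed) mongod container are probably worth checking as well, e.g.:

# --previous shows the logs of the last terminated instance of the container
kubectl logs my-cluster-name-cfg-0 -c mongod -n percona --previous
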
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# kubectl describe pod my-cluster-name-cfg-1 
Name:         my-cluster-name-cfg-1
Namespace:    percona
Priority:     0
Node:         k8s-node02/10.45.69.42
Start Time:   Thu, 09 Sep 2021 19:14:04 +0800
Labels:       app.kubernetes.io/component=cfg
              app.kubernetes.io/instance=my-cluster-name
              app.kubernetes.io/managed-by=percona-server-mongodb-operator
              app.kubernetes.io/name=percona-server-mongodb
              app.kubernetes.io/part-of=percona-server-mongodb
              app.kubernetes.io/replset=cfg
              controller-revision-hash=my-cluster-name-cfg-6d66dc46d
              statefulset.kubernetes.io/pod-name=my-cluster-name-cfg-1
Annotations:  cni.projectcalico.org/podIP: 172.30.58.236/32
              cni.projectcalico.org/podIPs: 172.30.58.236/32
              percona.com/ssl-hash: e2af018b5012197642cb32d1a895c58b
              percona.com/ssl-internal-hash: 61820a64f09b3134d44f8be88320c62f
Status:       Running
IP:           172.30.58.236
IPs:
  IP:           172.30.58.236
Controlled By:  StatefulSet/my-cluster-name-cfg
Init Containers:
  mongo-init:
    Container ID:  docker://66dbbb0b1bc96392b86ee4b5a56e42262b3f139b3d24e5264ade882ca5958c39
    Image:         percona/percona-server-mongodb-operator:1.9.0
    Image ID:      docker-pullable://percona/percona-server-mongodb-operator@sha256:2daab5999a3a5bc407cc63ce8ae4d18d985f636685273c6678b118d770c3c014
    Port:          <none>
    Host Port:     <none>
    Command:
      /init-entrypoint.sh
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 09 Sep 2021 19:14:22 +0800
      Finished:     Thu, 09 Sep 2021 19:14:22 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     300m
      memory:  500M
    Requests:
      cpu:        300m
      memory:     500M
    Environment:  <none>
    Mounts:
      /data/db from mongod-data (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bls9x (ro)
Containers:
  mongod:
    Container ID:  docker://36178678def5f990f4de622ff18d67c5cfe4e78b96883877dbe3b5a22f4e2744
    Image:         percona/percona-server-mongodb:4.4.6-8
    Image ID:      docker-pullable://percona/percona-server-mongodb@sha256:0afd1ae855cd5090d106ba7462b0c1c73e0da7a180b46811b2f3c2149ecb7d4f
    Port:          27017/TCP
    Host Port:     0/TCP
    Command:
      /data/db/ps-entry.sh
    Args:
      --bind_ip_all
      --auth
      --dbpath=/data/db
      --port=27017
      --replSet=cfg
      --storageEngine=wiredTiger
      --relaxPermChecks
      --sslAllowInvalidCertificates
      --sslMode=preferSSL
      --clusterAuthMode=x509
      --configsvr
      --slowms=100
      --profile=1
      --rateLimit=100
      --enableEncryption
      --encryptionKeyFile=/etc/mongodb-encryption/encryption-key
      --encryptionCipherMode=AES256-CBC
      --wiredTigerCacheSizeGB=0.25
      --wiredTigerCollectionBlockCompressor=snappy
      --wiredTigerJournalCompressor=snappy
      --wiredTigerIndexPrefixCompression=true
      --setParameter
      ttlMonitorSleepSecs=60
      --setParameter
      wiredTigerConcurrentReadTransactions=128
      --setParameter
      wiredTigerConcurrentWriteTransactions=128
    State:          Running
      Started:      Thu, 09 Sep 2021 19:36:09 +0800
    Last State:     Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 09 Sep 2021 19:32:54 +0800
      Finished:     Thu, 09 Sep 2021 19:35:38 +0800
    Ready:          True
    Restart Count:  7
    Limits:
      cpu:     300m
      memory:  500M
    Requests:
      cpu:      300m
      memory:   500M
    Liveness:   exec [/data/db/mongodb-healthcheck k8s liveness --ssl --sslInsecure --sslCAFile /etc/mongodb-ssl/ca.crt --sslPEMKeyFile /tmp/tls.pem --startupDelaySeconds 7200] delay=60s timeout=5s period=30s #success=1 #failure=4
    Readiness:  tcp-socket :27017 delay=10s timeout=2s period=3s #success=1 #failure=8
    Environment Variables from:
      internal-my-cluster-name-users  Secret  Optional: false
    Environment:
      SERVICE_NAME:     my-cluster-name
      NAMESPACE:        percona
      MONGODB_PORT:     27017
      MONGODB_REPLSET:  cfg
    Mounts:
      /data/db from mongod-data (rw)
      /etc/mongodb-encryption from my-cluster-name-mongodb-encryption-key (ro)
      /etc/mongodb-secrets from my-cluster-name-mongodb-keyfile (ro)
      /etc/mongodb-ssl from ssl (ro)
      /etc/mongodb-ssl-internal from ssl-internal (ro)
      /etc/users-secret from users-secret-file (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bls9x (ro)
  backup-agent:
    Container ID:   docker://bf0deb6ffd38c87f5afc0bd7eb601d195f60074ebf691f09086581882078a1be
    Image:          percona/percona-server-mongodb-operator:1.9.0-backup
    Image ID:       docker-pullable://percona/percona-server-mongodb-operator@sha256:ae0ed563105871da37496667a9a9ae6323c32e0321c3448a9f965f7d54d0bc94
    Port:           <none>
    Host Port:      <none>
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 09 Sep 2021 19:31:51 +0800
      Finished:     Thu, 09 Sep 2021 19:37:04 +0800
    Last State:     Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 09 Sep 2021 19:26:14 +0800
      Finished:     Thu, 09 Sep 2021 19:31:05 +0800
    Ready:          False
    Restart Count:  3
    Environment:
      PBM_AGENT_MONGODB_USERNAME:  <set to the key 'MONGODB_BACKUP_USER' in secret 'internal-my-cluster-name-users'>      Optional: false
      PBM_AGENT_MONGODB_PASSWORD:  <set to the key 'MONGODB_BACKUP_PASSWORD' in secret 'internal-my-cluster-name-users'>  Optional: false
      PBM_MONGODB_REPLSET:         cfg
      PBM_MONGODB_PORT:            27017
      SHARDED:                     TRUE
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bls9x (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  mongod-data:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  mongod-data-my-cluster-name-cfg-1
    ReadOnly:   false
  my-cluster-name-mongodb-keyfile:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  my-cluster-name-mongodb-keyfile
    Optional:    false
  my-cluster-name-mongodb-encryption-key:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  my-cluster-name-mongodb-encryption-key
    Optional:    false
  ssl:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  my-cluster-name-ssl
    Optional:    false
  ssl-internal:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  my-cluster-name-ssl-internal
    Optional:    true
  users-secret-file:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  internal-my-cluster-name-users
    Optional:    false
  default-token-bls9x:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-bls9x
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age   From               Message
  ----     ------     ----  ----               -------
  Normal   Scheduled  23m   default-scheduler  Successfully assigned percona/my-cluster-name-cfg-1 to k8s-node02
  Normal   Pulling    23m   kubelet            Pulling image "percona/percona-server-mongodb-operator:1.9.0"
  Normal   Pulled     22m   kubelet            Successfully pulled image "percona/percona-server-mongodb-operator:1.9.0"
  Normal   Created    22m   kubelet            Created container mongo-init
  Normal   Started    22m   kubelet            Started container mongo-init
  Normal   Pulled     22m   kubelet            Successfully pulled image "percona/percona-server-mongodb-operator:1.9.0-backup"
  Normal   Created    22m   kubelet            Created container backup-agent
  Normal   Started    22m   kubelet            Started container backup-agent
  Warning  Unhealthy  21m   kubelet            Liveness probe failed: {"level":"info","msg":"Running Kubernetes liveness check for mongod","time":"2021-09-09T11:16:07Z"}
{"level":"error","msg":"replSetGetStatus returned error command replSetGetStatus requires authentication","time":"2021-09-09T11:16:07Z"}
  Warning  Unhealthy  20m  kubelet  Liveness probe failed: {"level":"info","msg":"Running Kubernetes liveness check for mongod","time":"2021-09-09T11:16:37Z"}
{"level":"error","msg":"replSetGetStatus returned error command replSetGetStatus requires authentication","time":"2021-09-09T11:16:37Z"}
  Warning  Unhealthy  20m  kubelet  Liveness probe failed: {"level":"info","msg":"Running Kubernetes liveness check for mongod","time":"2021-09-09T11:17:07Z"}
{"level":"error","msg":"replSetGetStatus returned error command replSetGetStatus requires authentication","time":"2021-09-09T11:17:07Z"}
  Warning  Unhealthy  19m  kubelet  Liveness probe failed: {"level":"info","msg":"Running Kubernetes liveness check for mongod","time":"2021-09-09T11:17:37Z"}
{"level":"error","msg":"replSetGetStatus returned error command replSetGetStatus requires authentication","time":"2021-09-09T11:17:37Z"}
  Normal   Killing    19m                kubelet  Container mongod failed liveness probe, will be restarted
  Normal   Pulling    19m (x2 over 22m)  kubelet  Pulling image "percona/percona-server-mongodb:4.4.6-8"
  Warning  Unhealthy  19m                kubelet  Readiness probe failed: dial tcp 172.30.58.236:27017: connect: connection refused
  Normal   Started    19m (x2 over 22m)  kubelet  Started container mongod
  Normal   Pulled     19m (x2 over 22m)  kubelet  Successfully pulled image "percona/percona-server-mongodb:4.4.6-8"
  Normal   Created    19m (x2 over 22m)  kubelet  Created container mongod
  Warning  Unhealthy  18m                kubelet  Liveness probe failed: {"level":"info","msg":"Running Kubernetes liveness check for mongod","time":"2021-09-09T11:19:07Z"}
{"level":"error","msg":"replSetGetStatus returned error command replSetGetStatus requires authentication","time":"2021-09-09T11:19:07Z"}
  Warning  Unhealthy  17m  kubelet  Liveness probe failed: {"level":"info","msg":"Running Kubernetes liveness check for mongod","time":"2021-09-09T11:19:37Z"}
{"level":"error","msg":"replSetGetStatus returned error command replSetGetStatus requires authentication","time":"2021-09-09T11:19:37Z"}
  Warning  Unhealthy  17m  kubelet  Liveness probe failed: {"level":"info","msg":"Running Kubernetes liveness check for mongod","time":"2021-09-09T11:20:07Z"}
{"level":"error","msg":"replSetGetStatus returned error command replSetGetStatus requires authentication","time":"2021-09-09T11:20:07Z"}
  Normal   Pulling    16m (x2 over 22m)    kubelet  Pulling image "percona/percona-server-mongodb-operator:1.9.0-backup"
  Warning  Unhealthy  3m6s (x17 over 15m)  kubelet  (combined from similar events): Liveness probe failed: {"level":"info","msg":"Running Kubernetes liveness check for mongod","time":"2021-09-09T11:34:07Z"}
{"level":"error","msg":"replSetGetStatus returned error command replSetGetStatus requires authentication","time":"2021-09-09T11:34:07Z"}
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# kubectl get service
NAME                     TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)     AGE
my-cluster-name-cfg      ClusterIP   None           <none>        27017/TCP   26m
my-cluster-name-mongos   ClusterIP   10.96.77.225   <none>        27017/TCP   26m
my-cluster-name-rs0      ClusterIP   None           <none>        27017/TCP   26m
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# 
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# 
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# kubectl describe service my-cluster-name-mongos
Name:              my-cluster-name-mongos
Namespace:         percona
Labels:            <none>
Annotations:       percona.com/last-config-hash:
                     eyJwb3J0cyI6W3sibmFtZSI6Im1vbmdvcyIsInBvcnQiOjI3MDE3LCJ0YXJnZXRQb3J0IjoyNzAxN31dLCJzZWxlY3RvciI6eyJhcHAua3ViZXJuZXRlcy5pby9jb21wb25lbnQiOi...
Selector:          app.kubernetes.io/component=mongos,app.kubernetes.io/instance=my-cluster-name,app.kubernetes.io/managed-by=percona-server-mongodb-operator,app.kubernetes.io/name=percona-server-mongodb,app.kubernetes.io/part-of=percona-server-mongodb
Type:              ClusterIP
IP:                10.96.77.225
Port:              mongos  27017/TCP
TargetPort:        27017/TCP
Endpoints:         
Session Affinity:  None
Events:            <none>
root@k8s-master01[/home/chb/percona-server-mongodb-operator/deploy]# 
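Note that the mongos service has an empty Endpoints list, which is consistent with all mongos pods being 0/1 Ready: a pod that fails its readiness probe is removed from the service endpoints. This can be confirmed with:

kubectl get endpoints my-cluster-name-mongos -n percona
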

This turned out to be a problem with my k8s environment. I redeployed the k8s environment and then got the expected result: all pods reached the Ready state.
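
To verify, something like the following should now show the cluster and all pods reaching the ready state (psmdb is, as far as I know, the short name registered for the PerconaServerMongoDB custom resource):

# "psmdb" short name is an assumption; the full resource name perconaservermongodbs should also work
kubectl get psmdb -n percona
kubectl get pods -n percona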


My cr.yaml file is as follows:

apiVersion: psmdb.percona.com/v1-9-0
kind: PerconaServerMongoDB
metadata:
  name: my-cluster-name
#  finalizers:
#    - delete-psmdb-pvc
spec:
#  platform: openshift
#  clusterServiceDNSSuffix: svc.cluster.local
#  pause: true
  crVersion: 1.9.0
  image: percona/percona-server-mongodb:4.4.6-8
  imagePullPolicy: Always
#  imagePullSecrets:
#    - name: private-registry-credentials
#  runUid: 1001
  allowUnsafeConfigurations: false
  updateStrategy: SmartUpdate
  upgradeOptions:
    versionServiceEndpoint: https://check.percona.com
    apply: 4.4-recommended
    schedule: "0 2 * * *"
    setFCV: false
  secrets:
    users: my-cluster-name-secrets
  pmm:
    enabled: false
    image: percona/pmm-client:2.18.0
    serverHost: monitoring-service
#    mongodParams: --environment=ENVIRONMENT
#    mongosParams: --environment=ENVIRONMENT
  replsets:

  - name: rs0
    size: 3
#    # for more configuration fields refer to https://docs.mongodb.com/manual/reference/configuration-options/
#    configuration: |
#      operationProfiling:
#        mode: slowOp
#      systemLog:
#         verbosity: 1
#    storage:
#      engine: wiredTiger
#      inMemory:
#        engineConfig:
#          inMemorySizeRatio: 0.9
#      wiredTiger:
#        engineConfig:
#          cacheSizeRatio: 0.5
#          directoryForIndexes: false
#          journalCompressor: snappy
#        collectionConfig:
#          blockCompressor: snappy
#        indexConfig:
#          prefixCompression: true
    affinity:
      antiAffinityTopologyKey: "kubernetes.io/hostname"
#      advanced:
#        nodeAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            nodeSelectorTerms:
#            - matchExpressions:
#              - key: kubernetes.io/e2e-az-name
#                operator: In
#                values:
#                - e2e-az1
#                - e2e-az2
#    tolerations:
#    - key: "node.alpha.kubernetes.io/unreachable"
#      operator: "Exists"
#      effect: "NoExecute"
#      tolerationSeconds: 6000
#    priorityClassName: high-priority
#    annotations:
#      iam.amazonaws.com/role: role-arn
#    labels:
#      rack: rack-22
#    nodeSelector:
#      disktype: ssd
#    livenessProbe:
#      failureThreshold: 4
#      initialDelaySeconds: 60
#      periodSeconds: 30
#      successThreshold: 1
#      timeoutSeconds: 5
#      startupDelaySeconds: 7200
#    runtimeClassName: image-rc
#    sidecars:
#    - image: busybox
#      command: ["/bin/sh"]
#      args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
#      name: rs-sidecar-1
    podDisruptionBudget:
      maxUnavailable: 1
#      minAvailable: 0
    expose:
      enabled: false
      exposeType: LoadBalancer
#      loadBalancerSourceRanges:
#        - 10.0.0.0/8
#      serviceAnnotations:
#        service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
    arbiter:
      enabled: false
      size: 1
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
#        advanced:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#              - matchExpressions:
#                - key: kubernetes.io/e2e-az-name
#                  operator: In
#                  values:
#                  - e2e-az1
#                  - e2e-az2
#      tolerations:
#      - key: "node.alpha.kubernetes.io/unreachable"
#        operator: "Exists"
#        effect: "NoExecute"
#        tolerationSeconds: 6000
#      priorityClassName: high-priority
#      annotations:
#        iam.amazonaws.com/role: role-arn
#      labels:
#        rack: rack-22
#      nodeSelector:
#        disktype: ssd
#    schedulerName: "default"
    resources:
      limits:
        cpu: "300m"
        memory: "0.5G"
      requests:
        cpu: "300m"
        memory: "0.5G"
    volumeSpec:
#      emptyDir: {}
#      hostPath:
#        path: /data
#        type: Directory
      persistentVolumeClaim:
        storageClassName: dd-sc
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 3Gi

  sharding:
    enabled: true

    configsvrReplSet:
      size: 3
#      # for more configuration fields refer to https://docs.mongodb.com/manual/reference/configuration-options/
#      configuration: |
#        operationProfiling:
#          mode: slowOp
#        systemLog:
#           verbosity: 1
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
#        advanced:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#              - matchExpressions:
#                - key: kubernetes.io/e2e-az-name
#                  operator: In
#                  values:
#                  - e2e-az1
#                  - e2e-az2
#      tolerations:
#      - key: "node.alpha.kubernetes.io/unreachable"
#        operator: "Exists"
#        effect: "NoExecute"
#        tolerationSeconds: 6000
#      priorityClassName: high-priority
#      annotations:
#        iam.amazonaws.com/role: role-arn
#      labels:
#        rack: rack-22
#      nodeSelector:
#        disktype: ssd
#      storage:
#        engine: wiredTiger
#        wiredTiger:
#          engineConfig:
#            cacheSizeRatio: 0.5
#            directoryForIndexes: false
#            journalCompressor: snappy
#          collectionConfig:
#            blockCompressor: snappy
#          indexConfig:
#            prefixCompression: true
#      runtimeClassName: image-rc
#      sidecars:
#      - image: busybox
#        command: ["/bin/sh"]
#        args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
#        name: rs-sidecar-1
      podDisruptionBudget:
        maxUnavailable: 1
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      volumeSpec:
#       emptyDir: {}
#       hostPath:
#         path: /data
#         type: Directory
        persistentVolumeClaim:
          storageClassName: dd-sc
          accessModes: [ "ReadWriteOnce" ]
          resources:
            requests:
              storage: 3Gi

    mongos:
      size: 3
#      # for more configuration fields refer to https://docs.mongodb.com/manual/reference/configuration-options/
#      configuration: |
#        systemLog:
#           verbosity: 1
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
#        advanced:
#          nodeAffinity:
#            requiredDuringSchedulingIgnoredDuringExecution:
#              nodeSelectorTerms:
#              - matchExpressions:
#                - key: kubernetes.io/e2e-az-name
#                  operator: In
#                  values:
#                  - e2e-az1
#                  - e2e-az2
#      tolerations:
#      - key: "node.alpha.kubernetes.io/unreachable"
#        operator: "Exists"
#        effect: "NoExecute"
#        tolerationSeconds: 6000
#      priorityClassName: high-priority
#      annotations:
#        iam.amazonaws.com/role: role-arn
#      labels:
#        rack: rack-22
#      nodeSelector:
#        disktype: ssd
#      runtimeClassName: image-rc
#      sidecars:
#      - image: busybox
#        command: ["/bin/sh"]
#        args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
#        name: rs-sidecar-1
      podDisruptionBudget:
        maxUnavailable: 1
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      expose:
        exposeType: ClusterIP
#        loadBalancerSourceRanges:
#          - 10.0.0.0/8
#        serviceAnnotations:
#          service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
#      auditLog:
#        destination: file
#        format: BSON
#        filter: '{}'

  mongod:
    net:
      port: 27017
      hostPort: 0
    security:
      redactClientLogData: false
      enableEncryption: true
      encryptionKeySecret: my-cluster-name-mongodb-encryption-key
      encryptionCipherMode: AES256-CBC
    setParameter:
      ttlMonitorSleepSecs: 60
      wiredTigerConcurrentReadTransactions: 128
      wiredTigerConcurrentWriteTransactions: 128
    storage:
      engine: wiredTiger
      inMemory:
        engineConfig:
          inMemorySizeRatio: 0.9
      wiredTiger:
        engineConfig:
          cacheSizeRatio: 0.5
          directoryForIndexes: false
          journalCompressor: snappy
        collectionConfig:
          blockCompressor: snappy
        indexConfig:
          prefixCompression: true
    operationProfiling:
      mode: slowOp
      slowOpThresholdMs: 100
      rateLimit: 100
#    auditLog:
#      destination: file
#      format: BSON
#      filter: '{}'

  backup:
    enabled: true
    restartOnFailure: true
    image: percona/percona-server-mongodb-operator:1.9.0-backup
    serviceAccountName: percona-server-mongodb-operator
#    resources:
#      limits:
#        cpu: "300m"
#        memory: "0.5G"
#      requests:
#        cpu: "300m"
#        memory: "0.5G"
    storages:
#      s3-us-west:
#        type: s3
#        s3:
#          bucket: S3-BACKUP-BUCKET-NAME-HERE
#          credentialsSecret: my-cluster-name-backup-s3
#          region: us-west-2
#      minio:
#        type: s3
#        s3:
#          bucket: MINIO-BACKUP-BUCKET-NAME-HERE
#          region: us-east-1
#          credentialsSecret: my-cluster-name-backup-minio
#          endpointUrl: http://minio.psmdb.svc.cluster.local:9000/minio/
    pitr:
      enabled: false
    tasks:
#      - name: daily-s3-us-west
#        enabled: true
#        schedule: "0 0 * * *"
#        keep: 3
#        storageName: s3-us-west
#        compressionType: gzip
#      - name: weekly-s3-us-west
#        enabled: false
#        schedule: "0 0 * * 0"
#        keep: 5
#        storageName: s3-us-west
#        compressionType: gzip