Hello,
TL;DR
It seems there is something missing (or wrong?) in the certificates created by my own issuer and CA that mongod does not like…
I think the critical log output is this one:
{"code":17,"codeName":"ProtocolError","errmsg":"Attempt to switch database target during SASL authentication from __system@local to @$external"}}}
Details
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: mongodb-internal
spec:
  secretName: my-cluster-name-ssl-internal
  issuerRef:
    name: reddoxx
    kind: StepClusterIssuer
    group: certmanager.step.sm
  commonName: "my-cluster-name"
  dnsNames:
    - "localhost"
    - "my-cluster-name-rs0"
    - "my-cluster-name-rs0.default"
    - "my-cluster-name-rs0.default.svc.cluster.local"
    - "*.my-cluster-name-rs0"
    - "*.my-cluster-name-rs0.default"
    - "*.my-cluster-name-rs0.default.svc.cluster.local"
    - "my-cluster-name-rs0.default.svc.clusterset.local"
    - "*.my-cluster-name-rs0.default.svc.clusterset.local"
    - "*.default.svc.clusterset.local"
    - "my-cluster-name-mongos"
    - "my-cluster-name-mongos.default"
    - "my-cluster-name-mongos.default.svc.cluster.local"
    - "*.my-cluster-name-mongos"
    - "*.my-cluster-name-mongos.default"
    - "*.my-cluster-name-mongos.default.svc.cluster.local"
    - "my-cluster-name-cfg"
    - "my-cluster-name-cfg.default"
    - "my-cluster-name-cfg.default.svc.cluster.local"
    - "*.my-cluster-name-cfg"
    - "*.my-cluster-name-cfg.default"
    - "*.my-cluster-name-cfg.default.svc.cluster.local"
    - "my-cluster-name-mongos.default.svc.clusterset.local"
    - "*.my-cluster-name-mongos.default.svc.clusterset.local"
    - "my-cluster-name-cfg.default.svc.clusterset.local"
    - "*.my-cluster-name-cfg.default.svc.clusterset.local"
    - "mongodb.k08.reddoxx.com"
  usages:
    - signing
    - key encipherment
    - server auth
    - client auth
  subject:
    organizations:
      - PSMDB
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: mongodb-external
spec:
  secretName: my-cluster-name-ssl
  issuerRef:
    name: reddoxx
    kind: StepClusterIssuer
    group: certmanager.step.sm
  commonName: "my-cluster-name"
  dnsNames:
    - "localhost"
    - "my-cluster-name-rs0"
    - "my-cluster-name-rs0.default"
    - "my-cluster-name-rs0.default.svc.cluster.local"
    - "*.my-cluster-name-rs0"
    - "*.my-cluster-name-rs0.default"
    - "*.my-cluster-name-rs0.default.svc.cluster.local"
    - "my-cluster-name-rs0.default.svc.clusterset.local"
    - "*.my-cluster-name-rs0.default.svc.clusterset.local"
    - "*.default.svc.clusterset.local"
    - "my-cluster-name-mongos"
    - "my-cluster-name-mongos.default"
    - "my-cluster-name-mongos.default.svc.cluster.local"
    - "*.my-cluster-name-mongos"
    - "*.my-cluster-name-mongos.default"
    - "*.my-cluster-name-mongos.default.svc.cluster.local"
    - "my-cluster-name-cfg"
    - "my-cluster-name-cfg.default"
    - "my-cluster-name-cfg.default.svc.cluster.local"
    - "*.my-cluster-name-cfg"
    - "*.my-cluster-name-cfg.default"
    - "*.my-cluster-name-cfg.default.svc.cluster.local"
    - "my-cluster-name-mongos.default.svc.clusterset.local"
    - "*.my-cluster-name-mongos.default.svc.clusterset.local"
    - "my-cluster-name-cfg.default.svc.clusterset.local"
    - "*.my-cluster-name-cfg.default.svc.clusterset.local"
    - "mongodb.k08.reddoxx.com"
  usages:
    - signing
    - key encipherment
    - server auth
    - client auth
  subject:
    organizations:
      - PSMDB
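Both Certificates request subject.organizations: [PSMDB]. Whether the issuer actually honors that is worth verifying, since (as far as I understand) step-ca's default leaf template keeps only the common name and SANs from a CSR unless a custom template is configured on the provisioner. Comparing the requested subject with the issued one:

$ kubectl get certificate/mongodb-internal -o jsonpath='{.spec.subject}'
$ kubectl get secret/my-cluster-name-ssl-internal -o jsonpath='{.data.tls\.crt}' | base64 --decode | openssl x509 -noout -subject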
The CR is still “initializing”; the operator stopped logging about 30 minutes ago, ending with:
# ----------- snip -------
2023-05-16T15:42:18.044Z INFO initiating replset {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "2d69d797-e7d1-4259-abaf-5f2959474e15", "replset": "cfg", "pod": "my-cluster-name-cfg-0"}
2023-05-16T15:42:36.006Z INFO replset initialized {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "2d69d797-e7d1-4259-abaf-5f2959474e15", "replset": "cfg", "pod": "my-cluster-name-cfg-0"}
2023-05-16T15:42:46.518Z INFO initiating replset {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "2d69d797-e7d1-4259-abaf-5f2959474e15", "replset": "rs0", "pod": "my-cluster-name-rs0-0"}
2023-05-16T15:43:08.177Z INFO replset initialized {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "2d69d797-e7d1-4259-abaf-5f2959474e15", "replset": "rs0", "pod": "my-cluster-name-rs0-0"}
2023-05-16T15:43:10.113Z INFO Fixing member tags {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f6b45ebf-2c42-4e6b-95c2-3acc4086bef3", "replset": "cfg"}
2023-05-16T15:43:10.127Z INFO Adding new nodes {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f6b45ebf-2c42-4e6b-95c2-3acc4086bef3", "replset": "cfg"}
2023-05-16T15:43:10.149Z INFO Configuring member votes and priorities {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f6b45ebf-2c42-4e6b-95c2-3acc4086bef3", "replset": "cfg"}
2023-05-16T15:43:10.305Z INFO Fixing member tags {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f6b45ebf-2c42-4e6b-95c2-3acc4086bef3", "replset": "rs0"}
2023-05-16T15:43:10.312Z INFO Adding new nodes {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f6b45ebf-2c42-4e6b-95c2-3acc4086bef3", "replset": "rs0"}
2023-05-16T15:43:10.346Z INFO Configuring member votes and priorities {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f6b45ebf-2c42-4e6b-95c2-3acc4086bef3", "replset": "rs0"}
2023-05-16T15:43:15.091Z INFO Adding new nodes {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f40eceab-5fa9-4ef5-b09c-3d9927537d53", "replset": "cfg"}
2023-05-16T15:43:15.142Z INFO Configuring member votes and priorities {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f40eceab-5fa9-4ef5-b09c-3d9927537d53", "replset": "cfg"}
2023-05-16T15:43:15.287Z INFO Adding new nodes {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f40eceab-5fa9-4ef5-b09c-3d9927537d53", "replset": "rs0"}
2023-05-16T15:43:15.326Z INFO Configuring member votes and priorities {"controller": "psmdb-controller", "object": {"name":"my-cluster-name","namespace":"default"}, "namespace": "default", "name": "my-cluster-name", "reconcileID": "f40eceab-5fa9-4ef5-b09c-3d9927537d53", "replset": "rs0"}
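The stalled state is also visible on the CR itself; the current state and the most recent condition can be pulled directly (psmdb is the short name of the CRD):

$ kubectl get psmdb/my-cluster-name -o jsonpath='{.status.state}'
$ kubectl get psmdb/my-cluster-name -o jsonpath='{.status.conditions[-1:]}'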
my-cluster-name-mongos-0 keeps restarting with:
Readiness probe failed: command "/opt/percona/mongodb-healthcheck k8s readiness --component mongos --ssl --sslInsecure --sslCAFile /etc/mongodb-ssl/ca.crt --sslPEMKeyFile /tmp/tls.pem" timed out
Checking from within the pod:
[mongodb@my-cluster-name-mongos-0 db]$ mongosh --tls --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tlsAllowInvalidCertificates --host 127.0.0.1
Current Mongosh Log ID: 6463aa65affb384ac3f470d4
Connecting to: mongodb://127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&tls=true&tlsCertificateKeyFile=%2Ftmp%2Ftls.pem&tlsCAFile=%2Fetc%2Fmongodb-ssl%2Fca.crt&tlsAllowInvalidCertificates=true&appName=mongosh+1.6.2
MongoNetworkError: connect ECONNREFUSED 127.0.0.1:27017
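ECONNREFUSED means nothing is listening on 27017 at all, i.e. the mongos process is down rather than rejecting TLS, so the log of the previous container instance should show why it exited (assuming the container is named mongos):

$ kubectl logs my-cluster-name-mongos-0 -c mongos --previous --tail=50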
my-cluster-name-rs0-1 keeps restarting with this log:
{"t":{"$date":"2023-05-16T16:17:22.927+00:00"},"s":"I", "c":"ACCESS", "id":5286202, "ctx":"conn650","msg":"Different user name was supplied to saslSupportedMechs","attr":{"error":{"code":17,"codeName":"ProtocolError","errmsg":"Attempt to switch database target during SASL authentication from __system@local to @$external"}}}
{"t":{"$date":"2023-05-16T16:17:22.927+00:00"},"s":"I", "c":"ACCESS", "id":20428, "ctx":"conn650","msg":"Failed to authenticate","attr":{"client":"172.25.8.29:56498","mechanism":"MONGODB-X509","user":"CN=my-cluster-name","db":"$external","error":{"code":11,"codeName":"UserNotFound","errmsg":"Could not find user \"CN=my-cluster-name\" for db \"$external\""}}}
{"t":{"$date":"2023-05-16T16:17:22.928+00:00"},"s":"I", "c":"ACCESS", "id":5286202, "ctx":"conn650","msg":"Different user name was supplied to saslSupportedMechs","attr":{"error":{"code":17,"codeName":"ProtocolError","errmsg":"Attempt to switch database target during SASL authentication from __system@local to CN=my-cluster-name@$external"}}}
{"t":{"$date":"2023-05-16T16:17:22.928+00:00"},"s":"I", "c":"ACCESS", "id":20428, "ctx":"conn650","msg":"Failed to authenticate","attr":{"client":"172.25.8.29:56498","mechanism":"MONGODB-X509","user":"CN=my-cluster-name","db":"$external","error":{"code":11,"codeName":"UserNotFound","errmsg":"Could not find user \"CN=my-cluster-name\" for db \"$external\""}}}
{"t":{"$date":"2023-05-16T16:17:22.929+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn650","msg":"Connection ended","attr":{"remote":"172.25.8.29:56498","uuid":"48f8a857-383f-43eb-9ef6-c384617e92fd","connectionId":650,"connectionCount":4}}
{"t":{"$date":"2023-05-16T16:17:22.930+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"172.25.8.29:56508","uuid":"5bfca55f-e03c-4929-a8db-4d4efb3f5fe3","connectionId":651,"connectionCount":5}}
{"t":{"$date":"2023-05-16T16:17:22.933+00:00"},"s":"W", "c":"NETWORK", "id":23222, "ctx":"conn651","msg":"Peer certificate expiration information","attr":{"peerSubjectName":"CN=my-cluster-name","daysDays":6}}
{"t":{"$date":"2023-05-16T16:17:22.933+00:00"},"s":"W", "c":"NETWORK", "id":23236, "ctx":"conn651","msg":"Client connecting with server's own TLS certificate"}
{"t":{"$date":"2023-05-16T16:17:22.934+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn651","msg":"client metadata","attr":{"remote":"172.25.8.29:56508","client":"conn651","doc":{"driver":{"name":"NetworkInterfaceTL-ReplNetwork","version":"6.0.4-3"},"os":{"type":"Linux","name":"Oracle Linux Server release 8.7","architecture":"x86_64","version":"Kernel 5.4.0-139-generic"}}}}
{"t":{"$date":"2023-05-16T16:17:22.934+00:00"},"s":"I", "c":"ACCESS", "id":5286202, "ctx":"conn651","msg":"Different user name was supplied to saslSupportedMechs","attr":{"error":{"code":17,"codeName":"ProtocolError","errmsg":"Attempt to switch database target during SASL authentication from __system@local to @$external"}}}
{"t":{"$date":"2023-05-16T16:17:22.934+00:00"},"s":"I", "c":"ACCESS", "id":20428, "ctx":"conn651","msg":"Failed to authenticate","attr":{"client":"172.25.8.29:56508","mechanism":"MONGODB-X509","user":"CN=my-cluster-name","db":"$external","error":{"code":11,"codeName":"UserNotFound","errmsg":"Could not find user \"CN=my-cluster-name\" for db \"$external\""}}}
{"t":{"$date":"2023-05-16T16:17:22.934+00:00"},"s":"I", "c":"ACCESS", "id":5286202, "ctx":"conn651","msg":"Different user name was supplied to saslSupportedMechs","attr":{"error":{"code":17,"codeName":"ProtocolError","errmsg":"Attempt to switch database target during SASL authentication from __system@local to CN=my-cluster-name@$external"}}}
The TLS secrets for the cluster have been created and are valid:
$ kubectl --kubeconfig ~/.kube/k08 get secret/my-cluster-name-ssl-internal -o jsonpath='{.data.tls\.crt}' | base64 --decode | openssl x509 -noout -dates
notBefore=May 16 15:38:09 2023 GMT
notAfter=May 23 15:39:09 2023 GMT
$ kubectl --kubeconfig ~/.kube/k08 get secret/my-cluster-name-ssl -o jsonpath='{.data.tls\.crt}' | base64 --decode | openssl x509 -noout -dates
notBefore=May 16 15:38:09 2023 GMT
notAfter=May 23 15:39:09 2023 GMT
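The same one-liners can dump subject and SANs, which matter more here than the dates (the -ext flag needs OpenSSL 1.1.1 or newer):

$ kubectl --kubeconfig ~/.kube/k08 get secret/my-cluster-name-ssl-internal -o jsonpath='{.data.tls\.crt}' | base64 --decode | openssl x509 -noout -subject -ext subjectAltName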
$ grep -v '#' cr.yaml
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDB
metadata:
  name: my-cluster-name
  finalizers:
    - delete-psmdb-pods-in-order
    - delete-psmdb-pvc
spec:
  crVersion: 1.15.0
  image: percona/percona-server-mongodb:6.0.4-3
  imagePullPolicy: Always
  allowUnsafeConfigurations: false
  updateStrategy: SmartUpdate
  upgradeOptions:
    versionServiceEndpoint: https://check.percona.com
    apply: disabled
    schedule: "0 2 * * *"
    setFCV: false
  secrets:
    users: my-cluster-name-secrets
    encryptionKey: my-cluster-name-mongodb-encryption-key
  pmm:
    enabled: false
    image: percona/pmm-client:2.35.0
    serverHost: monitoring-service
  replsets:
    - name: rs0
      size: 3
      configuration: |
        net:
          tls:
            mode: preferTLS
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      podDisruptionBudget:
        maxUnavailable: 1
      expose:
        enabled: false
        exposeType: LoadBalancer
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      volumeSpec:
        persistentVolumeClaim:
          resources:
            requests:
              storage: 3Gi
      nonvoting:
        enabled: false
        size: 3
        configuration: |
          net:
            tls:
              mode: preferTLS
        affinity:
          antiAffinityTopologyKey: "kubernetes.io/hostname"
        podDisruptionBudget:
          maxUnavailable: 1
        resources:
          limits:
            cpu: "300m"
            memory: "0.5G"
          requests:
            cpu: "300m"
            memory: "0.5G"
        volumeSpec:
          persistentVolumeClaim:
            resources:
              requests:
                storage: 3Gi
      arbiter:
        enabled: false
        size: 1
        affinity:
          antiAffinityTopologyKey: "kubernetes.io/hostname"
  sharding:
    enabled: true
    configsvrReplSet:
      size: 3
      configuration: |
        net:
          tls:
            mode: preferTLS
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      podDisruptionBudget:
        maxUnavailable: 1
      expose:
        enabled: false
        exposeType: LoadBalancer
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      volumeSpec:
        persistentVolumeClaim:
          resources:
            requests:
              storage: 3Gi
    mongos:
      size: 3
      configuration: |
        net:
          tls:
            mode: preferTLS
            allowConnectionsWithoutCertificates: true
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      podDisruptionBudget:
        maxUnavailable: 1
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      expose:
        exposeType: ClusterIP
  backup:
    enabled: true
    image: perconalab/percona-server-mongodb-operator:main-backup
    serviceAccountName: percona-server-mongodb-operator
    pitr:
      enabled: false
      compressionType: gzip
      compressionLevel: 6