Hi folks,
over the last few days (though I suspect it has been going on for much longer) I have been seeing a lot of log entries of this type:
mongod {"t":{"$date":"2025-07-03T17:59:31.341+00:00"},"s":"I", "c":"NETWORK", "id":6723804, "ctx":"conn1313048","msg":"Ingress TLS handshake complete","attr":{"durationMillis":2}} │
│ mongod {"t":{"$date":"2025-07-03T17:59:31.341+00:00"},"s":"W", "c":"NETWORK", "id":23235, "ctx":"conn1313048","msg":"SSL peer certificate validation failed","attr":{"reason":"certificate signature failure"}} │
│ mongod {"t":{"$date":"2025-07-03T17:59:31.344+00:00"},"s":"I", "c":"NETWORK", "id":6723804, "ctx":"conn1313049","msg":"Ingress TLS handshake complete","attr":{"durationMillis":2}} │
│ mongod {"t":{"$date":"2025-07-03T17:59:31.344+00:00"},"s":"W", "c":"NETWORK", "id":23235, "ctx":"conn1313049","msg":"SSL peer certificate validation failed","attr":{"reason":"certificate signature failure"}} │
│ mongod {"t":{"$date":"2025-07-03T17:59:31.351+00:00"},"s":"I", "c":"-", "id":20883, "ctx":"conn1313048","msg":"Interrupted operation as its client disconnected","attr":{"opId":1172105217}}
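For reference, this is how I inspect the certificates the operator generated (the secret names below follow the operator's default <cluster-name>-ssl / <cluster-name>-ssl-internal convention, and the namespace elog just stands in for mine):

# print subject, issuer and validity of the cluster's server certificate
kubectl get secret elog-plus-cluster-ssl -n elog \
  -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -subject -issuer -dates

# same for the internal (member-to-member) certificate
kubectl get secret elog-plus-cluster-ssl-internal -n elog \
  -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -noout -subject -issuer -dates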
The database keeps working fine, but I would like to fix all of these warnings. Below is my cluster deployment file:
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDB
metadata:
  name: elog-plus-cluster
  finalizers:
    - percona.com/delete-psmdb-pods-in-order
    # - percona.com/delete-psmdb-pvc
    # - percona.com/delete-pitr-chunks
spec:
  crVersion: 1.20.1
  image: percona/percona-server-mongodb:8.0.8-3
  imagePullPolicy: Always
  # pause: true
  # unmanaged: false
  # enableVolumeExpansion: false
  allowUnsafeConfigurations: false
  tls:
    mode: preferTLS
    # 90 days in hours
    # certValidityDuration: 2160h
    allowInvalidCertificates: true
    # issuerConf:
    #   name: special-selfsigned-issuer
    #   kind: ClusterIssuer
    #   group: cert-manager.io
  # imagePullSecrets:
  #   - name: private-registry-credentials
  # initImage: percona/percona-server-mongodb-operator:1.20.1
  # initContainerSecurityContext: {}
  # unsafeFlags:
  #   tls: false
  #   replsetSize: false
  #   mongosSize: false
  #   terminationGracePeriod: false
  #   backupIfUnhealthy: false
  updateStrategy: RollingUpdate
  upgradeOptions:
    versionServiceEndpoint: https://check.percona.com
    apply: Disabled
    schedule: "0 2 * * *"
    setFCV: true
  secrets:
    users: mongodb-secret
    encryptionKey: elog-plus-cluster-encryption-key
    # vault: my-cluster-name-vault
  pmm:
    enabled: false
    image: percona/pmm-client:2.43.2
    serverHost: monitoring-service
  # ssl: my-cluster-name-ssl
  # sslInternal: my-cluster-name-ssl-internal
  # keyFile: my-cluster-name-mongodb-keyfile
  # vault: my-cluster-name-vault
  # ldapSecret: my-ldap-secret
  # sse: my-cluster-name-sse
  replsets:
    - name: rs0
      size: 3
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9216'
        prometheus.io/path: '/metrics'
      sidecars:
        - image: percona/mongodb_exporter:2.37.0
          name: mongodb-exporter
          args: ["--compatible-mode", "--discovering-mode", "--collector.diagnosticdata", "--collector.replicasetstatus", "--collector.dbstats", "--collector.topmetrics", "--collector.indexstats", "--mongodb.uri=$(MONGODB_URI)", "--web.listen-address=$(POD_IP):9216"]
          env:
            - name: EXPORTER_USER
              valueFrom:
                secretKeyRef:
                  name: mongodb-secret
                  key: MONGODB_CLUSTER_MONITOR_USER
            - name: EXPORTER_PASS
              valueFrom:
                secretKeyRef:
                  name: mongodb-secret
                  key: MONGODB_CLUSTER_MONITOR_PASSWORD
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: MONGODB_URI
              value: "mongodb://$(EXPORTER_USER):$(EXPORTER_PASS)@$(POD_IP)/?replicaSet=rs0&authMechanism=SCRAM-SHA-256"
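              # note: this URI has no TLS options, so with mode preferTLS the
              # exporter should connect without TLS at all; I am not sure if
              # these exporter connections are the ones logging the id:23235
              # warnings or if it is some other client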
      podDisruptionBudget:
        maxUnavailable: 1
        # minAvailable: 0
      expose:
        enabled: false
        exposeType: ClusterIP
      resources:
        limits:
          cpu: "4"
          memory: "4G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      volumeSpec:
        persistentVolumeClaim:
          resources:
            requests:
              storage: 100Gi
      nonvoting:
        enabled: false
        size: 3
        affinity:
          antiAffinityTopologyKey: "kubernetes.io/hostname"
        podDisruptionBudget:
          maxUnavailable: 1
          # minAvailable: 0
        resources:
          limits:
            cpu: "300m"
            memory: "0.5G"
          requests:
            cpu: "300m"
            memory: "0.5G"
        volumeSpec:
          persistentVolumeClaim:
            resources:
              requests:
                storage: 10Gi
      arbiter:
        enabled: false
        size: 1
        affinity:
          antiAffinityTopologyKey: "kubernetes.io/hostname"
  sharding:
    enabled: false
    configsvrReplSet:
      size: 3
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      podDisruptionBudget:
        maxUnavailable: 1
      expose:
        enabled: false
        exposeType: ClusterIP
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      volumeSpec:
        persistentVolumeClaim:
          resources:
            requests:
              storage: 3Gi
    mongos:
      size: 3
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      podDisruptionBudget:
        maxUnavailable: 1
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      expose:
        type: ClusterIP
  backup:
    enabled: true
    image: percona/percona-backup-mongodb:2.9.1
    resources:
      limits:
        cpu: "1000m"
        memory: "4G"
      requests:
        cpu: "300m"
        memory: "1G"
    storages:
      s3-tid:
        type: s3
        s3:
          bucket: accel-webapp-dev-elog-backup
          credentialsSecret: s3-backup-secret
          region: us-west-1
          prefix: ""
          uploadPartSize: 10485760
          maxUploadParts: 10000
          storageClass: STANDARD
          endpointUrl: https://s3dfrgw.slac.stanford.edu
          retryer:
            numMaxRetries: 3
            minRetryDelay: 30ms
            maxRetryDelay: 5m
    pitr:
      enabled: true
      oplogSpanMin: 20
      compressionType: gzip
      compressionLevel: 6
    tasks:
      - name: daily-s3-tid
        enabled: true
        schedule: "0 */1 * * *"
        keep: 3
        storageName: s3-tid
        compressionType: gzip
        compressionLevel: 6
      # - name: weekly-s3-us-west
      #   enabled: false
      #   schedule: "0 0 * * 0"
      #   keep: 5
      #   storageName: s3-us-west
      #   compressionType: gzip
      #   compressionLevel: 6
      # - name: weekly-s3-us-west-physical
      #   enabled: false
      #   schedule: "0 5 * * 0"
      #   keep: 5
      #   type: physical
      #   storageName: s3-us-west
      #   compressionType: gzip
      #   compressionLevel: 6
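If I read the warning correctly, mongod cannot validate the certificate some client presents and only keeps the connection open because of allowInvalidCertificates: true. To narrow down which client it is, my plan is to redo the handshake from a debug pod using the certificate and CA from the operator-generated secret; here is a sketch (service and secret names follow the operator's defaults, and elog again stands in for my namespace):

# extract the certificate, key and CA from the operator-generated secret
kubectl get secret elog-plus-cluster-ssl -n elog -o jsonpath='{.data.tls\.crt}' | base64 -d > /tmp/tls.pem
kubectl get secret elog-plus-cluster-ssl -n elog -o jsonpath='{.data.tls\.key}' | base64 -d >> /tmp/tls.pem
kubectl get secret elog-plus-cluster-ssl -n elog -o jsonpath='{.data.ca\.crt}' | base64 -d > /tmp/ca.crt

# connect while presenting that certificate; if the CA mongod trusts no longer
# matches, it should log the same id:23235 warning for this connection
mongosh "mongodb://elog-plus-cluster-rs0.elog.svc.cluster.local/?replicaSet=rs0" \
  --tls --tlsCAFile /tmp/ca.crt --tlsCertificateKeyFile /tmp/tls.pem \
  --eval 'db.runCommand({ ping: 1 })'

Is that a reasonable way to track this down, or is there a better one?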
Thanks in advance!