Sergey,
This is how we started off -
helm install mongodb-clu1 percona/psmdb-db --version 1.13.0 --namespace mongodb --set "replsets[0].volumeSpec.pvc.storageClassName=local-hostpath-mongo-slow-sc" --set "replsets[0].name=rs0" --set "replsets[0].size=3" --set "replsets[0].volumeSpec.pvc.resources.requests.storage=2000Gi" --set backup.enabled=true --set sharding.enabled=false --set pmm.enabled=true
This is our production install command, where we chose the slower, high-capacity HDD-based storage class.
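For reference, the same --set flags expressed as a values file would look roughly like this (equivalent keys only; everything else left at chart defaults):

replsets:
- name: rs0
  size: 3
  volumeSpec:
    pvc:
      storageClassName: local-hostpath-mongo-slow-sc
      resources:
        requests:
          storage: 2000Gi
backup:
  enabled: true
sharding:
  enabled: false
pmm:
  enabled: true

On the test cluster, this developed as follows: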
helm install mongodb-clu1 percona/psmdb-db --version 1.14.0 --namespace mongodb -f ms-mdb-values.yaml
root@kube-1:~# more ms-mdb-values.yaml
allowUnsafeConfigurations: true
sharding:
  enabled: true
  mongos:
    size: 3
  configrs:
    volumeSpec:
      pvc:
        storageClassName: local-hostpath-mongo-prod-sc
replsets:
- name: rs0
  size: 3
  affinity:
    antiAffinityTopologyKey: kubernetes.io/hostname
  volumeSpec:
    pvc:
      storageClassName: local-hostpath-mongo-prod-sc
      resources:
        requests:
          storage: 5Gi
backup:
  enabled: true
pmm:
  enabled: false
root@kube-1:~# cat local-hostpath-mongo-prod-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-hostpath-mongo-prod-sc
  annotations:
    openebs.io/cas-type: local
    cas.openebs.io/config: |
      - name: StorageType
        value: hostpath
      - name: BasePath
        value: /mnt/mongo-cluster-ssd
provisioner: openebs.io/local
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
  - key: kubernetes.io/hostname
    values:
    - kube-1
    - kube-2
    - kube-3
The filesystem mounted at /mnt/mongo-cluster-ssd is the 20 GB device mentioned previously. It was in this values file that I had assistance getting the configrs section correct.
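(A quick sanity check that the BasePath really sits on that device:)

df -h /mnt/mongo-cluster-ssd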
This produced a working sharded cluster, so the next step was to switch the storage class.
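For anyone following along, the quickest confirmation of that first healthy state is to watch the custom resource - psmdb is the short name the operator registers:

kubectl -n mongodb get psmdb
kubectl -n mongodb get pods -o wide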
There are two sets of values files, just to pick up a slight change in the storage class definition (a possible OpenEBS issue, which is open with them): the reclaim policy was removed. The values files themselves differ only in the storage class they reference - see the diff command after them.
ms-mdb-lvm-values.yaml
allowUnsafeConfigurations: true
sharding:
  enabled: true
  mongos:
    size: 3
  configrs:
    volumeSpec:
      pvc:
        storageClassName: openebs-lvmpv
replsets:
- name: rs0
  size: 3
  affinity:
    antiAffinityTopologyKey: kubernetes.io/hostname
  volumeSpec:
    pvc:
      storageClassName: openebs-lvmpv
      resources:
        requests:
          storage: 5Gi
backup:
  enabled: true
pmm:
  enabled: false
ms-mdb-lvm-int-values.yaml
allowUnsafeConfigurations: true
sharding:
  enabled: true
  mongos:
    size: 3
  configrs:
    volumeSpec:
      pvc:
        storageClassName: test-immediate-lvm
replsets:
- name: rs0
  size: 3
  affinity:
    antiAffinityTopologyKey: kubernetes.io/hostname
  volumeSpec:
    pvc:
      storageClassName: test-immediate-lvm
      resources:
        requests:
          storage: 5Gi
backup:
  enabled: true
pmm:
  enabled: false
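The storage class name is the only difference between the two values files; a diff confirms it:

diff ms-mdb-lvm-values.yaml ms-mdb-lvm-int-values.yaml   # only the two storageClassName lines should differ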
storage classes
lvm-localpv-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-lvmpv
allowVolumeExpansion: true
parameters:
  storage: "lvm"
  volgroup: "lvmvg"
provisioner: local.csi.openebs.io
reclaimPolicy: Retain
allowedTopologies:
lvm-localpv-int-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: test-immediate-lvm
allowVolumeExpansion: true
parameters:
  storage: "lvm"
  volgroup: "lvmvg"
provisioner: local.csi.openebs.io
allowedTopologies:
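For completeness, this is how they get applied and checked (kubectl get sc shows the provisioner and reclaim policy at a glance):

kubectl apply -f lvm-localpv-sc.yaml
kubectl apply -f lvm-localpv-int-sc.yaml
kubectl get sc openebs-lvmpv test-immediate-lvm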
root@kube-1:~# pvs
  PV         VG    Fmt  Attr PSize   PFree
  /dev/sdc   lvmvg lvm2 a--  <20.00g <20.00g
root@kube-1:~# vgs
  VG    #PV #LV #SN Attr   VSize   VFree
  lvmvg   1   0   0 wz--n- <20.00g <20.00g
root@kube-1:~# lvs
root@kube-1:~#
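An empty lvs output means the CSI driver never carved a logical volume out of the VG, which lines up with the missing PVCs further down. One more check that may be worth adding (the grep assumes the default lvm-localpv pod names, which may differ here):

kubectl get pods -A | grep -i lvm   # the lvm-localpv controller and node agent should be Running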
So, to the current install command:
helm install mongodb-clu1 percona/psmdb-db --version 1.15.0 --namespace mongodb -f ms-mdb-lvm-int-values.yaml
The operator has been updated to the latest version; the result is as follows:
root@kube-1:~# helm install mongodb-clu1 percona/psmdb-db --version 1.15.0 --namespace mongodb -f ms-mdb-lvm-int-values.yaml
NAME: mongodb-clu1
LAST DEPLOYED: Wed Oct 18 13:24:44 2023
NAMESPACE: mongodb
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
[Percona ASCII-art banner: "Percona k8s Squad" - Join @ percona.com/k8s]
Join Percona Squad! Get early access to new product features, invite-only "ask me anything" sessions with Percona Kubernetes experts, and monthly swag raffles.
Percona Server for MongoDB cluster is deployed now. Get the username and password:
ADMIN_USER=$(kubectl -n mongodb get secrets mongodb-clu1-psmdb-db-secrets -o jsonpath="{.data.MONGODB_USER_ADMIN_USER}" | base64 --decode)
ADMIN_PASSWORD=$(kubectl -n mongodb get secrets mongodb-clu1-psmdb-db-secrets -o jsonpath="{.data.MONGODB_USER_ADMIN_PASSWORD}" | base64 --decode)
Connect to the cluster:
kubectl run -i --rm --tty percona-client --image=percona/percona-server-mongodb:5.0 --restart=Never -- mongo "mongodb://${ADMIN_USER}:${ADMIN_PASSWORD}@mongodb-clu1-psmdb-db-mongos.mongodb.svc.cluster.local/admin?ssl=false"
root@kube-1:~#
root@kube-1:~# kubectl -n mongodb get all
No resources found in mongodb namespace.
root@kube-1:~# kubectl -n mongodb get pvc
No resources found in mongodb namespace.
root@kube-1:~# kubectl get pvc -A
No resources found
Not even a Pending PVC this time, so I can't even describe the state of the pods - hope that helps a little.
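In case it speeds things up, these are the checks I can run next - the custom resource name and operator deployment name below are my guesses from the release name, so correct me if they're off:

kubectl -n mongodb get psmdb
kubectl -n mongodb describe psmdb mongodb-clu1-psmdb-db
kubectl -n mongodb logs deploy/psmdb-operator --tail=100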
Mike