Create the local volume StorageClass
# Format the disk as xfs
mkfs.xfs /dev/vdb
# Create the mount directory
mkdir -p /mnt/fast-disks/prometheus-k8s-data
# Add a persistent mount entry to fstab
echo '/dev/vdb /mnt/fast-disks/prometheus-k8s-data auto defaults,nofail,discard,comment=cloudconfig 0 2' >> /etc/fstab
# Mount everything listed in fstab
mount -a
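As a quick sanity check (an extra step, not part of the original procedure), confirm the filesystem is actually mounted at the discovery path before expecting a PV to appear:

# Confirm the new filesystem is mounted at the expected path
df -h /mnt/fast-disks/prometheus-k8s-data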
After mounting, log in to the master1 host and the local volume PVs are visible. Because disks from some cloud vendors are sized in base-1000 units, the PV shows up as 199Gi after mounting.
# oc get pv
local-pv-334f56e5 199Gi RWO ...
local-pv-4aaf7cd8 199Gi RWO ...
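These PVs are created automatically by the local-storage provisioner once it discovers the mounted directory. They bind to a StorageClass named fast-disks, which the monitoring config below references. A minimal sketch of that StorageClass, assuming the standard no-provisioner local volume setup:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-disks
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer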
Configure the ConfigMap cluster-monitoring-config
apiVersion: v1
data:
  config.yaml: |
    prometheusOperator:
      baseImage: quay.io/coreos/prometheus-operator
      prometheusConfigReloaderBaseImage: quay.io/coreos/prometheus-config-reloader
      configReloaderBaseImage: quay.io/coreos/configmap-reload
      nodeSelector:
        node-role.kubernetes.io/infra: "true"
    prometheusK8s:
      baseImage: openshift/prometheus
      nodeSelector:
        node-role.kubernetes.io/infra: "true"
      externalLabels:
        cluster: master.offline-okd.com
      volumeClaimTemplate:
        spec:
          accessModes:
          - ReadWriteOnce
          storageClassName: fast-disks
          resources:
            requests:
              storage: 199Gi
    alertmanagerMain:
      baseImage: prometheus/alertmanager
      nodeSelector:
        node-role.kubernetes.io/infra: "true"
      volumeClaimTemplate:
        spec:
          accessModes:
          - ReadWriteOnce
          storageClassName: custom-nfs-storage
          resources:
            requests:
              storage: 20Gi
    nodeExporter:
      baseImage: prometheus/node-exporter
    grafana:
      baseImage: grafana/grafana
      nodeSelector:
        node-role.kubernetes.io/infra: "true"
    kubeStateMetrics:
      baseImage: offlineregistry.offline-okd.com:5000/coreos/kube-state-metrics
      nodeSelector:
        node-role.kubernetes.io/infra: "true"
    kubeRbacProxy:
      baseImage: offlineregistry.offline-okd.com:5000/coreos/kube-rbac-proxy
    auth:
      baseImage: openshift/oauth-proxy
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
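To put the configuration into effect, apply the ConfigMap and watch the monitoring pods get rescheduled; a hedged example, assuming the YAML above is saved as cluster-monitoring-config.yaml:

# Apply the monitoring configuration
oc apply -f cluster-monitoring-config.yaml
# Verify the prometheus-k8s PVCs bind to the local PVs
oc -n openshift-monitoring get pvc
# Confirm the monitoring pods land on the infra nodes
oc -n openshift-monitoring get pods -o wide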