Setting up a Prometheus monitoring stack in Kubernetes, utilising helm.

Pre-requisites

Creating a Namespace

.
├── application
├── cluster
│ └── namespace.yaml
└── monitoring

Running in changes

$ cd cluster
$ kubectl apply -f *.yaml
namespace/monitoring created
$ kubectl get namespaces
NAME STATUS AGE
default Active 355d
kube-node-lease Active 355d
kube-public Active 355d
kube-system Active 355d
monitoring Active 65s

Creating a Helm Template

$ helm create prometheus
.
├── Chart.yaml
├── charts
├── templates
│ ├── NOTES.txt
│ ├── _helpers.tpl
│ ├── deployment.yaml
│ ├── hpa.yaml
│ ├── ingress.yaml
│ ├── service.yaml
│ ├── serviceaccount.yaml
│ └── tests
│ └── test-connection.yaml
└── values.yaml
$ helm upgrade \
prometheus . \
-n monitoring \
--install \
--atomic \
--debug="true" \
--dry-run="false"
history.go:53: [debug] getting history for release prometheus
Release "prometheus" does not exist. Installing it now.
install.go:172: [debug] Original chart version: ""
install.go:189: [debug] CHART PATH: /Users/craiggoddenpayne/Dropbox/Work/BeardyDigital/code/k8s-metrics-stack/prometheus
client.go:109: [debug] creating 4 resource(s)
wait.go:53: [debug] beginning wait for 4 resources with timeout of 5m0s
wait.go:225: [debug] Deployment is not ready: monitoring/prometheus. 0 out of 1 expected pods are ready
NAME: prometheus
LAST DEPLOYED: Sat Oct 31 12:37:45 2020
NAMESPACE: monitoring
STATUS: deployed
REVISION: 1
USER-SUPPLIED VALUES:
{}
COMPUTED VALUES:
affinity: {}
autoscaling:
enabled: true
maxReplicas: 3
minReplicas: 1
targetCPUUtilizationPercentage: 80
fullnameOverride: ""
image:
repository: nginx
imagePullSecrets: []
ingress:
annotations: {}
enabled: false
hosts:
- host: chart-example.local
paths: []
tls: []
nameOverride: ""
nodeSelector: {}
podAnnotations: {}
podSecurityContext: {}
replicaCount: 1
resources: {}
securityContext: null
service:
port: 80
type: ClusterIP
serviceAccount:
annotations: {}
create: true
name: ""
tolerations: []
HOOKS:
---
# Source: prometheus/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "prometheus-test-connection"
labels:
helm.sh/chart: prometheus-0.1.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.16.0"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['prometheus:80']
restartPolicy: Never
MANIFEST:
---
# Source: prometheus/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
labels:
helm.sh/chart: prometheus-0.1.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.16.0"
app.kubernetes.io/managed-by: Helm
---
# Source: prometheus/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: prometheus
labels:
helm.sh/chart: prometheus-0.1.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.16.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
---
# Source: prometheus/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
labels:
helm.sh/chart: prometheus-0.1.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.16.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
template:
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
spec:
serviceAccountName: prometheus
securityContext:
{}
containers:
- name: prometheus
securityContext:
null
image: "nginx:1.16.0"
imagePullPolicy:
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{}
---
# Source: prometheus/templates/hpa.yaml
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: prometheus
labels:
helm.sh/chart: prometheus-0.1.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "1.16.0"
app.kubernetes.io/managed-by: Helm
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: prometheus
minReplicas: 1
maxReplicas: 3
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: 80
NOTES:
1. Get the application URL by running these commands:
export POD_NAME=$(kubectl get pods --namespace monitoring -l "app.kubernetes.io/name=prometheus,app.kubernetes.io/instance=prometheus" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace monitoring $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace monitoring port-forward $POD_NAME 8080:$CONTAINER_PORT
$ kubectl get pod -n monitoring
NAME                          READY   STATUS    RESTARTS   AGE
prometheus-54d964dbb7-qhztr 1/1 Running 0 3m33s
kubectl port-forward -n monitoring pods/prometheus-54d964dbb7-qhztr 8080:80
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
Handling connection for 8080
Handling connection for 8080

Updating the Helm Chart to setup Prometheus

image:
repository: docker.io/prom/prometheus
appVersion: v2.20.0
service:
type: ClusterIP
port: 9090
ports:
- name: http
containerPort: 9090
protocol: TCP
$ helm upgrade \
prometheus . \
-n monitoring \
--install \
--atomic \
--debug="true" \
--dry-run="false"
history.go:53: [debug] getting history for release prometheus
Release "prometheus" does not exist. Installing it now.
install.go:172: [debug] Original chart version: ""
install.go:189: [debug] CHART PATH: /Users/craiggoddenpayne/Dropbox/Work/BeardyDigital/code/k8s-metrics-stack/prometheus
client.go:109: [debug] creating 4 resource(s)
wait.go:53: [debug] beginning wait for 4 resources with timeout of 5m0s
wait.go:225: [debug] Deployment is not ready: monitoring/prometheus. 0 out of 1 expected pods are ready
NAME: prometheus
LAST DEPLOYED: Sat Oct 31 14:14:20 2020
NAMESPACE: monitoring
STATUS: deployed
REVISION: 1
USER-SUPPLIED VALUES:
{}
COMPUTED VALUES:
affinity: {}
autoscaling:
enabled: true
maxReplicas: 3
minReplicas: 1
targetCPUUtilizationPercentage: 80
fullnameOverride: ""
image:
repository: docker.io/prom/prometheus
imagePullSecrets: []
ingress:
annotations: {}
enabled: false
hosts:
- host: chart-example.local
paths: []
tls: []
nameOverride: ""
nodeSelector: {}
podAnnotations: {}
podSecurityContext: {}
replicaCount: 1
resources: {}
securityContext: null
service:
port: 9090
type: ClusterIP
serviceAccount:
annotations: {}
create: true
name: ""
tolerations: []
HOOKS:
---
# Source: prometheus/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "prometheus-test-connection"
labels:
helm.sh/chart: prometheus-1.0.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v2.20.0"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['prometheus:9090']
restartPolicy: Never
MANIFEST:
---
# Source: prometheus/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
labels:
helm.sh/chart: prometheus-1.0.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v2.20.0"
app.kubernetes.io/managed-by: Helm
---
# Source: prometheus/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: prometheus
labels:
helm.sh/chart: prometheus-1.0.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v2.20.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 9090
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
---
# Source: prometheus/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
labels:
helm.sh/chart: prometheus-1.0.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v2.20.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
template:
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
spec:
serviceAccountName: prometheus
securityContext:
{}
containers:
- name: prometheus
securityContext:
null
image: "docker.io/prom/prometheus:v2.20.0"
imagePullPolicy:
ports:
- name: http
containerPort: 9090
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{}
---
# Source: prometheus/templates/hpa.yaml
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: prometheus
labels:
helm.sh/chart: prometheus-1.0.0
app.kubernetes.io/name: prometheus
app.kubernetes.io/instance: prometheus
app.kubernetes.io/version: "v2.20.0"
app.kubernetes.io/managed-by: Helm
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: prometheus
minReplicas: 1
maxReplicas: 3
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: 80
$ kubectl get pods -n monitoring
NAME READY STATUS RESTARTS AGE
prometheus-8644c59db5-w6c8k 1/1 Running 0 106s
$ kubectl port-forward -n monitoring pods/prometheus-8644c59db5-w6c8k 8080:9090
Forwarding from 127.0.0.1:8080 -> 9090
Forwarding from [::1]:8080 -> 9090

Reconfiguring Prometheus

apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-server-config
data:
prometheus.yml: |
global:
scrape_interval: 15s
evaluation_interval: 15s

scrape_configs:
- job_name: prometheus
metrics_path: /metrics
scheme: http

static_configs:
- targets: ['localhost:9090']
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prometheus.fullname" . }}
labels:
{{- include "prometheus.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "prometheus.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prometheus.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: prometheus-config-volume
configMap:
name: prometheus-server-config

serviceAccountName: {{ include "prometheus.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
volumeMounts:
- name: prometheus-config-volume
mountPath: /etc/prometheus/prometheus.yml
subPath: prometheus.yml

securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 9090
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
$ kubectl exec -n monitoring -it prometheus-859f599667-2vvts -- /bin/sh
/prometheus $ cd /etc/prometheus/
/etc/prometheus $ cat prometheus.yml
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: prometheus
metrics_path: /metrics
scheme: http
static_configs:
- targets: ['localhost:9090']

Updating the Network Policy

{{- if .Values.ingress.enabled -}}
{{- $fullName := include "prometheus.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "prometheus.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-all-egress
spec:
podSelector:
matchLabels:
name: prometheus
policyTypes:
- Ingress
- Egress
egress:
- {}
ingress:
- from:
- namespaceSelector:
matchLabels:
project: monitoring

Setting up an ingress controller

Technologist who enjoys writing and working with software and infra. I write up all the things I learn as I go along to share the knowledge! beardy.digital

Get the Medium app

A button that says 'Download on the App Store', and if clicked it will lead you to the iOS App store
A button that says 'Get it on, Google Play', and if clicked it will lead you to the Google Play store