rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rabbitmq
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rabbitmq
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get"]
  #- apiGroups: [""]
  #  resources: ["events"]
  #  verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rabbitmq
subjects:
  - kind: ServiceAccount
    name: rabbitmq
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rabbitmq
configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: rabbitmq-config
data:
  RMQ_ADMIN_USER: admin
  enabled_plugins: |
    [rabbitmq_peer_discovery_k8s, rabbitmq_management, rabbitmq_prometheus].
  rabbitmq.conf: |
    ## Clustering
    #cluster_formation.peer_discovery_backend = k8s
    cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
    cluster_formation.k8s.host = <Your Cluster host>
    cluster_formation.k8s.port = 443
    #cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
    cluster_formation.k8s.address_type = hostname
    cluster_formation.k8s.service_name = rabbitmq-headless
    cluster_partition_handling = autoheal
    #cluster_formation.k8s.hostname_suffix = .${CLUSTER_NAME}.${NAMESPACE}.svc.cluster.local
    #cluster_formation.node_cleanup.interval = 10
    #cluster_formation.node_cleanup.only_log_warning = true
    ## Queue master locator
    queue_master_locator = min-masters
    loopback_users.guest = false
    auth_mechanisms.1 = PLAIN
    auth_mechanisms.2 = AMQPLAIN
    ## Set the max memory available to RabbitMQ
    #vm_memory_high_watermark.absolute = 1GB
    vm_memory_high_watermark.absolute = 900MB
    ## Load the definitions file
    management.load_definitions = /etc/rabbitmq/definitions.json
    management.path_prefix = /mqadmin
definitions.json
{
  "users": [
    {
      "name": "proj_mq_dev",
      "password": "<PWD>",
      "tags": ""
    },
    {
      "name": "admin",
      "password": "<PWD>",
      "tags": "administrator"
    }
  ],
  "vhosts": [
    {
      "name": "/"
    }
  ],
  "policies": [
    {
      "vhost": "/",
      "name": "ha",
      "pattern": "",
      "apply-to": "all",
      "definition": {
        "ha-mode": "all",
        "ha-sync-batch-size": 256,
        "ha-sync-mode": "automatic"
      },
      "priority": 0
    }
  ],
  "permissions": [
    {
      "user": "proj_mq_dev",
      "vhost": "/",
      "configure": ".*",
      "write": ".*",
      "read": ".*"
    },
    {
      "user": "admin",
      "vhost": "/",
      "configure": ".*",
      "write": ".*",
      "read": ".*"
    }
  ],
  "queues": [
    {
      "name": "lookup-import.triggered.queue",
      "vhost": "/",
      "durable": true,
      "auto_delete": false,
      "arguments": {}
    }
  ],
  "exchanges": [
    {
      "name": "lob-proj-dx",
      "vhost": "/",
      "type": "direct",
      "durable": true,
      "auto_delete": false,
      "internal": false,
      "arguments": {}
    }
  ],
  "bindings": [
    {
      "source": "lob-proj-dx",
      "vhost": "/",
      "destination": "lookup-import.triggered.queue",
      "destination_type": "queue",
      "routing_key": "lookup-import.event.triggered",
      "arguments": {}
    }
  ]
}
secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: rabbitmq-secrets
type: Opaque
data:
  RMQ_ERLANG_COOKIE: Wm1GclpWOXdZWA==
  definitions.json: >-
    <Base 64 encoded definitions.json>
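Note that both values under data must be base64-encoded: the definitions.json placeholder can be produced with, for example, base64 -w0 definitions.json on Linux, or the whole Secret can be created directly with kubectl create secret generic rabbitmq-secrets --from-file=definitions.json --from-literal=RMQ_ERLANG_COOKIE=<cookie>, which handles the encoding for you.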
client-service-ci.yaml
kind: Service
apiVersion: v1
metadata:
  name: rabbitmq-client
  labels:
    app: rabbitmq
spec:
  type: ClusterIP
  ports:
    - name: http
      protocol: TCP
      port: 15672
      targetPort: management
    - name: prometheus
      protocol: TCP
      port: 15692
      targetPort: prometheus
    - name: amqp
      protocol: TCP
      port: 5672
      targetPort: amqp
  selector:
    app: rabbitmq
client-service-lb.yaml
kind: Service
apiVersion: v1
metadata:
  name: rabbitmq-client
  labels:
    app: rabbitmq
    type: LoadBalancer
spec:
  type: LoadBalancer
  sessionAffinity: None
  loadBalancerIP: <External IP Address>
  externalTrafficPolicy: Cluster
  ports:
    - name: http
      protocol: TCP
      port: 15672
      targetPort: management
    - name: prometheus
      protocol: TCP
      port: 15692
      targetPort: prometheus
    - name: amqp
      protocol: TCP
      port: 5672
      targetPort: amqp
  selector:
    app: rabbitmq
statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: rabbitmq
spec:
  selector:
    matchLabels:
      app: "rabbitmq"
  # Headless service that gives network identity to the RabbitMQ nodes and enables them to cluster.
  # serviceName is the name of the service that governs this StatefulSet. The service must exist
  # before the StatefulSet and is responsible for the network identity of the set: pods get DNS
  # hostnames of the form pod-specific-string.serviceName.default.svc.cluster.local, where
  # "pod-specific-string" is managed by the StatefulSet controller.
  # A sketch of this headless service is shown after this manifest.
  serviceName: rabbitmq-headless
  replicas: 1
  revisionHistoryLimit: 2
  volumeClaimTemplates:
    - metadata:
        name: rabbitmq-data
      spec:
        storageClassName: nas-thin
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: "6Gi"
  template:
    metadata:
      name: rabbitmq
      labels:
        app: rabbitmq
      annotations:
        prometheus.io/scrape: "true"
        #prometheus.io/port: "9090"
        prometheus.io/path: "/metrics"
    spec:
      initContainers:
        # Since Kubernetes 1.9.4, config maps are mounted as read-only volumes. Because the Docker
        # image also writes to its config file, the file must live on a read-write volume. This init
        # container copies the files from the read-only config map path to a read-write path.
        - name: "rabbitmq-config"
          image: busybox:1.32.0
          volumeMounts:
            - name: rabbitmq-config
              mountPath: /tmp/rabbitmq
            - name: rabbitmq-config-rw
              mountPath: /etc/rabbitmq
            - name: mq-secret-def
              mountPath: /tmp/rabbitsec
          command:
            - sh
            - -c
            # The appended newline is needed since the Docker image entrypoint script appends to the config file.
            - cp /tmp/rabbitmq/rabbitmq.conf /etc/rabbitmq/rabbitmq.conf && echo '' >> /etc/rabbitmq/rabbitmq.conf;
              cp /tmp/rabbitmq/enabled_plugins /etc/rabbitmq/enabled_plugins;
              cp /tmp/rabbitsec/definitions.json /etc/rabbitmq/definitions.json
          securityContext:
            runAsUser: 1001
            runAsGroup: 53134
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - KILL
                - MKNOD
                - SYS_CHROOT
          resources:
            requests:
              memory: 64Mi
              cpu: 60m
            limits:
              memory: 64Mi
              cpu: 60m
      volumes:
        - name: rabbitmq-config
          configMap:
            name: rabbitmq-config
            optional: false
            items:
              - key: enabled_plugins
                path: "enabled_plugins"
              - key: rabbitmq.conf
                path: "rabbitmq.conf"
        - name: mq-secret-def
          secret:
            secretName: rabbitmq-secrets
            items:
              - key: definitions.json
                path: definitions.json
        # Read-write volume into which to copy the rabbitmq.conf and enabled_plugins files.
        # This is needed since the Docker image writes to rabbitmq.conf and Kubernetes
        # config maps are mounted read-only since Kubernetes 1.9.4.
        - name: rabbitmq-config-rw
          emptyDir:
            sizeLimit: 1Mi
        - name: rabbitmq-data
          persistentVolumeClaim:
            claimName: rabbitmq-data
      serviceAccountName: rabbitmq
      containers:
        - name: rabbitmq
          # Community Docker image
          image: rabbitmq:3.8-management
          volumeMounts:
            # Mounts rabbitmq.conf and enabled_plugins; this volume must be writable.
            - name: rabbitmq-config-rw
              mountPath: "/etc/rabbitmq"
              # mountPath: "/etc/rabbitmq/conf.d/"
            # RabbitMQ data directory
            - name: rabbitmq-data
              mountPath: "/var/lib/rabbitmq/mnesia"
          env:
            - name: RABBITMQ_DEFAULT_USER
              value: "admin"
            - name: RABBITMQ_ERLANG_COOKIE
              valueFrom:
                secretKeyRef:
                  name: rabbitmq-secrets
                  key: RMQ_ERLANG_COOKIE
          ports:
            - name: amqp
              containerPort: 5672
              protocol: TCP
            - name: management
              containerPort: 15672
              protocol: TCP
            - name: prometheus
              containerPort: 15692
              protocol: TCP
            - name: epmd
              containerPort: 4369
              protocol: TCP
          resources:
            requests:
              memory: 1Gi
              cpu: '2'
            limits:
              memory: 1Gi
              cpu: '2'
          livenessProbe:
            exec:
              # This is just an example. There is no "one true health check" but rather several
              # rabbitmq-diagnostics commands that can be combined to form increasingly
              # comprehensive and intrusive health checks.
              # Learn more at https://www.rabbitmq.com/monitoring.html#health-checks.
              #
              # Stage 2 check:
              command: ["rabbitmq-diagnostics", "status"]
            initialDelaySeconds: 120
            # See https://www.rabbitmq.com/monitoring.html for monitoring frequency recommendations.
            periodSeconds: 60
            timeoutSeconds: 15
          readinessProbe: # Probe to know when RabbitMQ is ready to accept traffic
            exec:
              # Stage 1 check (see the health check note on the liveness probe above):
              command: ["rabbitmq-diagnostics", "ping"]
            initialDelaySeconds: 20
            periodSeconds: 60
            timeoutSeconds: 10
          # The Docker image runs as the `rabbitmq` user with uid 999 and writes to the
          # `rabbitmq.conf` file. The security context is needed so that the image has
          # permission to write to this file; without it, `rabbitmq.conf` is owned by root
          # and inaccessible to the `rabbitmq` user.
          securityContext:
            runAsUser: 1001
            runAsGroup: 53134
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - KILL
                - MKNOD
                - SYS_CHROOT
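Both rabbitmq.conf (cluster_formation.k8s.service_name) and the StatefulSet (serviceName) refer to a headless service named rabbitmq-headless that is not listed above. A minimal sketch is given below; the selector and the amqp/epmd ports are assumed to match the StatefulSet, and publishNotReadyAddresses is an assumption that lets nodes discover each other before they pass their readiness probe.
rabbitmq-headless.yaml
kind: Service
apiVersion: v1
metadata:
  name: rabbitmq-headless
  labels:
    app: rabbitmq
spec:
  # Headless: no cluster IP; DNS returns the pod addresses directly.
  clusterIP: None
  # Assumption: expose pods before they are Ready so peer discovery works during cluster formation.
  publishNotReadyAddresses: true
  ports:
    - name: epmd
      protocol: TCP
      port: 4369
      targetPort: epmd
    - name: amqp
      protocol: TCP
      port: 5672
      targetPort: amqp
  selector:
    app: rabbitmq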