11 Deploying the Armory distribution of Spinnaker

Author: MappleZF

Version: 1.0.0

1. Deploying the object storage component: MinIO

1.1 Prepare the Docker image
[root@k8smaster01:/root]# docker pull minio/minio:latest
[root@k8smaster01:/root]# docker images | grep minio
[root@k8smaster01:/root]# docker tag docker.io/minio/minio:latest harbor.iot.com/armory/minio:latest
[root@k8smaster01:/root]# docker push harbor.iot.com/armory/minio:latest
1.2 Prepare the resource manifests
Prepare the namespace
[root@k8smaster01:/data/yaml/spinnaker/minio]# vim namespace-armory.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: armory

Prepare the secret
[root@k8smaster01:/data/yaml/spinnaker/minio]# kubectl create secret docker-registry harbor --docker-server=harbor.iot.com --docker-username=admin --docker-password=appleMysql -n armory
secret/harbor created
# cat secret-armory.yaml
apiVersion: v1
data:
  .dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IuaW90LmNvbSI6eyJ1c2VybmFtZSI6ImFkbWluIiwicGFzc3dvcmQiOiJhcHBsZU15c3FsIiwiYXV0aCI6IllXUnRhVzQ2WVhCd2JHVk5lWE54YkE9PSJ9fX0=
kind: Secret
metadata:
  creationTimestamp: null
  name: harbor-armory
  namespace: armory
type: kubernetes.io/dockerconfigjson
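
The .dockerconfigjson value above is simply the base64-encoded registry auth JSON. To inspect what is stored, or to regenerate the manifest declaratively instead of creating the secret imperatively, standard kubectl flags are enough:

# Inspect the stored value:
kubectl -n armory get secret harbor -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d
# Regenerate the manifest without touching the cluster:
kubectl create secret docker-registry harbor --docker-server=harbor.iot.com --docker-username=admin --docker-password=appleMysql -n armory --dry-run=client -o yaml > secret-armory.yaml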

Prepare the PV and PVC
[root@k8smaster01.host.com:/data/yaml/spinnaker/minio]# vim pvc-ceph-armory-minio.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-armory-secret
  namespace: armory
data:
  key: QVFES04yQmZ6WnhwQVJBQTBCUDZ1ck9QOWFTdjdNK2RCeW9CK0E9PQ==
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: cephfs-pvminio
  labels:
    pv: cephfs-pvminio
spec:
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 350Gi
  volumeMode: Filesystem
  cephfs:
    monitors:
    - 192.168.13.101:6789
    - 192.168.13.102:6789
    - 192.168.13.103:6789
    path: /cephfs-pvminio
    readOnly: false
    user: admin
    secretRef:
      name: ceph-armory-secret
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvcminio
  namespace: armory
spec:
  volumeName: cephfs-pvminio
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 350Gi
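
The key field in ceph-armory-secret above is the base64-encoded Ceph client key. A minimal sketch of how to produce it, assuming a node with Ceph admin access (substitute the client name the PV actually uses):

ceph auth get-key client.admin | base64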

Prepare the Deployment
[root@k8smaster01:/data/yaml/spinnaker/minio]# vim deployment-minio.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio
  namespace: armory
  labels:
    name: minio
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      name: minio
  template:
    metadata:
      labels:
        app: minio
        name: minio
    spec:
      containers:
      - name: minio
        image: harbor.iot.com/armory/minio:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9000
          protocol: TCP
        args:
        - server
        - /data
        env:
        - name: MINIO_ACCESS_KEY
          value: admin
        - name: MINIO_SECRET_KEY
          value: admin123
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /minio/health/ready
            port: 9000
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        volumeMounts:
        - mountPath: /data
          name: minio-data
      imagePullSecrets:
      - name: harbor
      volumes:   
      - name: minio-data
        persistentVolumeClaim:
          claimName: cephfs-pvcminio
          readOnly: false

Prepare the Service
[root@k8smaster01:/data/yaml/spinnaker/minio]# vim svc-minio.yaml
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: armory
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 9000
  selector:
    app: minio
Prepare the IngressRoute
[root@k8smaster01.host.com:/data/yaml/spinnaker/minio]# vim ingressroute-minio.yaml 
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: minio
  namespace: armory
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: web
    kubernetes.io/ingress.class: "traefik"
spec:
  entryPoints:
  - web
  routes:
  - match: Host(`minio.lowan.com`) && PathPrefix(`/`)
    kind: Rule
    services:
    - name: minio
      port: 80

Alternatively, a plain Ingress also works (use one or the other), as follows:
[root@k8smaster01:/data/yaml/spinnaker/minio]# vim ingress-minio.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: minio
  namespace: armory
spec:
  rules:
  - host: minio.iot.com
    http:
      paths:
      - path: /
        backend:
          serviceName: minio
          servicePort: 80

1.3 Configure DNS resolution
[root@lb03.host.com:/root]# vim /var/named/lowan.com.zone 

Add the following record:
minio           A       192.168.13.100

[root@lb03.host.com:/root]# systemctl restart named
1.4 Apply the resource manifests
[root@k8smaster01:/data/yaml/spinnaker/minio]# kubectl apply -f namespace-armory.yaml
[root@k8smaster01:/data/yaml/spinnaker/minio]# kubectl apply -f svc-minio.yaml
[root@k8smaster01:/data/yaml/spinnaker/minio]# kubectl apply -f secret-armory.yaml
[root@k8smaster01:/data/yaml/spinnaker/minio]# kubectl apply -f pvc-ceph-armory-minio.yaml
[root@k8smaster01:/data/yaml/spinnaker/minio]# kubectl apply -f deployment-minio.yaml
[root@k8smaster01:/data/yaml/spinnaker/minio]# kubectl apply -f ingressroute-minio.yaml
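
Before moving on to verification, a quick check that everything came up:

kubectl -n armory get pods,svc,pvc
kubectl -n armory rollout status deployment/minio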
1.5 Verification

Open http://minio.lowan.com in a browser.

Initial account: admin; initial password: admin123
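
The readiness endpoint configured in the Deployment's probe can also be hit directly; an HTTP 200 means the service is up:

curl -i http://minio.lowan.com/minio/health/ready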

[root@k8smaster01.host.com:/data/yaml/spinnaker/minio]# kubectl exec -it minio-649655b79f-mm9bl -n armory -- /bin/sh
/ # printenv
KUBERNETES_PORT=tcp://10.10.0.1:443
KUBERNETES_SERVICE_PORT=443
HOSTNAME=minio-649655b79f-mm9bl
MINIO_SERVICE_PORT=80
MINIO_PORT=tcp://10.10.182.37:80
MINIO_ACCESS_KEY_FILE=access_key
SHLVL=1
HOME=/root
MINIO_PORT_80_TCP_ADDR=10.10.182.37
MINIO_KMS_MASTER_KEY_FILE=kms_master_key
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
MINIO_ACCESS_KEY=admin
MINIO_PORT_80_TCP_PORT=80
MINIO_UPDATE=off
MINIO_PORT_80_TCP_PROTO=tcp
TERM=xterm
KUBERNETES_PORT_443_TCP_ADDR=10.10.0.1
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
MINIO_PORT_80_TCP=tcp://10.10.182.37:80
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
MINIO_SECRET_KEY_FILE=secret_key
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_PORT_443_TCP=tcp://10.10.0.1:443
MINIO_SECRET_KEY=admin123
KUBERNETES_SERVICE_HOST=10.10.0.1
PWD=/
MINIO_SERVICE_HOST=10.10.182.37
1.6 Deploy Redis
1.6.1 Prepare the Redis resource manifests
[root@k8smaster01.host.com:/data/yaml/spinnaker/redis]# vim svc-redis.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: armory
spec:
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
  selector:
    app: redis

[root@k8smaster01.host.com:/data/yaml/spinnaker/redis]# vim dp-redis.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    name: redis
  name: redis
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      name: redis
  template:
    metadata:
      labels:
        app: redis
        name: redis
    spec:
      containers:
      - name: redis
        image: harbor.iot.com/armory/redis:6.0.8
        imagePullPolicy: IfNotPresent
        command: ["redis-server"]
        ports:
        - containerPort: 6379
          protocol: "TCP"
        resources:
          requests:
            cpu: "400m"
            memory: "900Mi"
      imagePullSecrets:
      - name: harbor

1.6.2 Apply the resource manifests
kubectl apply -f svc-redis.yaml
kubectl apply -f dp-redis.yaml
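
A quick way to confirm Redis answers inside the cluster (recent kubectl can target the Deployment directly; otherwise substitute the pod name):

kubectl -n armory exec deploy/redis -- redis-cli ping
# expected output: PONG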

2. Deploying the Kubernetes cloud driver component: clouddriver

2.1 Prepare the Docker image
docker pull armory/spinnaker-clouddriver-slim:release-1.11.x-bee52673a
docker tag docker.io/armory/spinnaker-clouddriver-slim:release-1.11.x-bee52673a harbor.iot.com/armory/spinnaker-clouddriver-slim:release-1.11.x-bee52673a
docker push harbor.iot.com/armory/spinnaker-clouddriver-slim:release-1.11.x-bee52673a
2.2 Prepare the MinIO secret
2.2.1 Prepare the credentials file
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# vim credentials
[default]
aws_access_key_id=admin
aws_secret_access_key=admin123
2.2.2 Create the secret
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# kubectl create secret generic credentials --from-file=./credentials -n armory
secret/credentials created
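
To double-check what was stored:

kubectl -n armory get secret credentials -o jsonpath='{.data.credentials}' | base64 -d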
2.3 Prepare the Kubernetes user configuration
2.3.1 Issue the certificate and private key
  • Prepare the certificate signing request file

    [root@k8smaster01.host.com:/data/ssl]# 
    cat > admin-csr.json <<EOF
    {
      "CN": "admin",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "BeiJing",
          "ST": "BeiJing",
          "O": "system:masters",
          "OU": "System"
        }
      ]
    }
    EOF
    
  • Generate admin.pem and admin-key.pem (the O field, system:masters, is what grants this certificate cluster-admin rights: Kubernetes maps a client certificate's Organization to a group)

    [root@k8smaster01.host.com:/data/ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssl-json -bare admin
    2020/09/09 18:08:34 [INFO] generate received request
    2020/09/09 18:08:34 [INFO] received CSR
    2020/09/09 18:08:34 [INFO] generating key: rsa-2048
    2020/09/09 18:08:34 [INFO] encoded CSR
    2020/09/09 18:08:34 [INFO] signed certificate with serial number 662043138212200112498221957779202406541973930898
    2020/09/09 18:08:34 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    [root@k8smaster01.host.com:/data/ssl]# ls
    admin.csr  admin-csr.json  admin-key.pem  admin.pem  ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  server.csr  server-csr.json  server-key.pem  server.pem
    
    
2.3.2 Build the kubeconfig
// Set the cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=./ca.pem --embed-certs=true --server=https://lbvip.host.com:7443 --kubeconfig=admin.config
// Set the client credentials
kubectl config set-credentials admin --client-certificate=./admin.pem --client-key=./admin-key.pem --embed-certs=true --kubeconfig=admin.config
// Set the context
kubectl config set-context default --cluster=kubernetes --user=admin --kubeconfig=admin.config
// Set the default context
kubectl config use-context default --kubeconfig=admin.config
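
The resulting admin.config can be sanity-checked in place before distributing it:

kubectl --kubeconfig=admin.config get nodes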


2.3.3 Verify the admin user
Here we take the worker01 node as an example.
Before the certificate is in place, the state looks like this:
[root@k8sworker01.host.com:/root]# kubectl get pods
The connection to the server localhost:8080 was refused - did you specify the right host or port?

Now install the kubeconfig built from the issued certificate:
mkdir -p $HOME/.kube
cd /root/.kube/
scp k8smaster01:/data/ssl/admin.config .


echo "export KUBECONFIG=/root/.kube/admin.config" >>~/.bash_profile
source /root/.bash_profile
or: echo "export KUBECONFIG=/root/.kube/admin.config" >>/etc/profile
    source /etc/profile
The node now has the same access as a master node, as shown below:
[root@k8sworker01.host.com:/root/.kube]# kubectl get nodes          
NAME                   STATUS   ROLES    AGE   VERSION
k8smaster01.host.com   Ready    <none>   35d   v1.19.0
k8smaster02.host.com   Ready    <none>   35d   v1.19.0
k8smaster03.host.com   Ready    <none>   35d   v1.19.0
k8sworker01.host.com   Ready    <none>   35d   v1.19.0
k8sworker02.host.com   Ready    <none>   35d   v1.19.0
k8sworker03.host.com   Ready    <none>   35d   v1.19.0
k8sworker04.host.com   Ready    <none>   35d   v1.19.0

[root@k8sworker01.host.com:/root/.kube]# kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://lbvip.host.com:7443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: admin
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
2.3.4 Create the ConfigMap
[root@k8smaster01.host.com:/data/ssl]# cp admin.config /data/yaml/spinnaker/clouddriver/
[root@k8smaster01.host.com:/data/ssl]# cd /data/yaml/spinnaker/clouddriver/
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# mv admin.config default-kubeconfig
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# kubectl create configmap default-kubeconfig --from-file=default-kubeconfig -n armory
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# vim cm-default-kubeconfig.yaml
apiVersion: v1
data:
  default-kubeconfig: |
    apiVersion: v1
    clusters:
    - cluster:
        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR2akNDQXFhZ0F3SUJBZ0lVYnd0a1lubktub1d4Vmt1K28zNFlDWm44bmt3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEl3TURrd09UQTVORGd3TUZvWERUUXdNRGt3TkRBNU5EZ3dNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RERBSwpCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBclNacUZBOXRiYk5DVlpCUjlIZEMKRm53bHg4c1J4NEYvY3hpSGZzb3paZm1seE1vRHJJRElKcTVBOTVCS3FlK0VqT1BVRmxtcFBtdHR6UVBnR1FMSgpyNCtCSXJMTkNXR09GdkRHWDYxR2txSXpiNFhPM1ZuR3FhdUhWb1QyMzVqMk0raFN2QmY5YjdlY21OdUlBbk56CjV2WFlpU20zVVJjcnczSkNOa2NpbTd4K1NTZmphUk9uUGplaEpXVzQxLzQva3MwclZlbm9CUEtzU2gxcWU4d24KS0xOSnY3UGdJREozZnQ1bTkrN1FzTHlnYmVFeGJsZThMRkt1Z1RuUWdyaXllNHVGR2dpSWl0WHR0TDhIZkhKcApRaTVOMzRweEF4T2NPQytaVU1KbW1YRGZJOEhBS3YyNVJjc0JVeitFcFJOd0krenArRXBPeDBObVBTa0tjN2psCi93SURBUUFCbzJZd1pEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0VnWURWUjBUQVFIL0JBZ3dCZ0VCL3dJQkFqQWQKQmdOVkhRNEVGZ1FVREZ1L041Y1NMekdVbjRHcnFYOStQNDkrM040d0h3WURWUjBqQkJnd0ZvQVVERnUvTjVjUwpMekdVbjRHcnFYOStQNDkrM040d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKZi9TSnEyelZDSzZORFJHNk5BCjJMWXhuSml3ZVRib0pXdG4zZHFaVURrbU1RRXpGWXJ0NS9uOEFzL2xCd09hUTVlVWNuMVRMcDd2OEVxN2lwN2IKTTQ4TWsvVkZ2cjM2WUNoeVExbE4yVFVudlpjamQ0cnZGTVhXUkdDOHNOSnVTSi9kMUNzRnUwQzZwT2V3YWsxSgpUZUFNSkpITU1sNk85Uk9ZS2l6VHRtUWJSZTR4UVYreXZESnlMaStIZXpvMmh2dzZWR1c0Yi9PS0YzMnRyTVoyCjlWdUtHR1kzRERXMWdlQi9wZm5SdFNnWFoyWUN6ZTBzYUl6b0E2SG95SjA3d2o0ZnArdGxkNzZ5Wm5HRHQ5RUgKT0ZyMkxEV0JGM1hBdzFjVzRFRXRSWUhiVUZ2Lzl5U1RJVTZVRVQ0ZHpRNmF4S0J4WnhmSVRYNXBkNEVvejN0KwpGNWM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
        server: https://lbvip.host.com:7443
      name: kubernetes
    contexts:
    - context:
        cluster: kubernetes
        user: admin
      name: default
    current-context: default
    kind: Config
    preferences: {}
    users:
    - name: admin
      user:
        client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQzVENDQXNXZ0F3SUJBZ0lVYy9jS2dtdzVYY0JlNjhlZG1FU2xPRmorUTVJd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEl3TURrd09URXdNRFF3TUZvWERUUXdNRGt3TkRFd01EUXdNRm93YXpFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFVcHBibWN4RURBT0JnTlZCQWNUQjBKbGFVcHBibWN4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVE4d0RRWURWUVFMRXdaVGVYTjBaVzB4RGpBTUJnTlZCQU1UCkJXRmtiV2x1TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF2M21HNmZaV0ZiNEgKMUR0RUdzajc0c1NWWFd3NWFTc0RtMlpBbktoakU1bWdUMHlnTEl2SENJQVM5Y0xDMVc4MEhGN2NablRNRTYzcQp0aTIxTjU4bXBiMnBtSC9yb1YyRUtqUldKdk1peStRakNONDFTclpBaUFzTEVua1ZGV3RoZzFES0pob1NnZUYyCkNiUmUzUVRGb002a3dMbHlCdkpaRFpSYUZ2alNNVDhZeDc4cUpQd2VqdlFnTW11dUtNZ2lySVlsKy9oK01hdC8KWXk2YUt2NExhdUZ4VDV4WEl6QWxwZGZCQWZDb3U4MDVKV1dXejZvMll0RUpxY05sUkttZ253dFJiYzNCRHQ2UAoreDhiK2NoK0VuYkdjQldtd1FtbnNybElYOVRhOGpJTjFhTzZYQzlZUEVVYSt6cm1iOVdBTHpRU1FOSFFPMFZyCjN1SWZuYWh4elFJREFRQUJvMzh3ZlRBT0JnTlZIUThCQWY4RUJBTUNCYUF3SFFZRFZSMGxCQll3RkFZSUt3WUIKQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZJM0toY3NwRGJ2aQpweE5QMkt5RnFVRk5RWkJFTUI4R0ExVWRJd1FZTUJhQUZBeGJ2emVYRWk4eGxKK0JxNmwvZmorUGZ0emVNQTBHCkNTcUdTSWIzRFFFQkN3VUFBNElCQVFBanNBbGNFeTYyRTFIZlI4OGZqQmxRSlFMSXNjcFN1YmtpTG82UVQ0RkkKWmoxRGVERXJsQ0FHQVBhRFc1WUxTUHhwSCtSUlNUajkxMEJCeTZORndpRjJjL1EzZ1ZYZVlNamh1aytVbFFOWQoxRXMxQ2pOTlY4M0x3TXpjRDVUS3VCLzRMTXlyT2Mrc1dkeHNnMjRGV1hjQ3pRYkxlU3QvU0xyRFBMQmRjb0hqCk0wRHpCQ1QxS2pMMDdiWmp3YUN5Y09DN01UMXZ5dFZKdlJmZ29KbTF5MlhuMEp5WTV4b2pkN0srd0RSdFBHTkgKV2t4VW44UmJ1U1FhVnRleitQQlJlaUNGWXhtY1lwVWJQWkIwWWJXVU8ybzBMMjRwRzdOT1dsdjVxZFJaMWF5Qgo0cGErVDllbnV5OUpUQitTWXpyMFJZc3pDclpZbU9KTWJ3YU1iOUJsNzVEawotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
        client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdjNtRzZmWldGYjRIMUR0RUdzajc0c1NWWFd3NWFTc0RtMlpBbktoakU1bWdUMHlnCkxJdkhDSUFTOWNMQzFXODBIRjdjWm5UTUU2M3F0aTIxTjU4bXBiMnBtSC9yb1YyRUtqUldKdk1peStRakNONDEKU3JaQWlBc0xFbmtWRld0aGcxREtKaG9TZ2VGMkNiUmUzUVRGb002a3dMbHlCdkpaRFpSYUZ2alNNVDhZeDc4cQpKUHdlanZRZ01tdXVLTWdpcklZbCsvaCtNYXQvWXk2YUt2NExhdUZ4VDV4WEl6QWxwZGZCQWZDb3U4MDVKV1dXCno2bzJZdEVKcWNObFJLbWdud3RSYmMzQkR0NlAreDhiK2NoK0VuYkdjQldtd1FtbnNybElYOVRhOGpJTjFhTzYKWEM5WVBFVWErenJtYjlXQUx6UVNRTkhRTzBWcjN1SWZuYWh4elFJREFRQUJBb0lCQVFDdjg3MUd3bGZaSXdlTwp2K2VWdFp2Q2gyTzZ0ZmtMNjdzazBwSU5ZNitvRTBSOW1xeVdRdHhqczAyNUNZQ0JJZStCSXFybWUrNE5KNFRJClpMaGlwSjJmTU9yb2VLVFBaR0JQYUVqREFpcTNzcTEvT0xOQU5McVlTWTJqTENGenpYbU9jbTlSRkNSdHRnSUMKYVAzRG5ONy9KdVcvTktyVU4yQWVXblZ2SnJnQkI2THFlWXF6SVlRYUVoaW5sdGtWMlZxSi82cHdXcGtYSEx0UwpOWjE1Q3lvamExeEtxMmlkQWxEV0UyTEhENmg5aVVVd3lXVm5aRTREUHFTRUYxcFBnZjVLNHdNTjhoYkNocGpXClJZTHF0OEkydzN4QTBSZkdrNDROZm9FOTZPM2N1KzRxSmttNzd0bG53REh3NUlBbzFKNytBM0xXSXVVZFMzeHgKd01aMGUrZ0JBb0dCQU9wMkxNODVISHVqTjN5UWRuYXh4TlJybTM2b0ZKL2VlTGdaT0s1ajVscWlxaHhFOGRlUQp6bGJBR2hkR004aW50bVp6SDQ0WW5YOGpqcnJHRTBYZm5hU0hHc2loVEEzSFlvemNpN04rb1U4em96NFVLUDBwCkhpNk5aSW5SL3VEaTJIL25BWFd6RnJGUS8xQUJGUzRtdFB3OFJwWVQycG1DT1hrcThyMmJ5MHZKQW9HQkFORVEKYmpmb3NXek5MRHlOV2lVTUo0NEpDT2FDN3Z0K3hSQkZabGp4b2I0dnhyUk5TU2g2elo0WXU0UlpsY1VXM3NUWQoxOEpSaUl0SFhlTnE2alphbHF3T1JyY2NpU2xsMUUvOHRtVklrdjhRVUxzMjNtbEo3ZFdBcjlHYzVEV2RkSER1CjBhZ1Y4cjhKRnVRUnVHczlFWVQxT3MvYnpWYU5UOW4xUFRqNUplL2xBb0dCQUtHeHdETmxNcVI5cW8ySVgzcDYKSE1GV09OVi9lMEhKdnIxUC80d2RMMGNBZVlxbmJaWDVzcU9YTmljbzdodUt0anN2VitxNlJSdmlYTk9vT0xCdQppQnhvWkd0SytkVmkxUzlLQmpwU3NWS2F6UkRKL0g2ak5jSnJGcFFKczlwSjkzODF4QWIwN0VoN0VLaVBKcGsrCmJvQ3AzUkF4UHdleERJdm90NkZWNlQwcEFvR0FTcmFoZlErR29NaHVIN0d3WG44S2Y4TVEwRGhJcWM0ekpNNTQKZGg3Y1c3aDlQYUJGSGRkTk5rdkdiOVVJSmlnM01vZ0FDMnpSN0Z4ejNEc3dPbjZEVERmSjQ5U3AzQWFOS295NApybGVTV043K3l2a2Y0T2JQdlRaQkh4ci9uMzJHeUhsQXFkNDJJemxmV2VCUFZOdTlNQlBua2JNT25mamdhWmN2CjZ2VjNMbFVDZ1lBRjRjblhwdGlSU3ZiOWlWUG1QOVNHWWZiS2xaVmgyQWd5WVhaVldJS1RRVUxUL2lHd3gzbGgKWTdVbzJ6WjNSNW15RWtTWGJsaUcveXdrMjR1cTBiaDVrWHlOUy9MTno3TW15SVR2MC9kMkpjUy9EZzlXT0ZxLwpKNTBvZTNUSWhZYUtGVlF1UzJleFpNRlY2ZUJHTEZ4RHg4aTJoTTJCSHRTVUd1WTRCNTZoK3c9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
kind: ConfigMap
metadata:
  creationTimestamp: null
  name: default-kubeconfig
  namespace: armory
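
The manifest above is just the declarative form of the imperative create command and can be regenerated at any time:

kubectl create configmap default-kubeconfig --from-file=default-kubeconfig -n armory --dry-run=client -o yaml > cm-default-kubeconfig.yaml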

2.4.1 Create and apply the resource manifests
2.4.1.1 Create the ConfigMap for environment variables
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# vim cm-init-env.yaml

kind: ConfigMap
apiVersion: v1
metadata:
  name: init-env
  namespace: armory
data:
  API_HOST: http://spinnaker.lowan.com/api
  ARMORY_ID: c02f0781-92f5-4e80-86db-0ba8fe7b8544
  ARMORYSPINNAKER_CONF_STORE_BUCKET: armory-platform
  ARMORYSPINNAKER_CONF_STORE_PREFIX: front50
  ARMORYSPINNAKER_GCS_ENABLED: "false"
  ARMORYSPINNAKER_S3_ENABLED: "true"
  AUTH_ENABLED: "false"
  AWS_REGION: us-east-1
  BASE_IP: 127.0.0.1
  CLOUDDRIVER_OPTS: -Dspring.profiles.active=armory,configurator,local
  CONFIGURATOR_ENABLED: "false"
  DECK_HOST: http://spinnaker.lowan.com
  ECHO_OPTS: -Dspring.profiles.active=armory,configurator,local
  GATE_OPTS: -Dspring.profiles.active=armory,configurator,local
  IGOR_OPTS: -Dspring.profiles.active=armory,configurator,local
  PLATFORM_ARCHITECTURE: kubernetes
  REDIS_HOST: redis://redis:6379
  SERVER_ADDRESS: 0.0.0.0
  SPINNAKER_AWS_DEFAULT_REGION: us-east-1
  SPINNAKER_AWS_ENABLED: "false"
  SPINNAKER_CONFIG_DIR: /home/spinnaker/config
  SPINNAKER_GOOGLE_PROJECT_CREDENTIALS_PATH: ""
  SPINNAKER_HOME: /home/spinnaker
  SPRING_PROFILES_ACTIVE: armory,configurator,local
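
front50 stores pipelines and application metadata in the S3 bucket named by ARMORYSPINNAKER_CONF_STORE_BUCKET (armory-platform here), so make sure that bucket exists in MinIO if it is not created automatically. A minimal sketch with the AWS CLI, assuming it is installed (the MinIO mc client works just as well):

export AWS_ACCESS_KEY_ID=admin AWS_SECRET_ACCESS_KEY=admin123
aws --endpoint-url http://minio.lowan.com s3 mb s3://armory-platform
aws --endpoint-url http://minio.lowan.com s3 ls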
  
  
2.4.1.2 Create the ConfigMap for component configuration
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# vim cm-custom-config.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: custom-config
  namespace: armory
data:
  clouddriver-local.yml: |
    kubernetes:
      enabled: true
      accounts:
        - name: admin
          serviceAccount: false
          dockerRegistries:
            - accountName: harbor
              namespace: []
          namespaces:
            - default
            - devops
          kubeconfigFile: /opt/spinnaker/credentials/custom/default-kubeconfig
      primaryAccount: admin
    dockerRegistry:
      enabled: true
      accounts:
        - name: harbor
          requiredGroupMembership: []
          providerVersion: V1
          insecureRegistry: true
          address: http://harbor.iot.com
          username: admin
          password: appleMysql
      primaryAccount: harbor
    artifacts:
      s3:
        enabled: true
        accounts:
        - name: armory-config-s3-account
          apiEndpoint: http://minio
          apiRegion: us-east-1
      gcs:
        enabled: false
        accounts:
        - name: armory-config-gcs-account
  custom-config.json: ""
  echo-configurator.yml: |
    diagnostics:
      enabled: true
  front50-local.yml: |
    spinnaker:
      s3:
        endpoint: http://minio
  igor-local.yml: |
    jenkins:
      enabled: true
      masters:
        - name: jenkins-admin
          address: http://jenkins.lowan.com
          username: devops-k8s
          password: applejenkins
      primaryAccount: jenkins-admin
  nginx.conf: |
    gzip on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;
    server {
           listen 80;
           location / {
                proxy_pass http://armory-deck/;
           }
           location /api/ {
                proxy_pass http://armory-gate:8084/;
           }
           rewrite ^/login(.*)$ /api/login$1 last;
           rewrite ^/auth(.*)$ /api/auth$1 last;
    }
  spinnaker-local.yml: |
    services:
      igor:
        enabled: true
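Once clouddriver is deployed with this configuration, the accounts defined in clouddriver-local.yml can be checked through clouddriver's /credentials endpoint. A sketch, assuming the Deployment ends up named armory-clouddriver and listens on port 7002 as configured:

kubectl -n armory port-forward deploy/armory-clouddriver 7002:7002 &
curl -s http://127.0.0.1:7002/credentials
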
2.4.1.3 Create the ConfigMap for the default configuration files
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# vim cm-default-config.yaml

kind: ConfigMap
apiVersion: v1
metadata:
  name: default-config
  namespace: armory
data:
  barometer.yml: |
    server:
      port: 9092

    spinnaker:
      redis:
        host: ${services.redis.host}
        port: ${services.redis.port}
  clouddriver-armory.yml: |
    aws:
      defaultAssumeRole: role/${SPINNAKER_AWS_DEFAULT_ASSUME_ROLE:SpinnakerManagedProfile}
      accounts:
        - name: default-aws-account
          accountId: ${SPINNAKER_AWS_DEFAULT_ACCOUNT_ID:none}

      client:
        maxErrorRetry: 20

    serviceLimits:
      cloudProviderOverrides:
        aws:
          rateLimit: 15.0

      implementationLimits:
        AmazonAutoScaling:
          defaults:
            rateLimit: 3.0
        AmazonElasticLoadBalancing:
          defaults:
            rateLimit: 5.0

    security.basic.enabled: false
    management.security.enabled: false
  clouddriver-dev.yml: |

    serviceLimits:
      defaults:
        rateLimit: 2
  clouddriver.yml: |
    server:
      port: ${services.clouddriver.port:7002}
      address: ${services.clouddriver.host:localhost}

    redis:
      connection: ${REDIS_HOST:redis://localhost:6379}

    udf:
      enabled: ${services.clouddriver.aws.udf.enabled:true}
      udfRoot: /opt/spinnaker/config/udf
      defaultLegacyUdf: false

    default:
      account:
        env: ${providers.aws.primaryCredentials.name}

    aws:
      enabled: ${providers.aws.enabled:false}
      defaults:
        iamRole: ${providers.aws.defaultIAMRole:BaseIAMRole}
      defaultRegions:
        - name: ${providers.aws.defaultRegion:us-east-1}
      defaultFront50Template: ${services.front50.baseUrl}
      defaultKeyPairTemplate: ${providers.aws.defaultKeyPairTemplate}

    azure:
      enabled: ${providers.azure.enabled:false}

      accounts:
        - name: ${providers.azure.primaryCredentials.name}
          clientId: ${providers.azure.primaryCredentials.clientId}
          appKey: ${providers.azure.primaryCredentials.appKey}
          tenantId: ${providers.azure.primaryCredentials.tenantId}
          subscriptionId: ${providers.azure.primaryCredentials.subscriptionId}

    google:
      enabled: ${providers.google.enabled:false}

      accounts:
        - name: ${providers.google.primaryCredentials.name}
          project: ${providers.google.primaryCredentials.project}
          jsonPath: ${providers.google.primaryCredentials.jsonPath}
          consul:
            enabled: ${providers.google.primaryCredentials.consul.enabled:false}

    cf:
      enabled: ${providers.cf.enabled:false}

      accounts:
        - name: ${providers.cf.primaryCredentials.name}
          api: ${providers.cf.primaryCredentials.api}
          console: ${providers.cf.primaryCredentials.console}
          org: ${providers.cf.defaultOrg}
          space: ${providers.cf.defaultSpace}
          username: ${providers.cf.account.name:}
          password: ${providers.cf.account.password:}

    kubernetes:
      enabled: ${providers.kubernetes.enabled:false}
      accounts:
        - name: ${providers.kubernetes.primaryCredentials.name}
          dockerRegistries:
            - accountName: ${providers.kubernetes.primaryCredentials.dockerRegistryAccount}

    openstack:
      enabled: ${providers.openstack.enabled:false}
      accounts:
        - name: ${providers.openstack.primaryCredentials.name}
          authUrl: ${providers.openstack.primaryCredentials.authUrl}
          username: ${providers.openstack.primaryCredentials.username}
          password: ${providers.openstack.primaryCredentials.password}
          projectName: ${providers.openstack.primaryCredentials.projectName}
          domainName: ${providers.openstack.primaryCredentials.domainName:Default}
          regions: ${providers.openstack.primaryCredentials.regions}
          insecure: ${providers.openstack.primaryCredentials.insecure:false}
          userDataFile: ${providers.openstack.primaryCredentials.userDataFile:}

          lbaas:
            pollTimeout: 60
            pollInterval: 5

    dockerRegistry:
      enabled: ${providers.dockerRegistry.enabled:false}
      accounts:
        - name: ${providers.dockerRegistry.primaryCredentials.name}
          address: ${providers.dockerRegistry.primaryCredentials.address}
          username: ${providers.dockerRegistry.primaryCredentials.username:}
          passwordFile: ${providers.dockerRegistry.primaryCredentials.passwordFile}

    credentials:
      primaryAccountTypes: ${providers.aws.primaryCredentials.name}, ${providers.google.primaryCredentials.name}, ${providers.cf.primaryCredentials.name}, ${providers.azure.primaryCredentials.name}
      challengeDestructiveActionsEnvironments: ${providers.aws.primaryCredentials.name}, ${providers.google.primaryCredentials.name}, ${providers.cf.primaryCredentials.name}, ${providers.azure.primaryCredentials.name}

    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: ${services.spectator.webEndpoint.enabled:false}
        prototypeFilter:
          path: ${services.spectator.webEndpoint.prototypeFilter.path:}

      stackdriver:
        enabled: ${services.stackdriver.enabled}
        projectName: ${services.stackdriver.projectName}
        credentialsPath: ${services.stackdriver.credentialsPath}

    stackdriver:
      hints:
        - name: controller.invocations
          labels:
          - account
          - region
  dinghy.yml: ""
  echo-armory.yml: |
    diagnostics:
      enabled: true
      id: ${ARMORY_ID:unknown}

    armorywebhooks:
      enabled: false
      forwarding:
        baseUrl: http://armory-dinghy:8081
        endpoint: v1/webhooks
  echo-noncron.yml: |
    scheduler:
      enabled: false
  echo.yml: |
    server:
      port: ${services.echo.port:8089}
      address: ${services.echo.host:localhost}

    cassandra:
      enabled: ${services.echo.cassandra.enabled:false}
      embedded: ${services.cassandra.embedded:false}
      host: ${services.cassandra.host:localhost}

    spinnaker:
      baseUrl: ${services.deck.baseUrl}
      cassandra:
         enabled: ${services.echo.cassandra.enabled:false}
      inMemory:
         enabled: ${services.echo.inMemory.enabled:true}

    front50:
      baseUrl: ${services.front50.baseUrl:http://localhost:8080 }

    orca:
      baseUrl: ${services.orca.baseUrl:http://localhost:8083 }

    endpoints.health.sensitive: false

    slack:
      enabled: ${services.echo.notifications.slack.enabled:false}
      token: ${services.echo.notifications.slack.token}

    spring:
      mail:
        host: ${mail.host}

    mail:
      enabled: ${services.echo.notifications.mail.enabled:false}
      host: ${services.echo.notifications.mail.host}
      from: ${services.echo.notifications.mail.fromAddress}

    hipchat:
      enabled: ${services.echo.notifications.hipchat.enabled:false}
      baseUrl: ${services.echo.notifications.hipchat.url}
      token: ${services.echo.notifications.hipchat.token}

    twilio:
      enabled: ${services.echo.notifications.sms.enabled:false}
      baseUrl: ${services.echo.notifications.sms.url:https://api.twilio.com/ }
      account: ${services.echo.notifications.sms.account}
      token: ${services.echo.notifications.sms.token}
      from: ${services.echo.notifications.sms.from}

    scheduler:
      enabled: ${services.echo.cron.enabled:true}
      threadPoolSize: 20
      triggeringEnabled: true
      pipelineConfigsPoller:
        enabled: true
        pollingIntervalMs: 30000
      cron:
        timezone: ${services.echo.cron.timezone}

    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: ${services.spectator.webEndpoint.enabled:false}
        prototypeFilter:
          path: ${services.spectator.webEndpoint.prototypeFilter.path:}

      stackdriver:
        enabled: ${services.stackdriver.enabled}
        projectName: ${services.stackdriver.projectName}
        credentialsPath: ${services.stackdriver.credentialsPath}

    webhooks:
      artifacts:
        enabled: true
  fetch.sh: |+
  
    CONFIG_LOCATION=${SPINNAKER_HOME:-"/opt/spinnaker"}/config
    CONTAINER=$1

    rm -f /opt/spinnaker/config/*.yml

    mkdir -p ${CONFIG_LOCATION}

    for filename in /opt/spinnaker/config/default/*.yml; do
        cp $filename ${CONFIG_LOCATION}
    done

    if [ -d /opt/spinnaker/config/custom ]; then
        for filename in /opt/spinnaker/config/custom/*; do
            cp $filename ${CONFIG_LOCATION}
        done
    fi

    add_ca_certs() {
      ca_cert_path="$1"
      jks_path="$2"
      alias="$3"

      if [[ "$(whoami)" != "root" ]]; then
        echo "INFO: I do not have proper permisions to add CA roots"
        return
      fi

      if [[ ! -f ${ca_cert_path} ]]; then
        echo "INFO: No CA cert found at ${ca_cert_path}"
        return
      fi
      keytool -importcert \
          -file ${ca_cert_path} \
          -keystore ${jks_path} \
          -alias ${alias} \
          -storepass changeit \
          -noprompt
    }

    if [ `which keytool` ]; then
      echo "INFO: Keytool found adding certs where appropriate"
      add_ca_certs "${CONFIG_LOCATION}/ca.crt" "/etc/ssl/certs/java/cacerts" "custom-ca"
    else
      echo "INFO: Keytool not found, not adding any certs/private keys"
    fi

    saml_pem_path="/opt/spinnaker/config/custom/saml.pem"
    saml_pkcs12_path="/tmp/saml.pkcs12"
    saml_jks_path="${CONFIG_LOCATION}/saml.jks"

    x509_ca_cert_path="/opt/spinnaker/config/custom/x509ca.crt"
    x509_client_cert_path="/opt/spinnaker/config/custom/x509client.crt"
    x509_jks_path="${CONFIG_LOCATION}/x509.jks"
    x509_nginx_cert_path="/opt/nginx/certs/ssl.crt"

    if [ "${CONTAINER}" == "gate" ]; then
        if [ -f ${saml_pem_path} ]; then
            echo "Loading ${saml_pem_path} into ${saml_jks_path}"
            openssl pkcs12 -export -out ${saml_pkcs12_path} -in ${saml_pem_path} -password pass:changeit -name saml
            keytool -genkey -v -keystore ${saml_jks_path} -alias saml \
                    -keyalg RSA -keysize 2048 -validity 10000 \
                    -storepass changeit -keypass changeit -dname "CN=armory"
            keytool -importkeystore \
                    -srckeystore ${saml_pkcs12_path} \
                    -srcstoretype PKCS12 \
                    -srcstorepass changeit \
                    -destkeystore ${saml_jks_path} \
                    -deststoretype JKS \
                    -storepass changeit \
                    -alias saml \
                    -destalias saml \
                    -noprompt
        else
            echo "No SAML IDP pemfile found at ${saml_pem_path}"
        fi
        if [ -f ${x509_ca_cert_path} ]; then
            echo "Loading ${x509_ca_cert_path} into ${x509_jks_path}"
            add_ca_certs ${x509_ca_cert_path} ${x509_jks_path} "ca"
        else
            echo "No x509 CA cert found at ${x509_ca_cert_path}"
        fi
        if [ -f ${x509_client_cert_path} ]; then
            echo "Loading ${x509_client_cert_path} into ${x509_jks_path}"
            add_ca_certs ${x509_client_cert_path} ${x509_jks_path} "client"
        else
            echo "No x509 Client cert found at ${x509_client_cert_path}"
        fi

        if [ -f ${x509_nginx_cert_path} ]; then
            echo "Creating a self-signed CA (EXPIRES IN 360 DAYS) with java keystore: ${x509_jks_path}"
            echo -e "\n\n\n\n\n\ny\n" | keytool -genkey -keyalg RSA -alias server -keystore keystore.jks -storepass changeit -validity 360 -keysize 2048
            keytool -importkeystore \
                    -srckeystore keystore.jks \
                    -srcstorepass changeit \
                    -destkeystore "${x509_jks_path}" \
                    -storepass changeit \
                    -srcalias server \
                    -destalias server \
                    -noprompt
        else
            echo "No x509 nginx cert found at ${x509_nginx_cert_path}"
        fi
    fi

    if [ "${CONTAINER}" == "nginx" ]; then
        nginx_conf_path="/opt/spinnaker/config/default/nginx.conf"
        if [ -f ${nginx_conf_path} ]; then
            cp ${nginx_conf_path} /etc/nginx/nginx.conf
        fi
    fi

  fiat.yml: |-
    server:
      port: ${services.fiat.port:7003}
      address: ${services.fiat.host:localhost}

    redis:
      connection: ${services.redis.connection:redis://localhost:6379}

    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: ${services.spectator.webEndpoint.enabled:false}
        prototypeFilter:
          path: ${services.spectator.webEndpoint.prototypeFilter.path:}
      stackdriver:
        enabled: ${services.stackdriver.enabled}
        projectName: ${services.stackdriver.projectName}
        credentialsPath: ${services.stackdriver.credentialsPath}

    hystrix:
     command:
       default.execution.isolation.thread.timeoutInMilliseconds: 20000

    logging:
      level:
        com.netflix.spinnaker.fiat: DEBUG
  front50-armory.yml: |
    spinnaker:
      redis:
        enabled: true
        host: redis
  front50.yml: |
    server:
      port: ${services.front50.port:8080}
      address: ${services.front50.host:localhost}

    hystrix:
      command:
        default.execution.isolation.thread.timeoutInMilliseconds: 15000

    cassandra:
      enabled: ${services.front50.cassandra.enabled:false}
      embedded: ${services.cassandra.embedded:false}
      host: ${services.cassandra.host:localhost}

    aws:
      simpleDBEnabled: ${providers.aws.simpleDBEnabled:false}
      defaultSimpleDBDomain: ${providers.aws.defaultSimpleDBDomain}

    spinnaker:
      cassandra:
        enabled: ${services.front50.cassandra.enabled:false}
        host: ${services.cassandra.host:localhost}
        port: ${services.cassandra.port:9042}
        cluster: ${services.cassandra.cluster:CASS_SPINNAKER}
        keyspace: front50
        name: global

      redis:
        enabled: ${services.front50.redis.enabled:false}

      gcs:
        enabled: ${services.front50.gcs.enabled:false}
        bucket: ${services.front50.storage_bucket:}
        bucketLocation: ${services.front50.bucket_location:}
        rootFolder: ${services.front50.rootFolder:front50}
        project: ${providers.google.primaryCredentials.project}
        jsonPath: ${providers.google.primaryCredentials.jsonPath}

      s3:
        enabled: ${services.front50.s3.enabled:false}
        bucket: ${services.front50.storage_bucket:}
        rootFolder: ${services.front50.rootFolder:front50}

    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: ${services.spectator.webEndpoint.enabled:false}
        prototypeFilter:
          path: ${services.spectator.webEndpoint.prototypeFilter.path:}

      stackdriver:
        enabled: ${services.stackdriver.enabled}
        projectName: ${services.stackdriver.projectName}
        credentialsPath: ${services.stackdriver.credentialsPath}

    stackdriver:
      hints:
        - name: controller.invocations
          labels:
          - application
          - cause
        - name: aws.request.httpRequestTime
          labels:
          - status
          - exception
          - AWSErrorCode
        - name: aws.request.requestSigningTime
          labels:
          - exception
  gate-armory.yml: |+
    lighthouse:
        baseUrl: http://${DEFAULT_DNS_NAME:lighthouse}:5000

  gate.yml: |
    server:
      port: ${services.gate.port:8084}
      address: ${services.gate.host:localhost}

    redis:
      connection: ${REDIS_HOST:redis://localhost:6379}
      configuration:
        secure: true

    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: ${services.spectator.webEndpoint.enabled:false}
        prototypeFilter:
          path: ${services.spectator.webEndpoint.prototypeFilter.path:}

      stackdriver:
        enabled: ${services.stackdriver.enabled}
        projectName: ${services.stackdriver.projectName}
        credentialsPath: ${services.stackdriver.credentialsPath}

    stackdriver:
      hints:
        - name: EurekaOkClient_Request
          labels:
          - cause
          - reason
          - status
  igor-nonpolling.yml: |
    jenkins:
      polling:
        enabled: false
  igor.yml: |
    server:
      port: ${services.igor.port:8088}
      address: ${services.igor.host:localhost}

    jenkins:
      enabled: ${services.jenkins.enabled:false}
      masters:
        - name: ${services.jenkins.defaultMaster.name}
          address: ${services.jenkins.defaultMaster.baseUrl}
          username: ${services.jenkins.defaultMaster.username}
          password: ${services.jenkins.defaultMaster.password}
          csrf: ${services.jenkins.defaultMaster.csrf:false}
          
    travis:
      enabled: ${services.travis.enabled:false}
      masters:
        - name: ${services.travis.defaultMaster.name}
          baseUrl: ${services.travis.defaultMaster.baseUrl}
          address: ${services.travis.defaultMaster.address}
          githubToken: ${services.travis.defaultMaster.githubToken}


    dockerRegistry:
      enabled: ${providers.dockerRegistry.enabled:false}


    redis:
      connection: ${REDIS_HOST:redis://localhost:6379}

    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: ${services.spectator.webEndpoint.enabled:false}
        prototypeFilter:
          path: ${services.spectator.webEndpoint.prototypeFilter.path:}
      stackdriver:
        enabled: ${services.stackdriver.enabled}
        projectName: ${services.stackdriver.projectName}
        credentialsPath: ${services.stackdriver.credentialsPath}

    stackdriver:
      hints:
        - name: controller.invocations
          labels:
          - master
  kayenta-armory.yml: |
    kayenta:
      aws:
        enabled: ${ARMORYSPINNAKER_S3_ENABLED:false}
        accounts:
          - name: aws-s3-storage
            bucket: ${ARMORYSPINNAKER_CONF_STORE_BUCKET}
            rootFolder: kayenta
            supportedTypes:
              - OBJECT_STORE
              - CONFIGURATION_STORE

      s3:
        enabled: ${ARMORYSPINNAKER_S3_ENABLED:false}

      google:
        enabled: ${ARMORYSPINNAKER_GCS_ENABLED:false}
        accounts:
          - name: cloud-armory
            bucket: ${ARMORYSPINNAKER_CONF_STORE_BUCKET}
            rootFolder: kayenta-prod
            supportedTypes:
              - METRICS_STORE
              - OBJECT_STORE
              - CONFIGURATION_STORE
              
      gcs:
        enabled: ${ARMORYSPINNAKER_GCS_ENABLED:false}
  kayenta.yml: |2

    server:
      port: 8090

    kayenta:
      atlas:
        enabled: false

      google:
        enabled: false

      aws:
        enabled: false

      datadog:
        enabled: false

      prometheus:
        enabled: false

      gcs:
        enabled: false

      s3:
        enabled: false

      stackdriver:
        enabled: false

      memory:
        enabled: false

      configbin:
        enabled: false

    keiko:
      queue:
        redis:
          queueName: kayenta.keiko.queue
          deadLetterQueueName: kayenta.keiko.queue.deadLetters

    redis:
      connection: ${REDIS_HOST:redis://localhost:6379}

    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: true

    swagger:
      enabled: true
      title: Kayenta API
      description:
      contact:
      patterns:
        - /admin.*
        - /canary.*
        - /canaryConfig.*
        - /canaryJudgeResult.*
        - /credentials.*
        - /fetch.*
        - /health
        - /judges.*
        - /metadata.*
        - /metricSetList.*
        - /metricSetPairList.*
        - /pipeline.*

    security.basic.enabled: false
    management.security.enabled: false
  nginx.conf: |
    user  nginx;
    worker_processes  1;

    error_log  /var/log/nginx/error.log warn;
    pid        /var/run/nginx.pid;

    events {
        worker_connections  1024;
    }

    http {
        include       /etc/nginx/mime.types;
        default_type  application/octet-stream;
        log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" "$http_x_forwarded_for"';
        access_log  /var/log/nginx/access.log  main;
        
        sendfile        on;
        keepalive_timeout  65;
        include /etc/nginx/conf.d/*.conf;
    }

    stream {
        upstream gate_api {
            server armory-gate:8085;
        }

        server {
            listen 8085;
            proxy_pass gate_api;
        }
    }
  nginx.http.conf: |
    gzip on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;

    server {
           listen 80;
           listen [::]:80;

           location / {
                proxy_pass http://armory-deck/;
           }

           location /api/ {
                proxy_pass http://armory-gate:8084/;
           }

           location /slack/ {
               proxy_pass http://armory-platform:10000/;
           }

           rewrite ^/login(.*)$ /api/login$1 last;
           rewrite ^/auth(.*)$ /api/auth$1 last;
    }
  nginx.https.conf: |
    gzip on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;

    server {
        listen 80;
        listen [::]:80;
        return 301 https://$host$request_uri;
    }

    server {
        listen 443 ssl;
        listen [::]:443 ssl;
        ssl on;
        ssl_certificate /opt/nginx/certs/ssl.crt;
        ssl_certificate_key /opt/nginx/certs/ssl.key;

        location / {
            proxy_pass http://armory-deck/;
        }

        location /api/ {
            proxy_pass http://armory-gate:8084/;
            proxy_set_header Host            $host;
            proxy_set_header X-Real-IP       $proxy_protocol_addr;
            proxy_set_header X-Forwarded-For $proxy_protocol_addr;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        location /slack/ {
            proxy_pass http://armory-platform:10000/;
        }
        rewrite ^/login(.*)$ /api/login$1 last;
        rewrite ^/auth(.*)$ /api/auth$1 last;
    }
  orca-armory.yml: |
    mine:
      baseUrl: http://${services.barometer.host}:${services.barometer.port}

    pipelineTemplate:
      enabled: ${features.pipelineTemplates.enabled:false}
      jinja:
        enabled: true

    kayenta:
      enabled: ${services.kayenta.enabled:false}
      baseUrl: ${services.kayenta.baseUrl}

    jira:
      enabled: ${features.jira.enabled:false}
      basicAuth:  "Basic ${features.jira.basicAuthToken}"
      url: ${features.jira.createIssueUrl}

    webhook:
      preconfigured:
        - label: Enforce Pipeline Policy
          description: Checks pipeline configuration against policy requirements
          type: enforcePipelinePolicy
          enabled: ${features.certifiedPipelines.enabled:false}
          url: "http://lighthouse:5000/v1/pipelines/${execution.application}/${execution.pipelineConfigId}?check_policy=yes"
          headers:
            Accept:
              - application/json
          method: GET
          waitForCompletion: true
          statusUrlResolution: getMethod
          statusJsonPath: $.status
          successStatuses: pass
          canceledStatuses:
          terminalStatuses: TERMINAL

        - label: "Jira: Create Issue"
          description:  Enter a Jira ticket when this pipeline runs
          type: createJiraIssue
          enabled: ${jira.enabled}
          url:  ${jira.url}
          customHeaders:
            "Content-Type": application/json
            Authorization: ${jira.basicAuth}
          method: POST
          parameters:
            - name: summary
              label: Issue Summary
              description: A short summary of your issue.
            - name: description
              label: Issue Description
              description: A longer description of your issue.
            - name: projectKey
              label: Project key
              description: The key of your JIRA project.
            - name: type
              label: Issue Type
              description: The type of your issue, e.g. "Task", "Story", etc.
          payload: |
            {
              "fields" : {
                "description": "${parameterValues['description']}",
                "issuetype": {
                   "name": "${parameterValues['type']}"
                },
                "project": {
                   "key": "${parameterValues['projectKey']}"
                },
                "summary":  "${parameterValues['summary']}"
              }
            }
          waitForCompletion: false

        - label: "Jira: Update Issue"
          description:  Update a previously created Jira Issue
          type: updateJiraIssue
          enabled: ${jira.enabled}
          url: "${execution.stages.?[type == 'createJiraIssue'][0]['context']['buildInfo']['self']}"
          customHeaders:
            "Content-Type": application/json
            Authorization: ${jira.basicAuth}
          method: PUT
          parameters:
            - name: summary
              label: Issue Summary
              description: A short summary of your issue.
            - name: description
              label: Issue Description
              description: A longer description of your issue.
          payload: |
            {
              "fields" : {
                "description": "${parameterValues['description']}",
                "summary": "${parameterValues['summary']}"
              }
            }
          waitForCompletion: false

        - label: "Jira: Transition Issue"
          description:  Change state of existing Jira Issue
          type: transitionJiraIssue
          enabled: ${jira.enabled}
          url: "${execution.stages.?[type == 'createJiraIssue'][0]['context']['buildInfo']['self']}/transitions"
          customHeaders:
            "Content-Type": application/json
            Authorization: ${jira.basicAuth}
          method: POST
          parameters:
            - name: newStateID
              label: New State ID
              description: The ID of the state you want to transition the issue to.
          payload: |
            {
              "transition" : {
                "id" : "${parameterValues['newStateID']}"
              }
            }
          waitForCompletion: false
        - label: "Jira: Add Comment"
          description:  Add a comment to an existing Jira Issue
          type: commentJiraIssue
          enabled: ${jira.enabled}
          url: "${execution.stages.?[type == 'createJiraIssue'][0]['context']['buildInfo']['self']}/comment"
          customHeaders:
            "Content-Type": application/json
            Authorization: ${jira.basicAuth}
          method: POST
          parameters:
            - name: body
              label: Comment body
              description: The text body of the component.
          payload: |
            {
              "body" : "${parameterValues['body']}"
            }
          waitForCompletion: false

  orca.yml: |
    server:
        port: ${services.orca.port:8083}
        address: ${services.orca.host:localhost}
    oort:
        baseUrl: ${services.oort.baseUrl:localhost:7002}
    front50:
        baseUrl: ${services.front50.baseUrl:localhost:8080}
    mort:
        baseUrl: ${services.mort.baseUrl:localhost:7002}
    kato:
        baseUrl: ${services.kato.baseUrl:localhost:7002}
    bakery:
        baseUrl: ${services.bakery.baseUrl:localhost:8087}
        extractBuildDetails: ${services.bakery.extractBuildDetails:true}
        allowMissingPackageInstallation: ${services.bakery.allowMissingPackageInstallation:true}
    echo:
        enabled: ${services.echo.enabled:false}
        baseUrl: ${services.echo.baseUrl:8089}
    igor:
        baseUrl: ${services.igor.baseUrl:8088}
    flex:
      baseUrl: http://not-a-host
    default:
      bake:
        account: ${providers.aws.primaryCredentials.name}
      securityGroups:
      vpc:
        securityGroups:
    redis:
      connection: ${REDIS_HOST:redis://localhost:6379}
    tasks:
      executionWindow:
        timezone: ${services.orca.timezone}
    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: ${services.spectator.webEndpoint.enabled:false}
        prototypeFilter:
          path: ${services.spectator.webEndpoint.prototypeFilter.path:}        
      stackdriver:
        enabled: ${services.stackdriver.enabled}
        projectName: ${services.stackdriver.projectName}
        credentialsPath: ${services.stackdriver.credentialsPath}
    stackdriver:
      hints:
        - name: controller.invocations
          labels:
          - application
  rosco-armory.yml: |
    redis:
      timeout: 50000

    rosco:
      jobs:
        local:
          timeoutMinutes: 60
  rosco.yml: |
    server:
      port: ${services.rosco.port:8087}
      address: ${services.rosco.host:localhost}

    redis:
      connection: ${REDIS_HOST:redis://localhost:6379}

    aws:
      enabled: ${providers.aws.enabled:false}

    docker:
      enabled: ${services.docker.enabled:false}
      bakeryDefaults:
        targetRepository: ${services.docker.targetRepository}

    google:
      enabled: ${providers.google.enabled:false}
      accounts:
        - name: ${providers.google.primaryCredentials.name}
          project: ${providers.google.primaryCredentials.project}
          jsonPath: ${providers.google.primaryCredentials.jsonPath}
      gce:
        bakeryDefaults:
          zone: ${providers.google.defaultZone}

    rosco:
      configDir: ${services.rosco.configDir}
      jobs:
        local:
          timeoutMinutes: 30

    spectator:
      applicationName: ${spring.application.name}
      webEndpoint:
        enabled: ${services.spectator.webEndpoint.enabled:false}
        prototypeFilter:
          path: ${services.spectator.webEndpoint.prototypeFilter.path:}
      stackdriver:
        enabled: ${services.stackdriver.enabled}
        projectName: ${services.stackdriver.projectName}
        credentialsPath: ${services.stackdriver.credentialsPath}

    stackdriver:
      hints:
        - name: bakes
          labels:
          - success
  spinnaker-armory.yml: |
    armory:
      architecture: 'k8s'
      
    features:
      artifacts:
        enabled: true
      pipelineTemplates:
        enabled: ${PIPELINE_TEMPLATES_ENABLED:false}
      infrastructureStages:
        enabled: ${INFRA_ENABLED:false}
      certifiedPipelines:
        enabled: ${CERTIFIED_PIPELINES_ENABLED:false}
      configuratorEnabled:
        enabled: true
      configuratorWizard:
        enabled: true
      configuratorCerts:
        enabled: true
      loadtestStage:
        enabled: ${LOADTEST_ENABLED:false}
      jira:
        enabled: ${JIRA_ENABLED:false}
        basicAuthToken: ${JIRA_BASIC_AUTH}
        url: ${JIRA_URL}
        login: ${JIRA_LOGIN}
        password: ${JIRA_PASSWORD}

      slaEnabled:
        enabled: ${SLA_ENABLED:false}
      chaosMonkey:
        enabled: ${CHAOS_ENABLED:false}

      armoryPlatform:
        enabled: ${PLATFORM_ENABLED:false}
        uiEnabled: ${PLATFORM_UI_ENABLED:false}

    services:
      default:
        host: ${DEFAULT_DNS_NAME:localhost}

      clouddriver:
        host: ${DEFAULT_DNS_NAME:armory-clouddriver}
        entityTags:
          enabled: false

      configurator:
        baseUrl: http://${CONFIGURATOR_HOST:armory-configurator}:8069

      echo:
        host: ${DEFAULT_DNS_NAME:armory-echo}

      deck:
        gateUrl: ${API_HOST:service.default.host}
        baseUrl: ${DECK_HOST:armory-deck}

      dinghy:
        enabled: ${DINGHY_ENABLED:false}
        host: ${DEFAULT_DNS_NAME:armory-dinghy}
        baseUrl: ${services.default.protocol}://${services.dinghy.host}:${services.dinghy.port}
        port: 8081

      front50:
        host: ${DEFAULT_DNS_NAME:armory-front50}
        cassandra:
          enabled: false
        redis:
          enabled: true
        gcs:
          enabled: ${ARMORYSPINNAKER_GCS_ENABLED:false}
        s3:
          enabled: ${ARMORYSPINNAKER_S3_ENABLED:false}
        storage_bucket: ${ARMORYSPINNAKER_CONF_STORE_BUCKET}
        rootFolder: ${ARMORYSPINNAKER_CONF_STORE_PREFIX:front50}

      gate:
        host: ${DEFAULT_DNS_NAME:armory-gate}

      igor:
        host: ${DEFAULT_DNS_NAME:armory-igor}


      kayenta:
        enabled: true
        host: ${DEFAULT_DNS_NAME:armory-kayenta}
        canaryConfigStore: true
        port: 8090
        baseUrl: ${services.default.protocol}://${services.kayenta.host}:${services.kayenta.port}
        metricsStore: ${METRICS_STORE:stackdriver}
        metricsAccountName: ${METRICS_ACCOUNT_NAME}
        storageAccountName: ${STORAGE_ACCOUNT_NAME}
        atlasWebComponentsUrl: ${ATLAS_COMPONENTS_URL:}
        
      lighthouse:
        host: ${DEFAULT_DNS_NAME:armory-lighthouse}
        port: 5000
        baseUrl: ${services.default.protocol}://${services.lighthouse.host}:${services.lighthouse.port}

      orca:
        host: ${DEFAULT_DNS_NAME:armory-orca}

      platform:
        enabled: ${PLATFORM_ENABLED:false}
        host: ${DEFAULT_DNS_NAME:armory-platform}
        baseUrl: ${services.default.protocol}://${services.platform.host}:${services.platform.port}
        port: 5001

      rosco:
        host: ${DEFAULT_DNS_NAME:armory-rosco}
        enabled: true
        configDir: /opt/spinnaker/config/packer

      bakery:
        allowMissingPackageInstallation: true

      barometer:
        enabled: ${BAROMETER_ENABLED:false}
        host: ${DEFAULT_DNS_NAME:armory-barometer}
        baseUrl: ${services.default.protocol}://${services.barometer.host}:${services.barometer.port}
        port: 9092
        newRelicEnabled: ${NEW_RELIC_ENABLED:false}

      redis:
        host: redis
        port: 6379
        connection: ${REDIS_HOST:redis://localhost:6379}

      fiat:
        enabled: ${FIAT_ENABLED:false}
        host: ${DEFAULT_DNS_NAME:armory-fiat}
        port: 7003
        baseUrl: ${services.default.protocol}://${services.fiat.host}:${services.fiat.port}

    providers:
      aws:
        enabled: ${SPINNAKER_AWS_ENABLED:true}
        defaultRegion: ${SPINNAKER_AWS_DEFAULT_REGION:us-west-2}
        defaultIAMRole: ${SPINNAKER_AWS_DEFAULT_IAM_ROLE:SpinnakerInstanceProfile}
        defaultAssumeRole: ${SPINNAKER_AWS_DEFAULT_ASSUME_ROLE:SpinnakerManagedProfile}
        primaryCredentials:
          name: ${SPINNAKER_AWS_DEFAULT_ACCOUNT:default-aws-account}

      kubernetes:
        proxy: localhost:8001
        apiPrefix: api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard/#
  spinnaker.yml: |2
    global:
      spinnaker:
        timezone: 'America/Los_Angeles'
        architecture: ${PLATFORM_ARCHITECTURE}

    services:
      default:
        host: localhost
        protocol: http
      clouddriver:
        host: ${services.default.host}
        port: 7002
        baseUrl: ${services.default.protocol}://${services.clouddriver.host}:${services.clouddriver.port}
        aws:
          udf:
            enabled: true

      echo:
        enabled: true
        host: ${services.default.host}
        port: 8089
        baseUrl: ${services.default.protocol}://${services.echo.host}:${services.echo.port}
        cassandra:
          enabled: false
        inMemory:
          enabled: true

        cron:
          enabled: true
          timezone: ${global.spinnaker.timezone}

        notifications:
          mail:
            enabled: false
            host: # the smtp host
            fromAddress: # the address emails are sent from
          hipchat:
            enabled: false
            url: # the hipchat server to connect to
            token: # the hipchat auth token
            botName: # the username of the bot
          sms:
            enabled: false
            account: # twilio account id
            token: # twilio auth token
            from: # the phone number SMS messages are sent from
          slack:
            enabled: false
            token: # the API token for the bot
            botName: # the username of the bot

      deck:
        host: ${services.default.host}
        port: 9000
        baseUrl: ${services.default.protocol}://${services.deck.host}:${services.deck.port}
        gateUrl: ${API_HOST:services.gate.baseUrl}
        bakeryUrl: ${services.bakery.baseUrl}
        timezone: ${global.spinnaker.timezone}
        auth:
          enabled: ${AUTH_ENABLED:false}


      fiat:
        enabled: false
        host: ${services.default.host}
        port: 7003
        baseUrl: ${services.default.protocol}://${services.fiat.host}:${services.fiat.port}

      front50:
        host: ${services.default.host}
        port: 8080
        baseUrl: ${services.default.protocol}://${services.front50.host}:${services.front50.port}
        storage_bucket: ${SPINNAKER_DEFAULT_STORAGE_BUCKET:}
        bucket_location:
        bucket_root: front50
        cassandra:
          enabled: false
        redis:
          enabled: false
        gcs:
          enabled: false
        s3:
          enabled: false

      gate:
        host: ${services.default.host}
        port: 8084
        baseUrl: ${services.default.protocol}://${services.gate.host}:${services.gate.port}

      igor:
        enabled: false
        host: ${services.default.host}
        port: 8088
        baseUrl: ${services.default.protocol}://${services.igor.host}:${services.igor.port}

      kato:
        host: ${services.clouddriver.host}
        port: ${services.clouddriver.port}
        baseUrl: ${services.clouddriver.baseUrl}

      mort:
        host: ${services.clouddriver.host}
        port: ${services.clouddriver.port}
        baseUrl: ${services.clouddriver.baseUrl}

      orca:
        host: ${services.default.host}
        port: 8083
        baseUrl: ${services.default.protocol}://${services.orca.host}:${services.orca.port}
        timezone: ${global.spinnaker.timezone}
        enabled: true

      oort:
        host: ${services.clouddriver.host}
        port: ${services.clouddriver.port}
        baseUrl: ${services.clouddriver.baseUrl}

      rosco:
        host: ${services.default.host}
        port: 8087
        baseUrl: ${services.default.protocol}://${services.rosco.host}:${services.rosco.port}
        configDir: /opt/rosco/config/packer

      bakery:
        host: ${services.rosco.host}
        port: ${services.rosco.port}
        baseUrl: ${services.rosco.baseUrl}
        extractBuildDetails: true
        allowMissingPackageInstallation: false

      docker:
        targetRepository: # Optional, but expected in spinnaker-local.yml if specified.

      jenkins:
        enabled: ${services.igor.enabled:false}
        defaultMaster:
          name: Jenkins
          baseUrl:   # Expected in spinnaker-local.yml
          username:  # Expected in spinnaker-local.yml
          password:  # Expected in spinnaker-local.yml

      redis:
        host: redis
        port: 6379
        connection: ${REDIS_HOST:redis://localhost:6379}

      cassandra:
        host: ${services.default.host}
        port: 9042
        embedded: false
        cluster: CASS_SPINNAKER

      travis:
        enabled: false
        defaultMaster:
          name: ci # The display name for this server. Gets prefixed with "travis-"
          baseUrl: https://travis-ci.com
          address: https://api.travis-ci.org
          githubToken: # a GitHub token with the scopes currently required by Travis

      spectator:
        webEndpoint:
          enabled: false

      stackdriver:
        enabled: ${SPINNAKER_STACKDRIVER_ENABLED:false}
        projectName: ${SPINNAKER_STACKDRIVER_PROJECT_NAME:${providers.google.primaryCredentials.project}}
        credentialsPath: ${SPINNAKER_STACKDRIVER_CREDENTIALS_PATH:${providers.google.primaryCredentials.jsonPath}}


    providers:
      aws:
        enabled: ${SPINNAKER_AWS_ENABLED:false}
        simpleDBEnabled: false
        defaultRegion: ${SPINNAKER_AWS_DEFAULT_REGION:us-west-2}
        defaultIAMRole: BaseIAMRole
        defaultSimpleDBDomain: CLOUD_APPLICATIONS
        primaryCredentials:
          name: default
        defaultKeyPairTemplate: "{{name}}-keypair"


      google:
        enabled: ${SPINNAKER_GOOGLE_ENABLED:false}
        defaultRegion: ${SPINNAKER_GOOGLE_DEFAULT_REGION:us-central1}
        defaultZone: ${SPINNAKER_GOOGLE_DEFAULT_ZONE:us-central1-f}


        primaryCredentials:
          name: my-account-name
          project: ${SPINNAKER_GOOGLE_PROJECT_ID:}
          jsonPath: ${SPINNAKER_GOOGLE_PROJECT_CREDENTIALS_PATH:}
          consul:
            enabled: ${SPINNAKER_GOOGLE_CONSUL_ENABLED:false}


      cf:
        enabled: false
        defaultOrg: spinnaker-cf-org
        defaultSpace: spinnaker-cf-space
        primaryCredentials:
          name: my-cf-account
          api: my-cf-api-uri
          console: my-cf-console-base-url

      azure:
        enabled: ${SPINNAKER_AZURE_ENABLED:false}
        defaultRegion: ${SPINNAKER_AZURE_DEFAULT_REGION:westus}
        primaryCredentials:
          name: my-azure-account

          clientId:
          appKey:
          tenantId:
          subscriptionId:

      titan:
        enabled: false
        defaultRegion: us-east-1
        primaryCredentials:
          name: my-titan-account

      kubernetes:

        enabled: ${SPINNAKER_KUBERNETES_ENABLED:false}
        primaryCredentials:
          name: my-kubernetes-account
          namespace: default
          dockerRegistryAccount: ${providers.dockerRegistry.primaryCredentials.name}

      dockerRegistry:
        enabled: ${SPINNAKER_KUBERNETES_ENABLED:false}

        primaryCredentials:
          name: my-docker-registry-account
          address: ${SPINNAKER_DOCKER_REGISTRY:https://index.docker.io/ }
          repository: ${SPINNAKER_DOCKER_REPOSITORY:}
          username: ${SPINNAKER_DOCKER_USERNAME:}
          passwordFile: ${SPINNAKER_DOCKER_PASSWORD_FILE:}
          
      openstack:
        enabled: false
        defaultRegion: ${SPINNAKER_OPENSTACK_DEFAULT_REGION:RegionOne}
        primaryCredentials:
          name: my-openstack-account
          authUrl: ${OS_AUTH_URL}
          username: ${OS_USERNAME}
          password: ${OS_PASSWORD}
          projectName: ${OS_PROJECT_NAME}
          domainName: ${OS_USER_DOMAIN_NAME:Default}
          regions: ${OS_REGION_NAME:RegionOne}
          insecure: false
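
All of the ${VAR:default} placeholders in the two files above are resolved at service startup from environment variables, which the deployments below inject from the init-env ConfigMap via envFrom. To confirm what a running service actually resolved, you can dump its environment; a minimal sketch, assuming the deployment names used in the following sections:

kubectl -n armory exec deploy/armory-clouddriver -- env | egrep 'DEFAULT_DNS_NAME|REDIS_HOST|ARMORYSPINNAKER'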

2.4.1.4 Create the deployment
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# vim dp-clouddriver.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: armory-clouddriver
  name: armory-clouddriver
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: armory-clouddriver
  template:
    metadata:
      annotations:
        artifact.spinnaker.io/location: '"armory"'
        artifact.spinnaker.io/name: '"armory-clouddriver"'
        artifact.spinnaker.io/type: '"kubernetes/deployment"'
        moniker.spinnaker.io/application: '"armory"'
        moniker.spinnaker.io/cluster: '"clouddriver"'
      labels:
        app: armory-clouddriver
    spec:
      containers:
      - name: armory-clouddriver
        image: harbor.iot.com/armory/spinnaker-clouddriver-slim:release-1.11.x-bee52673a
        imagePullPolicy: IfNotPresent
        command:
        - bash
        - -c
        args:
        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config
          && /opt/clouddriver/bin/clouddriver
        ports:
        - containerPort: 7002
          protocol: TCP
        env:
        - name: JAVA_OPTS
          value: -Xmx4096M
        envFrom:
        - configMapRef:
            name: init-env
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /health
            port: 7002
            scheme: HTTP
          initialDelaySeconds: 600
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          failureThreshold: 5
          httpGet:
            path: /health
            port: 7002
            scheme: HTTP
          initialDelaySeconds: 180
          periodSeconds: 3
          successThreshold: 5
          timeoutSeconds: 1
        securityContext:
          runAsUser: 0
        volumeMounts:
        - mountPath: /etc/podinfo
          name: podinfo
        - mountPath: /home/spinnaker/.aws
          name: credentials
        - mountPath: /opt/spinnaker/credentials/custom
          name: default-kubeconfig
        - mountPath: /opt/spinnaker/config/default
          name: default-config
        - mountPath: /opt/spinnaker/config/custom
          name: custom-config
      imagePullSecrets:
      - name: harbor
      volumes:
      - configMap:
          defaultMode: 420
          name: default-kubeconfig
        name: default-kubeconfig
      - configMap:
          defaultMode: 420
          name: custom-config
        name: custom-config
      - configMap:
          defaultMode: 420
          name: default-config
        name: default-config
      - name: credentials
        secret:
          defaultMode: 420
          secretName: credentials
      - downwardAPI:
          defaultMode: 420
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.labels
            path: labels
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.annotations
            path: annotations
        name: podinfo
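
The podinfo volume above uses the Kubernetes Downward API to project the pod's own labels and annotations into files under /etc/podinfo, where the container can read them at runtime. A quick way to inspect what actually gets mounted (a sketch against the running deployment):

kubectl -n armory exec deploy/armory-clouddriver -- cat /etc/podinfo/labels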
2.4.1.5 Create the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# vim svc-clouddriver.yaml
apiVersion: v1
kind: Service
metadata:
  name: armory-clouddriver
  namespace: armory
spec:
  ports:
  - port: 7002
    protocol: TCP
    targetPort: 7002
  selector:
    app: armory-clouddriver 
2.5.1 Apply the resource manifests
kubectl apply -f cm-init-env.yaml 
kubectl apply -f cm-custom-config.yaml
kubectl apply -f cm-default-config.yaml 
kubectl apply -f svc-clouddriver.yaml 
kubectl apply -f dp-clouddriver.yaml 
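Since clouddriver's readinessProbe only starts checking after 180 seconds, the rollout takes a few minutes; waiting on it explicitly avoids probing the service too early. A minimal sketch using the deployment created above:

kubectl -n armory rollout status deployment/armory-clouddriver --timeout=15m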
2.5.2 Verify the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# kubectl get pod -n armory -o wide
NAME                                 READY   STATUS    RESTARTS   AGE     IP              NODE                   NOMINATED NODE   
armory-clouddriver-595946645-2746x   1/1     Running   0          4m20s   10.244.236.16   k8sworker03.host.com   <none>           
minio-649655b79f-mm9bl               1/1     Running   0          2d20h   10.244.15.216   k8sworker01.host.com   <none>           
redis-785b797f56-sfp9t               1/1     Running   0          9h      10.244.236.13   k8sworker03.host.com   <none>           

[root@k8smaster01.host.com:/data/yaml/spinnaker/clouddriver]# kubectl exec -it minio-649655b79f-mm9bl -n armory -- /bin/sh
/ # curl armory-clouddriver:7002/health
{"status":"UP","kubernetes":{"status":"UP"},"dockerRegistry":{"status":"UP"},"redisHealth":{"status":"UP","maxIdle":100,"minIdle":25,"numActive":0,"numIdle":4,"numWaiters":0},"diskSpace":{"status":"UP","total":228674990080,"free":220168634368,"threshold":10485760}}

3. Deploy the data persistence component - front50

3.1 Prepare the Docker image
docker pull armory/spinnaker-front50-slim:release-1.8.x-93febf2
docker image ls |grep front50
docker tag 0d353788f4f2 harbor.iot.com/armory/spinnaker-front50-slim:release-1.8.x-93febf2
docker push harbor.iot.com/armory/spinnaker-front50-slim:release-1.8.x-93febf2
3.2 Prepare the resource manifests
3.2.1 Create the deployment
[root@k8smaster01.host.com:/data/yaml/spinnaker/front50]# vim dp-front50.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: armory-front50
  name: armory-front50
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: armory-front50
  template:
    metadata:
      annotations:
        artifact.spinnaker.io/location: '"armory"'
        artifact.spinnaker.io/name: '"armory-front50"'
        artifact.spinnaker.io/type: '"kubernetes/deployment"'
        moniker.spinnaker.io/application: '"armory"'
        moniker.spinnaker.io/cluster: '"front50"'
      labels:
        app: armory-front50
    spec:
      containers:
      - name: armory-front50
        image: harbor.iot.com/armory/spinnaker-front50-slim:release-1.8.x-93febf2
        imagePullPolicy: IfNotPresent
        command:
        - bash
        - -c
        args:
        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config
          && /opt/front50/bin/front50
        ports:
        - containerPort: 8080
          protocol: TCP
        env:
        - name: JAVA_OPTS
          value: -javaagent:/opt/front50/lib/jamm-0.2.5.jar -Xmx1000M
        envFrom:
        - configMapRef:
            name: init-env
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 600
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 180
          periodSeconds: 5
          successThreshold: 8
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /etc/podinfo
          name: podinfo
        - mountPath: /home/spinnaker/.aws
          name: credentials
        - mountPath: /opt/spinnaker/config/default
          name: default-config
        - mountPath: /opt/spinnaker/config/custom
          name: custom-config
      imagePullSecrets:
      - name: harbor
      volumes:
      - configMap:
          defaultMode: 420
          name: custom-config
        name: custom-config
      - configMap:
          defaultMode: 420
          name: default-config
        name: default-config
      - name: credentials
        secret:
          defaultMode: 420
          secretName: credentials
      - downwardAPI:
          defaultMode: 420
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.labels
            path: labels
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.annotations
            path: annotations
        name: podinfo

3.2.2 Create the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/front50]# vim svc-front50.yaml
apiVersion: v1
kind: Service
metadata:
  name: armory-front50
  namespace: armory
spec:
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: armory-front50
3.3 Apply the resource manifests
kubectl apply -f svc-front50.yaml
kubectl apply -f dp-front50.yaml
3.4 Verify
[root@k8smaster01.host.com:/data/yaml/spinnaker/front50]# kubectl get pods -n armory
NAME                                 READY   STATUS    RESTARTS   AGE
armory-clouddriver-595946645-2746x   1/1     Running   0          4h34m
armory-front50-bfd6dc68d-rjpr9       1/1     Running   0          4m50s
minio-649655b79f-mm9bl               1/1     Running   0          3d1h
redis-785b797f56-sfp9t               1/1     Running   0          13h
[root@k8smaster01.host.com:/data/yaml/spinnaker/front50]# kubectl exec -it minio-649655b79f-mm9bl -n armory -- /bin/sh
/ # curl armory-front50:8080/health
{"status":"UP"} 

4. Deploy the task orchestration component - orca

4.1 Prepare the Docker image
docker pull armory/spinnaker-orca-slim:release-1.10.x-769f4e5
docker images |grep orca
docker tag bdff7ea2c035 harbor.iot.com/armory/spinnaker-orca-slim:release-1.10.x-769f4e5
docker push harbor.iot.com/armory/spinnaker-orca-slim:release-1.10.x-769f4e5
4.2 Prepare the resource manifests
4.2.1 Create the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/orca]# vim svc-orca.yaml
apiVersion: v1
kind: Service
metadata:
  name: armory-orca
  namespace: armory
spec:
  ports:
  - port: 8083
    protocol: TCP
    targetPort: 8083
  selector:
    app: armory-orca

4.2.2 Create the deployment
[root@k8smaster01.host.com:/data/yaml/spinnaker/orca]# vim dp-orca.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: armory-orca
  name: armory-orca
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: armory-orca
  template:
    metadata:
      annotations:
        artifact.spinnaker.io/location: '"armory"'
        artifact.spinnaker.io/name: '"armory-orca"'
        artifact.spinnaker.io/type: '"kubernetes/deployment"'
        moniker.spinnaker.io/application: '"armory"'
        moniker.spinnaker.io/cluster: '"orca"'
      labels:
        app: armory-orca
    spec:
      containers:
      - name: armory-orca
        image: harbor.iot.com/armory/spinnaker-orca-slim:release-1.10.x-769f4e5
        imagePullPolicy: IfNotPresent
        command:
        - bash
        - -c
        args:
        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config
          && /opt/orca/bin/orca
        ports:
        - containerPort: 8083
          protocol: TCP
        env:
        - name: JAVA_OPTS
          value: -Xmx2000M
        envFrom:
        - configMapRef:
            name: init-env
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /health
            port: 8083
            scheme: HTTP
          initialDelaySeconds: 600
          periodSeconds: 5
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /health
            port: 8083
            scheme: HTTP
          initialDelaySeconds: 180
          periodSeconds: 3
          successThreshold: 5
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /etc/podinfo
          name: podinfo
        - mountPath: /opt/spinnaker/config/default
          name: default-config
        - mountPath: /opt/spinnaker/config/custom
          name: custom-config
      imagePullSecrets:
      - name: harbor
      volumes:
      - configMap:
          defaultMode: 420
          name: custom-config
        name: custom-config
      - configMap:
          defaultMode: 420
          name: default-config
        name: default-config
      - downwardAPI:
          defaultMode: 420
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.labels
            path: labels
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.annotations
            path: annotations
        name: podinfo
4.3 Apply the resource manifests
kubectl apply -f svc-orca.yaml
kubectl apply -f dp-orca.yaml 
4.4 Verify
[root@k8smaster01.host.com:/data/yaml/spinnaker/orca]# kubectl get pods -n armory
NAME                                 READY   STATUS    RESTARTS   AGE
armory-clouddriver-595946645-2746x   1/1     Running   0          4h59m
armory-front50-bfd6dc68d-rjpr9       1/1     Running   0          29m
armory-orca-756c9d46d4-hm8h6         1/1     Running   0          8m18s
minio-649655b79f-mm9bl               1/1     Running   0          3d1h
redis-785b797f56-sfp9t               1/1     Running   0          13h
[root@k8smaster01.host.com:/data/yaml/spinnaker/orca]# kubectl exec -it minio-649655b79f-mm9bl -n armory -- /bin/sh
/ # curl armory-orca:8083/health
{"status":"UP"}

5. Deploy the message bus component - echo

5.1 Prepare the Docker image
docker pull docker.io/armory/echo-armory:c36d576-release-1.8.x-617c567
docker images | grep echo
docker tag 415efd46f474 harbor.iot.com/armory/echo-armory:c36d576-release-1.8.x-617c567
docker push harbor.iot.com/armory/echo-armory:c36d576-release-1.8.x-617c567
5.2 Prepare the resource manifests
5.2.1 Create the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/echo]# vim svc-echo.yaml
apiVersion: v1
kind: Service
metadata:
  name: armory-echo
  namespace: armory
spec:
  ports:
  - port: 8089
    protocol: TCP
    targetPort: 8089
  selector:
    app: armory-echo

5.2.2 Create the deployment
[root@k8smaster01.host.com:/data/yaml/spinnaker/echo]# vim dp-echo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: armory-echo
  name: armory-echo
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: armory-echo
  template:
    metadata:
      annotations:
        artifact.spinnaker.io/location: '"armory"'
        artifact.spinnaker.io/name: '"armory-echo"'
        artifact.spinnaker.io/type: '"kubernetes/deployment"'
        moniker.spinnaker.io/application: '"armory"'
        moniker.spinnaker.io/cluster: '"echo"'
      labels:
        app: armory-echo
    spec:
      containers:
      - name: armory-echo
        image: harbor.iot.com/armory/echo-armory:c36d576-release-1.8.x-617c567
        imagePullPolicy: IfNotPresent
        command:
        - bash
        - -c
        args:
        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config
          && /opt/echo/bin/echo
        ports:
        - containerPort: 8089
          protocol: TCP
        env:
        - name: JAVA_OPTS
          value: -javaagent:/opt/echo/lib/jamm-0.2.5.jar -Xmx2000M
        envFrom:
        - configMapRef:
            name: init-env
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /health
            port: 8089
            scheme: HTTP
          initialDelaySeconds: 600
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /health
            port: 8089
            scheme: HTTP
          initialDelaySeconds: 180
          periodSeconds: 3
          successThreshold: 5
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /etc/podinfo
          name: podinfo
        - mountPath: /opt/spinnaker/config/default
          name: default-config
        - mountPath: /opt/spinnaker/config/custom
          name: custom-config
      imagePullSecrets:
      - name: harbor
      volumes:
      - configMap:
          defaultMode: 420
          name: custom-config
        name: custom-config
      - configMap:
          defaultMode: 420
          name: default-config
        name: default-config
      - downwardAPI:
          defaultMode: 420
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.labels
            path: labels
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.annotations
            path: annotations
        name: podinfo

5.3 Apply the resource manifests
kubectl apply -f svc-echo.yaml 
kubectl apply -f dp-echo.yaml 
5.4 Verify
[root@k8smaster01.host.com:/data/yaml/spinnaker/echo]# kubectl get pods -n armory
NAME                                 READY   STATUS    RESTARTS   AGE
armory-clouddriver-595946645-2746x   1/1     Running   0          5h18m
armory-echo-8688d7856f-xg8dg         1/1     Running   0          8m19s
armory-front50-bfd6dc68d-rjpr9       1/1     Running   0          48m
armory-orca-756c9d46d4-hm8h6         1/1     Running   0          27m
minio-649655b79f-mm9bl               1/1     Running   0          3d2h
redis-785b797f56-sfp9t               1/1     Running   0          14h
[root@k8smaster01.host.com:/data/yaml/spinnaker/echo]# kubectl exec -it minio-649655b79f-mm9bl -n armory -- /bin/sh
/ # curl armory-echo:8089/health
{"status":"UP"}

6. Deploy the pipeline interaction component - igor

6.1 Prepare the Docker image
docker pull armory/spinnaker-igor-slim:release-1.10.x-a4fd897
docker images | grep igor
docker tag 38eb6e415e04 harbor.iot.com/armory/spinnaker-igor-slim:release-1.10.x-a4fd897
docker push harbor.iot.com/armory/spinnaker-igor-slim:release-1.10.x-a4fd897
6.2 Prepare the resource manifests
6.2.1 Create the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/igor]# vim svc-igor.yaml
apiVersion: v1
kind: Service
metadata:
  name: armory-igor
  namespace: armory
spec:
  ports:
  - port: 8088
    protocol: TCP
    targetPort: 8088
  selector:
    app: armory-igor
6.2.2 Create the deployment
[root@k8smaster01.host.com:/data/yaml/spinnaker/igor]# vim dp-igor.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: armory-igor
  name: armory-igor
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: armory-igor
  template:
    metadata:
      annotations:
        artifact.spinnaker.io/location: '"armory"'
        artifact.spinnaker.io/name: '"armory-igor"'
        artifact.spinnaker.io/type: '"kubernetes/deployment"'
        moniker.spinnaker.io/application: '"armory"'
        moniker.spinnaker.io/cluster: '"igor"'
      labels:
        app: armory-igor
    spec:
      containers:
      - name: armory-igor
        image: harbor.iot.com/armory/spinnaker-igor-slim:release-1.10.x-a4fd897
        imagePullPolicy: IfNotPresent
        command:
        - bash
        - -c
        args:
        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config
          && /opt/igor/bin/igor
        ports:
        - containerPort: 8088
          protocol: TCP
        env:
        - name: IGOR_PORT_MAPPING
          value: -8088:8088
        - name: JAVA_OPTS
          value: -Xmx2000M
        envFrom:
        - configMapRef:
            name: init-env
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /health
            port: 8088
            scheme: HTTP
          initialDelaySeconds: 600
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /health
            port: 8088
            scheme: HTTP
          initialDelaySeconds: 180
          periodSeconds: 5
          successThreshold: 5
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /etc/podinfo
          name: podinfo
        - mountPath: /opt/spinnaker/config/default
          name: default-config
        - mountPath: /opt/spinnaker/config/custom
          name: custom-config
      imagePullSecrets:
      - name: harbor
      securityContext:
        runAsUser: 0
      volumes:
      - configMap:
          defaultMode: 420
          name: custom-config
        name: custom-config
      - configMap:
          defaultMode: 420
          name: default-config
        name: default-config
      - downwardAPI:
          defaultMode: 420
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.labels
            path: labels
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.annotations
            path: annotations
        name: podinfo

6.3 Apply the resource manifests
kubectl apply -f svc-igor.yaml 
kubectl apply -f dp-igor.yaml 
6.4 Verify
[root@k8smaster01.host.com:/data/yaml/spinnaker/igor]# kubectl get pods -n armory
NAME                                 READY   STATUS    RESTARTS   AGE
armory-clouddriver-595946645-2746x   1/1     Running   0          5h31m
armory-echo-8688d7856f-xg8dg         1/1     Running   0          21m
armory-front50-bfd6dc68d-rjpr9       1/1     Running   0          62m
armory-igor-55686997f-zmnpn          1/1     Running   0          6m12s
armory-orca-756c9d46d4-hm8h6         1/1     Running   0          40m
minio-649655b79f-mm9bl               1/1     Running   0          3d2h
redis-785b797f56-sfp9t               1/1     Running   0          14h
[root@k8smaster01.host.com:/data/yaml/spinnaker/igor]# kubectl exec -it minio-649655b79f-mm9bl -n armory -- /bin/sh
/ # curl armory-igor:8088/health
{"status":"UP"}

7. Deploy the API gateway component - gate

7.1 Prepare the Docker image
docker pull docker.io/armory/gate-armory:dfafe73-release-1.8.x-5d505ca
docker images | grep gate
docker tag b092d4665301 harbor.iot.com/armory/gate-armory:dfafe73-release-1.8.x-5d505ca
docker push harbor.iot.com/armory/gate-armory:dfafe73-release-1.8.x-5d505ca
7.2 Prepare the resource manifests
7.2.1 Create the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/gate]# vim svc-gate.yaml
apiVersion: v1
kind: Service
metadata:
  name: armory-gate
  namespace: armory
spec:
  ports:
  - name: gate-port
    port: 8084
    protocol: TCP
    targetPort: 8084
  - name: gate-api-port
    port: 8085
    protocol: TCP
    targetPort: 8085
  selector:
    app: armory-gate

7.2.2 Create the deployment
[root@k8smaster01.host.com:/data/yaml/spinnaker/gate]# vim dp-gate.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: armory-gate
  name: armory-gate
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: armory-gate
  template:
    metadata:
      annotations:
        artifact.spinnaker.io/location: '"armory"'
        artifact.spinnaker.io/name: '"armory-gate"'
        artifact.spinnaker.io/type: '"kubernetes/deployment"'
        moniker.spinnaker.io/application: '"armory"'
        moniker.spinnaker.io/cluster: '"gate"'
      labels:
        app: armory-gate
    spec:
      containers:
      - name: armory-gate
        image: harbor.iot.com/armory/gate-armory:dfafe73-release-1.8.x-5d505ca
        imagePullPolicy: IfNotPresent
        command:
        - bash
        - -c
        args:
        - bash /opt/spinnaker/config/default/fetch.sh gate && cd /home/spinnaker/config
          && /opt/gate/bin/gate
        ports:
        - containerPort: 8084
          name: gate-port
          protocol: TCP
        - containerPort: 8085
          name: gate-api-port
          protocol: TCP
        env:
        - name: GATE_PORT_MAPPING
          value: -8084:8084
        - name: GATE_API_PORT_MAPPING
          value: -8085:8085
        - name: JAVA_OPTS
          value: -Xmx2000M
        envFrom:
        - configMapRef:
            name: init-env
        livenessProbe:
          exec:
            command:
            - /bin/bash
            - -c
            - wget -O - http://localhost:8084/health || wget -O - https://localhost:8084/health
          failureThreshold: 5
          initialDelaySeconds: 600
          periodSeconds: 5
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          exec:
            command:
            - /bin/bash
            - -c
            - wget -O - 'http://localhost:8084/health?checkDownstreamServices=true&downstreamServices=true'
              || wget -O - 'https://localhost:8084/health?checkDownstreamServices=true&downstreamServices=true'
          failureThreshold: 3
          initialDelaySeconds: 180
          periodSeconds: 5
          successThreshold: 10
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /etc/podinfo
          name: podinfo
        - mountPath: /opt/spinnaker/config/default
          name: default-config
        - mountPath: /opt/spinnaker/config/custom
          name: custom-config
      imagePullSecrets:
      - name: harbor
      securityContext:
        runAsUser: 0
      volumes:
      - configMap:
          defaultMode: 420
          name: custom-config
        name: custom-config
      - configMap:
          defaultMode: 420
          name: default-config
        name: default-config
      - downwardAPI:
          defaultMode: 420
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.labels
            path: labels
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.annotations
            path: annotations
        name: podinfo

7.3 Apply the resource manifests
kubectl apply -f svc-gate.yaml
kubectl apply -f dp-gate.yaml 

7.4 Verify
[root@k8smaster01.host.com:/data/yaml/spinnaker/gate]# kubectl get pods -n armory
NAME                                 READY   STATUS    RESTARTS   AGE
armory-clouddriver-595946645-2746x   1/1     Running   0          5h43m
armory-echo-8688d7856f-xg8dg         1/1     Running   0          33m
armory-front50-bfd6dc68d-rjpr9       1/1     Running   0          74m
armory-gate-846bb9666d-rrrjt         1/1     Running   0          4m34s
armory-igor-55686997f-zmnpn          1/1     Running   0          18m
armory-orca-756c9d46d4-hm8h6         1/1     Running   0          52m
minio-649655b79f-mm9bl               1/1     Running   0          3d2h
redis-785b797f56-sfp9t               1/1     Running   0          14h
[root@k8smaster01.host.com:/data/yaml/spinnaker/gate]# kubectl exec -it minio-649655b79f-mm9bl -n armory -- /bin/sh
/ # curl armory-gate:8084/health
{"status":"UP"}

8. Deploy the front-end UI - deck

8.1 Prepare the Docker image
docker pull docker.io/armory/deck-armory:d4bf0cf-release-1.8.x-0a33f94
docker images | grep deck
docker tag 9a87ba3b319f harbor.iot.com/armory/deck-armory:d4bf0cf-release-1.8.x-0a33f94
docker push harbor.iot.com/armory/deck-armory:d4bf0cf-release-1.8.x-0a33f94

8.2 Prepare the resource manifests
8.2.1 Create the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/deck]# vim svc-deck.yaml
apiVersion: v1
kind: Service
metadata:
  name: armory-deck
  namespace: armory
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 9000
  selector:
    app: armory-deck

8.2.2 Create the deployment
[root@k8smaster01.host.com:/data/yaml/spinnaker/deck]# vim dp-deck.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: armory-deck
  name: armory-deck
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: armory-deck
  template:
    metadata:
      annotations:
        artifact.spinnaker.io/location: '"armory"'
        artifact.spinnaker.io/name: '"armory-deck"'
        artifact.spinnaker.io/type: '"kubernetes/deployment"'
        moniker.spinnaker.io/application: '"armory"'
        moniker.spinnaker.io/cluster: '"deck"'
      labels:
        app: armory-deck
    spec:
      containers:
      - name: armory-deck
        image: harbor.iot.com/armory/deck-armory:d4bf0cf-release-1.8.x-0a33f94
        imagePullPolicy: IfNotPresent
        command:
        - bash
        - -c
        args:
        - bash /opt/spinnaker/config/default/fetch.sh && /entrypoint.sh
        ports:
        - containerPort: 9000
          protocol: TCP
        envFrom:
        - configMapRef:
            name: init-env
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /
            port: 9000
            scheme: HTTP
          initialDelaySeconds: 180
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          failureThreshold: 5
          httpGet:
            path: /
            port: 9000
            scheme: HTTP
          initialDelaySeconds: 30
          periodSeconds: 3
          successThreshold: 5
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /etc/podinfo
          name: podinfo
        - mountPath: /opt/spinnaker/config/default
          name: default-config
        - mountPath: /opt/spinnaker/config/custom
          name: custom-config
      imagePullSecrets:
      - name: harbor
      volumes:
      - configMap:
          defaultMode: 420
          name: custom-config
        name: custom-config
      - configMap:
          defaultMode: 420
          name: default-config
        name: default-config
      - downwardAPI:
          defaultMode: 420
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.labels
            path: labels
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.annotations
            path: annotations
        name: podinfo

8.3 Apply the resource manifests
kubectl apply -f svc-deck.yaml 
kubectl apply -f dp-deck.yaml 
8.4 Verify
[root@k8smaster01.host.com:/data/yaml/spinnaker/deck]# kubectl get pods -n armory
NAME                                 READY   STATUS    RESTARTS   AGE
armory-clouddriver-595946645-2746x   1/1     Running   0          5h53m
armory-deck-6c5cb8fff5-sr7zn         1/1     Running   0          76s
armory-echo-8688d7856f-xg8dg         1/1     Running   0          43m
armory-front50-bfd6dc68d-rjpr9       1/1     Running   0          83m
armory-gate-846bb9666d-rrrjt         1/1     Running   0          13m
armory-igor-55686997f-zmnpn          1/1     Running   0          27m
armory-orca-756c9d46d4-hm8h6         1/1     Running   0          61m
minio-649655b79f-mm9bl               1/1     Running   0          3d2h
redis-785b797f56-sfp9t               1/1     Running   0          14h
[root@k8smaster01.host.com:/data/yaml/spinnaker/deck]# kubectl exec -it minio-649655b79f-mm9bl -n armory -- /bin/sh
/ # curl -s -o /dev/null -w '%{http_code}\n' armory-deck:80/
200

9. Deploy the front-end proxy - nginx

9.1 Prepare the Docker image
docker pull nginx:1.12.2
docker tag nginx:1.12.2 harbor.iot.com/armory/nginx:1.12.2
docker push harbor.iot.com/armory/nginx:1.12.2
9.2 Prepare the resource manifests
9.2.1 Create the service
[root@k8smaster01.host.com:/data/yaml/spinnaker/nginx]# vim svc-nginx.yaml
apiVersion: v1
kind: Service
metadata:
  name: armory-nginx
  namespace: armory
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
  - name: api
    port: 8085
    protocol: TCP
    targetPort: 8085
  selector:
    app: armory-nginx

9.2.2 Create the deployment
[root@k8smaster01.host.com:/data/yaml/spinnaker/nginx]# vim dp-nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: armory-nginx
  name: armory-nginx
  namespace: armory
spec:
  replicas: 1
  revisionHistoryLimit: 7
  selector:
    matchLabels:
      app: armory-nginx
  template:
    metadata:
      annotations:
        artifact.spinnaker.io/location: '"armory"'
        artifact.spinnaker.io/name: '"armory-nginx"'
        artifact.spinnaker.io/type: '"kubernetes/deployment"'
        moniker.spinnaker.io/application: '"armory"'
        moniker.spinnaker.io/cluster: '"nginx"'
      labels:
        app: armory-nginx
    spec:
      containers:
      - name: armory-nginx
        image: harbor.iot.com/armory/nginx:1.12.2
        imagePullPolicy: Always
        command:
        - bash
        - -c
        args:
        - bash /opt/spinnaker/config/default/fetch.sh nginx && nginx -g 'daemon off;'
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        - containerPort: 8085
          name: api
          protocol: TCP
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /
            port: 80
            scheme: HTTP
          initialDelaySeconds: 180
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /
            port: 80
            scheme: HTTP
          initialDelaySeconds: 30
          periodSeconds: 3
          successThreshold: 5
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /opt/spinnaker/config/default
          name: default-config
        - mountPath: /etc/nginx/conf.d
          name: custom-config
      imagePullSecrets:
      - name: harbor
      volumes:
      - configMap:
          defaultMode: 420
          name: custom-config
        name: custom-config
      - configMap:
          defaultMode: 420
          name: default-config
        name: default-config

9.2.3 Create the IngressRoute
[root@k8smaster01.host.com:/data/yaml/spinnaker/nginx]# vim ingressroute-spinnaker.yaml
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: spinnaker-route
  namespace: armory
  labels:
    app: spinnaker
    web: spinnaker.lowan.com
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: web
    kubernetes.io/ingress.class: "traefik"
spec:
  entryPoints:
  - web
  routes:
  - match: Host(`spinnaker.lowan.com`) && PathPrefix(`/`)
    kind: Rule
    services:
    - name: armory-nginx
      port: 80
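
Before touching DNS you can verify the route object and exercise the traefik entrypoint directly by sending the Host header by hand (a sketch; 192.168.13.100 is the ingress VIP used in the DNS record in the next step):

kubectl -n armory get ingressroute spinnaker-route
curl -I -H 'Host: spinnaker.lowan.com' http://192.168.13.100/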

9.3 Apply the resource manifests
kubectl apply -f svc-nginx.yaml 
kubectl apply -f dp-nginx.yaml 
kubectl apply -f ingressroute-spinnaker.yaml 
9.4 Add DNS resolution
[root@lb03.host.com:/root]# vim /var/named/lowan.com.zone
Add:
spinnaker       A       192.168.13.100

[root@lb03.host.com:/root]# systemctl restart named 
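
A quick resolution check once named has restarted (a sketch; run it from a host that uses this nameserver):

dig +short spinnaker.lowan.com
# expected: 192.168.13.100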
9.5 Verify
[root@k8smaster03.host.com:/root]# kubectl get pods -n armory -o wide
NAME                                 READY   STATUS    RESTARTS   AGE     IP               NODE                   NOMINATED NODE   READINESS GATES
armory-clouddriver-595946645-2746x   1/1     Running   0          2d21h   10.244.236.16    k8sworker03.host.com   <none>           <none>
armory-deck-6c5cb8fff5-sr7zn         1/1     Running   0          2d15h   10.244.15.227    k8sworker01.host.com   <none>           <none>
armory-echo-8688d7856f-xg8dg         1/1     Running   0          2d16h   10.244.15.226    k8sworker01.host.com   <none>           <none>
armory-front50-bfd6dc68d-rjpr9       1/1     Running   0          2d16h   10.244.111.202   k8sworker02.host.com   <none>           <none>
armory-gate-846bb9666d-rrrjt         1/1     Running   0          2d15h   10.244.111.247   k8sworker02.host.com   <none>           <none>
armory-igor-55686997f-zmnpn          1/1     Running   0          2d15h   10.244.111.218   k8sworker02.host.com   <none>           <none>
armory-nginx-9769948dd-mws9x         1/1     Running   0          2d15h   10.244.222.169   k8sworker04.host.com   <none>           <none>
armory-orca-756c9d46d4-hm8h6         1/1     Running   0          2d16h   10.244.111.207   k8sworker02.host.com   <none>           <none>
minio-649655b79f-mm9bl               1/1     Running   0          5d18h   10.244.15.216    k8sworker01.host.com   <none>           <none>
redis-785b797f56-sfp9t               1/1     Running   0          3d6h    10.244.236.13    k8sworker03.host.com   <none>           <none>

Open http://spinnaker.lowan.com/ in a browser; the Spinnaker UI should load. (Screenshots of the UI omitted.)

Further reading (cluster series):

01 Kubernetes binary deployment
02 Kubernetes auxiliary environment setup
03 K8S cluster network ACL rules
04 Ceph cluster deployment
05 Deploying ZooKeeper and Kafka clusters
06 Deploying the logging system
07 Deploying InfluxDB-Telegraf
08 Deploying Jenkins
09 Deploying k3s and Helm-Rancher
10 Deploying Maven
