Theory and Best Practices for Deploying APISIX on Kubernetes (Part 2)

Author: stackofumbrella

Deploying the etcd cluster

APISIX stores all of its runtime configuration in etcd, so a three-replica etcd cluster is deployed first.

$ vim etcd.yaml

apiVersion: v1
kind: Namespace
metadata:
  name: gv-public
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: apisix-etcd
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
spec:
  podManagementPolicy: Parallel
  replicas: 3
  serviceName: apisix-etcd-headless
  selector:
    matchLabels:
      app.kubernetes.io/instance: apisix-etcd
      app.kubernetes.io/name: apisix-etcd
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: apisix-etcd
        app.kubernetes.io/name: apisix-etcd
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchLabels:
                  app.kubernetes.io/instance: apisix-etcd
                  app.kubernetes.io/name: apisix-etcd
              topologyKey: kubernetes.io/hostname
            weight: 1
      containers:
      - name: apisix-etcd-app
        image: swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/bitnami/etcd:3.5.14
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 2379
          name: client
          protocol: TCP
        - containerPort: 2380
          name: peer
          protocol: TCP
        env:
        - name: BITNAMI_DEBUG
          value: 'false'
        - name: MY_POD_IP
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.podIP
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: MY_STS_NAME
          value: apisix-etcd
        - name: ETCDCTL_API
          value: '3'
        - name: ETCD_ON_K8S
          value: 'yes'
        - name: ETCD_START_FROM_SNAPSHOT
          value: 'no'
        - name: ETCD_DISASTER_RECOVERY
          value: 'no'
        - name: ETCD_NAME
          value: $(MY_POD_NAME)
        - name: ETCD_DATA_DIR
          value: /bitnami/etcd/data
        - name: ETCD_LOG_LEVEL
          value: info
        - name: ALLOW_NONE_AUTHENTICATION
          value: 'yes'
        - name: ETCD_ADVERTISE_CLIENT_URLS
          value: http://$(MY_POD_NAME).apisix-etcd-headless.gv-public.svc.cluster.local:2379
        - name: ETCD_LISTEN_CLIENT_URLS
          value: http://0.0.0.0:2379
        - name: ETCD_INITIAL_ADVERTISE_PEER_URLS
          value: http://$(MY_POD_NAME).apisix-etcd-headless.gv-public.svc.cluster.local:2380
        - name: ETCD_LISTEN_PEER_URLS
          value: http://0.0.0.0:2380
        - name: ETCD_INITIAL_CLUSTER_TOKEN
          value: apisix-etcd-cluster-k8s
        - name: ETCD_INITIAL_CLUSTER_STATE
          value: new
        - name: ETCD_INITIAL_CLUSTER
          value: apisix-etcd-0=http://apisix-etcd-0.apisix-etcd-headless.gv-public.svc.cluster.local:2380,apisix-etcd-1=http://apisix-etcd-1.apisix-etcd-headless.gv-public.svc.cluster.local:2380,apisix-etcd-2=http://apisix-etcd-2.apisix-etcd-headless.gv-public.svc.cluster.local:2380
        - name: ETCD_CLUSTER_DOMAIN
          value: apisix-etcd-headless.gv-public.svc.cluster.local
        volumeMounts:
        - name: data-etcd
          mountPath: /bitnami/etcd
        lifecycle:
          preStop:
            exec:
              command:
              - /opt/bitnami/scripts/etcd/prestop.sh
        livenessProbe:
          exec:
            command:
            - /opt/bitnami/scripts/etcd/healthcheck.sh
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          exec:
            command:
            - /opt/bitnami/scripts/etcd/healthcheck.sh
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 5
      securityContext:
        fsGroup: 1001
  volumeClaimTemplates:
  - metadata:
      name: data-etcd
    spec:
      accessModes: 
      - ReadWriteOnce
      storageClassName: "nfs-client"
      resources:
        requests:
          storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  name: apisix-etcd-headless
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
spec:
  ports:
  - name: client
    port: 2379
    protocol: TCP
    targetPort: 2379
  - name: peer
    port: 2380
    protocol: TCP
    targetPort: 2380
  clusterIP: None
  selector:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
  publishNotReadyAddresses: true
--- 
apiVersion: v1
kind: Service
metadata:
  name: apisix-etcd
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
spec:
  ports:
  - name: client
    port: 2379
    protocol: TCP
    targetPort: 2379
  - name: peer
    port: 2380
    protocol: TCP
    targetPort: 2380
  selector:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd

$ kubectl apply -f etcd.yaml
$ kubectl get pod -n gv-public
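
With all three pods Running, confirm that the members actually formed a quorum. A quick sanity check (etcdctl ships in the Bitnami image, and ETCDCTL_API=3 is already set by the manifest above):

$ kubectl -n gv-public exec apisix-etcd-0 -- etcdctl member list
$ kubectl -n gv-public exec apisix-etcd-0 -- etcdctl endpoint health --cluster

All three members should show up as started and healthy before moving on to APISIX.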

Installing APISIX

With etcd up, deploy APISIX itself: a two-replica Deployment, its config.yaml ConfigMap, a ClusterIP Service for the Admin API, and a NodePort Service for the gateway.

$ vim apisix.yaml

kind: Deployment
apiVersion: apps/v1
metadata:
  name: apisix
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
    app.kubernetes.io/version: 3.9.1
spec:
  replicas: 2
  selector:
    matchLabels:
      app.kubernetes.io/instance: apisix
      app.kubernetes.io/name: apisix
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: apisix
        app.kubernetes.io/name: apisix
    spec:
      volumes:
        - name: apisix-config
          configMap:
            name: apisix
            defaultMode: 420
      initContainers:
        - name: wait-etcd
          image: swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/library/busybox:1.28
          command:
            - sh
            - '-c'
            - >-
              until nc -z apisix-etcd.gv-public.svc.cluster.local 2379; do echo
              waiting for etcd `date`; sleep 2; done;
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      containers:
        - name: apisix
          image: swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/apache/apisix:3.9.1-debian
          ports:
            - name: http
              containerPort: 9080
              protocol: TCP
            - name: tls
              containerPort: 9443
              protocol: TCP
            - name: admin
              containerPort: 9180
              protocol: TCP
          resources: {}
          volumeMounts:
            - name: apisix-config
              mountPath: /usr/local/apisix/conf/config.yaml
              subPath: config.yaml
          readinessProbe:
            tcpSocket:
              port: 9080
            initialDelaySeconds: 10
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 6
          lifecycle:
            preStop:
              exec:
                command:
                - /bin/sh
                - '-c'
                - sleep 30
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: apisix
  namespace: gv-public
data:
  config.yaml: >-
    apisix:
      node_listen: 9080              # APISIX listening port
      enable_ipv6: false
      enable_control: true
      control:
        ip: "0.0.0.0"
        port: 9092
    deployment:
      admin:
        allow_admin:               # https://nginx.org/en/docs/http/ngx_http_access_module.html#allow
          - 0.0.0.0/0              # We need to restrict ip access rules for security. 0.0.0.0/0 is for test.
    
        admin_key:
          - name: "admin"
            key: edd1c9f034335f136f87ad84b625c8f1
            role: admin                 # admin: manage all configuration data
    
          - name: "viewer"
            key: 4054f7cf07e344346cd3f287985e76a2
            role: viewer
    
      etcd:
        host:                           # it's possible to define multiple etcd hosts addresses of the same etcd cluster.
          - "http://apisix-etcd.gv-public.svc.cluster.local:2379"
        prefix: "/apisix"               # apisix configurations prefix
        timeout: 30                     # 30 seconds
    plugins:                          # plugin list
      - api-breaker
      - authz-keycloak
      - basic-auth
      - batch-requests
      - consumer-restriction
      - cors
      - echo
      - fault-injection
      - grpc-transcode
      - hmac-auth
      - http-logger
      - ip-restriction
      - ua-restriction
      - jwt-auth
      - kafka-logger
      - key-auth
      - limit-conn
      - limit-count
      - limit-req
      - node-status
      - openid-connect
      - authz-casbin
      - prometheus
      - proxy-cache
      - proxy-mirror
      - proxy-rewrite
      - redirect
      - referer-restriction
      - request-id
      - request-validation
      - response-rewrite
      - serverless-post-function
      - serverless-pre-function
      - sls-logger
      - syslog
      - tcp-logger
      - udp-logger
      - uri-blocker
      - wolf-rbac
      - zipkin
      - server-info
      - traffic-split
      - gzip
      - real-ip
    stream_plugins:
      - mqtt-proxy
      - ip-restriction
      - limit-conn
 
    plugin_attr:
      prometheus:
        export_addr:
          ip: "0.0.0.0"
          port: 9091
---
kind: Service
apiVersion: v1
metadata:
  name: apisix-admin
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
    app.kubernetes.io/version: 3.9.1
spec:
  ports:
    - name: apisix-admin
      protocol: TCP
      port: 9180
      targetPort: 9180
  selector:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
  type: ClusterIP
---
kind: Service
apiVersion: v1
metadata:
  name: apisix-gateway
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
    app.kubernetes.io/version: 3.9.1
spec:
  ports:
    - name: apisix-gateway
      protocol: TCP
      port: 80
      targetPort: 9080
      nodePort: 31784
  selector:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
  type: NodePort
  sessionAffinity: None
  externalTrafficPolicy: Cluster
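
Apply the manifest and wait for the pods to become Ready (the wait-etcd init container blocks start-up until etcd answers on 2379):

$ kubectl apply -f apisix.yaml
$ kubectl get pod -n gv-public -l app.kubernetes.io/name=apisix

As a smoke test, create a route through the Admin API and call it through the gateway NodePort. This is only an illustrative sketch: the route id, the /get path and the httpbin.org upstream are placeholders, and the admin key is the test key from the ConfigMap above (replace it outside of a lab environment):

$ kubectl -n gv-public port-forward svc/apisix-admin 9180:9180 &
$ curl -s http://127.0.0.1:9180/apisix/admin/routes/1 \
    -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \
    -X PUT -d '{
      "uri": "/get",
      "upstream": {
        "type": "roundrobin",
        "nodes": { "httpbin.org:80": 1 }
      }
    }'
$ curl -i http://<node-ip>:31784/get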