目录 (Contents)
  1. Base environment
  2. Basic system configuration
    2.1. Set the hostname
    2.2. Disable the firewall & SELinux
    2.3. Configure YUM repositories
    2.4. Install Docker & Kubernetes components
  3. Deploy the etcd cluster
    3.1. Install etcd
    3.2. Configure etcd
    3.3. Start the etcd cluster
    3.4. Check etcd cluster status
    3.5. Add a new node to the etcd cluster
  4. Start Docker & kubelet
  5. Export and import images
    5.1. Export images
    5.2. Import images
  6. Deploy the Kubernetes cluster
    6.1. Initialize the cluster
      6.1.1. Record the node join command
      6.1.2. Check cluster node status
      6.1.3. Allow Pods on the master
    6.2. Add Node nodes
    6.3. Deploy the Pod network
    6.4. DNS notes
      6.4.1. Scale out kube-dns
    6.5. Deploy the Dashboard
  7. Kubernetes application examples
    7.1. Deploying Nginx on Kubernetes
      7.1.1. Pull the image
      7.1.2. Define the RC file
      7.1.3. Create & inspect Pods
      7.1.4. Define the Service files
      7.1.5. Check the Services
      7.1.6. Manually scale the RC
    7.2. Accessing the Kubernetes Dashboard through an Ingress
      7.2.1. Deploy the default backend
      7.2.2. Deploy the Ingress Controller
      7.2.3. Deploy the Ingress
    7.3. Common Kubernetes commands
    Base environment

    • CentOS 7.3 64bit
    • Docker 1.13.0
    • Kubernetes 1.5.4
    • etcd 3.1.0

    Basic system configuration

    • 10.201.3.222 Master
    • 10.201.3.223 Node, etcd (leader)
    • 10.201.3.224 Node, etcd (follower)

    Set the hostname

    hostnamectl --static set-hostname <hostname>
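
    Optionally, add all three machines to /etc/hosts on every node so the hostnames resolve; a minimal sketch, assuming the sd-3-centos22x hostnames that show up in the kubectl output later in this post:

    # Append host entries on every node (hostnames assumed from the output shown later)
    echo "10.201.3.222 sd-3-centos222" >> /etc/hosts
    echo "10.201.3.223 sd-3-centos223" >> /etc/hosts
    echo "10.201.3.224 sd-3-centos224" >> /etc/hosts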

    Disable the firewall & SELinux

    systemctl disable firewalld
    systemctl stop firewalld
    sed -i '/SELINUX/s/\(enforcing\|permissive\)/disabled/' /etc/selinux/config
    setenforce 0
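
    A quick check that SELinux is no longer enforcing (the config-file change only fully applies after a reboot):

    # Should report Permissive now, Disabled after the next reboot
    getenforce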

    Configure YUM repositories

    • epel

      wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
      rpm -ivh epel-release-latest-7.noarch.rpm
    • docker.repo: /etc/yum.repos.d/docker.repo

      [dockerrepo]
      name=Docker Repository
      baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker/yum/repo/centos7
      enabled=1
      gpgcheck=1
      gpgkey=https://mirrors.tuna.tsinghua.edu.cn/docker/yum/gpg
    • kubernetes.repo: /etc/yum.repos.d/kubernetes.repo

      [kubernetes]
      name=Kubernetes
      baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
      enabled=1
      gpgcheck=0
      repo_gpgcheck=0

    Install Docker & Kubernetes components

    yum install -y ebtables socat docker-engine-1.13.0 docker-engine-selinux-1.13.0 kubelet kubeadm kubectl kubernetes-cni
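
    A quick sanity check that the expected versions were installed (exact output varies by build):

    docker --version          # expect 1.13.0
    kubeadm version
    kubectl version --client
    kubelet --version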

    Deploy the etcd cluster

      Kubernetes uses etcd as its datastore. By default kubeadm deploys a single-node etcd; for high availability we deploy a separate etcd cluster instead.

    • 10.201.3.223: leader
    • 10.201.3.224: follower
    • Port 2379: client-facing etcd port (4001 before etcd 2.0)
    • Port 2380: peer communication port between cluster members (7001 before etcd 2.0)

    Install etcd

    yum install -y etcd

    Configure etcd

      /etc/etcd/etcd.conf on 10.201.3.223 (etcd0):

    # Member name
    ETCD_NAME=etcd0
    # Data directory
    ETCD_DATA_DIR="/var/lib/etcd/etcd0"
    # Peer listen address
    ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
    # Client listen addresses
    ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
    # Peer URL advertised to the rest of the cluster
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.201.3.223:2380"
    # Initial cluster members
    ETCD_INITIAL_CLUSTER="etcd0=http://10.201.3.223:2380,etcd1=http://10.201.3.224:2380"
    # Initial cluster state; "new" bootstraps a new cluster
    ETCD_INITIAL_CLUSTER_STATE="new"
    # Cluster token
    ETCD_INITIAL_CLUSTER_TOKEN="test-etcd-cluster"
    # Client URLs advertised to clients
    ETCD_ADVERTISE_CLIENT_URLS="http://10.201.3.223:2379,http://10.201.3.223:4001"
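
    The second member, etcd1 on 10.201.3.224, needs the mirror-image configuration in its own /etc/etcd/etcd.conf; a sketch derived by symmetry from the etcd0 file above:

    ETCD_NAME=etcd1
    ETCD_DATA_DIR="/var/lib/etcd/etcd1"
    ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
    ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.201.3.224:2380"
    ETCD_INITIAL_CLUSTER="etcd0=http://10.201.3.223:2380,etcd1=http://10.201.3.224:2380"
    ETCD_INITIAL_CLUSTER_STATE="new"
    ETCD_INITIAL_CLUSTER_TOKEN="test-etcd-cluster"
    ETCD_ADVERTISE_CLIENT_URLS="http://10.201.3.224:2379,http://10.201.3.224:4001"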

    Start the etcd cluster

    systemctl enable etcd
    systemctl start etcd
    systemctl status etcd

    Check etcd cluster status

    # Check cluster health
    etcdctl cluster-health
    #member 2076196e6a1f8bed is healthy: got healthy result from http://10.201.3.223:2379
    #member 703f1d56a3b37f45 is healthy: got healthy result from http://10.201.3.224:2379
    #cluster is healthy

    # List cluster members
    etcdctl member list
    #2076196e6a1f8bed: name=etcd0 peerURLs=http://10.201.3.223:2380 clientURLs=http://10.201.3.223:2379,http://10.201.3.223:4001 isLeader=true
    #703f1d56a3b37f45: name=etcd1 peerURLs=http://10.201.3.224:2380 clientURLs=http://10.201.3.224:2379,http://10.201.3.224:4001 isLeader=false
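
    As an optional sanity check, write a key on one member and read it back from the other (etcdctl's v2 API, the same mode used by cluster-health above):

    # On 10.201.3.223
    etcdctl set /test/hello world
    # On 10.201.3.224
    etcdctl get /test/hello
    # world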

    Add a new node to the etcd cluster

    • Add the new node

      etcdctl member add etcd2 http://10.201.3.222:2380
      #Added member named etcd2 with ID 9975afe1f36cc116 to cluster
      #ETCD_NAME="etcd2"
      #ETCD_INITIAL_CLUSTER="etcd0=http://10.201.3.223:2380,etcd1=http://10.201.3.224:2380,etcd2=http://10.201.3.222:2380"
    • Configure the new node
      Use the ETCD_NAME, ETCD_INITIAL_CLUSTER and ETCD_INITIAL_CLUSTER_STATE values printed above in etcd2's configuration file:

      ETCD_NAME=etcd2
      ETCD_DATA_DIR="/var/lib/etcd/etcd2"
      ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
      ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
      ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.201.3.222:2380"
      ETCD_INITIAL_CLUSTER="etcd0=http://10.201.3.223:2380,etcd1=http://10.201.3.224:2380,etcd2=http://10.201.3.222:2380"
      ETCD_INITIAL_CLUSTER_STATE="existing"
      ETCD_INITIAL_CLUSTER_TOKEN="ssj-etcd-cluster"
      ETCD_ADVERTISE_CLIENT_URLS="http://10.201.3.222:2379,http://10.201.3.222:4001"
    • Start the new node

      systemctl start etcd
      etcdctl member list
    • Remove the etcd2 node
      To demonstrate removing a member:

      etcdctl member list
      #2076196e6a1f8bed: name=etcd0 peerURLs=http://10.201.3.223:2380 clientURLs=http://10.201.3.223:2379,http://10.201.3.223:4001 isLeader=true
      #703f1d56a3b37f45: name=etcd1 peerURLs=http://10.201.3.224:2380 clientURLs=http://10.201.3.224:2379,http://10.201.3.224:4001 isLeader=false
      #9975afe1f36cc116: name=etcd2 peerURLs=http://10.201.3.222:2380 clientURLs=http://10.201.3.222:2379,http://10.201.3.222:4001 isLeader=false
      etcdctl member remove 9975afe1f36cc116
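
    After removing the member, also stop etcd on the removed machine and clear its data directory before any later re-add, so stale member state does not conflict; a sketch using the paths from this post:

    # On the removed node (10.201.3.222)
    systemctl stop etcd
    rm -rf /var/lib/etcd/etcd2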

    Start Docker & kubelet

      Start Docker and the kubelet on every node. The kubelet may fail to start because it has not been configured yet; that is fine for now.

    systemctl enable docker && systemctl start docker
    systemctl enable kubelet && systemctl start kubelet
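
    If you want to see why the kubelet is failing at this stage, its state and logs are available through systemd:

    systemctl status kubelet
    journalctl -u kubelet --no-pager | tail -n 20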

    Export and import images

      The Kubernetes components are all containerized, so their images have to be pulled from gcr.io, which is unreachable because of the GFW. Either pull the images on a machine outside the firewall, export them, and import them into the local cluster, or go through a proxy; this guide uses the former. Binding gcr.io in hosts (61.91.161.217 gcr.io www.gcr.io) also works, but pulling that way is slow.
      Images required by Kubernetes 1.5.4 (a pull sketch follows the list):

    • gcr.io/google_containers/dnsmasq-metrics-amd64:1.0
    • gcr.io/google_containers/exechealthz-amd64:1.2
    • gcr.io/google_containers/kube-apiserver-amd64:v1.5.4
    • gcr.io/google_containers/kube-controller-manager-amd64:v1.5.4
    • gcr.io/google_containers/kube-discovery-amd64:1.0
    • gcr.io/google_containers/kubedns-amd64:1.9
    • gcr.io/google_containers/kube-dnsmasq-amd64:1.4
    • gcr.io/google_containers/kube-proxy-amd64:v1.5.4
    • gcr.io/google_containers/kube-scheduler-amd64:v1.5.4
    • gcr.io/google_containers/pause-amd64:3.0
    • gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0
    • gcr.io/google_containers/etcd-amd64:3.0.14-kubeadm
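
    On a machine that can reach gcr.io, the whole list can be pulled in one loop before exporting; a minimal sketch:

    # Run on a host with access to gcr.io
    for img in \
      gcr.io/google_containers/dnsmasq-metrics-amd64:1.0 \
      gcr.io/google_containers/exechealthz-amd64:1.2 \
      gcr.io/google_containers/kube-apiserver-amd64:v1.5.4 \
      gcr.io/google_containers/kube-controller-manager-amd64:v1.5.4 \
      gcr.io/google_containers/kube-discovery-amd64:1.0 \
      gcr.io/google_containers/kubedns-amd64:1.9 \
      gcr.io/google_containers/kube-dnsmasq-amd64:1.4 \
      gcr.io/google_containers/kube-proxy-amd64:v1.5.4 \
      gcr.io/google_containers/kube-scheduler-amd64:v1.5.4 \
      gcr.io/google_containers/pause-amd64:3.0 \
      gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0 \
      gcr.io/google_containers/etcd-amd64:3.0.14-kubeadm; do
      docker pull "$img"
    done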

    Export images

      Every node needs these images imported.

    docker save `docker images | grep -v TAG | awk '{print $1":"$2}'` > kube.tar

    Import images

      Copy the exported archive to the local cluster nodes and load it:

    docker load < kube.tar
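
    Afterwards all of the gcr.io images should show up locally:

    docker images | grep gcr.io/google_containers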

    Deploy the Kubernetes cluster

    Initialize the cluster

      Initialize the cluster with kubeadm on the master node. During initialization, any image not found locally will be pulled from gcr.io.
      When running kubeadm init with the Flannel network, add --pod-network-cidr=10.244.0.0/16. Flannel performs better than Weave; for production, Open vSwitch is also worth considering.

    # Set the component log level
    export KUBE_COMPONENT_LOGLEVEL='--v=1'
    # init for the Weave network
    kubeadm init --use-kubernetes-version v1.5.4 --api-advertise-addresses=10.201.3.222 --external-etcd-endpoints http://10.201.3.223:2379,http://10.201.3.224:2379
    # init for the Flannel network
    kubeadm init --use-kubernetes-version v1.5.4 --api-advertise-addresses=10.201.3.222 --pod-network-cidr=10.244.0.0/16 --external-etcd-endpoints http://10.201.3.223:2379,http://10.201.3.224:2379

      kubeadm init generates a token when it finishes; record it, as it is needed later when adding Nodes to the cluster.
      If the token is lost, it can be recovered with:

    kubectl -n kube-system get secret clusterinfo -o yaml | grep token-map | awk '{print $2}' | base64 --decode | sed "s|{||g;s|}||g;s|:|.|g;s/\"//g;" | xargs echo

    Record the node join command

      When kubeadm init finishes, it prints the command the remaining Node nodes use to join the cluster; record it so they can be added later:

    kubeadm join --token=06ac4c.a0f93b8da729ac48 10.201.3.222

    Check cluster node status

    kubectl get nodes
    #NAME STATUS AGE
    #sd-3-centos222 Ready,master 12m

    Allow Pods on the master

      By default the master node does not run Pods; the following command allows Pods to be scheduled on the master:

    kubectl taint nodes --all dedicated-
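
    This strips the dedicated taint from all nodes. If you later want to stop scheduling ordinary Pods on the master again, the taint can be re-applied; a sketch, assuming the original kubeadm taint key/effect of dedicated=master:NoSchedule and the master hostname from the output above:

    # Re-apply the master taint (use your master's name from `kubectl get nodes`)
    kubectl taint nodes sd-3-centos222 dedicated=master:NoSchedule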

    Add Node nodes

      Once the Kubernetes master is deployed successfully, the remaining Node nodes can be added to the cluster.
      Docker and the kubelet were already started and the required images imported in the earlier steps, so the nodes just need to join. Run the following on each Node (10.201.3.222 is the master):

    kubeadm join --token=06ac4c.a0f93b8da729ac48 10.201.3.222

    Deploy the Pod network

      For Pods on different Nodes to reach each other, a dedicated Pod network has to be deployed. Weave is the usual default; Flannel performs better, and Open vSwitch can be considered for production.
      If you deploy Flannel, the Network value in the net-conf.json section of kube-flannel.yml must match the --pod-network-cidr=10.244.0.0/16 passed to kubeadm init (see the check after the commands below).

    # Deploy the Weave network
    kubectl apply -f https://git.io/weave-kube
    # Deploy the Flannel network
    kubectl create -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
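
    To check (or edit) that Network value before applying the Flannel manifest, a small sketch that just downloads the file and inspects its net-conf.json block:

    wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    # The "Network" entry must match --pod-network-cidr
    grep -A 3 'net-conf.json' kube-flannel.yml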

    DNS notes

      Kubernetes DNS is provided by kube-dns, which depends on the Pod network, so kube-dns stays in the ContainerCreating state until the Pod network is up. Use the command below to list all Pods; when everything is healthy they are all Running:

    kubectl get pods --all-namespaces

    Scale out kube-dns

      DNS is a critical service, and the kube-dns created by kubeadm runs as a single replica; if that Pod fails, DNS for the whole cluster is down, so scale kube-dns out to avoid the single point of failure.

    # Scale the number of kube-dns replicas
    kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUMBER>
    # Check the kube-dns Pods
    kubectl get pods --namespace=kube-system | grep 'kube-dns'

    Deploy the Dashboard

      Kubernetes provides the Dashboard, a web UI for managing the cluster.
      As with the other components, the image has to be pulled elsewhere, exported and imported locally; the image referenced in kubernetes-dashboard.yaml is gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0.

    wget https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml
    kubectl create -f ./kubernetes-dashboard.yaml
    # Look up the NodePort the Service listens on
    kubectl describe svc kubernetes-dashboard --namespace=kube-system
    # Access the Dashboard in a browser
    http://10.201.3.222:NodePort

    Kubernetes application examples

    Deploying Nginx on Kubernetes

      There are generally two ways to deploy a service on Kubernetes; this example uses the first:

    1. Define an RC to create the Pods first, then define the Service associated with it
    2. Define the Service first, then define the RC that creates the Pods

    Pull the image

    docker pull nginx

    Define the RC file

    # nginx.yaml
    apiVersion: v1
    kind: ReplicationController
    metadata:
      name: nginx
      labels:
        name: nginx
    spec:
      replicas: 2
      selector:
        name: nginx
      template:
        metadata:
          labels:
            name: nginx
        spec:
          containers:
          - name: nginx
            image: nginx
            imagePullPolicy: IfNotPresent
            ports:
            - containerPort: 80
    • kind specifies the resource type
    • spec.selector is the RC's Pod selector; the RC monitors and manages the Pods carrying this label, ensuring the cluster always runs exactly replicas instances
    • spec.template is the template used to create Pod instances; its labels must match spec.selector
    • spec.template.spec.containers describes the container's properties

    Kubernetes now recommends using a Deployment instead of an RC:

    #nginx-deployment.yaml
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 2
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx
            ports:
            - containerPort: 80
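
    If you go the Deployment route, create it the same way and check the result:

    kubectl create -f nginx-deployment.yaml
    kubectl get deployments
    kubectl get pods -l app=nginx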

    Create & inspect Pods

    kubectl create -f nginx.yaml

    kubectl get pods -o wide
    #NAME READY STATUS RESTARTS AGE IP NODE
    #nginx-dftws 1/1 Running 0 2d 10.32.0.4 sd-3-centos224
    #nginx-vj60j 1/1 Running 0 2d 10.40.0.4 sd-3-centos223

    Define the Service files

      The Service created from nginx-service.yaml cannot be reached from outside the cluster; for external access, create a Service with a NodePort (shown further below).

    #nginx-service.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service
      labels:
        name: nginx-service
    spec:
      ports:
      - port: 80
        targetPort: 80
        protocol: TCP
      selector:
        name: nginx

    • metadata.name is the Service's name
    • spec.selector determines which Pods belong to this Service (here, Pods carrying the nginx label belong to nginx-service)
    • spec.ports.targetPort is the port the process listens on inside the container; spec.ports.port is the port of the Service itself

    The Service created from nginx-service-nodeport.yaml uses a port on each Node's host system, so it can be reached directly as Node:Port.

    #nginx-service-nodeport.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service-nodeport
      labels:
        name: nginx-service-nodeport
    spec:
      type: NodePort
      ports:
      - port: 80
        nodePort: 32222
        targetPort: 80
        protocol: TCP
      selector:
        name: nginx

    • type: NodePort exposes the Service on a physical port of each Node, providing access from outside the cluster
    • spec.ports.nodePort must be in the 30000~32767 range, so it is often left unset and assigned automatically; the assigned NodePort can then be looked up with kubectl describe svc <service-name>
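
    Create both Service definitions before checking them in the next step:

    kubectl create -f nginx-service.yaml
    kubectl create -f nginx-service-nodeport.yaml
    # The NodePort Service is then reachable on any node IP, e.g.
    curl http://10.201.3.222:32222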

    Check the Services

    kubectl get svc -o wide
    #NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
    #kubernetes 10.96.0.1 <none> 443/TCP 4d <none>
    #nginx-service 10.107.214.134 <none> 80/TCP 29m name=nginx
    #nginx-service-nodeport 10.100.84.60 <nodes> 80:32222/TCP 3m name=nginx

    kubectl describe svc nginx-service-nodeport
    #Name: nginx-service-nodeport
    #Namespace: default
    #Labels: name=nginx-service-nodeport
    #Selector: name=nginx
    #Type: NodePort
    #IP: 10.100.84.60
    #Port: <unset> 80/TCP
    #NodePort: <unset> 32222/TCP
    #Endpoints: 10.32.0.4:80,10.40.0.4:80
    #Session Affinity: None
    #No events.

    Manually scale the RC

    kubectl scale rc nginx --replicas=<NUMBER>
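
    The equivalent for the Deployment variant shown earlier:

    kubectl scale deployment nginx --replicas=<NUMBER>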

    Accessing the Kubernetes Dashboard through an Ingress

      Ingress was introduced in Kubernetes 1.2 for exposing services; before that, only LoadBalancer and NodePort were available. An Ingress uses a reverse proxy such as Nginx or HAProxy to expose Services externally, dynamically routing requests to the right Service based on host/URL rules.
      This section demonstrates using the Nginx Ingress to reverse-proxy the Kubernetes Dashboard.

    Deploy the default backend

      It is common to deploy a default backend, used when no host or URL rule matches. The default backend does nothing but return a 404 page; the official default-backend.yaml can be used directly.

    kubectl create -f default-backend.yaml
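
    Assuming the official manifest creates a Deployment and Service named default-http-backend in kube-system (the controller args below reference that name), a quick check that it is up:

    kubectl get pods,svc -n kube-system | grep default-http-backend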

    Deploy the Ingress Controller

      The Ingress Controller can be deployed as either a Deployment or a DaemonSet; a DaemonSet is used here. The official nginx-ingress-daemonset manifest:

    apiVersion: extensions/v1beta1
    kind: DaemonSet
    metadata:
      name: nginx-ingress-lb
      labels:
        name: nginx-ingress-lb
      namespace: kube-system
    spec:
      template:
        metadata:
          labels:
            name: nginx-ingress-lb
          annotations:
            prometheus.io/port: '10254'
            prometheus.io/scrape: 'true'
        spec:
          terminationGracePeriodSeconds: 60
          hostNetwork: true
          containers:
          - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.5
            name: nginx-ingress-lb
            readinessProbe:
              httpGet:
                path: /healthz
                port: 10254
                scheme: HTTP
            livenessProbe:
              httpGet:
                path: /healthz
                port: 10254
                scheme: HTTP
              initialDelaySeconds: 10
              timeoutSeconds: 1
            ports:
            - containerPort: 80
              hostPort: 80
            - containerPort: 443
              hostPort: 443
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            args:
            - /nginx-ingress-controller
            - --default-backend-service=$(POD_NAMESPACE)/default-http-backend

      Create the Nginx Ingress Controller:

    kubectl create -f nginx-ingress-controller.yaml
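
    Because the DaemonSet uses hostNetwork, one controller Pod should be running on each node, with nginx bound to the host's ports 80/443; a quick check:

    kubectl get pods -n kube-system -o wide | grep nginx-ingress-lb
    # On any node, nginx should now be listening on the host
    ss -tlnp | grep nginx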

    Deploy the Ingress

      A kind: Ingress resource defines the Nginx Ingress routing rules (host/URL):

    #kubernetes-dashboard-ingress.yaml
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: kubernetes-dashboard-ingress
      namespace: kube-system
    spec:
      rules:
      - host: your.domain.com
        http:
          paths:
          - backend:
              serviceName: kubernetes-dashboard
              servicePort: 80

    kubectl create -f kubernetes-dashboard-ingress.yaml

    kubectl get ingress -n kube-system
    #NAME HOSTS ADDRESS PORTS AGE
    #kubernetes-dashboard-ingress your.domain.com 10.201.3.222,... 80 1m

      Once the Ingress is deployed, browsing to http://your.domain.com goes straight to the kubernetes-dashboard.
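
    If your.domain.com does not resolve yet, the rule can still be tested by sending the Host header to any node running the controller; a sketch:

    curl -H "Host: your.domain.com" http://10.201.3.222/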

    Common Kubernetes commands

    # Check Kubernetes cluster status (run on the master)
    kubectl cluster-info

    # List nodes
    kubectl get nodes

    # List ReplicationControllers
    kubectl get rc

    # List Pods
    kubectl get pods
    kubectl get pods -o wide
    kubectl get pods -n kube-system

    # Show details of a Pod
    kubectl describe pods <pod-name>

    # List Services
    kubectl get services
    kubectl get svc

    # Show details of a Service
    kubectl describe services <service-name>
    kubectl describe svc <service-name>

    # Print a Pod's environment variables
    kubectl exec <pod-name> env

    # Open a bash shell inside a Pod
    kubectl exec -ti <pod-name> -- bash

    # Create resources from a yaml file
    kubectl create -f <xxx.yaml>

    # Delete resources
    kubectl delete pods,services -l name=<labels-name>

    # Delete all Pods
    kubectl delete pods --all

    # Stop resources
    kubectl stop replicationcontroller foo
    kubectl stop pods,services -l name=myLabel
    kubectl stop -f service.json

    # Scale Pods dynamically
    kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUMBER>
