ansible搭建k8s

1.主机规划

类型

主机ip

域名

vip

k8s-master

192.168.47.47

k8s-master.example.com

192.168.47.49

k8s-harbar/haproxy

192.168.47.48

k8s-harbar.example.com

k8s-etcd1

192.168.47.50

k8s-etcd1.example.com

k8s-etcd2

192.168.47.51

k8s-etcd2.example.com

k8s-etcd3

192.168.47.52

k8s-etcd3.example.com

k8s-node1

192.168.47.53

k8s-node1.example.com

k8s-node2

192.168.47.54

k8s-node2.example.com

环境

root@ubuntu:~# cat /etc/issue
Ubuntu 18.04.3 LTS \n \l

2.基础环境准备

2.1.更改网卡名称为eth0

vim /etc/default/grub
------------------------------------------------------
GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"
------------------------------------------------------
update-grub

2.2.更改系统ip地址

vim /etc/netplan/01-netcfg.yaml
------------------------------------------------------
network:
version: 2
renderer: networkd
ethernets:
eth0:
dhcp4: no
addresses: [192.168.47.47/24]
gateway4: 192.168.47.2
nameservers:
addresses: [192.168.47.2]
------------------------------------------------------
netplan apply

2.3.更改主机名

cat /etc/hostname
------------------------
k8s-master.example.com
------------------------

2.4.apt源改为阿里源

https://developer.aliyun.com/mirror/ubuntu?spm=a2c6h.13651102.0.0.3e221b11mmN9H3

apt-get upgrade

2.5.安装常用命令

apt-get  install iproute2  ntpdate  tcpdump telnet traceroute \
nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev \
libpcre3 libpcre3-dev zlib1g-dev ntpdate tcpdump telnet traceroute \
gcc openssh-server lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev \
zlib1g-dev ntpdate tcpdump telnet traceroute iotop unzip zip -y

2.6.其他配置

grep "^[a-Z]" /etc/sysctl.conf 
---------------------------------------------
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
net.ipv4.ip_forward = 1
---------------------------------------------

2.7.安装docker

参考:https://developer.aliyun.com/article/110806

#方式1:官方安装脚本自动安装
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

#方式2:
apt-get update
apt-get -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt-get -y update && apt-get -y install docker-ce
docker info

#镜像加速:
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://vmaivck0.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker

2.8.禁用swap,selinux(ubuntu没有),iptables

swapoff -a #临时关闭swap

2.9master/node/etcd安装

apt-get install python2.7 -y && ln -s /usr/bin/python2.7 /usr/bin/python

2.10.reboot做快照

3.keepalived

apt-get install keepalived  -y

cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf
vim /etc/keepalived/keepalived.conf
-----------------------------------------------------------------
virtual_ipaddress {
192.168.47.49/24 dev eth0 label eth0:1 #设置的vip必须被master访问
}
-----------------------------------------------------------------
systemctl restart keepalived && systemctl enable keepalived

4.haproxy

apt-get install haproxy -y

vim /etc/haproxy/haproxy.cfg
-----------------------------------------------------------------
listen k8s_api_nodes_6443
bind 192.168.47.49:6443
mode tcp
server 192.168.47.47 192.168.47.47:6443 check inter 2000 fall 3 rise 5
-----------------------------------------------------------------
systemctl restart haproxy && systemctl enable haproxy

5.harbor-https

参考:https://blog.51cto.com/taowenwu/5217570

将harbor的证书及/lib/systemd/system/docker.service拷贝到master

root@k8s-harbar:~# scp /lib/systemd/system/docker.service 192.168.47.47:/lib/systemd/system/docker.service

#master 创建目录
mkdir /etc/docker/certs.d/harbor.gesila.com -p
scp /etc/docker/certs.d/harbor.gesila.com/harbor.gesila.com.crt 192.168.47.47:/etc/docker/certs.d/harbor.gesila.com

systemctl daemon-reload && systemctl restart docker

注意:要使其他主机登录harbor,需要将/lib/systemd/system/docker.service中IP改为域名(因为这里用了域名),如下:


                                            ansible搭建k8s

6.master免密登录

apt-get install sshpass  #ssh同步公钥
root@k8s-master1:~# ssh-keygen

同步证书及秘钥脚本:
-----------------------------------------------------------------------------------
#!/bin/bash
# Push the deploy host's SSH public key to every target node, then copy the
# Harbor certificate, Harbor login credentials, /etc/hosts and the
# docker.service unit to each node and restart docker on that node.
#目标主机列表 (target host list; word-splitting of ${IP} below is intentional)
IP="
192.168.47.50
192.168.47.51
192.168.47.52
192.168.47.53
192.168.47.54
"
for node in ${IP};do
# Options must come before the destination host for portable ssh-copy-id usage.
if sshpass -p tao123 ssh-copy-id -o StrictHostKeyChecking=no "${node}";then
echo "${node} 秘钥copy完成"
echo "${node} 秘钥copy完成,准备环境初始化....."
ssh "${node}" "mkdir -p /etc/docker/certs.d/harbor.gesila.com"
echo "Harbor 证书目录创建成功!"
scp /etc/docker/certs.d/harbor.gesila.com/harbor.gesila.com.crt "${node}":/etc/docker/certs.d/harbor.gesila.com/harbor.gesila.com.crt > /dev/null
echo "Harbor 证书拷贝成功!"
scp -r /root/.docker "${node}":/root/ > /dev/null
echo "Harbor 认证文件拷贝完成!"
scp /etc/hosts "${node}":/etc/hosts > /dev/null
echo "host 文件拷贝完成"
scp /lib/systemd/system/docker.service "${node}":/lib/systemd/system/docker.service > /dev/null
# Bug fix: the original ran these two commands on the LOCAL deploy host;
# the intent (per the message below) is to restart docker on the remote node.
ssh "${node}" "systemctl daemon-reload && systemctl restart docker"
echo "docker.service 文件拷贝完成并重启成功"
else
echo "${node} 秘钥copy失败"
fi
done
-----------------------------------------------------------------------------------
tao123 #是登录用户的密码
#测试
sshpass -p "tao123" ssh-copy-id 192.168.47.47 -o StrictHostKeyChecking=no


                                            ansible搭建k8s

其他主机测试登录


                                            ansible搭建k8s

7.ansible部署

7.1.ansible安装

apt-get install git  ansible 

git clone -b 0.6.0 https://github.com/easzlab/kubeasz.git
root@k8s-master1:~/kubeasz# mkdir /data/ansible
root@k8s-master1:~/kubeasz# mv /etc/ansible/* /data/ansible/
root@k8s-master1:~/kubeasz# pwd
/root/kubeasz
root@k8s-master1:~/kubeasz# cp -r ./* /etc/ansible/
root@k8s-master1:~/kubeasz# cd /etc/ansible/
root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts
root@k8s-master1:/etc/ansible# ll
-------------------------------------------------------------------
total 132
drwxr-xr-x 10 root root 4096 Dec 27 15:33 ./
drwxr-xr-x 96 root root 4096 Dec 27 15:28 ../
-rw-r--r-- 1 root root 499 Dec 27 15:31 01.prepare.yml
-rw-r--r-- 1 root root 58 Dec 27 15:31 02.etcd.yml
-rw-r--r-- 1 root root 115 Dec 27 15:31 03.docker.yml
-rw-r--r-- 1 root root 532 Dec 27 15:31 04.kube-master.yml
-rw-r--r-- 1 root root 72 Dec 27 15:31 05.kube-node.yml
-rw-r--r-- 1 root root 346 Dec 27 15:31 06.network.yml
-rw-r--r-- 1 root root 77 Dec 27 15:31 07.cluster-addon.yml
-rw-r--r-- 1 root root 1549 Dec 27 15:31 11.harbor.yml
-rw-r--r-- 1 root root 1667 Dec 27 15:31 19.addetcd.yml
-rw-r--r-- 1 root root 1.10 Dec 27 15:31 20.addnode.yml
-rw-r--r-- 1 root root 1666 Dec 27 15:31 21.addmaster.yml
-rw-r--r-- 1 root root 467 Dec 27 15:31 22.upgrade.yml
-rw-r--r-- 1 root root 1394 Dec 27 15:31 23.backup.yml
-rw-r--r-- 1 root root 1447 Dec 27 15:31 24.restore.yml
-rw-r--r-- 1 root root 1723 Dec 27 15:31 90.setup.yml
-rw-r--r-- 1 root root 5496 Dec 27 15:31 99.clean.yml
-rw-r--r-- 1 root root 10283 Dec 27 15:31 ansible.cfg
drwxr-xr-x 2 root root 4096 Dec 27 15:31 bin/
drwxr-xr-x 8 root root 4096 Dec 27 15:31 docs/
drwxr-xr-x 2 root root 4096 Dec 27 15:31 down/
drwxr-xr-x 2 root root 4096 Dec 27 15:31 example/
-rw-r--r-- 1 root root 2667 Dec 27 15:33 hosts
drwxr-xr-x 14 root root 4096 Dec 27 15:31 manifests/
drwxr-xr-x 2 root root 4096 Dec 27 15:31 pics/
-rw-r--r-- 1 root root 4963 Dec 27 15:31 README.md
drwxr-xr-x 23 root root 4096 Dec 27 15:31 roles/
drwxr-xr-x 2 root root 4096 Dec 27 15:31 tools/

7.2.修改host文件

root@k8s-master:/etc/ansible# grep -Ev '^($|#)' hosts
[deploy]
192.168.47.47 NTP_ENABLED=no
[etcd]
192.168.47.50 NODE_NAME=etcd1
192.168.47.51 NODE_NAME=etcd2
192.168.47.52 NODE_NAME=etcd3
[new-etcd] # 预留组,后续添加etcd节点使用
[kube-master]
192.168.47.47
[new-master] # 预留组,后续添加master节点使用
[kube-node]
192.168.47.53
192.168.47.54
[new-node] # 预留组,后续添加node节点使用
[harbor]
[lb]
[ex-lb]
[all:vars]
DEPLOY_MODE=multi-master
K8S_VER="v1.13"
MASTER_IP="192.168.47.49"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
CLUSTER_NETWORK="calico"
SERVICE_CIDR="10.20.0.0/16"
CLUSTER_CIDR="172.31.0.0/16"
NODE_PORT_RANGE="20000-40000"
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
CLUSTER_DNS_SVC_IP="10.20.254.254"
CLUSTER_DNS_DOMAIN="linux36.local."
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="test1234"
bin_dir="/usr/bin"
ca_dir="/etc/kubernetes/ssl"
base_dir="/etc/ansible"

7.3.二进制文件

root@k8s-master1:~# tar xf k8s.1-13-5.tar.gz 
root@k8s-master1:~# ls
bin k8s.1-13-5.tar.gz
root@k8s-master1:~# cd bin/
root@k8s-master1:~/bin# mv * /etc/ansible/bin/
root@k8s-master1:~/bin# cd /etc/ansible/bin/
root@k8s-master1:/etc/ansible/bin# ll
total 763036
drwxr-xr-x 2 root root 4096 Dec 27 16:52 ./
drwxr-xr-x 10 root root 4096 Dec 27 15:39 ../
-rwxr-xr-x 1 root root 4028260 Mar 16 2019 bridge*
-rwxr-xr-x 1 root root 30863968 Mar 10 2019 calicoctl*
-rwxr-xr-x 1 root root 10376657 Jun 22 2018 cfssl*
-rwxr-xr-x 1 root root 6595195 Jun 22 2018 cfssl-certinfo*
-rwxr-xr-x 1 root root 2277873 Jun 22 2018 cfssljson*
-rwxr-xr-x 1 root root 27941976 Feb 10 2019 containerd*
-rwxr-xr-x 1 root root 4964704 Feb 10 2019 containerd-shim*
-rwxr-xr-x 1 root root 15678392 Feb 10 2019 ctr*
-rwxr-xr-x 1 root root 50683148 Feb 10 2019 docker*
-rwxr-xr-x 1 root root 10858808 Jul 6 2018 docker-compose*
-rwxr-xr-x 1 root root 54320560 Feb 10 2019 dockerd*
-rwxr-xr-x 1 root root 764144 Feb 10 2019 docker-init*
-rwxr-xr-x 1 root root 2837280 Feb 10 2019 docker-proxy*
-rwxr-xr-x 1 root root 19237536 Oct 11 2018 etcd*
-rwxr-xr-x 1 root root 15817472 Oct 11 2018 etcdctl*
-rwxr-xr-x 1 root root 2856252 Mar 16 2019 flannel*
-rwxr-xr-x 1 root root 36844864 Jan 23 2019 helm*
-rwxr-xr-x 1 root root 3036768 Mar 16 2019 host-local*
-rwxr-xr-x 1 root root 138710240 Mar 26 2019 kube-apiserver*
-rwxr-xr-x 1 root root 103982976 Mar 26 2019 kube-controller-manager*
-rwxr-xr-x 1 root root 39239104 Mar 26 2019 kubectl*
-rwxr-xr-x 1 root root 11.104888 Mar 26 2019 kubelet*
-rwxr-xr-x 1 root root 34820416 Mar 26 2019 kube-proxy*
-rwxr-xr-x 1 root root 37279968 Mar 26 2019 kube-scheduler*
-rwxr-xr-x 1 root root 3084347 Mar 16 2019 loopback*
-rwxr-xr-x 1 root root 3551125 Mar 16 2019 portmap*
-rwxr-xr-x 1 root root 171 Mar 11 2019 readme.md*
-rwxr-xr-x 1 root root 7522464 Feb 10 2019 runc*

7.4.开始部署

7.4.1.环境初始化

ansible-playbook 01.prepare.yml


                                            ansible搭建k8s

7.4.2.部署etcd集群

ansible-playbook 02.etcd.yml

任一etcd节点进行验证:
root@k8s-etcd1:~# export NODE_IPS="192.168.47.50 192.168.47.51 192.168.47.52"
root@k8s-etcd1:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health;done
https://192.168.47.50:2379 is healthy: successfully committed proposal: took = 1.002603ms
https://192.168.47.51:2379 is healthy: successfully committed proposal: took = 1.888051ms
https://192.168.47.52:2379 is healthy: successfully committed proposal: took = 1.666994ms


                                            ansible搭建k8s


                                            ansible搭建k8s

7.4.3.部署docker

之前已经安装了docker,这一步直接跳过

7.4.4.部署master

7.4.4.1.安装v1.13.5所需镜像
kubeadm config images list --kubernetes-version v1.13.5
--------------------------------------------------
k8s.gcr.io/kube-apiserver:v1.13.5
k8s.gcr.io/kube-controller-manager:v1.13.5
k8s.gcr.io/kube-scheduler:v1.13.5
k8s.gcr.io/kube-proxy:v1.13.5
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.2.24
k8s.gcr.io/coredns:1.2.6
7.4.4.2.pause镜像

为了让拉取镜像速度更快,将下载好的镜像上传到harbor

pause镜像在每个node节点都要有
#上传镜像到本地harbor
docker tag da86e6ba6ca1 harbor.gesila.com/k8s/pause:3.1
docker images
docker push harbor.gesila.com/k8s/pause:3.1

#修改镜像源
vim /etc/ansible/roles/kube-node/defaults/main.yml
----------------------------------------------------------------
# 基础容器镜像
SANDBOX_IMAGE: "harbor.gesila.com/k8s/pause:3.1"


                                            ansible搭建k8s


                                            ansible搭建k8s

注意:记得把项目设置为公开,不然镜像无法拉取


                                            ansible搭建k8s
7.4.4.3.测试master能否连接vip
root@k8s-master1:/etc/ansible/images# telnet 192.168.47.49 6443
Trying 192.168.47.49...
Connected to 192.168.47.49.
Escape character is '^]'.
Connection closed by foreign host.
7.4.4.4.执行
root@k8s-master1:/etc/ansible# ansible-playbook 04.kube-master.yml
root@k8s-master:/etc/ansible/manifests/dashboard# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.47.47 Ready,SchedulingDisabled master 115m v1.13.5
192.168.47.53 Ready node 114m v1.13.5
192.168.47.54 Ready node 114m v1.13.5


                                            ansible搭建k8s

7.4.5.部署node

root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml


                                            ansible搭建k8s

7.4.6.部署网络服务calico

7.4.6.1.下载包
下载地址:https://github.com/projectcalico/calico
https://github.com/projectcalico/calico/releases?after=v3.7.2
用到的包:calico-release-v3.3.6.tgz
root@k8s-master1:/etc/ansible/images# tar xf calico-release-v3.3.6.tgz
root@k8s-master1:/etc/ansible/images/release-v3.3.6# tree
.
├── bin
│ ├── calicoctl
│ ├── calicoctl-darwin-amd64
│ └── calicoctl-windows-amd64.exe
├── images
│ ├── calico-cni.tar
│ ├── calico-kube-controllers.tar
│ ├── calico-node.tar
│ └── calico-typha.tar
├── k8s-manifests
│ ├── calico-kube-controllers.yaml
│ ├── hosted
│ │ ├── calicoctl.yaml
│ │ ├── calico.yaml
│ │ ├── canal
│ │ │ ├── canal-etcd.yaml
│ │ │ ├── canal.yaml
│ │ │ ├── rbac-etcd.yaml
│ │ │ └── rbac.yaml
│ │ ├── etcd.yaml
│ │ ├── kubernetes-datastore
│ │ │ ├── calicoctl.yaml
│ │ │ ├── calico-networking
│ │ │ │ └── 1.7
│ │ │ │ └── calico.yaml
│ │ │ └── policy-only
│ │ │ └── 1.7
│ │ │ └── calico.yaml
│ │ └── rbac-kdd.yaml
│ └── rbac.yaml
└── README
7.4.6.2.将镜像上传到本地harbor
root@k8s-master1:/etc/ansible/images/release-v3.3.6/images# tree
.
├── calico-cni.tar
├── calico-kube-controllers.tar
├── calico-node.tar
└── calico-typha.tar

#导入镜像
docker load -i calico-cni.tar
docker load -i calico-kube-controllers.tar
docker load -i calico-node.tar
docker images

#打tag
docker tag ce902e610f51 harbor.gesila.com/k8s/node:v3.3.6 && \
docker tag b8eeeae14aa4 harbor.gesila.com/k8s/cni:v3.3.6 && \
docker tag 2fd138c9cb06 harbor.gesila.com/k8s/kube-controllers:v3.3.6

#上传到harbor
docker push harbor.gesila.com/k8s/node:v3.3.6 && \
docker push harbor.gesila.com/k8s/cni:v3.3.6 && \
docker push harbor.gesila.com/k8s/kube-controllers:v3.3.6
7.4.6.3.查看版本
root@k8s-master1:/etc/ansible/images/release-v3.3.6/bin# ./calicoctl version
Client Version: v3.3.6
7.4.6.4.修改版本
vim /etc/ansible/roles/calico/defaults/main.yml
----------------------------------------------------------
# 更新支持calico 版本: [v3.2.x] [v3.3.x] [v3.4.x]
calico_ver: "v3.3.6"
----------------------------------------------------------
7.4.6.5.修改配置文件镜像源
vim /etc/ansible/roles/calico/templates/calico-v3.3.yaml.j2
----------------------------------------------------------
- name: calico-node
image: harbor.gesila.com/k8s/node:v3.3.6
- name: install-cni
image: harbor.gesila.com/k8s/cni:v3.3.6
- name: calico-kube-controllers
image: harbor.gesila.com/k8s/kube-controllers:v3.3.6
----------------------------------------------------------
7.4.6.6.执行
ansible-playbook 06.network.yml  
#如果node节点无法自动拉取镜像,这一步会执行失败,pause镜像会影响其他3个镜像的拉取
calicoctl node status
kubectl get nodes


                                            ansible搭建k8s

8.k8s-web管理界面dashboard

8.1.下载镜像并上传到本地仓库

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
docker tag f9aed6605b81 harbor.gesila.com/k8s/kubernetes-dashboard-amd64:v1.10.1
docker push harbor.gesila.com/k8s/kubernetes-dashboard-amd64:v1.10.1


                                            ansible搭建k8s

8.2.修改镜像源

root@k8s-master:/etc/ansible/manifests/dashboard# ll
-rw-r--r-- 1 root root 357 Dec 27 21:24 admin-user-sa-rbac.yaml
-rw-r--r-- 1 root root 4766 Dec 27 21:24 kubernetes-dashboard.yaml
-rw-r--r-- 1 root root 2223 Dec 27 21:24 read-user-sa-rbac.yaml
-rw-r--r-- 1 root root 458 Dec 27 21:24 ui-admin-rbac.yaml
-rw-r--r-- 1 root root 477 Dec 27 21:24 ui-read-rbac.yaml

vim kubernetes-dashboard.yaml
---------------------------------------------------------------------------
- name: kubernetes-dashboard
image: harbor.gesila.com/k8s/kubernetes-dashboard-amd64:v1.10.1
---------------------------------------------------------------------------

8.3.运行

root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# kubectl create -f .
---------------------------------------------------------------------------
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
serviceaccount/dashboard-read-user created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created
clusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created
clusterrole.rbac.authorization.k8s.io/ui-admin created
rolebinding.rbac.authorization.k8s.io/ui-admin-binding created
clusterrole.rbac.authorization.k8s.io/ui-read created
rolebinding.rbac.authorization.k8s.io/ui-read-binding created
kubectl get pods  -n kube-system    #确认kubernetes-dashboard 运行
kubectl get service -n kube-system
kubectl cluster-info #获取访问地址


                                            ansible搭建k8s

8.4.登录密码

vim /etc/ansible/hosts
---------------------------------------------------------------------------
# 集群basic auth 使用的用户名和密码
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="test1234"
---------------------------------------------------------------------------

8.5.获取token登录dashboard

root@k8s-master:/etc/ansible/manifests/dashboard# kubectl -n kube-system get secret | grep admin-user
admin-user-token-mlfkq kubernetes.io/service-account-token 3 5m14s

kubectl -n kube-system describe secret admin-user-token-mlfkq


                                            ansible搭建k8s


                                            ansible搭建k8s


                                            ansible搭建k8s