二进制部署 K8S Harbor 环境


系统初始化

创建任务目录

1
2
3
4
5
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/{roles,group_vars}

# 创建common的任务和模板目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/common/tasks # tasks任务目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/common/templates # 模板目录

创建任务

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
[root@k8s-master1 ~]# cd ansible-k8s-deploy/roles/common/tasks/

[root@k8s-master1 tasks]# vim main.yaml

---
# System initialization — runs on every node (k8s + harbor).
# NOTE: the original paste lost all YAML indentation; restored here.

- name: 关闭 selinux
  # Permanently disable SELinux in the config file (effective after reboot).
  lineinfile:
    dest: /etc/selinux/config
    regexp: '^SELINUX='
    line: 'SELINUX=disabled'

- name: 关闭 firewalld
  systemd:
    name: firewalld
    state: stopped
    enabled: no

- name: 关闭 swap
  # Blank out the swap entry in /etc/fstab so swap stays off after reboot.
  lineinfile:
    dest: /etc/fstab
    regexp: "UUID.*swap"
    line: ""

- name: 关闭 swap 和 selinux 即时生效
  # Apply for the current boot too; ';' (not '&&') because setenforce
  # returns non-zero when SELinux is already disabled.
  shell: setenforce 0 ; swapoff -a

- name: 设置 主机名
  # node_name is defined per-host in the inventory; |quote guards odd chars.
  shell: hostnamectl set-hostname {{ node_name|quote }}

- name: 设置 hosts
  # Render templates/hosts.j2 and distribute it as /etc/hosts on each host.
  template: src=hosts.j2 dest=/etc/hosts
1
2
3
4
5
6
# 模板文件 hosts.j2
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
{% for host in groups['node_list'] %}
{{ hostvars[host].inventory_hostname }} {{ hostvars[host].node_name }}
{% endfor %}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# host 文件
[root@k8s-master1 ansible-k8s-deploy]# vim hosts

[master]
# For a single-master deployment, keep only one master node here.
172.31.228.67 node_name=k8s-master1

[node]
172.31.228.69 node_name=k8s-node1
172.31.228.70 node_name=k8s-node2

[harbor]
# Originally intended for HA (a second master), reused here for Harbor.
172.31.228.68 node_name=k8s-master2

# The k8s group contains the master and node groups.
[k8s:children]
master
node

# Umbrella group referenced by plays/vars that target every machine.
[node_list:children]
master
node
harbor
1
2
3
4
5
6
7
8
9
10
11
12
13
# ansible 配置文件

[root@k8s-master1 ansible-k8s-deploy]# vim ansible.cfg

[defaults]
# Default inventory: the hosts file next to this ansible.cfg.
# (Was "/hosts", i.e. the filesystem root — almost certainly a typo;
# the playbook runs also pass "-i hosts" explicitly, which overrides this.)
inventory = ./hosts
forks = 5
become = root
remote_port = 22
host_key_checking = False
timeout = 10
log_path = /var/log/ansible.log
private_key_file = /root/.ssh/id_rsa
1
2
3
4
5
6
7
8
9
10
11
12
# 入口文件

[root@k8s-master1 ansible-k8s-deploy]# vim single-master-deploy.yaml
---
# Entry-point playbook; indentation restored (lost in the paste).
- name: 0.系统初始化
  gather_facts: false
  hosts:
    - k8s
    - harbor
  roles:
    - common
  tags: common
1
2
3
4
5
6
7
8
# 查看目录

[root@k8s-master1 ansible-k8s-deploy]# tree /root/ansible-k8s-deploy/roles/common/
/root/ansible-k8s-deploy/roles/common/
├── tasks
│   └── main.yaml
└── templates
└── hosts.j2

执行任务

1
[root@k8s-master1 ansible-k8s-deploy]# tree /root/ansible-k8s-deploy/
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
[root@k8s-master1 ansible-k8s-deploy]# yum install ansible

[root@k8s-master1 ansible-k8s-deploy]# ansible-playbook -i hosts single-master-deploy.yaml -uroot -k
SSH password:
...
PLAY RECAP *********************************************************************************************************************************************************************************************************************************
172.31.228.67 : ok=6 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
172.31.228.68 : ok=6 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
172.31.228.69 : ok=6 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
172.31.228.70 : ok=6 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0

[root@k8s-master1 ansible-k8s-deploy]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.31.228.67 k8s-master1
172.31.228.69 k8s-node1
172.31.228.70 k8s-node2
172.31.228.68 k8s-master2

二进制部署 Docker

1
2
3
4
5
6
7
8
9
10
11
12
13
# Docker 二进制包下载地址:
https://download.docker.com/linux/static/stable/x86_64/

# 查看 Kubernetes 所需的Docker版本
https://github.com/kubernetes/kubernetes/releases

# 选择好K8S版本 进入 查找 Docker 本次选择1.16.8 版本
Unchanged
The list of validated docker versions remains unchanged.
The current list is 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09. (#72823, #72831)

# 本次 Docker 版本选择 18.09.6
# k8s 1.17和18 版本需要 docker-19 版本

创建任务目录

1
2
3
4
# 创建Docker模块的任务和模板目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/docker/tasks # tasks任务目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/docker/files # 文件目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/docker/templates # 模板目录

创建任务

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
[root@k8s-master1 tasks]# cd /root/ansible-k8s-deploy/roles/docker/tasks/

[root@k8s-master1 tasks]# vim main.yaml

---
# Install Docker from the static binary tarball on every node.

- name: 创建临时目录
  # tmp_dir ('/tmp/k8s') is defined in group_vars/all.yaml.
  file: dest={{ tmp_dir }} state=directory

- name: 分发并解压 docker 二进制包
  # with_fileglob expands the wildcard on the controller side.
  unarchive: src={{ item }} dest={{ tmp_dir }}
  with_fileglob:
    - "{{ software_dir }}/docker-*.tgz"

- name: 移动 docker 二进制文件到/usr/bin
  shell: cp -rf {{ tmp_dir }}/docker/* /usr/bin

- name: 配置 service 文件
  copy: src=docker.service dest=/usr/lib/systemd/system/

- name: 创建 docker 配置目录
  file: dest=/etc/docker state=directory

- name: 配置 docker
  template: src=daemon.json.j2 dest=/etc/docker/daemon.json

- name: 启动 docker
  systemd: name=docker state=restarted enabled=yes daemon_reload=yes

- name: 查看状态
  shell: docker info
  register: docker

- debug: var=docker.stdout_lines
1
2
3
4
5
6
7
8
9
10
11
12
13
# 创建模板 运用配置文件变量

[root@k8s-master1 templates]# vim daemon.json.j2

{
  "registry-mirrors": ["http://bc437cce.m.daocloud.io"],
  "insecure-registries": ["{{ harbor_ip }}"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# 创建配置文件

[root@k8s-master1 files]# vim docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
# NOTE(review): containerd.service is ordered-after but not Required; the
# static 18.09 bundle ships containerd and dockerd launches it itself.
After=network-online.target firewalld.service containerd.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
# Never time out waiting for startup; restart 2s after any exit.
TimeoutSec=0
RestartSec=2
Restart=always
# Rate-limit restarts: at most 3 starts per 60s window.
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
# Let docker manage its own cgroups; stop only the daemon process,
# leaving containers running across daemon restarts.
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
1
2
3
4
5
6
7
8
9
10
# 增加配置
[root@k8s-master1 ansible-k8s-deploy]# cat group_vars/all.yaml
# Scratch/install directory created on every node.
tmp_dir: '/tmp/k8s'

# Directory on the controller holding the downloaded binary packages.
software_dir: '/root/binary_pkg'

# Harbor registry address (also listed under insecure-registries).
harbor_ip: "172.31.228.68"
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
# 添加任务到入口文件 使用node_list让每台节点都安装docker环境

[root@k8s-master1 ansible-k8s-deploy]# cat single-master-deploy.yaml
---
# Entry-point playbook; indentation restored (lost in the paste).
- name: 0.系统初始化
  gather_facts: false
  hosts:
    - k8s
    - harbor
  roles:
    - common
  tags: common

- name: 1.部署 Docker
  gather_facts: false
  hosts:
    - node_list
  roles:
    - docker
  tags: docker
1
2
3
4
5
6
7
8
9
10
# 查看目录

[root@k8s-master1 files]# tree /root/ansible-k8s-deploy/roles/docker/
/root/ansible-k8s-deploy/roles/docker/
├── files
│   └── docker.service
├── tasks
│   └── main.yaml
└── templates
└── daemon.json.j2

执行任务

1
2
3
4
5
# 按任务条目执行任务 --tags=任务名称
[root@k8s-master1 ansible-k8s-deploy]# ansible-playbook -i hosts single-master-deploy.yaml -uroot -k --tags=docker

# 检查
[root@k8s-master1 ansible-k8s-deploy]# docker info

Harbor 镜像仓库

创建任务目录

1
2
3
4
# 创建Harbor模块的任务和模板目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/harbor/tasks # tasks任务目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/harbor/files # 文件目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/harbor/templates # 模板目录

创建任务

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
[root@k8s-master1 tasks]# vim main.yaml

---
# Install Harbor (offline installer) + docker-compose on the harbor host.

- name: 创建工作目录
  file: dest={{ harbor_work_dir }} state=directory

- name: 创建临时目录
  file: dest={{ tmp_dir }} state=directory

- name: 分发并解压 harbor 安装包
  # unarchive does not expand controller-side wildcards in src; glob with
  # with_fileglob instead (same pattern the docker/etcd roles use).
  unarchive: src={{ item }} dest=/opt/
  with_fileglob:
    - "{{ software_dir }}/harbor-offline-installer-*.tgz"

- name: 部署 compose
  # 0755 (leading zero) — a bare 755 is easy to misread as decimal.
  copy: src={{ software_dir }}/docker-compose-Linux-x86_64 dest=/usr/local/bin/docker-compose mode=0755

- name: 分发 harbor 配置文件
  template: src=harbor.yml.j2 dest={{ harbor_work_dir }}/harbor.yml

- name: 准备安装 harbor
  shell: cd {{ harbor_work_dir }}/ && ./prepare && ./install.sh

- name: 列出 harbor 状态
  shell: cd {{ harbor_work_dir }} && docker-compose ps
  register: status

- debug: var=status.stdout_lines

- name: 登录 harbor
  # SECURITY: the admin password is hard-coded in plain text here. Move it
  # to an Ansible Vault variable and prefer `docker login --password-stdin`.
  shell: sleep 5;docker login -u admin -p lx@68328153 {{ harbor_ip }}
1
2
3
4
5
6
7
8
9
10
11
12
# 添加 harbor.yml.j2 到模板目录,该文件信息过长,主要是替换用户名密码和访问方式等配置
hostname: {{ harbor_ip }}
...
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 80
...
harbor_admin_password: 密码
...
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: 密码
1
2
3
4
5
6
7
8
9
10
11
12
# 添加配置
[root@k8s-master1 group_vars]# vim all.yaml

# 安装目录
tmp_dir: '/tmp/k8s'

# 二进制包存放目录
software_dir: '/root/binary_pkg'

# harbor 地址和工作目录
harbor_ip: "172.31.228.68"
harbor_work_dir: "/opt/harbor"
1
2
3
4
5
6
7
8
# 查看目录
[root@k8s-master1 ansible-k8s-deploy]# tree /root/ansible-k8s-deploy/roles/harbor/
/root/ansible-k8s-deploy/roles/harbor/
├── files
├── tasks
│   └── main.yaml
└── templates
└── harbor.yml.j2
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# 添加任务到入口文件 harbor安装在K8S集群之外
[root@k8s-master1 ansible-k8s-deploy]# vim single-master-deploy.yaml

---
# Entry-point playbook; indentation restored (lost in the paste).
- name: 0.系统初始化
  gather_facts: false
  hosts:
    - k8s
    - harbor
  roles:
    - common
  tags: common

- name: 1.部署 Docker
  gather_facts: false
  hosts:
    - node_list
  roles:
    - docker
  tags: docker

- name: 2.部署 Harbor 仓库
  gather_facts: false
  hosts: harbor
  roles:
    - harbor
  tags: harbor

执行任务

1
[root@k8s-master1 ansible-k8s-deploy]# ansible-playbook -i hosts single-master-deploy.yaml -uroot -k --tags=harbor
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# 查看
TASK [harbor : debug] ********************
ok: [172.31.228.68] => {
"status.stdout_lines": [
" Name Command State Ports ",
"------------------------------------------------------------------------------------------------------",
"harbor-core /harbor/start.sh Up (health: starting) ",
"harbor-db /entrypoint.sh postgres Up (health: starting) 5432/tcp ",
"harbor-jobservice /harbor/start.sh Up ",
"harbor-log /bin/sh -c /usr/local/bin/ ... Up (health: starting) 127.0.0.1:1514->10514/tcp",
"harbor-portal nginx -g daemon off; Up (health: starting) 80/tcp ",
"nginx nginx -g daemon off; Up (health: starting) 0.0.0.0:80->80/tcp ",
"redis docker-entrypoint.sh redis ... Up 6379/tcp ",
"registry /entrypoint.sh /etc/regist ... Up (health: starting) 5000/tcp ",
"registryctl /harbor/start.sh Up (health: starting) "
]
}

准备证书 cfssl

1
2
3
4
5
6
1. ETCD 应该部署在K8S集群之外
2. ETCD 应该使用SSD硬盘
3. 需要先删除 安装包中的 ssl目录 因为是之前生成过的文件
rm -rf /root/ansible-k8s-deploy/roles/master
rm -rf /root/ansible-k8s-deploy/roles/node
rm -rf /root/ansible-k8s-deploy/ssl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
# 生成的 CA 证书和秘钥文件如下: 

# etcd 证书 用于访问etcd资源
[root@k8s-master1 etcd_cert]# ls
ca-key.pem ca.pem server-key.pem server.pem

# k8s服务证书 和 kubectl 访问证书
[root@k8s-master1 k8s_cert]#
admin-key.pem # kubectl 访问证书
admin.pem
ca-key.pem
ca.pem # 根证书
server-key.pem # kube-apiserver 证书
server.pem

# kube-proxy 证书
[root@k8s-master1 k8s_cert]# ls
ca.pem kube-proxy-key.pem kube-proxy.pem
1
kube-controller、kube-scheduler 当前需要和 kube-apiserver 部署在同一台机器上且使用非安全端口通信,故不需要证书。

创建任务目录

1
2
3
4
# 创建tls模块的任务和模板目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/tls/tasks # tasks任务目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/tls/files # 文件目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/tls/templates # 模板目录

创建任务

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
# 任务流程
[root@k8s-master1 tasks]# vim main.yaml

---
# Generate etcd and k8s certificates with cfssl on the controller (localhost).

- name: 获取 Ansible 工作目录
  # Strip the roles/tls suffix from $PWD to get the playbook root directory.
  shell: pwd |sed 's#roles/tls##'
  register: root_dir

# Was the malformed two-line form `- debug:` / `var=root_dir["stdout"]`.
- debug: var=root_dir.stdout

- name: 创建 tls 工作目录
  file: dest={{ root_dir.stdout }}/ssl/{{ item }} state=directory
  with_items:
    - etcd
    - k8s

- name: 准备 cfssl 工具,二进制包解压到 /usr/bin 目录
  unarchive: src={{ software_dir }}/cfssl.tar.gz dest=/usr/bin/ mode=u+x

- name: 准备 etcd 证书请求文件
  # item.split('.')[:-1]|join('.') drops the trailing ".j2" from the name.
  template: src=etcd/{{ item }} dest={{ root_dir.stdout }}/ssl/etcd/{{ item.split('.')[:-1]|join('.') }}
  with_items:
    - ca-config.json.j2
    - ca-csr.json.j2
    - server-csr.json.j2

- name: 准备生成 etcd 证书脚本
  copy: src=generate_etcd_cert.sh dest={{ root_dir.stdout }}/ssl/etcd mode=u+x

- name: 生成 etcd 证书
  shell: cd {{ root_dir.stdout }}/ssl/etcd && /bin/bash generate_etcd_cert.sh

- name: 准备 k8s 证书请求文件
  template: src=k8s/{{ item }} dest={{ root_dir.stdout }}/ssl/k8s/{{ item.split('.')[:-1]|join('.') }}
  with_items:
    - ca-config.json.j2
    - ca-csr.json.j2
    - server-csr.json.j2
    - admin-csr.json.j2
    - kube-proxy-csr.json.j2

- name: 准备生成 k8s 证书脚本
  copy: src=generate_k8s_cert.sh dest={{ root_dir.stdout }}/ssl/k8s mode=u+x

- name: 生成 k8s 证书
  shell: cd {{ root_dir.stdout }}/ssl/k8s && /bin/bash generate_k8s_cert.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
[root@k8s-master1 ansible-k8s-deploy]# vim single-master-deploy.yaml 

---
# Entry-point playbook; indentation restored (lost in the paste).
- name: 0.系统初始化
  gather_facts: false
  hosts:
    - k8s
    - harbor
  roles:
    - common
  tags: common

- name: 1.部署 Docker
  gather_facts: false
  hosts:
    - node_list
  roles:
    - docker
  tags: docker

- name: 2.部署 Harbor 仓库
  gather_facts: false
  hosts: harbor
  roles:
    - harbor
  tags: harbor

- name: 3.自签证书
  gather_facts: false
  hosts: localhost
  roles:
    - tls
  tags: tls
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# 目录结构
[root@k8s-master1 ansible-k8s-deploy]# tree /root/ansible-k8s-deploy/roles/tls/
/root/ansible-k8s-deploy/roles/tls/
├── files
│   ├── generate_etcd_cert.sh
│   └── generate_k8s_cert.sh
├── tasks
│   └── main.yaml
└── templates
├── etcd
│   ├── ca-config.json.j2
│   ├── ca-csr.json.j2
│   └── server-csr.json.j2
└── k8s
├── admin-csr.json.j2
├── ca-config.json.j2
├── ca-csr.json.j2
├── kube-proxy-csr.json.j2
└── server-csr.json.j2

执行任务

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
[root@k8s-master1 ansible-k8s-deploy]# ansible-playbook -i hosts single-master-deploy.yaml -uroot -k --tags=tls

# 查看证书生成的目录文件
[root@k8s-master1 ansible-k8s-deploy]# tree /root/ansible-k8s-deploy/ssl/

/root/ansible-k8s-deploy/ssl/
├── etcd
│   ├── ca-config.json
│   ├── ca.csr
│   ├── ca-csr.json
│   ├── ca-key.pem
│   ├── ca.pem
│   ├── generate_etcd_cert.sh
│   ├── server.csr
│   ├── server-csr.json
│   ├── server-key.pem
│   └── server.pem
└── k8s
├── admin.csr
├── admin-csr.json
├── admin-key.pem
├── admin.pem
├── ca-config.json
├── ca.csr
├── ca-csr.json
├── ca-key.pem
├── ca.pem
├── generate_k8s_cert.sh
├── kube-proxy.csr
├── kube-proxy-csr.json
├── kube-proxy-key.pem
├── kube-proxy.pem
├── server.csr
├── server-csr.json
├── server-key.pem
└── server.pem

安装 ETCD 集群

创建任务目录

1
2
3
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/etcd/tasks        # tasks任务目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/etcd/files # 文件目录
[root@k8s-master1 ~]# mkdir -p ansible-k8s-deploy/roles/etcd/templates # 模板目录

创建任务

1
2
# 本次预定安装的k8s版本是 1.16.8
# etcd版本使用 3.3.20
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
[root@k8s-master1 etcd]# vim tasks/main.yaml 

# Deploy the etcd cluster from the release tarball.

- name: 创建工作目录
  file: dest={{ etcd_work_dir }}/{{ item }} state=directory
  with_items:
    - bin
    - cfg
    - ssl

- name: 创建临时目录
  file: dest={{ tmp_dir }} state=directory

- name: 分发并解压 etcd 二进制包
  unarchive: src={{ item }} dest={{ tmp_dir }}
  with_fileglob:
    - "{{ software_dir }}/etcd-v*.tar.gz"

- name: 分发 etcd 二进制文件
  shell: cp -rf {{ tmp_dir }}/etcd-v*/{etcd,etcdctl} {{ etcd_work_dir }}/bin

- name: 分发证书
  # files/etcd_cert/ was populated earlier by the tls role.
  copy: src=etcd_cert/{{ item }} dest={{ etcd_work_dir }}/ssl
  with_items:
    - ca.pem
    - server.pem
    - server-key.pem

- name: 分发 etcd 配置文件
  template: src=etcd.conf.j2 dest={{ etcd_work_dir }}/cfg/etcd.conf

- name: 分发 etcd service 文件
  template: src=etcd.service.j2 dest=/usr/lib/systemd/system/etcd.service

- name: 启动 etcd
  systemd: name=etcd state=restarted enabled=yes daemon_reload=yes

- name: 分发 etcd 测试脚本
  # Drop the ".j2" suffix from the rendered destination filename.
  template: src={{ item }} dest={{ tmp_dir }}/{{ item.split('.')[:-1]|join('.') }} mode=u+x
  with_items:
    - etcd_member_list.sh.j2
    - etcd_status.sh.j2

- name: 获取 etcd 集群状态
  shell: /bin/bash {{ tmp_dir }}/etcd_status.sh
  register: status

- debug: var=status.stdout_lines

- name: 获取 etcd 集群成员
  shell: /bin/bash {{ tmp_dir }}/etcd_member_list.sh
  register: etcd_member_list_status

- debug: var=etcd_member_list_status.stdout_lines
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# 查看目录
[root@k8s-master1 etcd]# tree /root/ansible-k8s-deploy/roles/etcd/
/root/ansible-k8s-deploy/roles/etcd/
├── files
│   └── etcd_cert
│   ├── ca-key.pem
│   ├── ca.pem
│   ├── server-key.pem
│   └── server.pem
├── tasks
│   └── main.yaml
└── templates
├── etcd.conf.j2
├── etcd_member_list.sh.j2
├── etcd.service.j2
└── etcd_status.sh.j2
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
# 查看入口文件
[root@k8s-master1 etcd]# cat /root/ansible-k8s-deploy/single-master-deploy.yaml
---
# Entry-point playbook; indentation restored (lost in the paste).
- name: 0.系统初始化
  gather_facts: false
  hosts:
    - k8s
    - harbor
  roles:
    - common
  tags: common

- name: 1.部署 Docker
  gather_facts: false
  hosts:
    - node_list
  roles:
    - docker
  tags: docker

- name: 2.部署 Harbor 仓库
  gather_facts: false
  hosts: harbor
  roles:
    - harbor
  tags: harbor

- name: 3.自签证书
  gather_facts: false
  hosts: localhost
  roles:
    - tls
  tags: tls

- name: 4.部署 ETCD 集群
  gather_facts: false
  hosts: etcd
  roles:
    - etcd
  tags: etcd

Master 节点

创建工作目录

1
2
3
mkdir -p ansible-k8s-deploy/roles/node/tasks
mkdir -p ansible-k8s-deploy/roles/node/files
mkdir -p ansible-k8s-deploy/roles/node/templates