18 Deployment: Upgrading Applications Declaratively


Automated rolling updates with an RC

First, build two container images, a v1 and a v2 version:

# v1
[root@k8s-master2 v1]# cat app.js
const http = require('http');
const os = require('os');

console.log("Kubia server starting...");

var handler = function(request, response) {
  console.log("Received request from " + request.connection.remoteAddress);
  response.writeHead(200);
  response.end("This is v1 running in pod " + os.hostname() + "\n");
};

var www = http.createServer(handler);
www.listen(8080);

[root@k8s-master2 v1]# cat Dockerfile
FROM node:7
ADD app.js /app.js
ENTRYPOINT ["node", "app.js"]

[root@k8s-master2 v1]# docker build -t 172.31.228.68/game/kubia:v1 .
[root@k8s-master2 v1]# docker push 172.31.228.68/game/kubia:v1
# v2: identical to v1 except that the response reports v2

[root@k8s-master2 v2]# cat app.js
const http = require('http');
const os = require('os');

console.log("Kubia server starting...");

var handler = function(request, response) {
  console.log("Received request from " + request.connection.remoteAddress);
  response.writeHead(200);
  response.end("This is v2 running in pod " + os.hostname() + "\n");
};

var www = http.createServer(handler);
www.listen(8080);

[root@k8s-master2 v2]# cat Dockerfile
FROM node:7
ADD app.js /app.js
ENTRYPOINT ["node", "app.js"]

[root@k8s-master2 v2]# docker build -t 172.31.228.68/game/kubia:v2 .
[root@k8s-master2 v2]# docker push 172.31.228.68/game/kubia:v2

Running the RC and a Service from a single YAML file

# multiple resource definitions are separated by ---

[root@k8s-master1 deployment]# vim kubia-rc-svc-v1.yaml

apiVersion: v1
kind: ReplicationController
metadata:
  name: kubia-v1
spec:
  replicas: 3
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      imagePullSecrets:
      - name: my-harbor-secret
      containers:
      - image: 172.31.228.68/game/kubia:v1
        name: nodejs
---
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  type: NodePort
  selector:
    app: kubia
  ports:
  - port: 80
    targetPort: 8080
    nodePort: 30123
[root@k8s-master1 deployment]# kubectl get pods,svc
NAME READY STATUS RESTARTS AGE
pod/kubia-v1-5cgjx 1/1 Running 0 2m21s
pod/kubia-v1-tbhqr 1/1 Running 0 2m21s
pod/kubia-v1-th6rr 1/1 Running 0 2m21s
pod/nfs-client-provisioner-56f4b98d47-v4nf6 1/1 Running 8 3d14h

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 13d
service/kubia NodePort 10.0.0.119 <none> 80:30123/TCP 2m21s

# test loop: keep hitting the service
[root@k8s-master1 demo]# while true;do curl http://10.0.0.119;sleep 1; done
This is v1 running in pod kubia-v1-tbhqr
This is v1 running in pod kubia-v1-th6rr
This is v1 running in pod kubia-v1-5cgjx

Rolling update of the RC with kubectl rolling-update

[root@k8s-master1 deployment]# kubectl rolling-update kubia-v1 kubia-v2 --image=172.31.228.68/game/kubia:v2
Command "rolling-update" is deprecated, use "rollout" instead
Created kubia-v2
Scaling up kubia-v2 from 0 to 3, scaling down kubia-v1 from 3 to 0 (keep 3 pods available, don't exceed 4 pods)
Scaling kubia-v2 up to 1
Scaling kubia-v1 down to 2
Scaling kubia-v2 up to 2
Scaling kubia-v1 down to 1
Scaling kubia-v2 up to 3
Scaling kubia-v1 down to 0
Update succeeded. Deleting kubia-v1
replicationcontroller/kubia-v2 rolling updated to "kubia-v2"


[root@k8s-master1 ~]# kubectl describe rc kubia-v2
Name: kubia-v2
Namespace: default
Selector: app=kubia,deployment=1c9b94fec2662e272187373534aef5c6
Labels: app=kubia
...

[root@k8s-master1 ~]# kubectl describe rc kubia-v1
Name: kubia-v1
Namespace: default
Selector: app=kubia,deployment=9b82b6cdfb3cd18a5e53068dda002fe1-orig
Labels: app=kubia
...

# inspect the pods
[root@k8s-master1 ~]# kubectl get pods --show-labels
NAME READY STATUS RESTARTS AGE LABELS
kubia-v1-tbhqr 1/1 Running 0 18m app=kubia,deployment=9b82b6cdfb3cd18a5e53068dda002fe1-orig
kubia-v2-p8vbl 1/1 Running 0 3m15s app=kubia,deployment=1c9b94fec2662e272187373534aef5c6
kubia-v2-x7g6g 1/1 Running 0 2m8s app=kubia,deployment=1c9b94fec2662e272187373534aef5c6
kubia-v2-xr8hz 1/1 Running 0 62s app=kubia,deployment=1c9b94fec2662e272187373534aef5c6
nfs-client-provisioner-56f4b98d47-v4nf6 1/1 Running 8 3d14h app=nfs-client-provisioner,pod-template-hash=56f4b98d47

# the v1 pods have been rolled over to v2; note the extra deployment=9b82... label the rolling update added to the pods and to each RC's selector to keep old and new pods apart

[root@k8s-master1 deployment]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kubia-v2-p8vbl 1/1 Running 0 5m54s 10.244.1.75 k8s-node2 <none> <none>
kubia-v2-x7g6g 1/1 Running 0 4m47s 10.244.0.61 k8s-node1 <none> <none>
kubia-v2-xr8hz 1/1 Running 0 3m41s 10.244.2.62 k8s-master1 <none> <none>
nfs-client-provisioner-56f4b98d47-v4nf6 1/1 Running 8 3d14h 10.244.0.59 k8s-node1 <none> <none>

# all requests now hit v2
This is v2 running in pod kubia-v2-x7g6g
This is v2 running in pod kubia-v2-p8vbl
This is v2 running in pod kubia-v2-xr8hz

# rolling-update is deprecated; use a Deployment instead
# the question it leaves open: can an app be upgraded simply by changing the image in the pod template, with the desired replica count driving the upgrade, instead of imperatively deleting and scaling pods by hand? That is what Deployments provide.

Upgrading an application declaratively with a Deployment

1. Deployment -> ReplicaSet -> Pod
2. A Deployment is a higher-level resource for deploying applications and updating them declaratively.
3. Creating a Deployment also creates a ReplicaSet; with a Deployment, pods are actually created and managed by that ReplicaSet.

Creating a Deployment

[root@k8s-master1 deployment]# kubectl explain deployment
KIND: Deployment
VERSION: apps/v1

[root@k8s-master1 deployment]# vim kubia-deployment-v1.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kubia
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - image: 172.31.228.68/game/kubia:v1
        name: nodejs


[root@k8s-master1 deployment]# kubectl delete rc --all
replicationcontroller "kubia-v2" deleted

# create the deployment, using --record to store the command in the revision history
[root@k8s-master1 deployment]# kubectl create -f kubia-deployment-v1.yaml --record
deployment.apps/kubia created

[root@k8s-master1 deployment]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kubia-dbd9664c7-4mggm 1/1 Running 0 35s 10.244.2.64 k8s-master1 <none> <none>
kubia-dbd9664c7-8b629 1/1 Running 0 35s 10.244.1.77 k8s-node2 <none> <none>
kubia-dbd9664c7-8npt4 1/1 Running 0 35s 10.244.0.63 k8s-node1 <none> <none>

[root@k8s-master1 deployment]# kubectl get rs
NAME DESIRED CURRENT READY AGE
kubia-dbd9664c7 3 3 3 93s

# remember that a Deployment creates pods through an RS, so the RS name contains the pod template's hash
# a Deployment can create multiple RSs, one for each version of the pod template
# the hash value is how you can tell them apart

[root@k8s-master1 demo]# kubectl get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
kubia-dbd9664c7-4mggm 1/1 Running 0 5m36s app=kubia,pod-template-hash=dbd9664c7
kubia-dbd9664c7-8b629 1/1 Running 0 5m36s app=kubia,pod-template-hash=dbd9664c7
kubia-dbd9664c7-8npt4 1/1 Running 0 5m36s app=kubia,pod-template-hash=dbd9664c7

Upgrading the Deployment

A Deployment supports two upgrade strategies (a strategy-switch sketch follows this list):
1. RollingUpdate (the default): progressively deletes old pods while creating new ones, keeping the application available throughout the upgrade.
2. Recreate: deletes all old pods at once, then creates the new ones. If the application cannot serve two versions side by side, the old version has to be stopped before the new one starts, which causes a short outage.
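
A minimal sketch of switching strategies, assuming the kubia Deployment used throughout this section; a JSON patch has to drop the rollingUpdate block before the type can change to Recreate:

# switch the deployment to the Recreate strategy
kubectl patch deployment kubia --type=json -p='[
  {"op": "remove",  "path": "/spec/strategy/rollingUpdate"},
  {"op": "replace", "path": "/spec/strategy/type", "value": "Recreate"}
]'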

Slowing down the rolling update

# for testing, to make the upgrade process easy to observe
# set minReadySeconds to 10
# kubectl patch can modify individual fields of a resource
[root@k8s-master1 demo]# kubectl patch deployment kubia -p '{"spec": {"minReadySeconds": 10}}'
deployment.apps/kubia patched

# verify
[root@k8s-master1 deployment]# kubectl edit deploy kubia
spec:
  minReadySeconds: 10

Changing the image to trigger a rolling update

[root@k8s-master1 demo]# while true;do curl http://10.0.0.119;sleep 1; done
This is v1 running in pod kubia-dbd9664c7-8npt4
This is v1 running in pod kubia-dbd9664c7-4mggm
This is v1 running in pod kubia-dbd9664c7-8b629

[root@k8s-master1 deployment]# kubectl set image deployment kubia nodejs=172.31.228.68/game/kubia:v2
deployment.apps/kubia image updated

# during the rolling update, v1 pods are replaced with v2 pods one by one
This is v2 running in pod kubia-56fd945996-lz847
This is v2 running in pod kubia-56fd945996-glj6d
This is v2 running in pod kubia-56fd945996-ccg9c

[root@k8s-master1 deployment]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kubia-56fd945996-ccg9c 1/1 Running 0 61s 10.244.1.78 k8s-node2 <none> <none>
kubia-56fd945996-glj6d 1/1 Running 0 49s 10.244.0.64 k8s-node1 <none> <none>
kubia-56fd945996-lz847 1/1 Running 0 38s 10.244.2.65 k8s-master1 <none> <none>
kubia-dbd9664c7-8b629 1/1 Terminating 0 20m 10.244.1.77 k8s-node2 <none> <none>

# the upgrade proceeds as follows:
# 1. a new RS (v2) is created and gradually scaled up
# 2. the previous version's RS (v1) is scaled down to 0

[root@k8s-master1 deployment]# kubectl get rs
NAME DESIRED CURRENT READY AGE
kubia-56fd945996 3 3 3 5m28s
kubia-dbd9664c7 0 0 0 25m

# the old RS is kept around, which is what makes further operations such as rollback possible
# an RC can do none of this, e.g. triggering an update by changing the image, or rolling back (see the sketch below)
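
To watch an update like this as it happens, the rollout subcommand can track the Deployment's progress (a small sketch):

# blocks until the rolling update finishes (or fails)
kubectl rollout status deployment kubia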

Updating a ConfigMap

# if a pod in the deployment references a ConfigMap (or Secret):
# 1. changing the ConfigMap itself does NOT trigger an upgrade
# 2. to change the application's configuration and trigger an update, create a new ConfigMap and point the pod template at it (a sketch follows)
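
A sketch of that pattern with a hypothetical versioned ConfigMap name (app-config-v2) and key (greeting); because kubectl set env modifies the pod template, it triggers a rolling update:

# create a new, versioned ConfigMap rather than editing the old one in place
kubectl create configmap app-config-v2 --from-literal=greeting=hello

# point the pod template's environment at the new ConfigMap;
# changing the template is what triggers the rollout
kubectl set env deployment kubia --from=configmap/app-config-v2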

Rolling back a Deployment

# first build a broken version, v3
# from the fifth request onward, it returns 500 errors

[root@k8s-master2 v3]# cat app.js
const http = require('http');
const os = require('os');

var requestCount = 0;

console.log("Kubia server starting...");

var handler = function(request, response) {
  console.log("Received request from " + request.connection.remoteAddress);
  if (++requestCount >= 5) {
    response.writeHead(500);
    response.end("Some internal error has occurred! This is pod " + os.hostname() + "\n");
    return;
  }
  response.writeHead(200);
  response.end("This is v3 running in pod " + os.hostname() + "\n");
};

var www = http.createServer(handler);
www.listen(8080);


[root@k8s-master2 v3]# docker build -t 172.31.228.68/game/kubia:v3 .
[root@k8s-master2 v3]# docker push 172.31.228.68/game/kubia:v3
# upgrade, then watch the requests start failing
[root@k8s-master1 deployment]# kubectl set image deployment kubia nodejs=172.31.228.68/game/kubia:v3
deployment.apps/kubia image updated

[root@k8s-master1 ~]# while true;do curl http://10.0.0.119;sleep 1; done
This is v3 running in pod kubia-698c869b67-z7qpw
This is v3 running in pod kubia-698c869b67-228w5
Some internal error has occurred! This is pod kubia-698c869b67-vkvch
Some internal error has occurred! This is pod kubia-698c869b67-z7qpw
Some internal error has occurred! This is pod kubia-698c869b67-228w5
Some internal error has occurred! This is pod kubia-698c869b67-vkvch
# roll the upgrade back to the previous version
[root@k8s-master1 deployment]# kubectl rollout undo deployment kubia
deployment.apps/kubia rolled back


...
This is v2 running in pod kubia-56fd945996-t28ch
This is v2 running in pod kubia-56fd945996-n7cwd
This is v2 running in pod kubia-56fd945996-jzzl4

# show the Deployment's rollout history
[root@k8s-master1 deployment]# kubectl rollout history deployment kubia
deployment.apps/kubia
REVISION CHANGE-CAUSE
1 kubectl create --filename=kubia-deployment-v1.yaml --record=true
3 kubectl create --filename=kubia-deployment-v1.yaml --record=true
4 kubectl create --filename=kubia-deployment-v1.yaml --record=true



# the number of revisions kept is bounded by revisionHistoryLimit
[root@k8s-master1 deployment]# kubectl edit deploy kubia
spec:
  revisionHistoryLimit: 10

# roll back to a specific revision
[root@k8s-master1 deployment]# kubectl rollout undo deployment kubia --to-revision=1
deployment.apps/kubia rolled back
[root@k8s-master1 deployment]# kubectl get rs
NAME DESIRED CURRENT READY AGE
kubia-56fd945996 0 0 0 3m26s
kubia-698c869b67 0 0 0 2m47s
kubia-dbd9664c7 3 3 3 8m52s

# the RS annotations record the revision number
[root@k8s-master1 deployment]# kubectl edit rs kubia-dbd9664c7

apiVersion: apps/v1
kind: ReplicaSet
metadata:
  annotations:
    deployment.kubernetes.io/desired-replicas: "3"
    deployment.kubernetes.io/max-replicas: "4"
    deployment.kubernetes.io/revision: "5"
    deployment.kubernetes.io/revision-history: "1"
    kubernetes.io/change-cause: kubectl create --filename=kubia-deployment-v1.yaml
      --record=true
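
To inspect which pod template a given revision recorded before rolling back to it, the history command accepts a revision number (a small sketch):

# print the pod template stored for revision 1
kubectl rollout history deployment kubia --revision=1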

Controlling the rate of the rolling update

[root@k8s-master1 deployment]# kubectl edit deploy kubia
strategy:
  rollingUpdate:
    maxSurge: 25%
    maxUnavailable: 25%
  type: RollingUpdate

# maxSurge: how many pods may run above the desired replica count; set to 1 or 2 to allow at most 1 or 2 extra pods
# maxUnavailable: how many pods may be unavailable during the rolling update (a patch sketch follows)
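
Percentages can be replaced with absolute values; a sketch that allows one surge pod and forbids any unavailable pod, so capacity never drops below the desired three replicas:

kubectl patch deployment kubia \
  -p '{"spec":{"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}'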

Pausing the rollout for a makeshift canary release

# a makeshift canary release:
# route a small share of the requests to the upgraded pods
# while the rest keep hitting the old version
# if something is wrong, roll back to the old version; if all is well, resume and upgrade everything
# first build the newest version, v4
[root@k8s-master2 v4]# cat app.js
const http = require('http');
const os = require('os');

console.log("Kubia server starting...");

var handler = function(request, response) {
  console.log("Received request from " + request.connection.remoteAddress);
  response.writeHead(200);
  response.end("This is v4 running in pod " + os.hostname() + "\n");
};

var www = http.createServer(handler);
www.listen(8080);

# start the upgrade, then pause the rollout partway
[root@k8s-master1 deployment]# kubectl set image deployment kubia nodejs=172.31.228.68/game/kubia:v4
deployment.apps/kubia image updated

[root@k8s-master1 deployment]# kubectl rollout pause deployment kubia
deployment.apps/kubia paused

[root@k8s-master1 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kubia-557944968b-579xn 1/1 Running 0 71s 10.244.1.86 k8s-node2 <none> <none>
kubia-557944968b-vr6s5 1/1 Running 0 59s 10.244.2.73 k8s-master1 <none> <none>
kubia-dbd9664c7-bhpf5 1/1 Running 0 26m 10.244.0.71 k8s-node1 <none> <none>
kubia-dbd9664c7-hc4kj 1/1 Running 0 27m 10.244.1.85 k8s-node2 <none> <none>

[root@k8s-master1 ~]# while true;do curl http://10.0.0.119;sleep 1; done
This is v4 running in pod kubia-557944968b-579xn
This is v1 running in pod kubia-dbd9664c7-bhpf5
This is v1 running in pod kubia-dbd9664c7-hc4kj
This is v4 running in pod kubia-557944968b-vr6s5
This is v4 running in pod kubia-557944968b-579xn

# all good, resume the rolling update
[root@k8s-master1 deployment]# kubectl rollout resume deployment kubia
deployment.apps/kubia resumed

# for exact control over how many pods of each version run, consider two separate Deployments: bring the new one up to the same replica count alongside the old, then scale the outgoing Deployment down

[root@k8s-master1 deployment]# vim kubia-deployment-v2.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubia-v2
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kubia
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - image: 172.31.228.68/game/kubia:v2
        name: nodejs

[root@k8s-master1 deployment]# kubectl create -f kubia-deployment-v2.yaml
deployment.apps/kubia-v2 created

[root@k8s-master1 deployment]# kubectl get ep
NAME ENDPOINTS AGE
kubernetes 172.31.228.67:6443 13d
kubia 10.244.0.72:8080,10.244.0.73:8080,10.244.1.86:8080 + 3 more... 128m

# because the Service's selector matches the labels of pods from both Deployments, all of them are added to the service
This is v4 running in pod kubia-557944968b-6bw9r
This is v4 running in pod kubia-557944968b-vr6s5
This is v4 running in pod kubia-557944968b-579xn
This is v2 running in pod kubia-v2-56fd945996-t9zdb
This is v2 running in pod kubia-v2-56fd945996-76g6l
This is v2 running in pod kubia-v2-56fd945996-fgn2k

Adding a readiness probe

# first roll everything back to v2
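# one way, as a sketch: set the image back explicitly
#   kubectl set image deployment kubia nodejs=172.31.228.68/game/kubia:v2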

[root@k8s-master1 deployment]# vim kubia-deployment-v3-with-readinesscheck.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kubia
  minReadySeconds: 10
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - image: 172.31.228.68/game/kubia:v3
        name: nodejs
        readinessProbe:
          periodSeconds: 1
          httpGet:
            path: /
            port: 8080

[root@k8s-master1 deployment]# kubectl apply -f kubia-deployment-v3-with-readinesscheck.yaml

This is v2 running in pod kubia-56fd945996-8zr5j
This is v2 running in pod kubia-56fd945996-6g4zl
This is v2 running in pod kubia-56fd945996-hfvk5
This is v2 running in pod kubia-56fd945996-8zr5j
This is v2 running in pod kubia-56fd945996-6g4zl

# v3 never shows up in the responses
[root@k8s-master1 deployment]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kubia-56fd945996-6g4zl 1/1 Running 0 3m59s 10.244.0.75 k8s-node1 <none> <none>
kubia-56fd945996-8zr5j 1/1 Running 0 4m22s 10.244.2.75 k8s-master1 <none> <none>
kubia-56fd945996-hfvk5 1/1 Running 0 4m10s 10.244.1.89 k8s-node2 <none> <none>
kubia-58c74659b7-mf2bv 0/1 Running 0 69s 10.244.1.90 k8s-node2 <none> <none>

# kubia-58c74659b7-mf2bv never becomes ready
# when the new pod starts, the readiness probe hits it every second; in this pod the fifth request returns 500, so the probe starts failing
# the pod is therefore removed from the Service's endpoints, which is why curl never reaches the new pod
[root@k8s-master1 deployment]# kubectl get ep
NAME ENDPOINTS AGE
kubernetes 172.31.228.67:6443 13d
kubia 10.244.0.75:8080,10.244.1.89:8080,10.244.2.75:8080 143m

# the rolling update cannot proceed: the new pod stays unready, and even once ready it only counts as available after the 10-second minReadySeconds window
# maxUnavailable: 0 pins the rollout exactly here; no old pod can be taken away until the new one is available (see the sketch below)
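
A sketch of checking the stalled state from another shell; rollout status keeps waiting because the new replica never becomes available:

# hangs reporting the unfinished rollout until it completes or hits its deadline
kubectl rollout status deployment kubia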

Rollout deadline

[root@k8s-master1 deployment]# kubectl edit deploy kubia
spec:
  progressDeadlineSeconds: 600

# once this deadline is exceeded the rollout is marked as failed (ProgressDeadlineExceeded); it is not rolled back automatically (a sketch follows)
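
A sketch for scripting around this: rollout status exits non-zero once the deadline is exceeded, which can be chained into an automatic rollback:

# roll back automatically if the rollout fails or times out
kubectl rollout status deployment kubia || kubectl rollout undo deployment kubia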

Aborting the rollout of a bad version

# roll straight back to the previous version
[root@k8s-master1 deployment]# kubectl rollout undo deployment kubia
deployment.apps/kubia rolled back