12 k8s 日志收集


收集哪些日志

  1. k8s 系统的组件日志
  2. k8s pod中应用程序日志
  3. 主流的日志方案

容器中的日志怎么收集

简易安装 ELK 环境

jdk

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# [root@k8s-node3 yum.repos.d]# yum -y remove java-1.8.0-openjdk

[root@k8s-node3 ~]# tar -zxvf jdk-11.0.5_linux-x64_bin.tar.gz -C /usr/local/

[root@k8s-node3 ~]# vim /etc/profile
export JAVA_HOME=/usr/local/jdk-11.0.5
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar:$CLASSPATH

[root@k8s-node3 ~]# source /etc/profile

[root@k8s-node3 ~]# java -version
java version "11.0.5" 2019-10-15 LTS
Java(TM) SE Runtime Environment 18.9 (build 11.0.5+10-LTS)
Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.5+10-LTS, mixed mode)

yum 安装 elk 6.8

1
2
3
4
5
6
7
8
9
10
11
12
13
# JDK 11
[root@k8s-node3 ~]# tar -zxvf jdk-11.0.5_linux-x64_bin.tar.gz -C /usr/local/

[root@k8s-node3 ~]# vim /etc/yum.repos.d/elastic.repo

[logstash-6.x]
name=Elastic repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
1
2
# 安装 elk 
[root@k8s-node3 ~]# yum install logstash elasticsearch kibana

配置启动 kibana

1
2
3
4
5
# 修改配置文件
[root@k8s-node3 ~]# grep '^[a-zA-Z]' /etc/kibana/kibana.yml
server.port: 5601 # 端口
server.host: "0.0.0.0" # 外网访问
elasticsearch.hosts: ["http://localhost:9200"] # elasticsearch 地址 当前是本地部署
1
2
# 启动 kibana
[root@k8s-node3 ~]# systemctl start kibana

配置启动 elasticsearch

1
2
# 本地部署 保持默认 也没有优化
[root@k8s-node3 ~]# vim /etc/elasticsearch/elasticsearch.yml
1
2
3
4
# 启动
[root@k8s-node3 ~]# ln -s /usr/local/jdk-11.0.5/bin/java /usr/bin/java

[root@k8s-node3 ~]# systemctl start elasticsearch
1
2
3
4
5
6
7
8
9
# 查看服务状态
[root@k8s-node3 ~]# systemctl status elasticsearch
[root@k8s-node3 ~]# systemctl status kibana
[root@k8s-node3 ~]# ps -ef|grep java
[root@k8s-node3 ~]# ps -ef|grep kibana


# 访问 kibana 页面
http://123.56.14.192:5601/
1
# 点击监控 elasticsearch

配置 logstash

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# 本地启动logstash 监听 filebeat 输出到es和控制台

[root@k8s-node3 conf.d]# cd /etc/logstash/conf.d/
[root@k8s-node3 conf.d]# vim logstash-to-es.conf

input {
  # Input plugin: listen for filebeat (beats protocol) on port 5044.
  beats {
    port => 5044
  }
}

filter {
  # Filter/match plugins (none configured yet).
}

output {
  # Output plugin: ship events to the local Elasticsearch.
  elasticsearch {
    hosts => ["http://127.0.0.1:9200"]
    # One index per day.
    index => "k8s-log-%{+YYYY-MM-dd}"
  }
  # Also echo events to the console for debugging.
  stdout { codec => rubydebug }
}


# 启动
[root@k8s-node3 conf.d]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-to-es.conf

配置 filebeat

1
2
# 镜像地址: https://www.docker.elastic.co/#
# configmap 管理 filebeat 配置文件
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
[root@k8s-master1 ELK-Logs]# cat k8s-logs.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: k8s-logs-filebeat-config
  namespace: kube-system

data:
  filebeat.yml: |-
    filebeat.prospectors:
      - type: log
        # File to harvest (mounted from the host, see DaemonSet below).
        paths:
          - /messages
        fields:
          app: k8s
          type: module
        fields_under_root: true

    output.logstash:
      # logstash address
      hosts: ['172.17.70.252:5044']

---

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: k8s-logs
  namespace: kube-system
spec:
  selector:
    matchLabels:
      project: k8s
      app: filebeat
  template:
    metadata:
      labels:
        project: k8s
        app: filebeat
    spec:
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:6.8.6
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 500m
            memory: 500Mi
        securityContext:
          runAsUser: 0
        volumeMounts:
        - name: filebeat-config
          # Mounted as /etc/filebeat.yml; filebeat reads this file at startup.
          mountPath: /etc/filebeat.yml
          subPath: filebeat.yml
        # Log file mount.
        - name: k8s-logs
          mountPath: /messages
      volumes:
      - name: k8s-logs
        hostPath:
          # Mount the host's /var/log/messages into the pod as /messages.
          path: /var/log/messages
          type: File
      - name: filebeat-config
        configMap:
          name: k8s-logs-filebeat-config
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
# 每个node 都会创建 daemonset 去采集自己下面的k8s组件日志
# 镜像导出/导入
[root@k8s-node2 ~]# docker save -o filebeat-6.8.6.tar docker.elastic.co/beats/filebeat:6.8.6
[root@k8s-node2 ~]# scp filebeat-6.8.6.tar root@172.17.70.253:/root
[root@k8s-node1 ~]# docker load --input filebeat-6.8.6.tar


[root@k8s-master1 ELK-Logs]# kubectl apply -f k8s-logs.yaml
configmap/k8s-logs-filebeat-config created
daemonset.apps/k8s-logs created

[root@k8s-master1 ELK-Logs]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6d8cfdd59d-xwv6v 1/1 Running 16 9d 10.244.2.105 k8s-node2 <none> <none>
k8s-logs-8h7fh 1/1 Running 0 28s 10.244.0.70 k8s-master1 <none> <none>
k8s-logs-cpnmn 1/1 Running 0 28s 10.244.2.108 k8s-node2 <none> <none>
k8s-logs-l8fph 1/1 Running 0 28s 10.244.1.79 k8s-node1 <none> <none>
kube-flannel-ds-amd64-gb6n7 1/1 Running 13 9d 172.17.70.251 k8s-master1 <none> <none>
kube-flannel-ds-amd64-j59qp 1/1 Running 11 9d 172.17.70.253 k8s-node1 <none> <none>
kube-flannel-ds-amd64-w6s9d 1/1 Running 11 9d 172.17.70.254 k8s-node2 <none> <none>
metrics-server-7dbbcf4c7-gxb9m 1/1 Running 16 9d 10.244.2.102 k8s-node2 <none> <none>
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
# 查看pod上的日志
[root@k8s-master1 ELK-Logs]# kubectl exec -it k8s-logs-8h7fh -n kube-system -- bash

[root@k8s-logs-8h7fh filebeat]# ls -lh /messages
-rw------- 1 root root 72M Dec 27 03:58 /messages

# 配置文件
[root@k8s-logs-8h7fh filebeat]# vi /etc/filebeat.yml

# Rendered /etc/filebeat.yml inside the pod (from the ConfigMap).
filebeat.prospectors:
- type: log
  paths:
    - /messages
  fields:
    app: k8s
    type: module
  fields_under_root: true

output.logstash:
  hosts: ['172.17.70.252:5044']

收集 Nginx 日志

php demo 部署

1
2
3
4
5
6
[root@k8s-master1 php-demo]# kubectl apply -f namespace.yaml 
kubectl create secret docker-registry registry-pull-secret --docker-username=admin --docker-password=lx@68328153 --docker-email=253911339@qq.com --docker-server=172.17.70.252 -n test
[root@k8s-master1 php-demo]# kubectl apply -f mysql.yaml
[root@k8s-master1 php-demo]# kubectl apply -f deployment.yaml
[root@k8s-master1 php-demo]# kubectl apply -f service.yaml
[root@k8s-master1 php-demo]# kubectl apply -f ingress.yaml

改造 php deployment

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
# pod里增加一个 filebeat 容器 用来收集日志 

[root@k8s-master1 ELK-Logs]# cat nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: php-demo
  namespace: test
spec:
  replicas: 3
  selector:
    matchLabels:
      project: www
      app: php-demo
  template:
    metadata:
      labels:
        project: www
        app: php-demo
    spec:
      imagePullSecrets:
      - name: registry-pull-secret
      containers:
      - name: nginx
        image: 172.17.70.252/project/php-demo:1.0
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          name: web
          protocol: TCP
        # NOTE(review): the original manifest repeated this identical
        # resources block twice (a duplicate mapping key); kept once.
        resources:
          requests:
            cpu: 0.5
            memory: 256Mi
          limits:
            cpu: 1
            memory: 1Gi
        livenessProbe:
          httpGet:
            path: /index.html
            port: 80
          initialDelaySeconds: 6
          timeoutSeconds: 20
        volumeMounts:
        - name: nginx-logs
          mountPath: /usr/local/nginx/logs

      - name: filebeat
        image: docker.elastic.co/beats/filebeat:6.8.6
        # filebeat.yml is provided via the filebeat-nginx-config ConfigMap.
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        resources:
          # Limits.
          limits:
            memory: 500Mi
          # Requests.
          requests:
            cpu: 100m
            memory: 100Mi
        securityContext:
          runAsUser: 0
        volumeMounts:
        - name: filebeat-config
          mountPath: /etc/filebeat.yml
          subPath: filebeat.yml
        - name: nginx-logs
          mountPath: /usr/local/nginx/logs

      volumes:
      # Directory shared by the two containers.
      - name: nginx-logs
        emptyDir: {}
      # Configuration file.
      - name: filebeat-config
        configMap:
          name: filebeat-nginx-config
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
# 配置文件
[root@k8s-master1 ELK-Logs]# cat filebeat-nginx-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-nginx-config
  namespace: test

data:
  filebeat.yml: |-
    filebeat.prospectors:
      - type: log
        paths:
          - /usr/local/nginx/logs/access.log
        # tags: ["access"]
        fields:
          app: www
          type: nginx-access
        fields_under_root: true

      - type: log
        paths:
          - /usr/local/nginx/logs/error.log
        # tags: ["error"]
        fields:
          app: www
          type: nginx-error
        fields_under_root: true

    output.logstash:
      hosts: ['172.17.70.252:5044']

启动 configmap

1
2
[root@k8s-master1 ELK-Logs]# kubectl apply -f filebeat-nginx-configmap.yaml 
configmap/filebeat-nginx-config created

滚动更新 deployment

1
2
3
4
5
6
7
# 变成了 两个容器 
[root@k8s-master1 ELK-Logs]# kubectl get pods -n test
NAME READY STATUS RESTARTS AGE
db-0 1/1 Running 0 25m
php-demo-69dc8b596c-8kvw8 2/2 Running 0 31s
php-demo-69dc8b596c-jznj4 2/2 Running 0 29s
php-demo-69dc8b596c-jzxm6 2/2 Running 0 27s

进入filebeat容器查看

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
# 多个容器用 -n 指定

[root@k8s-master1 ELK-Logs]# kubectl exec -it php-demo-69dc8b596c-8kvw8 -c filebeat -n test -- bash
[root@php-demo-69dc8b596c-8kvw8 filebeat]#

# 可以看到日志的挂载目录
[root@php-demo-69dc8b596c-8kvw8 filebeat]# cd /usr/local/nginx/logs/
[root@php-demo-69dc8b596c-8kvw8 logs]# ls
access.log error.log

# 健康检查 日志 deploy中配置了 index.html 的健康检查
[root@php-demo-69dc8b596c-8kvw8 logs]# tail /usr/local/nginx/logs/access.log
10.244.0.1 - - [28/Dec/2019:17:49:06 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:49:16 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:49:26 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:49:36 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:49:46 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:49:56 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:50:06 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:50:16 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:50:26 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"
10.244.0.1 - - [28/Dec/2019:17:50:36 +0800] "GET /index.html HTTP/1.1" 200 52 "-" "kube-probe/1.16"

修改 filebeat 配置文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
# 之前的配置文件中 无论收集什么日志 索引都在 k8s-log-日期
# 新增索引
# 不同的日志存储到 不同的索引数据中 方便检索和管理
[root@k8s-node3 conf.d]# vim logstash-to-es.conf

input {
  # Input plugin: listen for filebeat (beats protocol) on port 5044.
  beats {
    port => 5044
  }
}

filter {
  # Filter/match plugins (none configured yet).
}

output {
  # Route each log type to its own daily index, keyed on the
  # custom `app`/`type` fields set in the filebeat configs.
  if [app] == "www" {
    if [type] == "nginx-access" {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "nginx-access-%{+YYYY.MM.dd}"
      }
    }
    else if [type] == "nginx-error" {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "nginx-error-%{+YYYY.MM.dd}"
      }
    }
    else if [type] == "tomcat-catalina" {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "tomcat-catalina-%{+YYYY.MM.dd}"
      }
    }
  } else if [app] == "k8s" {
    if [type] == "module" {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "k8s-log-%{+YYYY.MM.dd}"
      }
    }
  }
  # Echo events to the console for debugging.
  stdout { codec => rubydebug }
}
[root@k8s-node3 conf.d]# vim logstash-to-es.conf

input {
  # Input plugin: listen for filebeat (beats protocol) on port 5044.
  beats {
    port => 5044
  }
}

filter {
  # Filter/match plugins (none configured yet).
}

output {
  # Route each log type to its own daily index, keyed on the
  # custom `app`/`type` fields set in the filebeat configs.
  if [app] == "www" {
    if [type] == "nginx-access" {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "nginx-access-%{+YYYY.MM.dd}"
      }
    }
    else if [type] == "nginx-error" {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "nginx-error-%{+YYYY.MM.dd}"
      }
    }
    else if [type] == "tomcat-catalina" {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "tomcat-catalina-%{+YYYY.MM.dd}"
      }
    }
  } else if [app] == "k8s" {
    if [type] == "module" {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "k8s-log-%{+YYYY.MM.dd}"
      }
    }
  }
  # Echo events to the console for debugging.
  stdout { codec => rubydebug }
}
1
[root@k8s-node3 conf.d]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-to-es.conf
1
# filebeat 命令行也应该正常输出

kibana 配置

1
# 查看 索引管理

1
# 配置索引

1
# 刷新页面 查看数据

收集 Tomcat 日志

启动 tomcat 项目

1
2
3
4
5
# 数据存储 mysql 使用同一个

[root@k8s-master1 java-demo]# kubectl apply -f deployment.yaml
[root@k8s-master1 java-demo]# kubectl apply -f service.yaml
[root@k8s-master1 java-demo]# kubectl apply -f ingress.yaml

更新 deployment

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
[root@k8s-master1 ELK-Logs]# cat tomcat-deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-java-demo
  namespace: test
spec:
  replicas: 3
  selector:
    matchLabels:
      project: www
      app: java-demo
  template:
    metadata:
      labels:
        project: www
        app: java-demo
    spec:
      imagePullSecrets:
      - name: registry-pull-secret
      containers:
      - name: tomcat
        image: 172.17.70.252/project/java-demo:1.0.1
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          requests:
            cpu: 0.5
            memory: 1Gi
          limits:
            cpu: 1
            memory: 2Gi
        livenessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 20
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 20
        volumeMounts:
        - name: tomcat-logs
          mountPath: /usr/local/tomcat/logs

      # Sidecar container that ships the tomcat logs to logstash.
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:6.8.6
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        resources:
          limits:
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 100Mi
        securityContext:
          runAsUser: 0
        volumeMounts:
        - name: filebeat-config
          mountPath: /etc/filebeat.yml
          subPath: filebeat.yml
        - name: tomcat-logs
          mountPath: /usr/local/tomcat/logs
      volumes:
      # Directory shared by the two containers.
      - name: tomcat-logs
        emptyDir: {}
      - name: filebeat-config
        configMap:
          name: filebeat-config

filebeat 配置文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
[root@k8s-master1 ELK-Logs]# cat filebeat-tomcat-configmap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: test

data:
  filebeat.yml: |-
    filebeat.prospectors:
      - type: log
        paths:
          - /usr/local/tomcat/logs/catalina.*
        # tags: ["tomcat"]
        fields:
          app: www
          type: tomcat-catalina
        fields_under_root: true
        # Multiline handling for catalina logs: an entry starts with '[';
        # lines that do NOT start with '[' (e.g. stack-trace lines) are
        # appended to the previous entry so an exception stays one event.
        multiline:
          pattern: '^\['
          negate: true
          match: after
    output.logstash:
      hosts: ['172.17.70.252:5044']

更新部署

1
2
3
4
5
6
7
8
9
10
11
12
13
[root@k8s-master1 ELK-Logs]# kubectl apply -f filebeat-tomcat-configmap.yaml 
configmap/filebeat-config created

[root@k8s-master1 ELK-Logs]# kubectl apply -f tomcat-deployment.yaml
deployment.apps/tomcat-java-demo configured

# 注意查看资源 不够就先把里面的nginx 关闭
[root@k8s-master1 ELK-Logs]# kubectl get pods -n test
NAME READY STATUS RESTARTS AGE
db-0 1/1 Running 0 130m
tomcat-java-demo-6454cf7d7c-pnfj4 2/2 Running 0 2m
tomcat-java-demo-6454cf7d7c-sbpj5 2/2 Running 0 2m
tomcat-java-demo-6454cf7d7c-szbhh 2/2 Running 0 2m

建立索引 查看日志

1
2
1. 部署jar包 原理一样 部署filebeat 
2. 查日志在kibana中搜索