基础环境
- CentOS release 6.4 (Final) 64bit
- jdk-8u45-linux-x64
- Elasticsearch 1.7.1
- Kibana-4.1.1-linux-x64
- Logstash-1.5.4-1/Logstash-forwarder-0.4.0-1.x86_64
export JAVA_HOME=/usr/local/java
Elasticsearch
安装
$> wget https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.1.tar.gz
配置
$> vim /usr/local/elasticsearch/config/elasticsearch.yml
根据自身机器内存情况调整JVM内存大小,原则上是越大越好但需要预留足够的内存给系统,此规则也是根据实际情况而定并非铁律。
配置文件:/usr/local/elasticsearch/bin/elasticsearch.in.sh
ES_MIN_MEM=10g
ES_MAX_MEM=10g
启动 & 停止
启动
$> /usr/local/elasticsearch/bin/elasticsearch -d
停止
#停止本地节点
检验
启动Elasticsearch后访问http://yourip:9200
$> /usr/local/elasticsearch/bin/elasticsearch -d
$> curl -XGET http://127.0.0.1:9200
{
"status" : 200,
"name" : "Vindaloo",
"cluster_name" : "elasticsearch",
"version" : {
"number" : "1.7.1",
"build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19",
"build_timestamp" : "2015-07-29T09:54:16Z",
"build_snapshot" : false,
"lucene_version" : "4.10.4"
},
"tagline" : "You Know, for Search"
}
Kibana
安装
useradd kibana
配置
配置文件/usr/local/kibana/config/kibana.yml
根据自身环境配置elasticsearch_url
、host
、port
等参数
Kibana启动控制脚本
添加启动脚本
启动Kibana/etc/init.d/kibana start
#!/bin/sh
# Init script for kibana
# From Kibana package
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
# * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides: kibana
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Kibana
# Description: Kibana web UI for Elasticsearch
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH

name=kibana
program=/usr/local/kibana/bin/kibana
args=''
pidfile="/var/run/$name.pid"
user="kibana"
group="kibana"
chdir="/"
chroot="/"

# Log a message to syslog only.
trace() {
  logger -t "/etc/init.d/kibana" "$@"
}

# Log a message to syslog and echo it to the caller's terminal.
emit() {
  trace "$@"
  echo "$@"
}

# Launch kibana in the background as $user:$group and write the pidfile.
start() {
  # Ensure the log directory is setup correctly.
  [ ! -d "/var/log/kibana/" ] && mkdir "/var/log/kibana/"
  chown "$user":"$group" "/var/log/kibana/"
  chmod 755 "/var/log/kibana/"

  # Run the program! stdout/stderr are appended to dedicated log files.
  chroot --userspec "$user":"$group" "$chroot" sh -c "
    cd \"$chdir\"
    exec \"$program\" $args
  " >> /var/log/kibana/kibana.stdout 2>> /var/log/kibana/kibana.stderr &

  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > "$pidfile"
  emit "$name started"
  return 0
}

# Politely stop the daemon: SIGTERM, then wait up to 5 seconds.
# Returns 0 when the process is gone (or was never running), 1 otherwise.
stop() {
  # Try a few times to kill TERM the program
  if status ; then
    pid=$(cat "$pidfile")
    trace "Killing $name (pid $pid) with SIGTERM"
    kill -TERM "$pid"
    # Wait for it to exit.
    for i in 1 2 3 4 5 ; do
      trace "Waiting $name (pid $pid) to die..."
      status || break
      sleep 1
    done
    if status ; then
      emit "$name stop failed; still running."
      return 1
    fi
    emit "$name stopped."
  fi
  # Fixed: the original fell through with status's exit code (3) when the
  # service was not running, which made 'restart' (stop && start) refuse
  # to start a stopped service. Stopping a stopped service is a success.
  return 0
}

# LSB-style status: 0 = running, 2 = dead but pidfile exists, 3 = not running.
status() {
  if [ -f "$pidfile" ] ; then
    pid=$(cat "$pidfile")
    if ps -p "$pid" > /dev/null 2> /dev/null ; then
      # process by this pid is running.
      # It may not be our pid, but that's what you get with just pidfiles.
      # TODO(sissel): Check if this process seems to be the same as the one we
      # expect. It'd be nice to use flock here, but flock uses fork, not exec,
      # so it makes it quite awkward to use in this case.
      return 0
    else
      return 2 # program is dead but pid file exists
    fi
  else
    return 3 # program is not running
  fi
}

# Escalate to SIGKILL if a normal stop leaves the process alive.
force_stop() {
  if status ; then
    stop
    status && kill -KILL "$(cat "$pidfile")"
  fi
}

case "$1" in
  force-start|start|stop|force-stop|restart)
    trace "Attempting '$1' on kibana"
    ;;
esac

case "$1" in
  force-start)
    PRESTART=no
    exec "$0" start
    ;;
  start)
    status
    code=$?
    if [ $code -eq 0 ]; then
      emit "$name is already running"
      exit $code
    else
      start
      exit $?
    fi
    ;;
  stop) stop ;;
  force-stop) force_stop ;;
  status)
    status
    code=$?
    if [ $code -eq 0 ] ; then
      emit "$name is running"
    else
      emit "$name is not running"
    fi
    exit $code
    ;;
  restart)
    stop && start
    ;;
  *)
    # Fixed: usage listed 'force-start' twice and referenced the undefined
    # $SCRIPTNAME variable; use $0 instead.
    echo "Usage: $0 {start|force-start|stop|force-stop|status|restart}" >&2
    exit 3
    ;;
esac

exit $?
Nginx配置
配置文件kibana.conf
$> yum -y install httpd-tools
$> htpasswd -c /usr/local/nginx/conf/kibana.users mogl
# Upstream pool pointing at the local Kibana instance (default port 5601).
upstream kibana {
server 127.0.0.1:5601;
}
# Reverse-proxy vhost that fronts Kibana with HTTP basic auth.
server {
listen 80;
server_name moglkibana.com;
access_log /var/wwwlog/kibana/access.log main;
error_log /var/wwwlog/kibana/error.log error;
# Credentials file created with htpasswd (see commands above this block).
auth_basic "Kibana Access";
auth_basic_user_file /usr/local/nginx/conf/kibana.users;
location / {
# Fail over to the next upstream on errors, timeouts, and 5xx responses.
proxy_next_upstream http_500 http_502 http_504 error timeout invalid_header;
proxy_pass http://kibana;
# Preserve the original host and client address for Kibana/back-end logs.
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
Logstash
Logstash 服务端安装配置
$> wget https://download.elastic.co/logstash/logstash/packages/centos/logstash-1.5.4-1.noarch.rpm
创建logstash配置文件:
# Write the initial Logstash pipeline: accept events from logstash-forwarder
# over the SSL lumberjack protocol on port 5043, parse syslog-typed events
# with grok, and index everything into the local Elasticsearch node.
# (Fixed: a stray line-number "31" was fused onto the 'cat' command.)
cat > /etc/logstash/conf.d/01-logstash-initial.conf << EOF
input {
lumberjack {
port => 5043
type => "logs"
ssl_certificate => "/etc/pki/tls/certs/logstash-forwarder.crt"
ssl_key => "/etc/pki/tls/private/logstash-forwarder.key"
}
}
filter {
if [type] == "syslog" {
grok {
match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
add_field => [ "received_at", "%{@timestamp}" ]
add_field => [ "received_from", "%{host}" ]
}
syslog_pri { }
date {
match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
}
}
output {
elasticsearch { host => localhost }
stdout { codec => rubydebug }
}
EOF
output
中的elasticsearch
配置host
若配置成["localhost:9200"]
则在与elasticsearch连接时会报错Caught exception while handling client http traffic, closing connection
Logstash 客户端安装配置
安装logstash-forwarder并复制ssl证书:
$> wget https://download.elastic.co/logstash-forwarder/binaries/logstash-forwarder-0.4.0-1.x86_64.rpm
$> yum localinstall logstash-forwarder-0.4.0-1.x86_64.rpm
$> echo '10.0.6.6 elk.mogl.com' >> /etc/hosts
$> scp root@elk.mogl.com:/etc/pki/tls/certs/logstash-forwarder.crt /etc/pki/tls/certs/logstash-forwarder.crt
logstash-forwarder客户端转发配置:
$> mv /etc/logstash-forwarder.conf /etc/logstash-forwarder.conf.org
# Write the forwarder config: ship /var/log/messages and /var/log/secure
# to the Logstash server (elk.mogl.com:5043) over SSL, tagging each event
# with type=syslog so the server-side filter picks it up.
# NOTE(review): heredoc content is the literal JSON written to disk, so no
# comments can be added inside it (JSON has no comment syntax).
cat > /etc/logstash-forwarder.conf << EOF
{
"network": {
"servers": [ "elk.mogl.com:5043" ],
"ssl ca": "/etc/pki/tls/certs/logstash-forwarder.crt",
"timeout": 15
},
"files": [
{
"paths": [
"/var/log/messages",
"/var/log/secure"
],
"fields": { "type": "syslog" }
}
]
}
EOF
$> /etc/init.d/logstash-forwarder start
创建索引模式
访问Kibana,在setting
中添加索引即可。
采集分析Nginx日志
Server端
添加pattern匹配规则
我所使用的Nginx日志格式:
log_format main '$remote_addr [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" $http_x_forwarded_for $request_length $request_time $upstream_addr $host';
规则可通过此网站进行测试:Logstash匹配测试网站
$> mkdir /opt/logstash/patterns
# Install a custom grok pattern file so %{NGINXACCESS} resolves when used
# in the Logstash filter below. The heredoc body is the literal pattern
# file content and must not be altered.
cat > /opt/logstash/patterns/nginx << EOF
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME}
NGINXACCESS %{IPORHOST:remote_addr} \[%{HTTPDATE:time_local}\] "%{WORD:method} %{URIPATH:path}(?:%{URIPARAM:param})? HTTP/%{NUMBER:httpversion}" %{INT:status} %{INT:body_bytes_sent} %{QS:http_referer} %{QS:http_user_agent} %{NOTSPACE:http_x_forwarded_for} %{INT:request_length} %{NUMBER:request_time} %{NOTSPACE:upstream_addr} %{IPORHOST:host}
EOF
$> chown -R logstash:logstash /opt/logstash/patterns/
Logstash添加Nginx相关配置/etc/logstash/conf.d/logstash-nginx.conf
如果按照上文logstash配置了统计syslog的采集,端口需要改变,否则会报错地址已被占用。
# /etc/logstash/conf.d/logstash-nginx.conf
# Receives nginx access logs from logstash-forwarder and parses them with
# the custom NGINXACCESS grok pattern installed above.
# (Fixed: a stray line-number "24" was fused onto the 'input' keyword.)
# NOTE: if the syslog pipeline above is also active, this lumberjack input
# must use a different port than 5043, or Logstash fails to bind
# ("address already in use") — as the surrounding text warns.
input {
lumberjack {
port => 5043
type => "logs"
ssl_certificate => "/etc/pki/tls/certs/logstash-forwarder.crt"
ssl_key => "/etc/pki/tls/private/logstash-forwarder.key"
}
}
filter {
if [type] == "nginx" {
grok {
match => { "message" => "%{NGINXACCESS}" }
}
}
}
output {
elasticsearch { host => localhost }
stdout { codec => rubydebug }
}
Client端
logstash-forwarder将日志转发到服务端:
{
"network": {
"servers": [ "elk.mogl.com:5043" ],
"ssl ca": "/etc/pki/tls/certs/logstash-forwarder.crt",
"timeout": 15
},
"files": [
{
"paths": [
"/var/wwwlog/test/access.log"
],
"fields": { "type": "nginx" }
}
]
}