ELK 6 安裝配置:nginx 日誌收集與 Kibana 漢化

ELK 6安裝配置 nginx日誌收集 Kibana漢化

restart proc times add-port app pack minimum node cnblogs

#ELK 6安裝配置 nginx日誌收集 kabana漢化

#環境 centos 7.4 ,ELK 6 ,單節點

#服務端
Logstash 收集,過濾
Elasticsearch 存儲,索引日誌
Kibana 可視化
#客戶端
filebeat 監控、轉發,作為agent

filebeat-->Logstash-->Elasticsearch-->Kibana

#基本配置

#時間同步
#關閉selinux
#內核優化
#防火墻端口

#內核
echo ‘
* hard nofile 65536
* soft nofile 65536
* soft nproc  65536
* hard nproc  65536
‘>>/etc/security/limit.conf
echo ‘
vm.max_map_count = 262144
net.core.somaxconn=65535
net.ipv4.ip_forward = 1
‘>>/etc/sysctl.conf
sysctl -p
#防火墻
firewall-cmd --permanent --add-port={9200/tcp,9300/tcp,5044/tcp,5601/tcp}  
firewall-cmd --reload
frewall-cmd  --list-all

#安裝

#可以下載tar或者rpm包安裝
# 官網 https://www.elastic.co/downloads
# 中文 https://www.elastic.co/cn/products
#下載rpm包
https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.2.rpm
https://artifacts.elastic.co/downloads/logstash/logstash-6.2.2.rpm
https://artifacts.elastic.co/downloads/kibana/kibana-6.2.2-x86_64.rpm
https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.2.2-x86_64.rpm

#安裝JDK, elasticsearch需要java環境

yum install java-1.8.0-openjdk -y

#配置yum源

rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
echo ‘
[elk-6]
name=elk-6
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
‘>/etc/yum.repos.d/elk.repo

#安裝

yum install elasticsearch -y
yum install logstash -y
yum install kibana -y
yum install filebeat -y

#elasticsearch配置


 #查看配置
rpm -qc elasticsearch
grep -v ‘^#‘ /etc/elasticsearch/elasticsearch.yml
cp /etc/elasticsearch/elasticsearch.yml{,.bak}

 #更改配置
echo ‘
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
cluster.name: ELK
node.name: elk.novalocal
network.host: 0.0.0.0
http.port: 9200
discovery.zen.ping.unicast.hosts: ["172.16.50.32:9300"]
discovery.zen.minimum_master_nodes: 1
‘>/etc/elasticsearch/elasticsearch.yml
 #修改配置後
systemctl daemon-reload

 #啟動
systemctl enable  elasticsearch
systemctl restart elasticsearch

 #check
systemctl status elasticsearch
netstat -nltp | grep java
curl -X GET http://localhost:9200

#logstash配置

  • input :數據輸入
  • filter:數據轉化,過濾,分析
  • output:數據輸出

 #查看配置
rpm -qc logstash
egrep -v ‘^#|^$‘ /etc/logstash/logstash.yml
cp /etc/logstash/logstash.yml{,.bak}

echo ‘path.config: /etc/logstash/conf.d‘>>/etc/logstash/logstash.yml

 #添加一個日誌處理文件
 #filebeat->logstash->elasticsearch
echo ‘
input {

 #收集本地log#
  file {
     type => "logtest"
     path => "/var/log/logtest.txt"
     start_position => "beginning"
  }

 #filebeat客戶端#
  beats {
     port => 5044
  }

}

 #篩選
 #filter { }

output {

#標準輸出,調試使用#
  stdout {
   codec => rubydebug { }
  }

# 輸出到es#
  elasticsearch {
    hosts => ["http://172.16.50.32:9200"]
    index => "%{type}-%{+YYYY.MM.dd}"
  }

}
‘>/etc/logstash/conf.d/logstash-01.conf

#調試(可選)

 #檢測配置  
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-01.conf --config.test_and_exit
 #生成測試log
echo $(date +"%F-%T") log-test >>/var/log/logtest.txt
 #啟動,查看生成日誌
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-01.conf

#啟動

systemctl enable logstash
systemctl restart  logstash

#check

sleep 20
systemctl status logstash
netstat -nltp | grep java

#kibana配置


 #配置 
rpm -qc kibana
cp /etc/kibana/kibana.yml{,.bak}
grep -v ‘^#‘ /etc/kibana/kibana.yml.bak
echo ‘
server.port: 5601
server.host: "0.0.0.0"
 # ES的url的一個ES節點#
 #elasticsearch.url: "http://172.16.50.32:9200"
elasticsearch.url: "http://localhost:9200"
kibana.index: ".kibana"
 #kibana.defaultAppId: "home"
‘>/etc/kibana/kibana.yml

 #啟動
systemctl enable  kibana
systemctl restart kibana

 #check
systemctl status kibana
netstat -nltp | grep node

 #防火墻對外開放tcp/5601
 #瀏覽器訪問 ip:5601

#漢化kibana (可選)

[[ -f /usr/bin/git ]] || { echo ‘install git‘;yum install -y git &>/dev/null; }
git clone https://github.com/anbai-inc/Kibana_Hanization.git
cd Kibana_Hanization
python main.py /usr/share/kibana

 #重啟kibana
systemctl restart kibana

#瀏覽器訪問Kibana設置

首次打開,需要添加索引模式
#Management管理-->Index Patterns索引模式-->Create index pattern創建索引模式
填寫*(索引名)-->Next step-->選擇如@timestamp-->Create index pattern ,完成
#Index pattern 下面填寫logstash配置的名稱如type => "logs"填寫logs

技術分享圖片


#filebeat配置 (輕量客戶端)

yum install -y filebeat 

#查看配置

rpm -qc filebeat
egrep -v ‘#|^$‘ /etc/filebeat/filebeat.yml
cp /etc/filebeat/filebeat.yml{,.bak}

#收集nginx日誌示例

 #安裝nginx
rpm -Uvh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm
yum install -y nginx
systemctl start nginx
curl localhost
 #查看nginx日誌
tail /var/log/nginx/access.log

#配置filebeat收集nginx日誌

echo ‘#filebeat#
filebeat.prospectors:
#nginx
- input_type: log
  enable: yes
  #tags: nginx-access
  paths:
    - /var/log/nginx/access.log
  exclude_lines: ["^$"]
  fields:
    type: "nginx-access"
  fields_under_root: true

output.logstash:
  hosts: ["localhost:5044"]
  #hosts: ["172.16.50.32:5044"]
  #index: filebeat
‘>/etc/filebeat/filebeat.yml

#啟動

 systemctl enable  filebeat  
 systemctl restart filebeat  
 systemctl status  filebeat  

#在kibana查看日誌


#logstash使用grok過濾nginx日誌

nginx日誌有main和log_json兩種,默認為main普通文本格式
ELK存儲為json格式,需將文本格式化,拆分出如ip地址、訪問agent等字段,便於後續使用

#nginx默認日誌格式

    log_format  main  ‘$remote_addr - $remote_user [$time_local] "$request" ‘
                      ‘$status $body_bytes_sent "$http_referer" ‘
                      ‘"$http_user_agent" "$http_x_forwarded_for"‘;

 #curl localhost生成日誌格式如下
 #127.0.0.1 - - [22/Mar/2018:18:37:37 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"

#logstash配置,使用grok過濾nginx日誌

grok使用的正則表達式在grok-patterns文件
可以引用或添加自定義規則
Grok=$(find / -name grok-patterns)
echo $Grok
#/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-patterns-core-4.1.2/patterns/grok-patterns

 #創建nginx正則表達式(引用grok正則)  
echo ‘#nginx-access
WZ ([^ ]*)
NGINXACCESS %{IP:remote_ip} \- \- \[%{HTTPDATE:timestamp}\] "%{WORD:method} %{WZ:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:status} %{NUMBER:bytes} %{QS:referer} %{QS:agent} %{QS:xforward}
‘>/etc/logstash/conf.d/nginx-access


 #重新生成logstash配置文件  
echo ‘
input {

 #收集本地log#
  file {
     type => "logtest"
     path => "/var/log/logtest.txt"
     start_position => "beginning"
  }

 #filebeat客戶端#
  beats {
     port => 5044
  }

}

 # #篩選
filter {

# 如果是nginx訪問日誌
  if ( [type] == "nginx-access" ) {

    #按字段切割
    grok { 
      patterns_dir=>"/etc/logstash/conf.d/nginx-access"
      match => { "message" => "%{NGINXACCESS}" }
      }

    # 時間格式轉換
    date {
      match => [ "timestamp", "dd/MMM/YYYY:HH:mm:ss Z" ]
      }

    # 刪除不需要的字段
    mutate { 
      remove_field => [ "offset", "@version", "beat", "input_type", "tags","id"]
      }
    }
}

output {

#標準輸出,調試使用#
  stdout {
   codec => rubydebug { }
  }

# 輸出到es#
  elasticsearch {
    hosts => ["http://172.16.50.32:9200"]
    index => "%{type}-%{+YYYY.MM.dd}"
  }

}
‘>/etc/logstash/conf.d/logstash-01.conf

#檢測配置

/usr/share/logstash/bin/logstash -t -f /etc/logstash/conf.d/logstash-01.conf 

#調試logstash

 #關閉
systemctl stop  logstash
 #在終端啟動查看
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-01.conf

#訪問nginx產生日誌,在elasticsearch-head或者Kibana查看nginx日誌

logstash配置文件可拆分為多個,按input、filter、output類型+序列號指定優先級


#elasticsearch調試工具(可選)

# elasticsearch安裝head插件

 #安裝NodeJS (epel源)  
yum install -y nodejs
 #安裝npm  
npm install -g cnpm --registry=https://registry.npm.taobao.org
 #使用npm安裝grunt  
npm install -g grunt

 #安裝elasticsearch-head
 #查看https://github.com/mobz/elasticsearch-head
mkdir /opt/head
cd /opt/head
git clone git://github.com/mobz/elasticsearch-head.git
cd elasticsearch-head
npm install
 #啟動
npm run start &

 #配置elasticsearch訪問
echo ‘#elasticsearch-head
http.cors.enabled: true
http.cors.allow-origin: "*"
‘>>/etc/elasticsearch/elasticsearch.yml

 #重啟elasticsearch
systemctl restart elasticsearch

 #瀏覽器訪問9100端口 
http://ip:9100/
 #出現“未連接”,請修改localhost為ip地址  

技術分享圖片

ELK 6安裝配置 nginx日誌收集 Kibana漢化