sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
systemctl stop firewalld
systemctl disable firewalld
cat <<EOF >> /etc/security/limits.conf
# raise file-descriptor and thread limits (ES 7 bootstrap checks require nofile >= 65535 and nproc >= 4096)
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
EOF
echo vm.max_map_count=655360 >> /etc/sysctl.conf
sysctl -p
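A quick sanity check that the kernel and limit settings took effect (the limits.conf values only apply to new login sessions, so re-log in first):
sysctl vm.max_map_count    # expect: vm.max_map_count = 655360
ulimit -Sn; ulimit -Hn     # soft/hard open-file limits, expect 65536
ulimit -Su; ulimit -Hu     # soft/hard process/thread limits, expect 4096
getenforce                 # expect Permissive now, Disabled after a reboot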
cat <<EOF >> /etc/hosts
192.168.14.239 es-node1
192.168.14.240 es-node2
192.168.14.241 es-node3
EOF
hostname es-node1                     # transient hostname for the current session
hostnamectl set-hostname es-node1     # persistent hostname (use es-node2 / es-node3 on the other nodes)
ssh-keygen
ssh-copy-id es-node1
ssh-copy-id es-node2
ssh-copy-id es-node3
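To confirm passwordless SSH works to all three nodes (hostnames as defined in /etc/hosts above), a small loop such as:
for h in es-node1 es-node2 es-node3; do
    ssh -o BatchMode=yes "$h" hostname   # BatchMode fails instead of prompting if key auth is broken
done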
yum -y install wget vim
cd /etc/yum.repos.d/
mkdir backup
mv *.repo backup
# Aliyun yum repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all
yum makecache
# EPEL repo
yum -y install epel-release
yum clean all
yum makecache
cat <<EOF > /etc/yum.repos.d/elk.repo
[elk]
name=elk
baseurl=https://mirrors.tuna.tsinghua.edu.cn/elasticstack/yum/elastic-6.x/
enabled=1
gpgcheck=0
EOF
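Optionally refresh the cache and confirm the new elk repo is visible (note it tracks the 6.x line, while Elasticsearch 7.3.1 and Kibana 7.3.2 below are installed from tarballs):
yum makecache
yum repolist enabled | grep -iE 'elk|epel'
yum list available 'logstash*' | head   # packages that can be installed from the elk repo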
mkdir -p /data/apps/
tar -xf jdk-8u11-linux-x64.tar.gz -C /data/apps/
mv /data/apps/jdk1.8.0_11/ /data/apps/jdk
cat <<'EOF' > /etc/profile.d/jdk.sh   # quote EOF so $JAVA_HOME and $PATH are written literally, not expanded now
JAVA_HOME=/data/apps/jdk
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME CLASSPATH PATH
EOF
source /etc/profile
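Verify the JDK is picked up (in this shell after sourcing, and in any new login shell):
java -version     # expect: java version "1.8.0_11"
echo $JAVA_HOME   # expect: /data/apps/jdk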
① Download
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.1-linux-x86_64.tar.gz
tar -xf elasticsearch-7.3.1-linux-x86_64.tar.gz
mv elasticsearch-7.3.1 /data/apps/elasticsearch
cd /data/apps
useradd es
chown -R es:es elasticsearch
su - es
mkdir -pv /home/es/{data,logs}/elastic
cd /data/apps/elasticsearch
vim config/elasticsearch.yml
cluster.name: bigdata                  # cluster name, identical on all three nodes
node.name: es-node1                    # node name, unique per node (es-node2 / es-node3 on the others)
path.data: /home/es/data/elastic       # directory where index data is stored
path.logs: /home/es/logs/elastic       # directory where the Elasticsearch logs are written
network.host: 0.0.0.0
# allow cross-origin requests (needed by the elasticsearch-head plugin)
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-credentials: true
discovery.seed_hosts: ["es-node1", "es-node2", "es-node3"]   # hosts used for discovery; replaces discovery.zen.ping.unicast.hosts in ES 7.x
cluster.initial_master_nodes: ["es-node1"]                   # must match a node.name; only consulted on the first cluster bootstrap
transport.tcp.port: 9300               # TCP port for inter-node communication, default 9300
# discovery.zen.minimum_master_nodes (quorum = nodes/2+1, here 2) is ignored by ES 7.x, which manages the voting quorum itself
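es-node2 and es-node3 need the same installation, user, directories and an elasticsearch.yml that differs only in node.name. One way to push it out is the sketch below, run as root from es-node1 and assuming rsync is available (otherwise scp -r works the same way):
for h in es-node2 es-node3; do
    rsync -a /data/apps/elasticsearch "$h":/data/apps/
    ssh "$h" "sed -i 's/^node.name:.*/node.name: $h/' /data/apps/elasticsearch/config/elasticsearch.yml"
    ssh "$h" "useradd es; chown -R es:es /data/apps/elasticsearch; mkdir -p /home/es/{data,logs}/elastic; chown -R es:es /home/es"   # useradd errors harmlessly if the user already exists
done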
./bin/elasticsearch -d -p pid_file
# shut down Elasticsearch
pkill -F pid_file
# Check that Elasticsearch is running
curl -XGET 'http://127.0.0.1:9200'
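Once Elasticsearch has been started on all three nodes, the cluster state can be checked from any of them:
curl -s 'http://127.0.0.1:9200/_cluster/health?pretty'   # expect "status": "green" and "number_of_nodes": 3
curl -s 'http://127.0.0.1:9200/_cat/nodes?v'             # lists all nodes; the elected master is marked with *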
====> ES-Head plugin: a client-side tool that makes it easy to inspect and operate on Elasticsearch
https://github.com/mobz/elasticsearch-head
*** The plugin must NOT be installed inside the Elasticsearch plugins directory
git clone https://github.com/mobz/elasticsearch-head.git
cd elasticsearch-head
yum -y install nodejs npm
npm init -f   # fixes: npm WARN enoent ENOENT: no such file or directory, open '/soft/elasticsearch/plugins/package.json'
npm install -g grunt-cli
npm install grunt --save
npm install grunt-contrib-clean
npm install grunt-contrib-concat
npm install grunt-contrib-watch
npm install grunt-contrib-connect
npm install grunt-contrib-copy
npm install phantomjs-prebuilt@2.1.14 --ignore-scripts
npm install grunt-contrib-jasmine
# Edit elasticsearch-head/Gruntfile.js and add the hostname option so the UI listens on all interfaces
connect: {
    server: {
        options: {
            hostname: '0.0.0.0',
            port: 9100,
            base: '.',
            keepalive: true
        }
    }
}
# Edit elasticsearch-head/_site/app.js
this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://192.168.14.239:9200";  // use an ES HTTP address reachable from the browser
nohup grunt server > /dev/null 2>&1 &
yum -y install screen
screen -S es-head
grunt server
http://localhost:9100/
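A quick check from the node where grunt was started that the head UI is being served:
curl -sI http://127.0.0.1:9100/ | head -n 1   # expect an HTTP 200 response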
https://github.com/lukas-vlcek/bigdesk
mkdir -p elasticsearch/plugins/bigdesk/_site
unzip bigdesk-master.zip -d elasticsearch/plugins/bigdesk/_site
mv elasticsearch/plugins/bigdesk/_site/bigdesk-master/* elasticsearch/plugins/bigdesk/_site/
cd elasticsearch/plugins/bigdesk
cat <<EOF > plugin-descriptor.properties
description=bigdesk
version=master
site=true
name=bigdesk
EOF
vim BigdeskStore.js
# change:
return (major == 1 && minor >= 0 && maintenance >= 0 && (build != 'Beta1' || build != 'Beta2'));
# to (drop the hard-coded major-version check so newer ES versions are accepted):
return (minor >= 0 && maintenance >= 0 && (build != 'Beta1' || build != 'Beta2'));
#python -m SimpleHTTPServer
nohup python -m SimpleHTTPServer > /dev/null 2>&1 &
#nohup python -m SimpleHTTPServer 8888 > /dev/null 2>&1 &
http://ip:8000/   # served by the Python HTTP server above (default port 8000); ES 5+ no longer serves site plugins under /_plugin/
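To confirm the page is being served (port 8000 by default, or 8888 if started with the commented-out command above):
curl -sI http://127.0.0.1:8000/ | head -n 1   # expect an HTTP 200 response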
shasum -a 512 kibana-7.3.2-linux-x86_64.tar.gz
tar -xzf kibana-7.3.2-linux-x86_64.tar.gz
mv kibana-7.3.2-linux-x86_64 kibana
mv kibana /data/apps/es-plugin
cd /data/apps/es-plugin/kibana
# vim config/kibana.yml
server.port: 5601                                     # port Kibana listens on
server.host: "192.168.14.239"                         # listen address; an internal IP is recommended
elasticsearch.hosts: ["http://192.168.14.239:9200"]   # Elasticsearch endpoint; any node of the cluster works
useradd kibana
chown -R kibana:kibana /data/apps/es-plugin/kibana
su - kibana
cd /data/apps/es-plugin/kibana
./bin/kibana
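Kibana can also be kept running in the background the same way as grunt above, then checked over HTTP once it has finished starting:
nohup ./bin/kibana > /home/kibana/kibana.log 2>&1 &
curl -sI http://192.168.14.239:5601/ | head -n 1   # expect an HTTP response (Kibana redirects to /app/kibana)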
# vim /etc/logstash/conf.d/system.conf
input {
  file {
    path => "/var/log/messages"        # log file to read
    type => "system"                   # type tag attached to every event
    start_position => "beginning"      # read the file from the beginning on the first run
    stat_interval => "2"               # how often (seconds) to check the file for updates, default 1
  }
}
output {
  elasticsearch {
    hosts => ["192.168.14.239:9200"]       # Elasticsearch host(s)
    index => "systemlog-%{+YYYY.MM.dd}"    # index name, one index per day
  }
}
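With the config in place, Logstash can validate it and then ship /var/log/messages into the cluster. A sketch, assuming Logstash was installed as a package (e.g. from the elk repo above) so the logstash service and /usr/share/logstash paths exist:
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/system.conf --config.test_and_exit   # syntax check only
systemctl start logstash    # note: the logstash user needs read access to /var/log/messages
# after a minute or two, the daily index should appear:
curl -s 'http://192.168.14.239:9200/_cat/indices/systemlog-*?v'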