0. Install Docker
yum install docker -y
systemctl enable docker
systemctl start docker
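# Optional sanity check (not part of the original steps): confirm the client works and the daemon answers
docker --version
docker info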
1. Create the deployment directory
mkdir -p /usr/local/module/
cd /usr/local/module/
2. Configure the environment
vi /etc/profile
# Append the following at the end
##########################################
#java
export JAVA_HOME=/usr/local/module/java
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib
#scala
export SCALA_HOME=/usr/local/module/scala
export PATH=$SCALA_HOME/bin:$PATH
#hadoop
export HADOOP_HOME=/usr/local/module/hadoop
export PATH=$HADOOP_HOME/bin:$PATH
export HADOOP_CONF_DIR=/usr/local/module/hadoop/etc/hadoop
#hive
export HIVE_HOME=/usr/local/module/hive
export PATH=$HIVE_HOME/bin:$PATH
export HIVE_CONF_DIR=/usr/local/module/hive/conf
#spark
export SPARK_HOME=/usr/local/module/spark
export PATH=$SPARK_HOME/bin:$PATH
export SPARK_CONF_DIR=/usr/local/module/spark/conf
# Required parameter for PySpark
export PYSPARK_ALLOW_INSECURE_GATEWAY=1
##########################################
# Apply immediately
source /etc/profile
3. Deploy the Java environment
tar -zxvf jdk-8u181-linux-x64.tar.gz
ln -s jdk1.8.0_181 java
4. Deploy the Scala environment
tar -zxvf scala-2.12.1.tgz
ln -s scala-2.12.1 scala
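# Optional check: confirm both runtimes resolve through the symlinks and the PATH set in step 2
java -version    # expect 1.8.0_181
scala -version   # expect 2.12.1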
5. Deploy Hadoop
tar -zxvf hadoop-2.7.2.tar.gz
ln -s hadoop-2.7.2 hadoop
cd hadoop
vi etc/hadoop/core-site.xml
# Edit core-site.xml
##############################################
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost.localdomain:9000</value>
        <description>HDFS internal communication address</description>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/usr/local/module/hadoopdata/</value>
        <description>Hadoop data storage directory</description>
    </property>
</configuration>
##############################################
# Create the Hadoop data directory
mkdir -p /usr/local/module/hadoopdata/
vi etc/hadoop/hdfs-site.xml
# Edit hdfs-site.xml
##############################################
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
</configuration>
##############################################
vi etc/hadoop/yarn-site.xml
# Edit yarn-site.xml
##############################################
<configuration>
    <!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>localhost</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.pmem-check-enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
</configuration>
##############################################
cp etc/hadoop/mapred-site.xml.template etc/hadoop/mapred-site.xml
vi etc/hadoop/mapred-site.xml
##############################################
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
##############################################
# Configure passwordless SSH login
# Generate a key pair
ssh-keygen -t rsa
# Send the public key to the host
ssh-copy-id -i ~/.ssh/id_rsa.pub root@localhost
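# Optional check: this should now log in without prompting for a password
ssh root@localhost hostname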
###############################################
vi /usr/local/module/hadoop/etc/hadoop/hadoop-env.sh
############################################### modify the corresponding line
export JAVA_HOME=/usr/local/module/java
###############################################
# Format the NameNode
hdfs namenode -format
# Start
sh sbin/start-all.sh
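# Optional check: jps should list NameNode, DataNode, SecondaryNameNode, ResourceManager and NodeManager
jps
hdfs dfsadmin -report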
6. Deploy MySQL
mkdir -p /usr/local/module/mysql/conf
mkdir -p /usr/local/module/mysql/data
mkdir -p /usr/local/module/mysql/logs
mkdir -p /usr/local/module/mysql/mysql-files
# Edit my.cnf
vi /usr/local/module/mysql/conf/my.cnf
############################################
[mysqld]
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
############################################
docker run -d \
-p 3306:3306 \
--name mysql \
--restart always \
--privileged=true \
-v /usr/local/module/mysql/conf:/etc/mysql \
-v /usr/local/module/mysql/logs:/var/log/mysql \
-v /usr/local/module/mysql/data:/var/lib/mysql \
-v /usr/local/module/mysql/mysql-files:/var/lib/mysql-files/ \
-e MYSQL_ROOT_PASSWORD=root123 \
-e TZ=Asia/Shanghai \
mysql:5.7
# Install the MySQL client
yum -y install http://dev.mysql.com/get/mysql57-community-release-el7-10.noarch.rpm
yum -y install mysql
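# Optional check: confirm the container is up and the root login works (password root123 as set above)
docker ps | grep mysql
mysql -h 127.0.0.1 -P 3306 -u root -proot123 -e 'select version();'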
7. Deploy Hive
# Create the hive user, database, etc. in MySQL
mysql -h 192.168.172.134 -u root -p
create user 'hive' identified by 'hive';
create database hive;
grant all on hive.* to hive@'%' identified by 'hive';
grant all on hive.* to hive@'localhost' identified by 'hive';
flush privileges;
exit;
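# Optional check: confirm the new hive account can log in
mysql -h 192.168.172.134 -u hive -phive hive -e 'show tables;'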
# Return to the deployment directory (step 5 left the shell in /usr/local/module/hadoop)
cd /usr/local/module/
tar -zxvf apache-hive-1.2.2-bin.tar.gz
ln -s apache-hive-1.2.2-bin hive
# Copy the MySQL JDBC driver jar
tar -zxvf mysql-connector-java-5.1.49.tar.gz
cp mysql-connector-java-5.1.49/mysql-connector-java-5.1.49-bin.jar hive/lib
cd hive
cp conf/hive-env.sh.template conf/hive-env.sh
vi conf/hive-env.sh
# Edit hive-env.sh and append the following
############################################
export HADOOP_HOME=/usr/local/module/hadoop
export HIVE_CONF_DIR=/usr/local/module/hive/conf
############################################
vi conf/hive-site.xml
# Edit hive-site.xml
############################################
<configuration>
    <property>
        <name>hive.users.in.admin.role</name>
        <value>root</value>
    </property>
    <property>
        <name>hive.security.authorization.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>hive.security.authorization.createtable.owner.grants</name>
        <value>ALL</value>
    </property>
    <property>
        <name>hive.security.authorization.task.factory</name>
        <value>org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl</value>
    </property>
    <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://127.0.0.1:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
        <description>JDBC connect string for a JDBC metastore; note "&amp;" must be XML-escaped</description>
    </property>
    <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
        <description>Driver class name for a JDBC metastore</description>
    </property>
    <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>hive</value>
        <description>username to use against metastore database</description>
    </property>
    <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>hive</value>
        <description>password to use against metastore database</description>
    </property>
    <property>
        <name>hive.metastore.warehouse.dir</name>
        <value>/user/hive/warehouse</value>
        <description>location of default database for the warehouse</description>
    </property>
    <property>
        <name>hive.metastore.schema.verification</name>
        <value>false</value>
    </property>
    <property>
        <name>hive.cli.print.header</name>
        <value>true</value>
    </property>
    <property>
        <name>hive.cli.print.current.db</name>
        <value>true</value>
    </property>
    <property>
        <name>hive.metastore.uris</name>
        <value>thrift://127.0.0.1:9083</value>
    </property>
</configuration>
############################################
# Create Hive's directories in HDFS
hadoop fs -mkdir /tmp
hadoop fs -mkdir -p /user/hive/warehouse
hadoop fs -chmod g+w /tmp
hadoop fs -chmod g+w /user/hive/warehouse
# Initialize the metastore database
schematool -initSchema -dbType mysql
# Start the services
nohup hive --service metastore > metastore.log 2>&1 &
nohup hive --service hiveserver2 > hiveserver2.log 2>&1 &
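# Optional check: the metastore (9083) and HiveServer2 (10000 by default) should be listening, and beeline should connect
ss -lntp | grep -E ':9083|:10000'
beeline -u jdbc:hive2://127.0.0.1:10000 -n root -e 'show databases;'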
# Configure permissions
hive
# the grants below require the admin role (hive.users.in.admin.role=root, set in hive-site.xml)
set role admin;
grant all to user root;
# Grant each database or table to the root user (only granted databases/tables can be found by Qualitis). Note this step!
GRANT ALL ON DATABASE xxxx TO USER root;
GRANT ALL ON TABLE xxxx TO USER root;
# To stop
ps -ef | grep hive
kill -9 <pid>
8. Deploy Spark
# Return to the deployment directory (the relative paths below assume it)
cd /usr/local/module/
tar -zxvf spark-2.4.8-bin-hadoop2.7.tar.gz
ln -s spark-2.4.8-bin-hadoop2.7 spark
# Copy the Hadoop configs
cp hadoop/etc/hadoop/core-site.xml spark/conf/
cp hadoop/etc/hadoop/hdfs-site.xml spark/conf/
# Copy the Hive config
cp hive/conf/hive-site.xml spark/conf/
# Copy the MySQL driver
cp mysql-connector-java-5.1.49/mysql-connector-java-5.1.49-bin.jar spark/jars/
# Edit spark-env.sh (the distribution ships only a template, so copy it first)
cd spark
cp conf/spark-env.sh.template conf/spark-env.sh
vi conf/spark-env.sh
#######################################################
export JAVA_HOME=/usr/local/module/java
export SPARK_MASTER_HOST=localhost
export SPARK_MASTER_WEBUI_PORT=18080
export SPARK_WORKER_WEBUI_PORT=18081
#######################################################
# Start Spark
sh sbin/start-all.sh
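# Optional smoke test: run the bundled SparkPi example, then confirm Spark sees the Hive metastore via the copied hive-site.xml
bin/run-example SparkPi 10
bin/spark-sql -e 'show databases;'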
9. Deploy Linkis
cd /usr/local/module/
tar -zxvf wedatasphere-linkis-1.0.2-combined-package-dist.tar.gz
cd wedatasphere-linkis-1.0.2-combined-package-dist
# Edit the config file
vi config/linkis-env.sh
########################## config/linkis-env.sh begin ####################################
#!/bin/bash
#
# Copyright 2019 WeBank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# description: Starts and stops Server
#
# @name: linkis-env
#
# Modified for Linkis 1.0.0
# SSH_PORT=22
### deploy user
deployUser=root
##Linkis_SERVER_VERSION
LINKIS_SERVER_VERSION=v1
### Specifies the user workspace, which is used to store the user's script files and log files.
### Generally local directory
WORKSPACE_USER_ROOT_PATH=file:///tmp/linkis/ ##file:// required
### User's root hdfs path
HDFS_USER_ROOT_PATH=hdfs:///tmp/linkis ##hdfs:// required
### Path to store started engines and engine logs, must be local
ENGINECONN_ROOT_PATH=/usr/local/module/linkis
ENTRANCE_CONFIG_LOG_PATH=hdfs:///tmp/linkis/
### Path to store job ResultSet:file or hdfs path
RESULT_SET_ROOT_PATH=hdfs:///tmp/linkis ##hdfs:// required
### Provide the DB information of Hive metadata database.
### Attention! If there are special characters like "&", they need to be enclosed in quotation marks.
HIVE_META_URL="jdbc:mysql://192.168.172.134:3306/hive?createDatabaseIfNotExist=true"
HIVE_META_USER="hive"
HIVE_META_PASSWORD="hive"
##YARN REST URL spark engine required
YARN_RESTFUL_URL=http://127.0.0.1:8088
###HADOOP CONF DIR
HADOOP_CONF_DIR=/usr/local/module/hadoop/etc/hadoop
###HIVE CONF DIR
HIVE_CONF_DIR=/usr/local/module/hive/conf
###SPARK CONF DIR
SPARK_CONF_DIR=/usr/local/module/spark/conf
## Engine version conf
#SPARK_VERSION
SPARK_VERSION=2.4.8
##HIVE_VERSION
HIVE_VERSION=1.2.2
#PYTHON_VERSION=python2
################### The install Configuration of all Micro-Services #####################
#
# NOTICE:
# 1. If you just wanna try, the following micro-service configuration can be set without any settings.
# These services will be installed by default on this machine.
# 2. In order to get the most complete enterprise-level features, we strongly recommend that you install
# Linkis in a distributed manner and set the following microservice parameters
#
### EUREKA install information
### You can access it in your browser at the address below:http://${EUREKA_INSTALL_IP}:${EUREKA_PORT}
#EUREKA_INSTALL_IP=127.0.0.1 # Microservices Service Registration Discovery Center
EUREKA_PORT=20303
export EUREKA_PREFER_IP=false
### Gateway install information
#GATEWAY_INSTALL_IP=127.0.0.1
GATEWAY_PORT=9001
### ApplicationManager
#MANAGER_INSTALL_IP=127.0.0.1
MANAGER_PORT=9101
### EngineManager
#ENGINECONNMANAGER_INSTALL_IP=127.0.0.1
ENGINECONNMANAGER_PORT=9102
### EnginePluginServer
#ENGINECONN_PLUGIN_SERVER_INSTALL_IP=127.0.0.1
ENGINECONN_PLUGIN_SERVER_PORT=9103
### LinkisEntrance
#ENTRANCE_INSTALL_IP=127.0.0.1
ENTRANCE_PORT=9104
### publicservice
#PUBLICSERVICE_INSTALL_IP=127.0.0.1
PUBLICSERVICE_PORT=9105
### cs
#CS_INSTALL_IP=127.0.0.1
CS_PORT=9108
########################################################################################
## LDAP is for enterprise authorization, if you just want to have a try, ignore it.
#LDAP_URL=ldap://localhost:1389/
#LDAP_BASEDN=dc=webank,dc=com
#LDAP_USER_NAME_FORMAT=cn=%s@xxx.com,OU=xxx,DC=xxx,DC=com
## java application default jvm memory
export SERVER_HEAP_SIZE="128M"
## The decompression directory and the installation directory must be different
LINKIS_HOME=/usr/local/module/linkis
LINKIS_VERSION=1.0.2
# for install
LINKIS_PUBLIC_MODULE=lib/linkis-commons/public-module
################################ config/linkis-env.sh end ##############################
# Configure db.sh
vi config/db.sh
##########################################################
MYSQL_HOST=192.168.172.134
MYSQL_PORT=3306
MYSQL_DB=linkis
MYSQL_USER=root
MYSQL_PASSWORD=root123
##########################################################
# Install required command-line tools
yum install telnet -y
yum install dos2unix -y
# Install (you may run into missing commands that need to be installed first)
sh bin/install.sh
# Run
cd ../linkis
sh sbin/linkis-start-all.sh
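# Optional check: within a few minutes every microservice should register in Eureka (EUREKA_PORT=20303 per linkis-env.sh)
curl http://127.0.0.1:20303/
# or open http://<server-ip>:20303 in a browser and confirm the instances are listed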
10. Deploy linkis-web
cd /usr/local/module/
mkdir linkis-web
cp wedatasphere-linkis-1.0.2-combined-package-dist/wedatasphere-linkis-web-1.0.2.zip linkis-web/
cd linkis-web
unzip wedatasphere-linkis-web-1.0.2.zip
# Write nginx.conf
vi nginx.conf
############################################################
user root;
worker_processes 1;
events {
    worker_connections 1024;
}
http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 65;
    server {
        listen 8080; # port the console is served on
        server_name localhost;
        #charset koi8-r;
        #access_log /var/log/nginx/host.access.log main;
        location / {
            root /usr/share/nginx/html/; # directory the front-end package was unpacked into
            index index.html index.htm;
        }
        location /api {
            proxy_pass http://172.19.18.189:9001; # ip:port of the linkis-gateway service
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header x_real_ipP $remote_addr;
            proxy_set_header remote_addr $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_http_version 1.1;
            proxy_connect_timeout 4s;
            proxy_read_timeout 600s;
            proxy_send_timeout 12s;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection upgrade;
        }
        #error_page 404 /404.html;
        # redirect server error pages to the static page /50x.html
        #
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /usr/share/nginx/html;
        }
    }
}
############################################################
docker run --restart always --name linkis-web \
-p 18087:8080 \
--privileged=true \
-v /usr/local/module/linkis-web/linkis-website-master:/usr/share/nginx/html:ro \
-v /usr/local/module/linkis-web/nginx.conf:/etc/nginx/nginx.conf \
-d nginx:stable-alpine
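# Optional check: the console should answer on the mapped host port 18087
docker ps | grep linkis-web
curl -I http://127.0.0.1:18087/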
11. Configure the Linkis default YARN queue
# Check firewall status (CentOS 7)
firewall-cmd --state
# Start the firewall
systemctl start firewalld
# Stop the firewall
systemctl stop firewalld
# Disable firewalld at boot
systemctl disable firewalld
# Access the console (18087 is the host port mapped to linkis-web above)
http://172.19.18.189:18087/#/login
# Log in with username root, password root
Parameter configuration - Global settings
YARN queue name [wds.linkis.rm.yarnqueue]: default
Max queue instances [wds.linkis.rm.yarnqueue.instance.max]: 2
Queue CPU usage limit [wds.linkis.rm.yarnqueue.cores.max]: 2
Queue memory usage limit [wds.linkis.rm.yarnqueue.memory.max]: 16g
Global per-engine memory limit [wds.linkis.rm.client.memory.max]: 2g
Global per-engine core limit [wds.linkis.rm.client.core.max]: 2
Global per-engine max concurrency [wds.linkis.rm.instance]: 2
Parameter configuration - IDE
Set all core counts to 2
Set concurrency to 2
12. Deploy Qualitis
cd /usr/local/module/
unzip wedatasphere-qualitis-0.8.0.zip
cd qualitis-0.8.0
vi conf/application-dev.yml
###################################################### adjust the settings below to match your environment
spring:
  datasource:
    username: root
    password: root123
    url: jdbc:mysql://192.168.172.134:3306/qualitis?createDatabaseIfNotExist=true&useSSL=false
    driver-class-name: com.mysql.jdbc.Driver
    type: com.zaxxer.hikari.HikariDataSource
    hikari:
      minimum-idle: 20
      maximum-pool-size: 500
      idle-timeout: 60000
      max-lifetime: 180000
  jpa:
    hibernate:
      ddl-auto: update
    database-platform: org.hibernate.dialect.MySQL5InnoDBDialect
    show-sql: false

restTemplate:
  thread:
    maxTotal: 200 # max thread number
    maxPerRoute: 100 # max concurrent thread per route
  request:
    socketTimeout: 10000 # the max time waiting for response
    connectTimeout: 2000 # the max time waiting for shaking hand
    connectionRequestTimeout: 2000 # the max time waiting for getting connection from connection pool

task:
  persistent:
    type: jdbc
    username: root
    password: root123
    address: jdbc:mysql://192.168.172.134:3306/qualitis?createDatabaseIfNotExist=true&useSSL=false
    tableName: qualitis_application_task_result
  execute:
    limit_thread: 10
    rule_size: 10

timer:
  thread:
    size: 5
  check:
    period: 10000
  lock:
    zk:
      path: /qualitis/tmp/monitor

zk:
  address:
  base_sleep_time: 1000
  max_retries: 3
  session_time_out: 10000
  connection_time_out: 15000
  lock_wait_time: 3

auth:
  unFilterUrls:
    - /qualitis/api/v1/login/local
    - /qualitis/api/v1/logout
    - /qualitis/api/v1/redirect
  uploadUrls:
    - /qualitis/api/v1/projector/rule/batch/upload/*
    - /qualitis/api/v1/projector/project/batch/upload*

linkis:
  api:
    prefix: api/rest_j/v1
    submitJob: entrance/execute
    status: jobhistory/{id}/get
    runningLog: entrance/{id}/log
    finishLog: filesystem/openLog
  meta_data:
    db_path: datasource/dbs
    table_path: datasource/tables
    table_comment: datasource/getTableBaseInfo
    column_path: datasource/columns
    column_info: datasource/getTableFieldsInfo
  spark:
    application:
      name: IDE
  log:
    maskKey: task.persistent.username, task.persistent.password

front_end:
  home_page: http://192.168.172.134:18090/#/Home
  domain_name: http://192.168.172.134:18090
######################################################
vi conf/application.yml
######################################################
spring:
  profiles:
    active: dev
  jersey:
    type: servlet
  http:
    encoding:
      charset: UTF-8
      enabled: true
      force: true
  messages:
    encoding: UTF-8
    basename: i18n/messages

# logging
logging:
  config: classpath:log4j2-${spring.profiles.active}.xml

server:
  port: 18090
  connection-timeout: 6000000 # 6000s (value is in ms)
  # error page
  error:
    whitelabel:
      enabled: false

workflow:
  enable: true
  ha:
    enable: false

system:
  config:
    save_database_pattern: save_database_pattern
######################################################
# Retrieve the initialization SQL
#cd /usr/local/module/qualitis-0.8.0/conf/database
#sz init.sql
# Run init.sql against the qualitis database in MySQL to create the initial schema
# Initialize the data
mysql -u {USERNAME} -p{PASSWORD} -h {IP} --default-character-set=utf8
source conf/database/init.sql
# Make the scripts executable
chmod -R +x bin/*
# Start
sh bin/start.sh
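# Optional check: the service should answer on port 18090 as configured in application.yml (startup can take a moment)
curl -I http://127.0.0.1:18090/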
# Access
http://192.168.172.134:18090/#/Home
# Stop
sh bin/stop.sh
13. Building from source
# Install Gradle 4.9
# Steps: download Gradle 4.9, extract it, and configure the environment variables
# Download the source code
https://github.com/WeBankFinTech/Qualitis/archive/refs/tags/release-0.8.0.zip
# Edit Qualitis-release-0.8.0\Qualitis-release-0.8.0\gradle\wrapper\gradle-wrapper.properties
####################################### 6.6 changed to 4.9
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-4.9-bin.zip
#######################################
# Edit Qualitis-release-0.8.0\Qualitis-release-0.8.0\gradle\dependencies.gradle
# You can change it straight to 0.7.0; the Maven repository only has versions up to 0.7.0
# "dss":"0.7.0",
# If you have built dss 0.9 yourself, changing it to 0.9.0 also works
# "dss":"0.9.0",
# Edit Qualitis-release-0.8.0\Qualitis-release-0.8.0\build.gradle
###################################### comment out this block
processResources {
// filter ReplaceTokens, tokens: [
// "version": project.property("version")
// ]
}
########################################
# Then build from a terminal: gradle clean distZip
# Or click the build (hammer) icon in the IDE to build and debug. Note: set IDEA's file encoding to UTF-8