1台OCP
3台OBServer
服务器要求:
CentOS 8
CPU 4C
内存 14G
磁盘1 100G
磁盘2 100G
这里以ocp为例,其它主机依葫芦画瓢,操作中需要修改计算机名,IP地址。
# Set this machine's hostname (this example: the ocp host; other hosts use their own names)
hostnamectl set-hostname ocp
# Assign a static IP address to NIC ens192
nmcli connection modify ens192 ipv4.addresses 192.168.240.76/20
# Default gateway
nmcli connection modify ens192 ipv4.gateway 192.168.240.1
# DNS servers
nmcli connection modify ens192 ipv4.dns 192.168.250.10,202.115.128.33,202.115.128.34
# Switch the interface to manual (static) addressing
nmcli connection modify ens192 ipv4.method manual
# Disable IPv6
nmcli connection modify ens192 ipv6.method disabled
# Apply the new settings (the connection may drop briefly; reconnect on the new IP)
nmcli connection up ens192
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# IP addresses of the three OBServer hosts, used by the scp/ssh loops in this runbook
ob1=192.168.240.95
ob2=192.168.240.226
ob3=192.168.240.254
# Array holding all three OBServer IPs
OBS=($ob1 $ob2 $ob3)
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export PATH
在里面定义了四个变量:ob1、ob2、ob3、OBS,
分别对应三个 OBServer 的 IP 地址,以及由这三个地址组成的数组。
# Generate an RSA key pair (accept the defaults)
ssh-keygen -t rsa
# Enable passwordless login to this host and the three OBServers.
# NOTE: the original used 'hostname -i' in single quotes, which passed the
# literal string "hostname -i" to ssh-copy-id instead of this host's IP;
# command substitution is required.
ssh-copy-id "$(hostname -i)"
ssh-copy-id "$ob1"
ssh-copy-id "$ob2"
ssh-copy-id "$ob3"
# Turn the second disk into an LVM volume group for OceanBase storage
pvcreate /dev/sdb
vgcreate obvg /dev/sdb
# 20G logical volume for redo logs; all remaining space for data.
# (An aborted "lvcreate obvg -L 20G^C" line from the original transcript was removed —
#  it was interrupted with Ctrl-C and lacked the -n volume name.)
lvcreate -L 20G obvg -n lvredo
lvcreate -l 100%FREE obvg -n lvdata
mkfs.ext4 /dev/obvg/lvdata
mkfs.ext4 /dev/obvg/lvredo
vi /etc/fstab
添加如下两句
# ext4 mount options recommended for OceanBase redo/data volumes
# (noatime/nodiratime/nodelalloc/barrier=0 trade safety features for latency)
/dev/obvg/lvredo /redo ext4 defaults,noatime,nodiratime,nodelalloc,barrier=0 0 0
/dev/obvg/lvdata /data ext4 defaults,noatime,nodiratime,nodelalloc,barrier=0 0 0
执行挂载,将用户设置为admin所拥有,其它服务器也如此配置
# Create the mount points and mount them via the fstab entries added above
mkdir -p /data /redo
mount /data
mount /redo
# Create the admin user that will own the OceanBase directories
useradd admin
echo 'admin:adminPWD123' | chpasswd
# Append the wheel group (-a keeps any existing supplementary groups;
# the original 'usermod admin -G wheel' would replace them)
usermod -aG wheel admin
# Use the ':' owner separator; the 'user.group' form is deprecated
chown -R admin:admin /data /redo
mount -a
# Edit kernel parameters required by OceanBase
# (the original transcript read "vi/etc/sysctl.conf" — missing the space after vi)
vi /etc/sysctl.conf
# --- append the following to /etc/sysctl.conf ---
net.core.somaxconn = 2048
net.core.netdev_max_backlog = 10000
net.core.rmem_default = 16777216
net.core.wmem_default = 16777216
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.ip_local_port_range = 3500 65535
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_syncookies = 0
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_tw_reuse = 1
# net.ipv4.tcp_tw_recycle was removed in Linux 4.12; on CentOS 8 (kernel 4.18)
# 'sysctl -p' errors out on it, so it is left commented here.
# net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_slow_start_after_idle=0
vm.swappiness = 0
vm.min_free_kbytes = 2097152
vm.max_map_count=655360
fs.aio-max-nr=1048576
将如上配置复制到其它主机
# Copy the sysctl settings above to the other hosts.
# (The original copied /etc/security/limits.conf here — before that file was
#  even edited — and never distributed sysctl.conf, although remote
#  'sysctl -p' is run later; it must be /etc/sysctl.conf.)
for ob in "${OBS[@]}"; do scp /etc/sysctl.conf "root@${ob}:/etc/sysctl.conf"; done
# Display the current resource limits for verification
# ('ulimit -a' only shows limits; it does not apply anything)
ulimit -a
for ob in "${OBS[@]}"; do ssh "root@${ob}" "ulimit -a"; done
# Raise per-user resource limits needed by the observer processes
vi /etc/security/limits.conf
# --- append the following to /etc/security/limits.conf ---
* soft nofile 655360
* hard nofile 655360
* soft nproc 655360
* hard nproc 655360
* soft core unlimited
* hard core unlimited
* soft stack unlimited
* hard stack unlimited
复制到其他主机
# Push limits.conf to every OBServer, then load the sysctl settings
# locally and on each remote host.
for ob in "${OBS[@]}"; do scp /etc/security/limits.conf "root@${ob}:/etc/security/limits.conf"; done
sysctl -p
for ob in "${OBS[@]}"; do ssh "root@${ob}" "sysctl -p"; done
# Install chrony on ocp and on every OBServer
yum install -y chrony
for ob in "${OBS[@]}"; do ssh "root@${ob}" "yum -y install chrony"; done
chrony 配置
vi /etc/chrony.conf
将“# Please consider joining the pool (http://www.pool.ntp.org/join.html).”这一节的内容修改为(修改为其它的也可):
# Public NTP sources (Alibaba Cloud pool; any reachable NTP servers will do)
server ntp.cloud.aliyuncs.com minpoll 4 maxpoll 10 iburst
server ntp.aliyun.com minpoll 4 maxpoll 10 iburst
server ntp1.aliyun.com minpoll 4 maxpoll 10 iburst
server ntp1.cloud.aliyuncs.com minpoll 4 maxpoll 10 iburst
server ntp10.cloud.aliyuncs.com minpoll 4 maxpoll 10 iburst
# Use the ocp host as an additional time source
server 192.168.240.76
将时间配置复制到其它主机
# Push the chrony configuration to every OBServer
for ob in "${OBS[@]}"; do scp /etc/chrony.conf "root@${ob}:/etc/chrony.conf"; done
启动时间服务
# Enable and start chronyd locally, then inspect synchronization status
systemctl enable chronyd
systemctl start chronyd
chronyc activity
chronyc sources
chronyc sources -v
chronyc tracking
# Repeat on every OBServer
for ob in ${OBS[*]}; do ssh root@$ob "systemctl enable chronyd"; done
for ob in ${OBS[*]}; do ssh root@$ob "systemctl start chronyd"; done
for ob in ${OBS[*]}; do ssh root@$ob "chronyc activity"; done
for ob in ${OBS[*]}; do ssh root@$ob "chronyc sources"; done
for ob in ${OBS[*]}; do ssh root@$ob "chronyc sources -v"; done
for ob in ${OBS[*]}; do ssh root@$ob "chronyc tracking"; done
# Install net-tools (netstat etc.) everywhere
yum install net-tools -y
for ob in ${OBS[*]}; do ssh root@$ob "yum install net-tools -y"; done
# Disable and stop the firewall on all hosts
systemctl disable firewalld
systemctl stop firewalld
for ob in ${OBS[*]}; do ssh root@$ob "systemctl disable firewalld"; done
for ob in ${OBS[*]}; do ssh root@$ob "systemctl stop firewalld"; done
# Put SELinux into permissive mode for the current boot
# (the permanent change via /etc/selinux/config follows below)
setenforce 0
for ob in ${OBS[*]}; do ssh root@$ob "setenforce 0"; done
编辑 /etc/selinux/config
vi /etc/selinux/config
修改SELINUX=disabled即可
# /etc/selinux/config — disable SELinux permanently (takes effect after reboot)
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of these three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
复制配置
# Push the SELinux config to every OBServer, reboot them, then reboot ocp itself
for ob in "${OBS[@]}"; do scp /etc/selinux/config "root@${ob}:/etc/selinux/config"; done
for ob in "${OBS[@]}"; do ssh "root@${ob}" "reboot"; done
reboot
系统重新启动之后,SELINUX将不会启动
# Register the OceanBase yum repository on ocp and on every OBServer
yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/oceanbase/OceanBase.repo
for ob in "${OBS[@]}"; do ssh "root@${ob}" "yum install -y yum-utils"; done
for ob in "${OBS[@]}"; do ssh "root@${ob}" "yum-config-manager --add-repo https://mirrors.aliyun.com/oceanbase/OceanBase.repo"; done
ocp主机上安装即可,其它主机上无需安装
# Install OBD (OceanBase Deployer); only needed on the ocp host
yum install -y ob-deploy
# Switch to the admin user with a LOGIN shell so ~/.bash_profile is sourced.
# (The original 'su admin' skipped .bash_profile, which is exactly why the
#  ob1/ob2/ob3/OBS variables "did not take effect" and had to be re-typed.)
su - admin
cd
# Re-declare the OBServer addresses for this session (harmless if already set
# by .bash_profile)
ob1=192.168.240.95
ob2=192.168.240.226
ob3=192.168.240.254
OBS=($ob1 $ob2 $ob3)
# Generate admin's key pair
ssh-keygen -t rsa
# Passwordless login (run one line at a time).
# NOTE: single-quoted 'hostname -i' in the original passed a literal string;
# command substitution is required.
ssh-copy-id "$(hostname -i)"
ssh-copy-id "$ob1"
ssh-copy-id "$ob2"
ssh-copy-id "$ob3"
注意修改IP地址(三处修改)、网卡
[admin@obce00 ~]$ vi obce-3zones.yaml
# obce-3zones.yaml — OBD deployment config for a 3-zone OceanBase CE cluster.
# NOTE: indentation below was reconstructed; the transcript had lost all leading
# whitespace, which makes YAML invalid.
# Only need to configure when remote login is required
user:
  username: admin
  # password: your password if need
  key_file: /home/admin/.ssh/id_rsa.pub
  # port: your ssh port, default 22   (keep commented unless a non-default port is used)
  # timeout: ssh connection timeout (second), default 30
oceanbase-ce:
  servers:
    - name: obce01
      # Please don't use hostname, only IP can be supported
      ip: 192.168.240.95
    - name: obce02
      ip: 192.168.240.226
    - name: obce03
      ip: 192.168.240.254
  global:
    # Please set devname as the network adaptor's name whose ip is in the setting of servers.
    # if set servers as "127.0.0.1", please set devname as "lo"
    # if current ip is 192.168.1.10, and the ip's network adaptor's name is "eth0", please use "eth0"
    devname: ens192
    cluster_id: 2
    # please set memory limit to a suitable value which is matching resource.
    memory_limit: 8G # The maximum running memory for an observer
    system_memory: 3G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
    stack_size: 512K
    cpu_count: 16
    cache_wash_threshold: 1G
    __min_full_resource_pool_memory: 268435456
    workers_per_cpu_quota: 10
    schema_history_expire_time: 1d
    # The value of net_thread_count had better be same as cpu's core number.
    net_thread_count: 4
    major_freeze_duty_time: Disable
    minor_freeze_times: 10
    enable_separate_sys_clog: 0
    enable_merge_by_turn: FALSE
    # datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90.
    datafile_size: 50G
    syslog_level: WARN # System log level. The default value is INFO.
    enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
    enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
    max_syslog_file_count: 10 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
    # observer cluster name, consistent with obproxy's cluster_name
    appname: obce-3zones
    root_password: 0EI5N08d # root user password, can be empty
    proxyro_password: uY7Yf8zx # proxyro user password, consistent with obproxy's observer_sys_password, can be empty
  obce01:
    mysql_port: 2881 # External port for OceanBase Database. The default value is 2881.
    rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882.
    # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
    home_path: /home/admin/oceanbase-ce
    # The directory for data storage. The default value is $home_path/store.
    data_dir: /data
    # The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
    redo_dir: /redo
    zone: zone1
  obce02:
    mysql_port: 2881 # External port for OceanBase Database. The default value is 2881.
    rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882.
    # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
    home_path: /home/admin/oceanbase-ce
    # The directory for data storage. The default value is $home_path/store.
    data_dir: /data
    # The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
    redo_dir: /redo
    zone: zone2
  obce03:
    mysql_port: 2881 # External port for OceanBase Database. The default value is 2881.
    rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882.
    # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
    home_path: /home/admin/oceanbase-ce
    # The directory for data storage. The default value is $home_path/store.
    data_dir: /data
    # The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
    redo_dir: /redo
    zone: zone3
obproxy:
  servers:
    - 192.168.240.95
    - 192.168.240.226
    - 192.168.240.254
  # Set dependent components for the component.
  # When the associated configurations are not done, OBD will automatically get these configurations from the dependent components.
  depends:
    - oceanbase-ce
  global:
    listen_port: 2883 # External port. The default value is 2883.
    prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884.
    home_path: /home/admin/obproxy
    # oceanbase root server list
    # format: ip:mysql_port;ip:mysql_port
    rs_list: 192.168.240.95:2881;192.168.240.226:2881;192.168.240.254:2881
    enable_cluster_checkout: false
    # observer cluster name, consistent with oceanbase-ce's appname
    cluster_name: obce-3zones
    obproxy_sys_password: 0MdTv1tm # obproxy sys user password, can be empty
    observer_sys_password: uY7Yf8zx # proxyro user password, consistent with oceanbase-ce's proxyro_password, can be empty
# Deploy the cluster from the config file above (run as admin on ocp)
obd cluster deploy obce-3zones -c obce-3zones.yaml
Initializes observer work home x
[ERROR] fail to init obce01(192.168.240.95) data path: /data is not empty
[ERROR] fail to init obce02(192.168.240.226) data path: /data is not empty
[ERROR] fail to init obce03(192.168.240.254) data path: /data is not empty
新打开一个窗口,使用root登录ocp,执行
# Run as root from ocp (e.g. in a second terminal).
# WARNING: destructive — wipes all OceanBase data and the install directory
# on every OBServer. Only do this to recover from the "/data is not empty" error.
for ob in ${OBS[*]}; do ssh root@$ob "rm -rf /data/*"; done
for ob in ${OBS[*]}; do ssh root@$ob "rm -rf /home/admin/oceanbase-ce/*"; done
清理完成后,再次运行部署指令(obd cluster deploy);部署成功后启动集群:
# Start the deployed cluster (run as admin; the original line carried a
# shell-prompt residue "[admin@obce00 ~]$" fused into the command)
obd cluster start obce-3zones