5.1.0文档地址:https://www.bookstack.cn/read/shardingsphere-5.1.0-zh/ecf18b21ab3f559c.md
注:仅作参考,因为官方还没有5.1.1的文档。目前最新版本:5.1.2
Github地址:https://github.com/apache/shardingsphere
版本更新日志:https://github.com/apache/shardingsphere/blob/master/RELEASE-NOTES.md
Apache ShardingSphere 产品定位为 Database Plus,旨在构建异构数据库上层的标准和生态。 它关注如何充分合理地利用数据库的计算和存储能力,而并非实现一个全新的数据库。ShardingSphere 站在数据库的上层视角,关注他们之间的协作多于数据库自身。
连接、增强 和 可插拔 是 Apache ShardingSphere 的核心概念。
Apache ShardingSphere 由 JDBC、Proxy 和 Sidecar(规划中)这 3 款既能够独立部署,又支持混合部署配合使用的产品组成。 它们均提供标准化的数据水平扩展、分布式事务和分布式治理等功能,可适用于如 Java 同构、异构语言、云原生等各种多样化的应用场景。
ShardingSphere 已于 2020 年 4 月 16 日成为 Apache 软件基金会的顶级项目。
官网:https://shardingsphere.apache.org/document/current/cn/overview/#shardingsphere-jdbc
注:官网内demo示例与版本对应不上,具体可参考上述文档地址
<!-- Dependency versions -->
<properties>
    <springboot.version>2.3.2.RELEASE</springboot.version>
    <mybatis-plus.version>3.4.2</mybatis-plus.version>
    <druid.version>1.2.4</druid.version>
    <sharding-sphere.version>5.1.1</sharding-sphere.version>
</properties>
<!-- MySQL driver; version is managed by the Spring Boot parent BOM -->
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <scope>runtime</scope>
</dependency>
<!-- MyBatis-Plus. Declared exactly once: the original fragment listed this
     artifact twice (once without a version), and duplicate declarations of
     the same groupId:artifactId in one POM make resolution order-dependent. -->
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-boot-starter</artifactId>
    <version>${mybatis-plus.version}</version>
</dependency>
<!-- Do NOT use artifactId druid-spring-boot-starter here -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid</artifactId>
    <version>${druid.version}</version>
</dependency>
<!-- ShardingSphere-JDBC Spring Boot starter -->
<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
    <version>${sharding-sphere.version}</version>
</dependency>
# ShardingSphere 5.1.1 read/write-splitting + table-sharding configuration.
seata:
  # ShardingSphere conflicts with Seata (seata v1.3.0) under Seata's default
  # proxying; switch Seata to JDK dynamic proxies to avoid the clash.
  use-jdk-proxy: true
spring:
  shardingsphere:
    datasource:
      # Data source names (user-defined); 1 write source, 2 read sources.
      names: write,read1,read2
      read1:
        driver-class-name: com.mysql.cj.jdbc.Driver
        maxPoolSize: 100
        minPoolSize: 5
        password: root
        type: com.alibaba.druid.pool.DruidDataSource
        url: jdbc:mysql://127.0.0.1:3306/esp_manage?allowMultiQueries=true&useUnicode=true&characterEncoding=utf8&autoReconnect=true&useSSL=false&serverTimezone=GMT%2B8
        username: root
      read2:
        driver-class-name: com.mysql.cj.jdbc.Driver
        maxPoolSize: 100
        minPoolSize: 5
        password: root
        type: com.alibaba.druid.pool.DruidDataSource
        url: jdbc:mysql://127.0.0.1:3306/esp_manage?allowMultiQueries=true&useUnicode=true&characterEncoding=utf8&autoReconnect=true&useSSL=false&serverTimezone=GMT%2B8
        username: root
      write:
        driver-class-name: com.mysql.cj.jdbc.Driver
        maxPoolSize: 100
        minPoolSize: 5
        password: root
        type: com.alibaba.druid.pool.DruidDataSource
        url: jdbc:mysql://127.0.0.1:3306/esp_manage?allowMultiQueries=true&useUnicode=true&characterEncoding=utf8&autoReconnect=true&useSSL=false&serverTimezone=GMT%2B8
        username: root
    enabled: true
    mode:
      type: memory # in-memory (non-persistent) metadata mode
    props:
      sql-show: true # log the actual routed SQL
    rules:
      # Read/write-splitting rule.
      readwrite-splitting:
        data-sources:
          # Primary/replica group (logical name).
          master-slave:
            props:
              # NOTE(review): auto-aware-data-source-name belongs to the
              # DYNAMIC strategy; with "type: STATIC" below it is likely
              # ignored — confirm against the 5.1.x docs.
              auto-aware-data-source-name: write
              # Replica load-balancing strategy: RANDOM (default is round-robin).
              load-balancer-name: RANDOM
              # Replica data source names; comma-separated.
              read-data-source-names: read1,read2
              # Primary (write) data source name.
              write-data-source-name: write
            type: STATIC
      sharding:
        binding-tables: esp_ot_login_log
        # Default primary-key generation strategy.
        default-key-generate-strategy:
          column: id # primary-key column name
          key-generator-name: id-key # custom generator name (defined below)
        key-generators:
          id-key:
            type: SNOWFLAKE # custom generator uses snowflake ids
        tables:
          # Logic table name.
          esp_ot_login_log:
            # datasource.table-name + year-suffix rule (2022..2024).
            actual-data-nodes: write.esp_ot_login_log_$->{2022..2024}
            table-strategy:
              standard:
                # Custom sharding algorithm name (must NOT contain "_").
                sharding-algorithm-name: login-log-inline
                # Column used as the sharding key.
                sharding-column: create_time
        # Sharding algorithm definitions.
        sharding-algorithms:
          # Custom sharding algorithm name.
          login-log-inline:
            type: CLASS_BASED # delegate sharding to a user-provided class
            props:
              strategy: standard
              # Class implementing the sharding logic.
              algorithmClassName: com.esp.common.config.DateShardingAlgorithm
            # The built-in INTERVAL algorithm below cannot use LocalDateTime
            # as the sharding key; see
            # https://github.com/apache/shardingsphere/issues/8722
            # type: INTERVAL # built-in time-range sharding algorithm
            # props:
            #   datetime-pattern: yyyy-MM-dd HH:mm:ss # timestamp format of the sharding key
            #   datetime-lower: 2022-01-01 00:00:00 # lower bound of the time range
            #   datetime-upper: 2024-01-01 00:00:00 # upper bound of the time range
            #   sharding-suffix-pattern: yyyy # suffix format of the physical table / data source
            #   datetime-interval-amount: 1 # interval; values past it go to the next shard
            #   datetime-interval-unit: YEARS # unit of the interval
package com.esp.common.config;
import com.google.common.collect.Range;
import org.apache.shardingsphere.sharding.api.sharding.standard.PreciseShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.RangeShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.StandardShardingAlgorithm;
import org.springframework.stereotype.Component;
import java.time.LocalDateTime;
import java.util.Collection;
import java.util.LinkedHashSet;
@Component
public class DateShardingAlgorithm implements StandardShardingAlgorithm<LocalDateTime> {

    /**
     * Precise sharding: routes a single {@link LocalDateTime} value to the
     * physical table whose suffix is the value's year, e.g.
     * {@code esp_ot_login_log_2022}.
     *
     * @param availableTables candidate physical table names
     * @param shardingValue   logic table name plus the sharding-key value
     * @return the matching physical table name
     * @throws IllegalArgumentException if no candidate matches the year
     */
    @Override
    public String doSharding(Collection<String> availableTables, PreciseShardingValue<LocalDateTime> shardingValue) {
        LocalDateTime value = shardingValue.getValue();
        // Target table = logic table name + "_" + year of the sharding key.
        String targetTable = shardingValue.getLogicTableName() + "_" + value.getYear();
        for (String table : availableTables) {
            if (table.equals(targetTable)) {
                return table;
            }
        }
        // Fail with context (not a bare exception) so a misconfigured
        // actual-data-nodes range is easy to diagnose.
        throw new IllegalArgumentException(
                "No physical table named " + targetTable + " among " + availableTables);
    }

    /**
     * Range sharding: selects every physical table whose numeric year suffix
     * falls inside the queried range.
     *
     * <p>Unlike the original implementation, this tolerates half-open ranges
     * (e.g. a plain {@code create_time > ?} predicate): Guava's
     * {@code Range#lowerEndpoint()}/{@code upperEndpoint()} throw
     * {@code IllegalStateException} when the corresponding bound is absent,
     * so each bound is checked with {@code hasLowerBound()}/{@code hasUpperBound()}
     * first and an absent bound is treated as unrestricted.
     *
     * @param availableTables candidate physical table names
     * @param shardingValue   logic table name plus the queried value range
     * @return the physical tables overlapping the range (insertion order kept)
     */
    @Override
    public Collection<String> doSharding(Collection<String> availableTables, RangeShardingValue<LocalDateTime> shardingValue) {
        Range<LocalDateTime> valueRange = shardingValue.getValueRange();
        int lowYear = valueRange.hasLowerBound() ? valueRange.lowerEndpoint().getYear() : Integer.MIN_VALUE;
        int highYear = valueRange.hasUpperBound() ? valueRange.upperEndpoint().getYear() : Integer.MAX_VALUE;
        Collection<String> result = new LinkedHashSet<>(availableTables.size());
        for (String table : availableTables) {
            // Parse the year out of the "<name>_<year>" suffix instead of
            // string-matching every candidate year against every table.
            int idx = table.lastIndexOf('_');
            if (idx < 0) {
                continue;
            }
            try {
                int year = Integer.parseInt(table.substring(idx + 1));
                if (year >= lowYear && year <= highYear) {
                    result.add(table);
                }
            } catch (NumberFormatException ignored) {
                // Table without a numeric year suffix — not managed here.
            }
        }
        return result;
    }

    /** No properties to initialise for this CLASS_BASED algorithm. */
    @Override
    public void init() {
    }

    /**
     * CLASS_BASED algorithms are located via {@code algorithmClassName},
     * not by SPI type name, so no type is required here.
     */
    @Override
    public String getType() {
        return null;
    }
}