The debezium-connector I am using: https://repo1.maven.org/maven2/io/debezium/debezium-connector-oracle/1.4.0.final/debezium-connector-oracle-1.4.0.final-plugin.tar.gz
I followed the docker-compose setup from: https://github.com/confluentinc/demo-scene/blob/master/oracle-and-kafka/docker-compose.yml
I got this working for the jdbc-connector using confluent-hub, but I don't know how to do the same for Debezium. Do I just add it to /usr/share/java and run it?
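For reference, this is roughly what I am doing on the host to make the plugin visible to the container (a sketch only; the host directory is the one I mount into kafka-connect below, and the layout of the extracted tarball is my assumption):

# Download the Debezium Oracle connector plugin archive
wget https://repo1.maven.org/maven2/io/debezium/debezium-connector-oracle/1.4.0.final/debezium-connector-oracle-1.4.0.final-plugin.tar.gz

# Extract it into the host directory that is mounted into the kafka-connect
# container at /usr/share/confluent-hub-components/custom, which is already
# listed in CONNECT_PLUGIN_PATH in the compose file below
tar -xzf debezium-connector-oracle-1.4.0.final-plugin.tar.gz \
    -C /dados/packages/confluent-hub/share/confluent-hub-components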
So my docker-compose is:
---
version: '2'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:6.0.1
    hostname: zookeeper
    container_name: zookeeper
    volumes:
      - /dados/persistence/zookeeper/data:/var/lib/zookeeper/data
      - /dados/persistence/zookeeper/log:/var/lib/zookeeper/log
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  kafka:
    image: confluentinc/cp-server:6.0.1
    hostname: broker
    container_name: broker
    volumes:
      - /dados/persistence/broker/data:/var/lib/kafka/data
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "9101:9101"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9101
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:29092
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: 'true'
      CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'

  schema-registry:
    image: confluentinc/cp-schema-registry:6.0.1
    hostname: schema-registry
    container_name: schema-registry
    depends_on:
      - zookeeper
      - kafka
    ports:
      - "8081:8081"
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:2181

  kafka-connect:
    image: cnfldemos/cp-server-connect-datagen:0.4.0-6.0.1
    hostname: connect
    container_name: kafka-connect
    volumes:
      - /dados/packages/confluent-hub/share/confluent-hub-components:/usr/share/confluent-hub-components/custom
      - /dados/persistence/kafka-connect/jars:/etc/kafka-connect/jars
    depends_on:
      - zookeeper
      - kafka
      - schema-registry
    ports:
      - "8083:8083"
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'kafka:29092'
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
      CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_REST_ADVERTISED_HOST_NAME: "kafka-connect"
      CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO"
      CONNECT_LOG4J_APPENDER_STDOUT_LAYOUT_CONVERSIONPATTERN: "[%d] %p %X{connector.context}%m (%c:%L)%n"
      CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR"
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/share/confluent-hub-components/custom"
      LD_LIBRARY_PATH: '/usr/share/java/debezium-connector-oracle/instantclient_19_6/'

  control-center:
    image: confluentinc/cp-enterprise-control-center:6.0.1
    hostname: control-center
    container_name: control-center
    depends_on:
      - kafka
      - schema-registry
      - kafka-connect
      - ksqldb
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: 'kafka:29092'
      CONTROL_CENTER_CONNECT_CLUSTER: 'kafka-connect:8083'
      CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://10.58.0.207:8088"
      CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://10.58.0.207:8088"
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://10.58.0.207:8081"
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
      CONFLUENT_METRICS_TOPIC_REPLICATION: 1
      PORT: 9021

  ksqldb:
    image: confluentinc/cp-ksqldb-server:6.0.1
    hostname: ksqldb
    container_name: ksqldb-server
    depends_on:
      - kafka
      - kafka-connect
    ports:
      - "8088:8088"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      KSQL_LISTENERS: "http://0.0.0.0:8088"
      KSQL_BOOTSTRAP_SERVERS: kafka:29092
      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
      KSQL_KSQL_CONNECT_URL: http://kafka-connect:8083
      KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schema-registry:8081

  ksqldb-cli:
    image: confluentinc/cp-ksqldb-cli:6.0.1
    container_name: ksqldb-cli
    depends_on:
      - kafka
      - kafka-connect
      - ksqldb
    entrypoint: /bin/sh
    tty: true

  ksql-datagen:
    image: confluentinc/ksqldb-examples:6.0.1
    hostname: ksql-datagen
    container_name: ksql-datagen
    depends_on:
      - ksqldb
      - kafka
      - schema-registry
      - kafka-connect
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
                       cub kafka-ready -b broker:29092 1 40 && \
                       echo Waiting for Confluent Schema Registry to be ready... && \
                       cub sr-ready schema-registry 8081 40 && \
                       echo Waiting a few seconds for topic creation to finish... && \
                       sleep 11 && \
                       tail -f /dev/null'"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      STREAMS_BOOTSTRAP_SERVERS: kafka:29092
      STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
      STREAMS_SCHEMA_REGISTRY_PORT: 8081

  rest-proxy:
    image: confluentinc/cp-kafka-rest:6.0.1
    depends_on:
      - kafka
      - schema-registry
    ports:
      - 8082:8082
    hostname: rest-proxy
    container_name: rest-proxy
    environment:
      KAFKA_REST_HOST_NAME: rest-proxy
      KAFKA_REST_BOOTSTRAP_SERVERS: 'kafka:29092'
      KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
      KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
You need to add /etc/kafka-connect/jars to CONNECT_PLUGIN_PATH.
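For example (a sketch only, assuming you extract the Debezium Oracle connector into the host directory /dados/persistence/kafka-connect/jars, which your compose file mounts at /etc/kafka-connect/jars), the environment entry would become CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/share/confluent-hub-components/custom,/etc/kafka-connect/jars". You can then confirm that Kafka Connect picked the plugin up through its REST API:

# Recreate the connect container so it rescans the plugin path
docker-compose up -d --force-recreate kafka-connect

# List the plugins Kafka Connect has loaded; the Debezium Oracle connector
# should show up as io.debezium.connector.oracle.OracleConnector
curl -s http://localhost:8083/connector-plugins | grep -i oracle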