I am creating two Flink SQL tables, one for a CSV filesystem source:
CREATE TABLE son_hsb_source_filesystem_csv_bulk (
  file_name STRING,
  start_time STRING,
  oss_cell_id BIGINT,
  enodeb STRING,
  dl_payload FLOAT,
  rrc_conn_den BIGINT,
  rrc_conn_num BIGINT,
  pm_array_1 STRING
) WITH (
  'connector' = 'filesystem',                       -- Don't change this
  'path' = 'file:///opt/kafka-python-exec/files/',  -- Change the path alone
  'format' = 'csv',                                 -- Don't change this
  'csv.ignore-parse-errors' = 'true',               -- Don't change this
  'csv.allow-comments' = 'true'                     -- Don't change this
);
And a target table, also in Flink SQL:
CREATE TABLE son_hsb_target_kafka_9092_filesystem_bulk_tests (
  file_name STRING,
  start_time STRING,
  oss_cell_id BIGINT,
  enodeb STRING,
  dl_payload FLOAT,
  rrc_conn_den BIGINT,
  rrc_conn_num BIGINT,
  pm_array_1 STRING
) WITH (
  'connector' = 'kafka',                                     -- Don't change this
  'topic' = 'son_hsb_target_kafka_9092_fs_bulk_data_tests',  -- Add any topic name you want
  'scan.startup.mode' = 'earliest-offset',                   -- Don't change this
  'properties.bootstrap.servers' = 'localhost:9092',         -- Don't change this
  'format' = 'json',                                         -- Don't change this
  'json.fail-on-missing-field' = 'false',                    -- Don't change this
  'json.ignore-parse-errors' = 'true'                        -- Don't change this
);
INSERT INTO son_hsb_target_kafka_9092_filesystem_bulk_tests
SELECT file_name, start_time, oss_cell_id, enodeb, dl_payload, rrc_conn_den, rrc_conn_num, pm_array_1
FROM son_hsb_source_filesystem_csv_bulk;
How do I define a streaming job that stays in the RUNNING state and keeps picking up new files? Please advise.
The documentation indicates that, for the streaming filesystem source, this feature is not yet implemented:
"File system sources for streaming are still under development. In the future, the community will add support for common streaming use cases, i.e., partition and directory monitoring."
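From what I can find, newer Flink releases (1.17 and later, as far as I can tell; please verify against the filesystem connector documentation for your version) describe a directory-watching option, 'source.monitor-interval', which should make the filesystem source unbounded and poll the directory for newly added files. A minimal sketch of the source table with that option, assuming such a Flink version is available, would be:

-- Hypothetical sketch: assumes a Flink version whose filesystem connector
-- supports directory watching via 'source.monitor-interval'.
CREATE TABLE son_hsb_source_filesystem_csv_bulk (
  file_name STRING,
  start_time STRING,
  oss_cell_id BIGINT,
  enodeb STRING,
  dl_payload FLOAT,
  rrc_conn_den BIGINT,
  rrc_conn_num BIGINT,
  pm_array_1 STRING
) WITH (
  'connector' = 'filesystem',
  'path' = 'file:///opt/kafka-python-exec/files/',
  'format' = 'csv',
  'csv.ignore-parse-errors' = 'true',
  'csv.allow-comments' = 'true',
  'source.monitor-interval' = '10s'  -- assumed option: check for new files every 10 seconds
);

With continuous monitoring enabled, the INSERT above would presumably stay in the RUNNING state and pick up files as they appear, instead of finishing once the existing files have been read. Is that the recommended approach, or is a different workaround still needed on older versions?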