使用 Flink Table API 连接 Kafka 处理 JSON 类型数据:单层 JSON 的处理比较简单,官方文档或网上都有很多例子;而嵌套 JSON 数据的处理却鲜有介绍。处理嵌套 JSON 数据的关键在于 schema 的定义。
// Set up the streaming execution environment with the Blink planner in streaming mode.
StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
EnvironmentSettings settings =
        EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(streamEnv, settings);

// Describe the Kafka source: "universal" connector version, topic "w001",
// consumer group "w01", reading from the latest offsets.
Kafka kafkaDescriptor = new Kafka()
        .version("universal")
        .topic("w001")
        .property("zookeeper.connect", "192.168.0.160:3181")
        .property("bootstrap.servers", "192.168.0.160:9092,192.168.0.161:9092,192.168.0.162:9092")
        .property("group.id", "w01")
        .startFromLatest();

// Nested JSON is handled entirely through the schema definition: the "nested" field
// is declared as a named ROW whose sub-fields mirror the inner JSON object.
Schema tableSchema = new Schema()
        .field("id", Types.BIG_DEC)
        .field("name", Types.STRING)
        .field("timestamp", Types.SQL_TIMESTAMP)
        .field("nested", Types.ROW_NAMED(
                new String[]{"booleanField", "decimalField"},
                new TypeInformation[]{
                        Types.BOOLEAN,
                        Types.BIG_DEC
                }));

// Register the Kafka-backed table "test"; the JSON format derives its field
// structure from the schema declared above.
tableEnvironment.connect(kafkaDescriptor)
        .withFormat(new Json().deriveSchema())
        .withSchema(tableSchema)
        .inAppendMode()
        .registerTableSource("test");

// Access the nested attribute with dot notation in SQL, then print the append stream.
Table result = tableEnvironment.sqlQuery("select name,nested.booleanField from test");
tableEnvironment.toAppendStream(result, Row.class).print();
tableEnvironment.execute("streaming");
来源:CSDN
作者:kevin_wf
链接:https://blog.csdn.net/wflh323/article/details/103563908