文章部分参照其他网络资源
- kafka在win10下的使用
安装请参照网上其他教程
启动kafka
bin\windows\kafka-server-start.bat config\server.properties
创建话题
bin\windows\kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic hellow
启动生产者
bin\windows\kafka-console-producer.bat --broker-list localhost:9092 --topic hellow
启动消费者
bin\windows\kafka-console-consumer.bat --bootstrap-server 127.0.0.1:9092 --topic hellow --from-beginning
- Java代码
引入的pom文件依赖如下
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>2.2.5.RELEASE</version>
<exclusions>
<exclusion>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>2.0.1</version>
</dependency>
创建话题
Properties pros = new Properties();
pros.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
AdminClient adminClient = AdminClient.create(pros);
CreateTopicsOptions options = new CreateTopicsOptions();
Integer numPartitions = 1;
Short replicationFactor = 1;
NewTopic newTopic = new NewTopic(topicName, numPartitions, replicationFactor);
CreateTopicsResult result = adminClient.createTopics(ImmutableList.of(newTopic), options);
for(Map.Entry<String,KafkaFuture<Void>> e : result.values().entrySet()){
KafkaFuture<Void> future= e.getValue();
future.get();
boolean success=!future.isCompletedExceptionally();
if(success){
System.out.println("已成功创建Kafka topic "+topicName+" ,分区 "+numPartitions+" ,副本 "+replicationFactor);
}
}
adminClient.close();
删除话题
Properties pros = new Properties();
pros.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
AdminClient adminClient = AdminClient.create(pros);
DeleteTopicsOptions options=new DeleteTopicsOptions();
DeleteTopicsResult result = adminClient.deleteTopics(Arrays.asList(new String[]{topic}), options);
Boolean delFlag = result.all().isDone();
System.out.println(delFlag);
adminClient.close();
查询全部话题
Properties pros = new Properties();
pros.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
AdminClient adminClient = AdminClient.create(pros);
ListTopicsResult result = adminClient.listTopics();
List<String> strs = new ArrayList<>();
try {
Set<String> topics= result.names().get();
Iterator<String> it = topics.iterator();
while (it.hasNext()){
String name = it.next();
System.out.println(name);
strs.add(name);
}
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
adminClient.close();
查看话题配置
Properties pros = new Properties();
pros.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
AdminClient adminClient = AdminClient.create(pros);
DescribeTopicsResult ret = adminClient.describeTopics(Arrays.asList(topicName,"__consumer_offsets"));
Map<String, TopicDescription> topics = ret.all().get();
for(Map.Entry<String, TopicDescription> entry : topics.entrySet()) {
System.out.println(entry.getKey() +" ===> "+ entry.getValue());
}
adminClient.close();
更改话题配置
String topicName = "topicName";
Properties properties = new Properties();
properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
AdminClient adminClient = AdminClient.create(properties);
List<ConfigEntry> configEntrys = new ArrayList<>();
configEntrys.add(new ConfigEntry("cleanup.policy","compact"));
Config topicConfig =new Config(configEntrys);
Map<ConfigResource, Config> configs = new HashMap<>();
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, topicName), topicConfig);
AlterConfigsResult configsResult = adminClient.alterConfigs(configs);
KafkaFuture<Void> future= configsResult.all();
future.get();
boolean success=!future.isCompletedExceptionally();
if(success){
//成功修改配置
}
adminClient.close();
yml文件配置
spring:
kafka:
bootstrap-servers: http://127.0.0.1:9092
producer:
retries: 0
batch-size: 16384
buffer-memory: 33554432
key-serializer: org.apache.kafka.common.serialization.StringSerializer
value-serializer: org.apache.kafka.common.serialization.StringSerializer
consumer:
group-id: test-hello-group
auto-offset-reset: earliest
enable-auto-commit: true
auto-commit-interval: 20000
key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
项目对应的开源github地址: kafkaDemo
来源:CSDN
作者:阿江
链接:https://blog.csdn.net/u010542488/article/details/104134597