Hadoop 3 Java Client & Kerberos Authentication


The Hadoop cluster was upgraded to Hadoop 3 and now requires Kerberos authentication. Hadoop 3 consolidated its code packages, so the client needs the following jar dependencies:

<dependency>
	<groupId>org.apache.hadoop</groupId>
	<artifactId>hadoop-hdfs</artifactId>
	<version>3.1.1</version>
</dependency>
<dependency>
	<groupId>org.apache.hadoop</groupId>
	<artifactId>hadoop-common</artifactId>
	<version>3.1.1</version>
</dependency>
<dependency>
	<groupId>org.apache.hadoop</groupId>
	<artifactId>hadoop-client</artifactId>
	<version>3.1.1</version>
</dependency>

Authentication method

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

Configuration config = new Configuration();
config.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
config.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");

// Begin Kerberos authentication
String krb5File = "/etc/krb5.conf";
System.setProperty("java.security.krb5.conf", krb5File);
config.set("hadoop.security.authentication", "kerberos");

// Principal and keytab used for the login (placeholder values; substitute your own)
String kerUser = "hdfs@EXAMPLE.COM";
String keyPath = "/etc/security/keytabs/hdfs.keytab";

// Add the cluster's default configuration files (optional)
config.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
config.addResource(new Path("/etc/hadoop/conf/core-site.xml"));

UserGroupInformation.setConfiguration(config);
try {
	UserGroupInformation.loginUserFromKeytab(kerUser, keyPath);
} catch (IOException e) {
	e.printStackTrace();
}
// End Kerberos authentication
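
// Optional, for long-running clients: the Kerberos TGT expires after its ticket
// lifetime, so renew it from the keytab periodically before issuing HDFS calls.
// A minimal sketch using the UserGroupInformation relogin API:
try {
	UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
} catch (IOException e) {
	e.printStackTrace();
}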

URI uri = new URI("hdfs://<nameservice or NameNode address>");
/**
 * Alternatively, specify the nameservice and NameNode nodes in code
 * (note: nn1 and nn2 should each point at their own NameNode address):
 * config.set("fs.defaultFS", "hdfs://yiie");
 * config.set("dfs.nameservices", "yiie");
 * config.set("dfs.ha.namenodes.yiie", "nn1,nn2");
 * config.set("dfs.namenode.rpc-address.yiie.nn1", "192.168.116.116:8020");
 * config.set("dfs.namenode.rpc-address.yiie.nn2", "192.168.116.116:8020");
 * config.set("dfs.client.failover.proxy.provider.yiie", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
 * config.setBoolean("dfs.ha.automatic-failover.enabled", true);
 */
FileSystem fs = FileSystem.get(uri, config);
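
With authentication in place, the returned FileSystem behaves like any other HDFS client handle. A minimal usage sketch (listing the root directory "/" is just an example):

// List the contents of the HDFS root directory
for (FileStatus status : fs.listStatus(new Path("/"))) {
	System.out.println(status.getPath());
}
fs.close();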

 

Reference blog: https://blog.csdn.net/weixin_43989001/article/details/86309512
