The Hadoop cluster has been upgraded to Hadoop 3 and requires Kerberos authentication. Hadoop 3 consolidated its code packages, and the referenced jar dependencies are as follows:
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>3.1.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>3.1.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <version>3.1.1</version>
</dependency>
Authentication method
Configuration config = new Configuration();
config.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
config.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
// Begin Kerberos authentication
String krb5File = "/etc/krb5.conf";
System.setProperty("java.security.krb5.conf", krb5File);
config.set("hadoop.security.authentication", "kerberos");
// Add the cluster's default configuration files (optional)
config.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
config.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
UserGroupInformation.setConfiguration(config);
try {
    // kerUser is the Kerberos principal; keyPath is the path to its keytab file
    UserGroupInformation.loginUserFromKeytab(kerUser, keyPath);
} catch (IOException e) {
    e.printStackTrace();
}
// End Kerberos authentication
URI uri = new URI("hdfs://<nameservice name or NameNode IP address>");
/**
 * To specify the nameservice and NameNode nodes in code:
 * config.set("fs.defaultFS", "hdfs://yiie");
 * config.set("dfs.nameservices", "yiie");
 * config.set("dfs.ha.namenodes.yiie", "nn1,nn2");
 * config.set("dfs.namenode.rpc-address.yiie.nn1", "192.168.116.116:8020");
 * config.set("dfs.namenode.rpc-address.yiie.nn2", "192.168.116.116:8020"); // nn2 should point at the second NameNode's address
 * config.set("dfs.client.failover.proxy.provider.yiie", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
 * config.setBoolean("dfs.ha.automatic-failover.enabled", true);
 */
FileSystem fs = FileSystem.get(uri, config);
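For completeness, here is a minimal, self-contained sketch that ties the steps above together into one runnable class. The principal, keytab path, and nameservice URI are placeholder values (the nameservice name yiie is borrowed from the commented HA example above) and must be replaced with those of your own cluster; listing the root directory at the end is just a smoke test.

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class KerberosHdfsClient {

    public static void main(String[] args) throws IOException, URISyntaxException {
        // Placeholder principal, keytab, and nameservice; replace with your cluster's values.
        String kerUser = "hdfs-user@EXAMPLE.COM";
        String keyPath = "/path/to/hdfs-user.keytab";
        String defaultFs = "hdfs://yiie";

        Configuration config = new Configuration();
        config.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        config.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");

        // Point the JVM at the Kerberos configuration and enable Kerberos in Hadoop.
        System.setProperty("java.security.krb5.conf", "/etc/krb5.conf");
        config.set("hadoop.security.authentication", "kerberos");

        // Optionally pick up the cluster's own hdfs-site.xml / core-site.xml.
        config.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
        config.addResource(new Path("/etc/hadoop/conf/core-site.xml"));

        // Log in from the keytab before touching HDFS.
        UserGroupInformation.setConfiguration(config);
        UserGroupInformation.loginUserFromKeytab(kerUser, keyPath);

        // Open the FileSystem and list the root directory as a smoke test.
        try (FileSystem fs = FileSystem.get(new URI(defaultFs), config)) {
            for (FileStatus status : fs.listStatus(new Path("/"))) {
                System.out.println(status.getPath());
            }
        }
    }
}

Closing the FileSystem in a try-with-resources block suits a short-lived example; FileSystem.get returns a cached instance, so long-lived applications usually keep it open instead.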
Reference blog: https://blog.csdn.net/weixin_43989001/article/details/86309512
Source: oschina
Link: https://my.oschina.net/wangzonghui/blog/4283240