1、主机初始化
# --- 1. Host initialization ---
# Run the matching hostnamectl command on each node (one command per host):
hostnamectl set-hostname hbase-master
hostnamectl set-hostname hbase-node1
hostnamectl set-hostname hbase-node2
# Map the cluster hostnames on every node.
cat <<EOF >> /etc/hosts
192.168.2.124 hbase-master
192.168.2.125 hbase-node1
192.168.2.126 hbase-node2
EOF
# Configure the yum repository (Aliyun mirror).
cd /etc/yum.repos.d/
rename repo repo.bak *
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all && yum makecache
# Shell profile tweaks: colored prompt, vim alias, larger timestamped history.
cat <<EOF | tee /etc/profile.d/my.sh
PS1='\[\e[1;32m\][\u@\h \w]\$ \[\e[0m\]'
alias vi="vim"
HISTSIZE=10000
HISTTIMEFORMAT="%F %T "
EOF
source /etc/profile.d/my.sh
# Install JDK 8 (OpenJDK, with devel tools).
yum install -y java-1.8.0-openjdk-devel.x86_64
# Configure the Java environment. The heredoc delimiter is unquoted, so any
# '$' that must survive into the written file is escaped as '\$'
# (FIX: the original left $PATH unescaped, freezing PATH at write time).
cat <<EOF | sudo tee /etc/profile.d/hbase-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
export CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export PATH=\$PATH:\$JAVA_HOME/bin
EOF
# FIX: the file written above is hbase-env.sh — the original sourced
# a non-existent /etc/profile.d/java.sh.
source /etc/profile.d/hbase-env.sh
# Passwordless SSH (key-based auth) for the current user.
ssh-keygen -t rsa -P ""
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
2、部署hadoop
# --- 2. Deploy Hadoop ---
# Download and unpack Hadoop 3.1.2.
wget http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-3.1.2/hadoop-3.1.2.tar.gz
tar -xzvf hadoop-3.1.2.tar.gz
# FIX: remove the archive that was actually downloaded
# (the original tried to delete hadoop-3.1.2-src.tar.gz, which does not exist here).
rm hadoop-3.1.2.tar.gz
mv hadoop-3.1.2/ /usr/local/
# Environment variables. The heredoc delimiter is unquoted, so every '$'
# meant to survive into the written file is escaped as '\$'
# (FIX: the original left HADOOP_OPTS unescaped, so both variables expanded
# to empty strings at write time; also fixed the 'tools.ja' typo and the
# stray double colon in PATH).
cat <<EOF | sudo tee /etc/profile.d/hbase-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
export CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export HADOOP_HOME=/usr/local/hadoop-3.1.2
export HADOOP_HDFS_HOME=\$HADOOP_HOME
export HADOOP_MAPRED_HOME=\$HADOOP_HOME
export YARN_HOME=\$HADOOP_HOME
export HADOOP_COMMON_HOME=\$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=\$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=\$HADOOP_HOME/lib:\$HADOOP_COMMON_LIB_NATIVE_DIR"
export PATH=\$PATH:\$JAVA_HOME/bin:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin
EOF
source /etc/profile.d/hbase-env.sh
# Verify the installation.
hadoop version
# Create the storage/pid/tmp directories referenced by the XML configs below.
mkdir -p /hadoop/hdfs/{name,data,pid,tmp}
# Edit the Hadoop configuration files: append the export lines below
# to hadoop-env.sh.
vi /usr/local/hadoop-3.1.2/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64/
export HADOOP_PID_DIR=/hadoop/hdfs/pid
# The *_USER variables declare which user may run each daemon
# (needed here because the daemons are started as root).
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
# core-site.xml — default filesystem URI and temp directory.
vi /usr/local/hadoop-3.1.2/etc/hadoop/core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<!-- no port given, so HDFS listens on its default RPC port -->
<value>hdfs://hbase-master/</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/hadoop/hdfs/tmp</value>
</property>
</configuration>
# hdfs-site.xml — NameNode/DataNode storage paths and replication factor.
vi /usr/local/hadoop-3.1.2/etc/hadoop/hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/hadoop/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///hadoop/hdfs/data</value>
</property>
<property>
<!-- replication of 1: suitable for this small test cluster only -->
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
# mapred-site.xml — run MapReduce on YARN; point MR jobs at the Hadoop home.
vi /usr/local/hadoop-3.1.2/etc/hadoop/mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>yarn.app.mapreduce.am.env</name>
<value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.1.2</value>
</property>
<property>
<name>mapreduce.map.env</name>
<value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.1.2</value>
</property>
<property>
<name>mapreduce.reduce.env</name>
<value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.1.2</value>
</property>
</configuration>
# yarn-site.xml — ResourceManager host and shuffle service.
vi /usr/local/hadoop-3.1.2/etc/hadoop/yarn-site.xml
<configuration>
<!-- 指定ResourceManager的地址 -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hbase-master</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
# workers — hosts that run DataNode/NodeManager daemons.
vi /usr/local/hadoop-3.1.2/etc/hadoop/workers
hbase-node1
hbase-node2
# Format HDFS (run once, on the NameNode host hbase-master only).
hdfs namenode -format
# Start / stop all Hadoop daemons.
start-all.sh
stop-all.sh
# HDFS (NameNode) web UI:
http://192.168.2.124:9870
# YARN application (ResourceManager) web UI:
http://192.168.2.124:8088
# --- 3. Deploy ZooKeeper ---
# Download and unpack ZooKeeper 3.5.5.
wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/stable/apache-zookeeper-3.5.5-bin.tar.gz
# FIX: the original moved the directory without ever extracting the archive.
tar -xzf apache-zookeeper-3.5.5-bin.tar.gz
mv apache-zookeeper-3.5.5-bin/ /usr/local/zookeeper-3.5.5/
cd /usr/local/zookeeper-3.5.5/conf
cp zoo_sample.cfg zoo.cfg
# zoo.cfg — ensemble settings; set/append the lines below:
vi zoo.cfg
dataDir=/hadoop/zookeeper/data
dataLogDir=/hadoop/zookeeper/logs
initLimit=10
syncLimit=5
server.1=hbase-master:2888:3888
server.2=hbase-node1:2888:3888
server.3=hbase-node2:2888:3888
# Create the data/log directories before writing myid.
mkdir -p /hadoop/zookeeper/{data,logs}
# myid holds a single number matching this host's server.N line in zoo.cfg:
vi /hadoop/zookeeper/data/myid
1 #id on hbase-master (server.1)
2 #id on hbase-node1 (server.2)
3 #id on hbase-node2 (server.3)
# Environment variables: rewrite /etc/profile.d/hbase-env.sh with ZooKeeper
# added. Unquoted heredoc: every '$' meant for the written file is escaped
# as '\$' (FIX: the original left HADOOP_OPTS unescaped so both variables
# expanded to empty at write time; also fixed the 'tools.ja' typo and the
# stray double colon in PATH).
cat <<EOF | sudo tee /etc/profile.d/hbase-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
export CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export HADOOP_HOME=/usr/local/hadoop-3.1.2
export HADOOP_HDFS_HOME=\$HADOOP_HOME
export HADOOP_MAPRED_HOME=\$HADOOP_HOME
export YARN_HOME=\$HADOOP_HOME
export HADOOP_COMMON_HOME=\$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=\$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=\$HADOOP_HOME/lib:\$HADOOP_COMMON_LIB_NATIVE_DIR"
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.5.5
export PATH=\$PATH:\$JAVA_HOME/bin:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin:\$ZOOKEEPER_HOME/bin
EOF
source /etc/profile.d/hbase-env.sh
# Start / stop the ZooKeeper service (run on every ensemble node).
zkServer.sh start
zkServer.sh stop
# Create the znode that HBase will use.
zkCli.sh
create /hbase myhbase
# Check service status.
zkServer.sh status
# Debugging: run the server in the foreground.
zkServer.sh start-foreground
# Open a client session against a remote node.
zkCli.sh -server 192.168.2.125:2181
4、部署hbase
# Download and unpack HBase 1.4.10.
wget https://mirrors.tuna.tsinghua.edu.cn/apache/hbase/stable/hbase-1.4.10-bin.tar.gz
tar -zxf hbase-1.4.10-bin.tar.gz
sudo mv hbase-1.4.10 /usr/local/
# Environment variables: rewrite /etc/profile.d/hbase-env.sh with HBase
# added. Unquoted heredoc: every '$' meant for the written file is escaped
# as '\$' (FIX: the original left HADOOP_OPTS unescaped so both variables
# expanded to empty at write time; also fixed the 'tools.ja' typo and the
# stray double colon in PATH).
cat <<EOF | sudo tee /etc/profile.d/hbase-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
export CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export HADOOP_HOME=/usr/local/hadoop-3.1.2
export HADOOP_HDFS_HOME=\$HADOOP_HOME
export HADOOP_MAPRED_HOME=\$HADOOP_HOME
export YARN_HOME=\$HADOOP_HOME
export HADOOP_COMMON_HOME=\$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=\$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=\$HADOOP_HOME/lib:\$HADOOP_COMMON_LIB_NATIVE_DIR"
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.5.5
export HBASE_HOME=/usr/local/hbase-1.4.10
export PATH=\$PATH:\$JAVA_HOME/bin:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin:\$ZOOKEEPER_HOME/bin:\$HBASE_HOME/bin
EOF
source /etc/profile.d/hbase-env.sh
# hbase-env.sh — append the lines below:
vi /usr/local/hbase-1.4.10/conf/hbase-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
# Use the external ZooKeeper ensemble, not the one bundled with HBase.
export HBASE_MANAGES_ZK=false
export HBASE_PID_DIR=/hadoop/hbase/pids
#HBASE_CLASSPATH=/usr/local/hadoop/etc/hadoop
#sudo mkdir -p /hadoop/HBase/HFile
#sudo chown -R hadoop:hadoop /hadoop/
# regionservers — list the worker hosts:
vi /usr/local/hbase-1.4.10/conf/regionservers
hbase-node1
hbase-node2
mkdir /home/zk_data
# PID directory referenced by HBASE_PID_DIR above.
mkdir -p /hadoop/hbase/pids
# hbase-site.xml — cluster configuration:
vi /usr/local/hbase-1.4.10/conf/hbase-site.xml
<configuration>
<property>
<name>hbase.rootdir</name>
<!-- FIX: must match fs.defaultFS from core-site.xml (hdfs://hbase-master/,
     i.e. the default NameNode RPC port). The original pointed at port 9000,
     which this cluster's core-site.xml never configures. -->
<value>hdfs://hbase-master/hbase</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/hadoop/zookeeper</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>hbase-master,hbase-node1,hbase-node2</value>
</property>
<property>
<name>hbase.master</name>
<!-- FIX: host:port only — this is not an HDFS URL -->
<value>hbase-master:60000</value>
</property>
</configuration>
# NOTE: the duplicate regionservers edit was removed — the file was already
# populated with hbase-node1/hbase-node2 in the previous step.
# Sanity check via the HBase shell.
hbase shell
status
# Full start ordering: Hadoop first, then ZooKeeper, then HBase;
# stop in the reverse order.
start-all.sh
zkServer.sh start
start-hbase.sh
stop-hbase.sh
zkServer.sh stop
stop-all.sh
来源:博客园
作者:BicycleBoy
链接:https://www.cnblogs.com/fourw/p/11567610.html