千家信息网

hadoop集群搭建(一)HDFS的namenode的HA搭建

发表于:2025-12-03 作者:千家信息网编辑
千家信息网最后更新 2025年12月03日,HDFS的namenode的HA搭建,准备好机器hadoop01 IP:192.168.216.203 GATEWAY:192.168.216.2hadoop02 IP:192.168.216.204
千家信息网最后更新 2025年12月03日hadoop集群搭建(一)HDFS的namenode的HA搭建

HDFS的namenode的HA搭建,准备好机器

hadoop01 IP:192.168.216.203 GATEWAY:192.168.216.2

hadoop02 IP:192.168.216.204 GATEWAY:192.168.216.2

hadoop03 IP:192.168.216.205 GATEWAY:192.168.216.2

配置网卡

[root@hadoop01 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0

DEVICE=eth0

TYPE=Ethernet

HWADDR=00:0C:29:6B:CD:B3 网卡MAC地址

ONBOOT=yes yes表示开机启动

NM_CONTROLLED=yes

BOOTPROTO=none

IPADDR=192.168.216.203 IP地址

PREFIX=24

GATEWAY=192.168.216.2 网关

DNS1=8.8.8.8 域名解析服务器地址一

DNS2=192.168.10.254 域名解析服务器地址 域名解析服务器地址二

DEFROUTE=yes

IPV4_FAILURE_FATAL=yes

IPV6INIT=no

NAME="System eth0"


安装java JDK 并配置环境变量

[root@hadoop01 jdk1.8.0_152]# vim /etc/profile

#my setting

export JAVA_HOME=/usr/local/jdk1.8.0_152/

export PATH=$PATH:$JAVA_HOME/bin:



配置hadoop01/hadoop02/hadoop03之间互相ssh免密登陆



[root@hadoop01 hadoop-2.7.1]# vim ./etc/hadoop/hadoop-env.sh

# The java implementation to use.

export JAVA_HOME=/usr/local/jdk1.8.0_152/


[root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/core-site.xml

fs.defaultFS

hdfs://qian

ha.zookeeper.quorum

hadoop01:2181,hadoop02:2181,hadoop03:2181


[root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/hdfs-site.xml

dfs.nameservices

qian

dfs.ha.namenodes.qian

nn1,nn2

dfs.namenode.rpc-address.qian.nn1

hadoop01:9000

dfs.namenode.rpc-address.qian.nn2

hadoop02:9000

dfs.namenode.http-address.qian.nn1

hadoop01:50070

dfs.namenode.http-address.qian.nn2

hadoop02:50070

dfs.namenode.shared.edits.dir

qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485/qian

dfs.journalnode.edits.dir

/home/hadata/journalnode/data

dfs.ha.automatic-failover.enabled

true

dfs.client.failover.proxy.provider.qian

org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider

dfs.ha.fencing.methods

sshfence

dfs.ha.fencing.ssh.private-key-files

/root/.ssh/id_rsa

dfs.ha.fencing.ssh.connect-timeout

30000

dfs.namenode.name.dir

/home/hadata/dfs/name

dfs.datanode.data.dir

/home/hadata/dfs/data

dfs.blocksize

134217728

dfs.permissions.enabled

false

dfs.replication

3


[root@hadoop01 ~]# vim /usr/local/hadoop-2.7.1/etc/hadoop/slaves

hadoop01

hadoop02

hadoop03


安装并配置zookeeper

[root@hadoop01 zookeeper-3.4.10]# tar -zxvf /home/zookeeper-3.4.10.tar.gz -C /usr/local/

[root@hadoop01 zookeeper-3.4.10]# cp ./conf/zoo_sample.cfg ./conf/zoo.cfg

# The number of milliseconds of each tick

tickTime=2000

# The number of ticks that the initial

# synchronization phase can take

initLimit=5

# The number of ticks that can pass between

# sending a request and getting an acknowledgement

syncLimit=2

# the directory where the snapshot is stored.

# do not use /tmp for storage, /tmp here is just

# example sakes.

dataDir=/home/zookeeperdata

# the port at which the clients will connect

clientPort=2181

server.1=hadoop01:2888:3888

server.2=hadoop02:2888:3888

server.3=hadoop03:2888:3888


[root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/zookeeper-3.4.10 hadoop02:/usr/local/

[root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/zookeeper-3.4.10 hadoop03:/usr/local/


配置三台机器的环境变量

[root@hadoop01 zookeeper-3.4.10]# vim /etc/profile

#my setting

export JAVA_HOME=/usr/local/jdk1.8.0_152/

export HADOOP_HOME=/usr/local/hadoop-2.7.1/

export ZK_HOME=/usr/local/zookeeper-3.4.10/

export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZK_HOME/bin:


[root@hadoop01 zookeeper-3.4.10]# scp -r /etc/profile hadoop02:/etc

profile

[root@hadoop01 zookeeper-3.4.10]# scp -r /etc/profile hadoop03:/etc

profile


[root@hadoop01 ~]# source /etc/profile

[root@hadoop02 ~]# source /etc/profile

[root@hadoop03 ~]# source /etc/profile



[root@hadoop01 zookeeper-3.4.10]# mkdir /home/zookeeperdata

[root@hadoop01 zookeeper-3.4.10]# vim /home/zookeeperdata/myid myid文件里输入 1

1

[root@hadoop02 ~]# mkdir /home/zookeeperdata

[root@hadoop02 ~]# vim /home/zookeeperdata/myid myid文件里输入 2

2

[root@hadoop03 ~]# mkdir /home/zookeeperdata

[root@hadoop03 ~]# vim /home/zookeeperdata/myid myid文件里输入 3

3


[root@hadoop01 zookeeper-3.4.10]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop02 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop03 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: leader


[root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/hadoop-2.7.1/ hadoop02:/usr/local/

[root@hadoop01 zookeeper-3.4.10]# scp -r /usr/local/hadoop-2.7.1/ hadoop03:/usr/local/


[root@hadoop01 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode

[root@hadoop02 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode

[root@hadoop03 zookeeper-3.4.10]# hadoop-daemon.sh start journalnode


[root@hadoop01 zookeeper-3.4.10]# hadoop namenode -format

[root@hadoop01 zookeeper-3.4.10]# hadoop-daemon.sh start namenode

starting namenode, logging to /usr/local/hadoop-2.7.1/logs/hadoop-root-namenode-hadoop01.out


同步已启动的namenode的元数据到未启动的namenode

[root@hadoop02 ~]# hdfs namenode -bootstrapStandby


确认zookeeper集群是否启动

[root@hadoop01 zookeeper-3.4.10]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop02 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: follower

[root@hadoop03 ~]# zkServer.sh status

ZooKeeper JMX enabled by default

Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg

Mode: leader

[root@hadoop01 zookeeper-3.4.10]# hdfs zkfc -formatZK

.

.

.

.

....INFO ha.ActiveStandbyElector: Successfully created /hadoop-ha/qian in ZK.

.

.

.

[root@hadoop03 ~]# zkCli.sh

WatchedEvent state:SyncConnected type:None path:null

[zk: localhost:2181(CONNECTED) 0] ls /

[zookeeper, hadoop-ha]

[zk: localhost:2181(CONNECTED) 1] ls /hadoop-ha

[qian]

[zk: localhost:2181(CONNECTED) 2] ls /hadoop-ha/qian

[]

注意:退出zkCli,输入quit


[root@hadoop01 zookeeper-3.4.10]# start-dfs.sh

[root@hadoop01 zookeeper-3.4.10]# jps

3281 JournalNode

4433 Jps

3475 NameNode

4068 DataNode

3110 QuorumPeerMain

4367 DFSZKFailoverController

[root@hadoop02 ~]# jps

3489 DataNode

3715 Jps

2970 QuorumPeerMain

3162 JournalNode

3646 DFSZKFailoverController

3423 NameNode

[root@hadoop03 ~]# zkCli.sh

zkCli.sh

WATCHER::

WatchedEvent state:SyncConnected type:None path:null

[zk: localhost:2181(CONNECTED) 4] ls /hadoop-ha/qian

[ActiveBreadCrumb, ActiveStandbyElectorLock]

[zk: localhost:2181(CONNECTED) 2] get /hadoop-ha/qian/ActiveBreadCrumb

qiannn1hadoop01 �F(�>

cZxid = 0x10000000a

ctime = Sat Jan 13 01:40:21 CST 2018

mZxid = 0x10000000a

mtime = Sat Jan 13 01:40:21 CST 2018

pZxid = 0x10000000a

cversion = 0

dataVersion = 0

aclVersion = 0

ephemeralOwner = 0x0

dataLength = 31

numChildren = 0


[root@hadoop01 hadoop-2.7.1]# hdfs dfs -put ./README.txt hdfs:/

[root@hadoop01 hadoop-2.7.1]# hdfs dfs -ls /

18/01/13 01:58:24 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable

Found 1 items

-rw-r--r-- 3 root supergroup 1366 2018-01-13 01:57 /README.txt


测试是否能自动故障转移(failover)

[root@hadoop01 hadoop-2.7.1]# jps

3281 JournalNode

3475 NameNode

4644 Jps

4068 DataNode

3110 QuorumPeerMain

4367 DFSZKFailoverController


[root@hadoop01 hadoop-2.7.1]# kill -9 3475

[root@hadoop03 ~]# zkCli.sh

ActiveBreadCrumb ActiveStandbyElectorLock

[zk: localhost:2181(CONNECTED) 6] get /hadoop-ha/qian/ActiveBreadCrumb

qiannn2hadoop02 �F(�>

cZxid = 0x10000000a

ctime = Sat Jan 13 01:40:21 CST 2018

mZxid = 0x100000011

mtime = Sat Jan 13 02:01:57 CST 2018

pZxid = 0x10000000a

cversion = 0

dataVersion = 1

aclVersion = 0

ephemeralOwner = 0x0

dataLength = 31

numChildren = 0


[root@hadoop02 ~]# jps

3489 DataNode

3989 Jps

2970 QuorumPeerMain

3162 JournalNode

3646 DFSZKFailoverController

3423 NameNode


注意:namenode1(Active)挂掉后会自动切换到namenode2;若namenode2随后也挂掉,集群将没有可用的namenode。已挂掉的namenode1不会被自动重启,需要手动启动后才能重新作为Standby加入。



配置集群时间同步



HA搭建完毕


配置 地址 输入 域名 文件 服务器 服务 集群 变量 机器 环境 网卡 同步 之间 数据 时间 死后 网关 三台 准备 数据库的安全要保护哪些东西 数据库安全各自的含义是什么 生产安全数据库录入 数据库的安全性及管理 数据库安全策略包含哪些 海淀数据库安全审计系统 建立农村房屋安全信息数据库 易用的数据库客户端支持安全管理 连接数据库失败ssl安全错误 数据库的锁怎样保障安全 紫光数通是做数据库的吗 数据库试验架构 试图在数据库安全性方面 100m带宽的服务器配置 2021年中国数据库技术大会 网络安全推普周主题班会 云开发数据库安全规则 网络数据库的应用有哪些 怎么到4s店服务器下载数据 华为软件开发云推广 贵州软件开发有哪些 用友t3如何布置云服务器上 免费+数据库服务器 互联网科技公司缴纳税点 数据库课程内容主要有哪些 四川大学网络安全专业好就业吗 郑州国贸大厦17楼互联网科技 重庆正规软件开发定制费用 呼和浩特工信局网络安全科 宣汉县财政局网络安全与信息化 国家电网软件开发公司 伟淘网络技术有限公司 数据库重启服务 mfc软件开发报价 怀远县软件开发 程序软件开发专业团队在线服务 数据库选择投影连接相关习题 网络技术实训课心得体会 四川大学网络安全专业好就业吗 说明数据库的作用是什么
0