- 配置 core-site.xml 文件
点击(此处)折叠或打开
<configuration>
  <!-- Default filesystem URI. In an HA setup this is the nameservice
       logical name (see dfs.nameservices in hdfs-site.xml), not a
       single NameNode host:port. -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hacluster</value>
    <description>集群的namenode节点的url</description>
  </property>
  <!-- ZooKeeper quorum used by the ZKFailoverController for automatic
       NameNode failover. An odd number of nodes (at least 3) is
       recommended so the ensemble can form a majority. -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>1.hadoop.com:2181,2.hadoop.com:2181,3.hadoop.com:2181</value>
    <description>zookeeper集群的地址和端口,最好保持奇数个至少3台</description>
  </property>
  <!-- Read/write buffer size in bytes (128 KiB) for SequenceFiles. -->
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <!-- Base directory for Hadoop temporary files; many other paths
       default to locations under it. Must NOT be deleted on NameNode
       hosts after "hdfs namenode -format". -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/fqlhadoop/datas/hadoop/tmp-hadoop-${user.name}</value>
    <description>执行namenode format之后,很多路径都依赖他,namenode节点该目录不可以删除</description>
  </property>
</configuration>
配置 hdfs-site.xml
点击(此处)折叠或打开
<configuration>
  <!-- Logical name of the HA nameservice; referenced by fs.defaultFS. -->
  <property>
    <name>dfs.nameservices</name>
    <value>hacluster</value>
  </property>
  <!-- The two NameNode identifiers inside the nameservice. -->
  <property>
    <name>dfs.ha.namenodes.hacluster</name>
    <value>n1,n2</value>
  </property>
  <!-- RPC endpoints of each NameNode; clients connect through these. -->
  <property>
    <name>dfs.namenode.rpc-address.hacluster.n1</name>
    <value>1.hadoop.com:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.hacluster.n2</name>
    <value>2.hadoop.com:8020</value>
    <description>远程控制端口 在配置客户端访问时 会用到此端口</description>
  </property>
  <!-- HTTP (web UI) endpoints of each NameNode. -->
  <property>
    <name>dfs.namenode.http-address.hacluster.n1</name>
    <value>1.hadoop.com:8090</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.hacluster.n2</name>
    <value>2.hadoop.com:8090</value>
    <description>页面访问端口</description>
  </property>
  <!-- JournalNode quorum where both NameNodes share edit logs.
       Note: qjournal hosts are separated by ';', not ','. -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://1.hadoop.com:8485;2.hadoop.com:8485;3.hadoop.com:8485/hacluster</value>
    <description>journalnode共享文件集群</description>
  </property>
  <!-- Proxy class HDFS clients use to locate the active NameNode. -->
  <property>
    <name>dfs.client.failover.proxy.provider.hacluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    <description>故障处理类</description>
  </property>
  <!-- Fence the previously-active NameNode over SSH during failover
       (username and SSH port taken from ${user.name}:39000). -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence(${user.name}:39000)</value>
    <description>ssh方式进行故障切换</description>
  </property>
  <!-- Passphrase-less private key used by sshfence. -->
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/${user.name}/.ssh/id_rsa</value>
  </property>
  <!-- Local directory where each JournalNode stores edit logs. -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/hacluster/hadoop/dfs.journalnode.edits.dir</value>
  </property>
  <!-- Enable ZKFC-driven automatic failover (requires
       ha.zookeeper.quorum in core-site.xml). -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
    <description>自动切换 开启</description>
  </property>
  <!-- Number of block replicas. -->
  <property>
    <name>dfs.replication</name>
    <value>3</value>
    <description>备机数量</description>
  </property>
  <!-- Local directory where the NameNode stores fsimage metadata. -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/home/hacluster/hadoop/dfs.namenode.name.dir</value>
  </property>
  <!-- HDFS block size in bytes (64 MiB). -->
  <property>
    <name>dfs.blocksize</name>
    <value>67108864</value>
  </property>
  <!-- NameNode RPC server handler thread count. -->
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
  </property>
  <!-- Local directory where DataNodes store block data. -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/data1/hacluster/hadoop/datanode</value>
    <description>datanode数据目录</description>
  </property>
</configuration>
配置 yarn-site.xml
点击(此处)折叠或打开
<configuration>
  <!-- Site specific YARN configuration properties -->
  <!-- Use the CapacityScheduler for resource allocation. -->
  <property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
  </property>
  <!-- Enable ResourceManager high availability. -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
    <description>开启resource manager HA</description>
  </property>
  <!-- Logical cluster id shared by the RM pair; used as the ZooKeeper
       namespace for leader election and state. -->
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>cluster1</value>
  </property>
  <!-- Identifiers of the two ResourceManagers. -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <!-- Host of each ResourceManager. -->
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>1.hadoop.com</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>2.hadoop.com</value>
  </property>
  <!-- ZooKeeper quorum used for RM leader election and state storage. -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>1.hadoop.com:2181,2.hadoop.com:2181,3.hadoop.com:2181</value>
  </property>
  <!-- Enable the RM Restart feature: recover running applications
       after an RM restart/failover. -->
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
    <description>开启自动恢复功能</description>
  </property>
  <!-- Automatic failover between rm1 and rm2 via ZooKeeper. -->
  <property>
    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
    <value>true</value>
    <description>开启故障自动切换</description>
  </property>
  <!-- Persist RM state to ZooKeeper (required for recovery + HA). -->
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <!-- Maximum ApplicationMaster launch attempts. -->
  <property>
    <name>yarn.resourcemanager.am.max-attempts</name>
    <value>5</value>
  </property>
  <!-- Auxiliary service needed by MapReduce for the shuffle phase. -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>