1. 程式人生 > >hadoop-3.0.0-alpha4安裝部署過程

hadoop-3.0.0-alpha4安裝部署過程

auth specific daemon ive bsp admin ram 8.14 產生


關閉防火墻
#systemctl stop firewalld.service #停止firewall
#systemctl disable firewalld.service #禁止firewall開機啟動

關閉selinux
#vi /etc/selinux/config

SELINUX=disabled
SELINUXTYPE=targeted

安裝jdk1.8:
#cd /usr
#tar zxvf jdk-8u131-linux-x64.tar.gz

解壓出來的目錄改名為java

vi /etc/profile
export JAVA_HOME=/usr/java
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib/dt.jar:$HIVE_HOME/lib:$JAVA_HOME/lib/tools.jar
export PATH=${JAVA_HOME}/bin:$PATH


設置服務器主從無密碼登錄:
[study@master ~]$ ssh-keygen -t rsa #產生公鑰私鑰
Generating public/private rsa key pair.
Enter file in which to save the key (/home/study/.ssh/id_rsa):
Created directory '/home/study/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/study/.ssh/id_rsa.
Your public key has been saved in /home/study/.ssh/id_rsa.pub.
The key fingerprint is:
0e:66:68:14:31:d3:3b:92:d0:ce:c3:40:03:15:b7:87 study@master

The key's randomart image is:
+--[ RSA 2048]----+
|.+=o*o |
| oo.*. |
| *E... |
| .Boo |
| oo+.S |
| . o o |
| . |
| |
| |
+-----------------+
[study@master ~]$ cd .ssh
[study@master .ssh]$ pwd
/home/study/.ssh
[study@master .ssh]$ ls
id_rsa id_rsa.pub #前者私鑰 ,後者公鑰, 私鑰存放本地.ssh目錄, 公鑰發送給申請無密碼登錄的機器並保存到.ssh/authorized_keys
[study@master .ssh]$ cp id_rsa.pub authorized_keys #這裏發送給自己的目錄相當於可以無密碼登錄自己,便於以後克隆時直接實現與其他機器的無密碼登錄

[root@master etc]# cat hostname #這裏修改主機名
master
[root@master etc]# cat hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.144.241 master #為了便於克隆時不需要修改,不在127.0.0.1處進行設置
192.168.144.242 slave1
192.168.144.243 slave2

[study@master hadoop]$ pwd
/home/study/hadoop-3.0.0-alpha4/etc/hadoop
vi hadoop-env.sh
export JAVA_HOME=/usr/java
vi core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>

<property>
<name>hadoop.tmp.dir</name>
<value>/home/study/hadoop-3.0.0-alpha4/hadoopdata</value>
</property>
</configuration>
vi hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.name.dir</name>
<value>/home/study/hadoop-3.0.0-alpha4/hdfs/name</value>
</property>
<property>
<name>dfs.data.dir</name>
<value>/home/study/hadoop-3.0.0-alpha4/hdfs/data</value>
</property>

</configuration>

vi mapred-site.xml
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>http://master:9001</value>
</property>

<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>

<property>
<name>mapreduce.application.classpath</name>
<value>
/home/study/hadoop-3.0.0-alpha4/etc/hadoop,
/home/study/hadoop-3.0.0-alpha4/share/hadoop/common/*,
/home/study/hadoop-3.0.0-alpha4/share/hadoop/common/lib/*,
/home/study/hadoop-3.0.0-alpha4/share/hadoop/hdfs/*,
/home/study/hadoop-3.0.0-alpha4/share/hadoop/hdfs/lib/*,
/home/study/hadoop-3.0.0-alpha4/share/hadoop/mapreduce/*,
/home/study/hadoop-3.0.0-alpha4/share/hadoop/mapreduce/lib/*,
/home/study/hadoop-3.0.0-alpha4/share/hadoop/yarn/*,
/home/study/hadoop-3.0.0-alpha4/share/hadoop/yarn/lib/*
</value>
</property>
</configuration>

vi yarn-site.xml
<configuration>

<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>master:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:8088</value>
</property>
</configuration>
vi workers #hadoop3以下版本在slaves文件裏
slave1
slave2
vi .bash_profile
export HADOOP_HOME=/home/study/hadoop-3.0.0-alpha4
PATH=$PATH:$HOME/.local/bin:$HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
關機
調小虛擬機內存為1G或以下,鏈接克隆2個節點
修改2臺克隆機器主機名分別為slave1 slave2
[root@slave1 etc]# cat hostname
slave1
修改ip :
[root@slave1 network-scripts]# pwd
/etc/sysconfig/network-scripts
[root@slave1 network-scripts]# cat ifcfg-ens33
TYPE=Ethernet
IPADDR="192.168.144.242" #修改ip
NETMASK="255.255.255.0"
GATEWAY="192.168.144.2"
BROADCAST="192.168.144.255"
#BOOTPROTO=dhcp
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=6ab5ebe0-5379-4903-bd1c-dd820fab14d9
DEVICE=ens33
ONBOOT=yes

重啟機器reboot
[study@master bin]$ pwd
/home/study/hadoop-3.0.0-alpha4/bin

./hdfs namenode -format #格式化 不然9000端口起不來

[study@master ~]$ start-all.sh
WARNING: Attempting to start all Apache Hadoop daemons as study in 10 seconds.
WARNING: This is not a recommended production deployment configuration.
WARNING: Use CTRL-C to abort.
Starting namenodes on [master]
Starting datanodes
Starting secondary namenodes [master]
Starting resourcemanager
Starting nodemanagers
[study@master ~]$ jps
8544 SecondaryNameNode
8326 NameNode
8758 ResourceManager
9071 Jps
[study@slave1 ~]$ jps
3432 DataNode
3673 Jps
3549 NodeManager
[study@slave2 ~]$ jps
3610 DataNode
3854 Jps
3727 NodeManager
完畢

hadoop-3.0.0-alpha4安裝部署過程