1. Create the hadoop user
[root@hncdf ~]# useradd hadoop
[root@hncdf ~]# id hadoop
uid=1102(hadoop) gid=1102(hadoop) groups=1102(hadoop)
[root@hncdf ~]# passwd hadoop
Changing password for user hadoop.
New password:
BAD PASSWORD: it is too simplistic/systematic
BAD PASSWORD: is too simple
Retype new password:
passwd: all authentication tokens updated successfully.
# Grant the hadoop user sudo privileges
[root@hncdf ~]# vi /etc/sudoers
hadoop ALL=(root) NOPASSWD:ALL
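A safer way to make this change: editing /etc/sudoers directly with vi risks locking out sudo entirely if a typo slips in, while visudo validates the syntax before saving. An equivalent alternative to the vi step above:
[root@hncdf ~]# visudo
# then add the same line:
hadoop ALL=(root) NOPASSWD:ALL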
2. Configure environment variables
[root@hncdf opt]# vi /etc/profile
export HADOOP_HOME=/opt/hadoop-2.8.1
[root@hncdf opt]# source /etc/profile
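Note that exporting HADOOP_HOME alone does not put the Hadoop commands on the PATH, which is why the later steps use full paths like /opt/hadoop-2.8.1/bin/hdfs. If you prefer short commands, an optional extra line for /etc/profile (my addition, not required by the rest of this guide):
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH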
3. Upload and extract the tarball
[root@hncdf ~]# mv hadoop-2.8.1.tar.gz /opt
[root@hncdf ~]# cd /opt
[root@hncdf opt]# tar -xzvf hadoop-2.8.1.tar.gz
[root@hncdf hadoop-2.8.1]# chown -R hadoop:hadoop /opt/hadoop-2.8.1
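A quick sanity check that the ownership change took effect; the directory should now list hadoop:hadoop as owner and group:
[root@hncdf opt]# ls -ld /opt/hadoop-2.8.1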
4. Switch to the hadoop user and configure passwordless SSH login
[root@hncdf ~]# su - hadoop
# Make sure the sshd service is running
[hadoop@hncdf ~]$ sudo service sshd status
openssh-daemon (pid 926) is running...
[hadoop@hncdf ~]$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
f2:bb:af:84:07:f6:8a:f0:f3:8c:72:b8:08:1e:24:b2 hadoop@hncdf
The key's randomart image is:
+--[ RSA 2048]----+
| |
| |
| |
| |
|o. + S |
|+. . * |
|E... . = |
|o.+oo+ + . |
|...++o+ ++. |
+-----------------+
[hadoop@hncdf ~]$ cd .ssh
[hadoop@hncdf .ssh]$ ll
total 8
-rw------- 1 hadoop hadoop 1675 Mar 7 14:20 id_rsa
-rw-r--r-- 1 hadoop hadoop 394 Mar 7 14:20 id_rsa.pub
[hadoop@hncdf .ssh]$ cat id_rsa.pub >> authorized_keys
[hadoop@hncdf .ssh]$ ll
total 12
-rw-rw-r-- 1 hadoop hadoop 394 Mar 7 14:21 authorized_keys
-rw------- 1 hadoop hadoop 1675 Mar 7 14:20 id_rsa
-rw-r--r-- 1 hadoop hadoop 394 Mar 7 14:20 id_rsa.pub
[hadoop@hncdf .ssh]$ chmod 600 authorized_keys
[hadoop@hncdf .ssh]$ ssh hncdf date
The authenticity of host 'hncdf (192.168.56.101)' can't be established.
RSA key fingerprint is ed:1d:25:1f:b4:9e:12:41:64:75:f2:45:ad:0d:8f:b9.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hncdf,192.168.56.101' (RSA) to the list of known hosts.
Thu Mar 7 14:22:15 CST 2019
[hadoop@hncdf .ssh]$ ssh hncdf date
Thu Mar 7 14:22:20 CST 2019
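For reference, the cat/chmod steps above can be collapsed into one command on systems that ship ssh-copy-id, which appends the public key and sets the permissions itself:
[hadoop@hncdf ~]$ ssh-copy-id hncdf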
5. Edit the Hadoop configuration files (as the hadoop user)
# Step 1: hdfs-site.xml
[hadoop@hncdf hadoop]$ pwd
/opt/hadoop-2.8.1/etc/hadoop
[hadoop@hncdf hadoop]$ vi /opt/hadoop-2.8.1/etc/hadoop/hdfs-site.xml
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>192.168.56.101:50090</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.https-address</name>
    <value>192.168.56.101:50091</value>
  </property>
</configuration>
# Step 2: core-site.xml
[hadoop@hncdf hadoop]$ vi /opt/hadoop-2.8.1/etc/hadoop/core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.56.101:9000</value>
  </property>
</configuration>
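One optional addition worth knowing about: by default HDFS keeps its NameNode and DataNode data under hadoop.tmp.dir, which falls back to a directory under /tmp that may be wiped on reboot. A sketch of an extra property for the same core-site.xml <configuration> block; the path /opt/hadoop-2.8.1/tmp is an example of my choosing (create it and chown it to hadoop first):
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoop-2.8.1/tmp</value>
  </property>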
# Step 3: slaves (this single node, hncdf)
[hadoop@hncdf hadoop]$ vi /opt/hadoop-2.8.1/etc/hadoop/slaves
192.168.56.101
6. Set the Java environment variables
[root@hncdf ~]# mkdir -p /usr/java
[root@hncdf ~]# cd /usr/java
[root@hncdf java]# rz    # Upload jdk-8u45-linux-x64.gz
[root@hncdf java]# tar -xzvf jdk-8u45-linux-x64.gz
# Set the environment variables
[root@hncdf java]# vi /etc/profile
# Append at the bottom of the file
export JAVA_HOME=/usr/java/jdk1.8.0_45
export PATH=$JAVA_HOME/bin:$PATH
# Apply the changes
[root@hncdf java]# source /etc/profile
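Before moving on, confirm the shell now picks up the new JDK; the first line of the output should report version 1.8.0_45:
[root@hncdf java]# java -version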
# Update JAVA_HOME in hadoop-env.sh
[root@hncdf hadoop-2.8.1]# vi /opt/hadoop-2.8.1/etc/hadoop/hadoop-env.sh
# Replace export JAVA_HOME=${JAVA_HOME} with
export JAVA_HOME=/usr/java/jdk1.8.0_45
7. Format and start HDFS (as the hadoop user)
[hadoop@hncdf ~]$ /opt/hadoop-2.8.1/bin/hdfs namenode -format
[hadoop@hncdf ~]$ /opt/hadoop-2.8.1/sbin/start-dfs.sh
Starting namenodes on [hncdf]
hncdf: starting namenode, logging to /opt/hadoop-2.8.1/logs/hadoop-hadoop-namenode-hncdf.out
192.168.56.101: starting datanode, logging to /opt/hadoop-2.8.1/logs/hadoop-hadoop-datanode-hncdf.out
Starting secondary namenodes [hncdf]
hncdf: starting secondarynamenode, logging to /opt/hadoop-2.8.1/logs/hadoop-hadoop-secondarynamenode-hncdf.out
# At this point, all three HDFS processes are running as the hadoop user.
8. Check that it worked
[hadoop@hncdf ~]$ jps
4355 DataNode
4261 NameNode
4652 Jps
4541 SecondaryNameNode
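Besides jps, you can ask the NameNode directly whether the DataNode has registered; the report should show one live datanode:
[hadoop@hncdf ~]$ /opt/hadoop-2.8.1/bin/hdfs dfsadmin -report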
Visit: http://192.168.56.101:50070
Next, install MapReduce + YARN:
1. Edit mapred-site.xml
/opt/hadoop-2.8.1/etc/hadoop/mapred-site.xml
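Note: the Hadoop 2.8.1 tarball ships this file only as mapred-site.xml.template, so create mapred-site.xml from the template first:
[hadoop@hncdf hadoop]$ cd /opt/hadoop-2.8.1/etc/hadoop
[hadoop@hncdf hadoop]$ cp mapred-site.xml.template mapred-site.xml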
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
2. Edit yarn-site.xml
/opt/hadoop-2.8.1/etc/hadoop/yarn-site.xml
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>
3. Start the ResourceManager and NodeManager daemons:
$ /opt/hadoop-2.8.1/sbin/start-yarn.sh
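If the start succeeds, jps should now list ResourceManager and NodeManager in addition to the three HDFS processes:
[hadoop@hncdf ~]$ jps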
4. Browse the web interface for the ResourceManager; by default it is available at:
ResourceManager - http://192.168.56.101:8088/
5. Run a MapReduce job.
[hadoop@hncdf hadoop-2.8.1]$ ll /opt/hadoop-2.8.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.8.1.jar
-rw-rw-r-- 1 hadoop hadoop 301938 Jun 2 2017 /opt/hadoop-2.8.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.8.1.jar
[hadoop@hncdf mapreduce]$ /opt/hadoop-2.8.1/bin/hadoop jar /opt/hadoop-2.8.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.8.1.jar pi 5 10
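Here pi 5 10 means 5 map tasks with 10 samples each, so the job finishes quickly with a rough estimate of pi. Another example from the same jar, sketched with HDFS paths /input and /output of my choosing (the output directory must not exist beforehand):
[hadoop@hncdf ~]$ /opt/hadoop-2.8.1/bin/hdfs dfs -mkdir -p /input
[hadoop@hncdf ~]$ /opt/hadoop-2.8.1/bin/hdfs dfs -put /opt/hadoop-2.8.1/etc/hadoop/core-site.xml /input
[hadoop@hncdf ~]$ /opt/hadoop-2.8.1/bin/hadoop jar /opt/hadoop-2.8.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.8.1.jar wordcount /input /output
[hadoop@hncdf ~]$ /opt/hadoop-2.8.1/bin/hdfs dfs -cat /output/part-r-00000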
6. When you're done, stop the daemons with:
$ /opt/hadoop-2.8.1/sbin/stop-yarn.sh
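stop-yarn.sh only stops the YARN daemons; to bring down HDFS as well:
$ /opt/hadoop-2.8.1/sbin/stop-dfs.sh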