
MapReduce WordCount Demo

pom.xml

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>hadoop</groupId>
    <artifactId>demo</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>jar</packaging>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.0</version>
        </dependency>

        <dependency>
            <groupId>jdk.tools</groupId>
            <artifactId>jdk.tools</artifactId>
            <version>1.6</version>
            <scope>system</scope>
            <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.0</version>
        </dependency>

        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
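
With this POM, the project can be packaged into a runnable jar. A minimal sketch, assuming the default Maven directory layout and a local Maven installation:

mvn clean package  # produces target/demo-0.0.1-SNAPSHOT.jar (artifactId and version from the POM above)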

WCMain.java

package hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCMain {

    private static String iPath = "hdfs://localhost:9000/wordcount/input/test.txt";
    private static String oPath = "hdfs://localhost:9000/wordcount/output/";

    /**
     * 1. The business logic is defined and wired up through the Job object.
     * 2. The configured job is then submitted to the cluster for execution.
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job wcjob = Job.getInstance(conf);

        wcjob.setJarByClass(WCMain.class);
        wcjob.setMapperClass(WCMapper.class);
        wcjob.setReducerClass(WCReducer.class);

        // Set the data types of the output key and value of the Mapper class
        wcjob.setMapOutputKeyClass(Text.class);
        wcjob.setMapOutputValueClass(IntWritable.class);

        // Set the data types of the output key and value of the Reducer class
        wcjob.setOutputKeyClass(Text.class);
        wcjob.setOutputValueClass(IntWritable.class);

        // Specify where the input data to be processed is located
        FileSystem fs = FileSystem.get(conf);
        Path IPath = new Path(iPath);
        if (fs.exists(IPath)) {
            FileInputFormat.addInputPath(wcjob, IPath);
        } else {
            System.err.println("Input path does not exist: " + iPath);
            System.exit(1);
        }

        // Specify where to store the results after processing
        Path OPath = new Path(oPath);
        fs.delete(OPath, true);
        FileOutputFormat.setOutputPath(wcjob, OPath);

        // Submit this job to the YARN cluster
        boolean res = wcjob.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
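
The driver expects test.txt to already exist at the input path above. Assuming HDFS is running locally at hdfs://localhost:9000 (and that fs.defaultFS points there), the input can be staged roughly like this; the local test.txt is simply whatever text you want to count:

hdfs dfs -mkdir -p /wordcount/input       # create the input directory
hdfs dfs -put test.txt /wordcount/input/  # upload the local test.txt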

WCReducer.java

package hadoop;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * @ClassName: WCReducer
 * @Description: TODO
 * @author kngines
 * @date March 17, 2018
 */

public class WCReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Lifecycle: the framework calls reduce() once for each <key, value-group> it receives,
    // e.g. once with <"hello", [1, 1, 1]> if the mappers emitted "hello" three times.
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {

        int count = 0;  // define a counter
        for (IntWritable value : values) { // iterate over all values and accumulate them into count
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}

WCMapper.java

package hadoop;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * @ClassName: WCMapper
 * @Description: TODO
 * @author kngines
 * @date March 17, 2018
 */

public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Lifecycle of the map() method: the framework calls it once for every line of input data,
    // e.g. the line "hello world" produces <"hello", 1> and <"world", 1>.
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        String line = value.toString();  // convert the line of data to a String
        String[] words = line.split(" ");  // split the line into words

        for (String word : words) {  // iterate over the array and emit <word, 1>
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
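
With the three classes above, the job can be built and submitted to the cluster. A minimal sketch, run from the project directory after the mvn package step shown earlier (the main class hadoop.WCMain comes from the driver above; part-r-00000 is the usual output file name for a single reducer):

hadoop jar target/demo-0.0.1-SNAPSHOT.jar hadoop.WCMain   # submit the job
hdfs dfs -cat /wordcount/output/part-r-00000              # view the word counts
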
Problem Summary

Problem A:
FileAlreadyExistsException
FileAlreadyExistsException: Output directory hdfs://localhost:9000/wordcount/output already exists
Solution
Delete the old output directory in code (Java):
// Specify where to store the results after processing
Path OPath = new Path(oPath);
fs.delete(OPath, true);
Or delete the Hadoop output directory manually.
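
A minimal command-line equivalent, using the output path from this demo (adjust to your own directory):

hdfs dfs -rm -r hdfs://localhost:9000/wordcount/output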

Problem B:
SafeModeException
Description & Root Cause
This problem can leave a Hadoop job stuck at: INFO mapreduce.Job: Running job.
It is caused by insufficient free disk space. In this experiment, the virtual machine's root partition had less than 10% free space; after removing some recently installed packages and re-running the job, the problem went away.
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.SafeModeException): Cannot delete /benchmarks/TestDFSIO. Name node is in safe mode.  
Resources are low on NN. Please add or free up more resources then turn off safe mode manually. NOTE:  If you turn off safe mode before adding resources, the NN will immediately return to safe mode. Use "hdfs dfsadmin -safemode leave" to turn safe mode off.  
Solutions
Leave safe mode:
hdfs dfsadmin -safemode leave  
Delete unneeded files on Linux (the approach taken in this experiment; simple and effective), or expand the virtual machine's disk partition.
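
To verify the state before and after the cleanup, the safe-mode status and the remaining disk space can be checked like this:

hdfs dfsadmin -safemode get   # reports whether safe mode is ON or OFF
df -h /                       # free space on the root partition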


Other Tips
Kill a currently running Hadoop job
hadoop job -list  # list all currently running jobs

hadoop job -kill job_xx_xx  # kill a job by its job id
Find large files on a Linux system
find . -type f -size +100M  # find files larger than 100 MB
df -hl  # check Linux disk usage


Run the built-in wordcount example that ships with Hadoop:
hadoop jar hadoop-mapreduce-examples-2.7.7.jar wordcount /user/root/input/ /user/root/out1/