
[Hadoop] MapReduce Programming Demos: Old and New Style

1. WordCount

package com.test;

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Mai {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf=new Configuration();
        String[] otherArgs=(new GenericOptionsParser(conf,args)).getRemainingArgs();
        if(otherArgs.length < 2) {
            System.err.println("Usage: wordcount <in> [<in>...] <out>");
            System.exit(2);
        }

        Job job=Job.getInstance(conf,"wordcount");

        // Set the jar that contains the job classes
        job.setJarByClass(Mai.class);

        // Set the Mapper and Reducer classes
        job.setMapperClass(Mai.MyMapper.class);
        job.setReducerClass(Mai.MyReduce.class);

        // Set the reduce output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Add each argument except the last as an input path;
        // addInputPath appends, whereas setInputPaths would overwrite earlier paths
        for (int i = 0; i < otherArgs.length - 1; i++) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }

        // Set the output directory (it must not exist yet)
        FileOutputFormat.setOutputPath(job,new Path(otherArgs[otherArgs.length-1]));
        
        // Submit the job and exit with its status
        System.exit(job.waitForCompletion(true)?0:1);

    }
    // Mapper: reads <offset, line> pairs and emits <word, 1> for every token
    public static class MyMapper extends Mapper<Object, Text, Text, IntWritable> {
        private static final IntWritable one = new IntWritable(1); // Hadoop's Writable types serialize efficiently
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // Split the line into words
            StringTokenizer itr = new StringTokenizer(value.toString());
            while(itr.hasMoreTokens()) {
                this.word.set(itr.nextToken());
                context.write(this.word, one);
            }
        }
    }
    // Reducer: sums the counts for each word and emits <word, total>
    public static class MyReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            this.result.set(sum);
            context.write(key, this.result);
        }
    }
}
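
For a quick sanity check: given an input file containing the single line "hello world hello", the job writes the following to part-r-00000 in the output directory (TextOutputFormat separates key and value with a tab by default):

hello	2
world	1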

Maven dependencies:

<dependencies>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>5.1.25</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
        <scope>test</scope>
    </dependency>

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>3.1.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>3.1.0</version>
    </dependency>


    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>3.1.0</version>
    </dependency>

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
        <version>3.1.0</version>
    </dependency>

    <dependency>
        <groupId>log4j</groupId>
        <artifactId>log4j</artifactId>
        <version>1.2.17</version>
    </dependency>
</dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-jar-plugin</artifactId>
                <configuration>
                    <archive>
                        <manifest>
                            <addClasspath>true</addClasspath>
                            <useUniqueVersions>false</useUniqueVersions>
                            <classpathPrefix>lib/</classpathPrefix>
                            <mainClass>com.test.Mai</mainClass>
                        </manifest>
                    </archive>
                </configuration>
            </plugin>
        </plugins>
    </build>
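
With this configuration the jar is produced by the standard Maven build (the jar's file name depends on your artifactId and version):

mvn clean package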

Submit to the cluster:

hadoop jar ./programe.jar com.test.Mai /input /output0

Run the WordCount example that ships with Hadoop:

hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.0.jar wordcount /input /output1
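
If the input directory is not yet populated, it can be prepared, and the result inspected afterwards, with the usual HDFS commands (words.txt stands in for whatever local file you want to count):

hdfs dfs -mkdir -p /input
hdfs dfs -put words.txt /input
hdfs dfs -cat /output1/part-r-00000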

2. Traffic statistics (new style)

This demo uses a custom Writable (MySerializable) as the value type, and a driver that extends Configured and implements Tool so that it can be submitted through ToolRunner.

Mapper:

import hadoop.mapreduce.serializable.MySerializable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class MyMap extends Mapper<LongWritable,Text,Text,MySerializable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each input line is "<name> <up> <down>", separated by single spaces
        String line = value.toString();
        String[] fields = line.split(" ");
        String name = fields[0];
        long up = Long.parseLong(fields[1]);
        long down = Long.parseLong(fields[2]);
        context.write(new Text(name), new MySerializable(name, up, down));
    }
}
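
Both the Mapper above and the Reducer below depend on MySerializable, which this post does not include. A minimal sketch consistent with how it is used here (a (name, up, down) constructor plus getUp()/getDown()) could look like the following; the exact field layout and the toString format are assumptions:

package hadoop.mapreduce.serializable;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class MySerializable implements Writable {
    private String name;
    private long up;
    private long down;

    // Hadoop requires a no-arg constructor to instantiate the object during deserialization
    public MySerializable() {
    }

    public MySerializable(String name, long up, long down) {
        this.name = name;
        this.up = up;
        this.down = down;
    }

    public long getUp() {
        return up;
    }

    public long getDown() {
        return down;
    }

    // Field order in write() must match readFields() exactly
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(name);
        out.writeLong(up);
        out.writeLong(down);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        name = in.readUTF();
        up = in.readLong();
        down = in.readLong();
    }

    // Assumed output format: controls how the value is rendered in the reducer's text output
    @Override
    public String toString() {
        return up + "\t" + down + "\t" + (up + down);
    }
}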

Reducer:

import hadoop.mapreduce.serializable.MySerializable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;


public class MyRecune extends Reducer<Text, MySerializable, Text, MySerializable> {
    @Override
    protected void reduce(Text key, Iterable<MySerializable> values, Context context) throws IOException, InterruptedException {
        // Sum the up/down traffic across all records for the same name
        long up_counter = 0;
        long down_counter = 0;
        for (MySerializable i : values) {
            up_counter += i.getUp();
            down_counter += i.getDown();
        }
        context.write(key, new MySerializable(key.toString(), up_counter, down_counter));
    }
}
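
With the MySerializable sketch above, input lines such as

alice 100 200
bob 50 60
alice 300 400

would reduce to one record per name, e.g. alice with up=400 and down=600; the exact output text depends on MySerializable's toString().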

Driver (main class):

import hadoop.mapreduce.serializable.MySerializable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;


public class PhoneRunner extends Configured implements Tool {
    @Override
    public int run(String[] strings) throws Exception {
        // Use the Configuration that ToolRunner populated, instead of creating a fresh one
        Configuration configuration = getConf();
        Job job = Job.getInstance(configuration);
        job.setJarByClass(PhoneRunner.class);

        job.setMapperClass(MyMap.class);
        job.setReducerClass(MyRecune.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(MySerializable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(MySerializable.class);

        FileInputFormat.setInputPaths(job,new Path(strings[0]));
        FileOutputFormat.setOutputPath(job,new Path(strings[1]));

        return job.waitForCompletion(true)?0:1;
    }

    public static void main(String[] args) throws Exception {
        int run=ToolRunner.run(new Configuration(),new PhoneRunner(),args);
        System.exit(run);

    }


}
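
The driver is submitted the same way as the WordCount demo; the jar name and the class's package below are placeholders for whatever your build actually produces:

hadoop jar ./traffic.jar PhoneRunner /flow/input /flow/output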