
Hadoop: resolving Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/util/Apps

Running WordCount locally on Linux with Eclipse throws the following exception:

Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/util/Apps

Solution: the JARs under Hadoop's yarn directory and under its yarn/lib subdirectory were not imported into the project. Add them to the build path and the error disappears.
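On a typical Hadoop 2.x install these JARs sit under $HADOOP_HOME/share/hadoop/yarn and $HADOOP_HOME/share/hadoop/yarn/lib (the exact location depends on your distribution). Once they are on the Eclipse build path, a minimal sketch like the following, which is not part of the original post, can confirm the class resolves before you rerun the job:

public class YarnClasspathCheck {
	public static void main(String[] args) {
		try {
			// Resolves only when the yarn JARs are on the classpath; if this
			// prints the success line, the NoClassDefFoundError should be gone
			Class.forName("org.apache.hadoop.yarn.util.Apps");
			System.out.println("org.apache.hadoop.yarn.util.Apps is on the classpath");
		} catch (ClassNotFoundException e) {
			System.out.println("yarn JARs are still missing: " + e);
		}
	}
}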

package cn.itheima.bigdata.hadoop.mr.wordcount;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Describes a job (which mapper class to use, which reducer class, where the
 * input files are, where the output goes, ...) and then submits the job to
 * the Hadoop cluster.
 * @author
[email protected]
 */
//cn.itheima.bigdata.hadoop.mr.wordcount.WordCountRunner
public class WordCountRunner {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		// The jar that contains the job's classes; set this before
		// Job.getInstance(conf), because the Job copies the Configuration
		conf.set("mapreduce.job.jar", "wcount.jar");
		Job wcjob = Job.getInstance(conf);
		// The jar that holds the job's resources
		wcjob.setJarByClass(WordCountRunner.class);
		// Which mapper class the job uses
		wcjob.setMapperClass(WordCountMapper.class);
		// Which reducer class the job uses
		wcjob.setReducerClass(WordCountReducer.class);
		// Key/value types output by the mapper
		wcjob.setMapOutputKeyClass(Text.class);
		wcjob.setMapOutputValueClass(LongWritable.class);
		// Key/value types output by the reducer
		wcjob.setOutputKeyClass(Text.class);
		wcjob.setOutputValueClass(LongWritable.class);
		// Path of the raw input data to process
		FileInputFormat.setInputPaths(wcjob, "hdfs://yun12-01:9000/wc/srcdata");
		// Path where the results are written
		FileOutputFormat.setOutputPath(wcjob, new Path("hdfs://yun12-01:9000/wc/output"));

		boolean res = wcjob.waitForCompletion(true);
		System.exit(res ? 0 : 1);
	}
}
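Because the reduce step below is a plain sum, which is associative and commutative, the same reducer class can also be registered as a combiner to cut down the data shuffled between map and reduce. This is an optional tweak, not part of the original runner; the extra line would go in main() before waitForCompletion, using the wcjob variable above:

		// Optional: pre-aggregate each mapper's output locally before the shuffle
		wcjob.setCombinerClass(WordCountReducer.class);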
package cn.itheima.bigdata.hadoop.mr.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable>{

	// key: hello ,  values : {1,1,1,1,1.....}
	@Override
	protected void reduce(Text key, Iterable<LongWritable> values, Context context)
			throws IOException, InterruptedException {

		// Accumulator for the word count
		long count = 0;
		for (LongWritable value : values) {
			count += value.get();
		}

		// Emit the <word, count> pair
		context.write(key, new LongWritable(count));
	}

}

package cn.itheima.bigdata.hadoop.mr.wordcount;

import java.io.IOException;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable>{

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {

		// Get the content of one line of the file
		String line = value.toString();
		// Split the line into an array of words
		String[] words = StringUtils.split(line, " ");
		// Emit a <word, 1> pair for each word
		for (String word : words) {
			context.write(new Text(word), new LongWritable(1));
		}

	}

}
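As written, the mapper allocates a new Text and a new LongWritable for every word it emits. A common optimisation, sketched below with a hypothetical class name and not part of the original code, is to reuse a single output key and a constant value, which is safe because context.write() serialises the objects immediately:

package cn.itheima.bigdata.hadoop.mr.wordcount;

import java.io.IOException;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapperReuse extends Mapper<LongWritable, Text, Text, LongWritable> {

	// Reused output objects: context.write() serialises them right away,
	// so mutating them between calls does not corrupt earlier output
	private final Text word = new Text();
	private static final LongWritable ONE = new LongWritable(1);

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {

		String[] words = StringUtils.split(value.toString(), " ");
		for (String w : words) {
			word.set(w);
			context.write(word, ONE);
		}
	}
}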