Topic: Hadoop WordCount, a step further
Posted: 2011-11-02
```java
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.log4j.Logger;

public class WordCount {

    // Wloger is the author's own logging helper (not shown in this post); any log4j Logger works here.
    public static Logger loger = Wloger.loger;

    /**
     * TokenizerMapper extends Mapper<Object, Text, Text, IntWritable>
     *
     * [One map per input file: two input files means two map tasks.]
     * map: reads the file contents, splits them on " \t\n\r\f" (StringTokenizer's
     * default delimiters), and emits a word ==> one key/value pair for each token.
     *
     * @param Object      input key type
     * @param Text        input value type
     * @param Text        output key type
     * @param IntWritable output value type
     *
     * Writable tells the Hadoop framework how to serialize and deserialize an object of that type.
     * WritableComparable adds a compareTo method on top of Writable, so the framework also knows
     * how to sort objects of that type.
     *
     * @author yangchunlong.tw
     */
    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            loger.info("Map <key>" + key + "</key>");
            loger.info("Map <value>" + value + "</key>");
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                String wordstr = itr.nextToken();
                word.set(wordstr);
                loger.info("Map <word>" + wordstr + "</word>");
                context.write(word, one);
            }
        }
    }

    /**
     * IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable>
     *
     * [No matter how many maps there are, there is only one reduce: this is the aggregation step.]
     * reduce: iterates over the map output and sums up the word ==> one key/value pairs.
     *
     * The key here is the word emitted by the Mapper [reduce is called once for each key and its list of values].
     *
     * When the loop finishes, what has been written to the context is the final result.
     *
     * @author yangchunlong.tw
     */
    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            loger.info("Reduce <key>" + key + "</key>");
            loger.info("Reduce <value>" + values + "</key>");
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            loger.info("Reduce <sum>" + sum + "</sum>");
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        // Both an input path and an output path are required.
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCount.class);                           // main class
        job.setMapperClass(TokenizerMapper.class);                    // mapper
        job.setCombinerClass(IntSumReducer.class);                    // combiner
        job.setReducerClass(IntSumReducer.class);                     // reducer
        job.setOutputKeyClass(Text.class);                            // job output key class
        job.setOutputValueClass(IntWritable.class);                   // job output value class
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));    // input path
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));  // output path
        System.exit(job.waitForCompletion(true) ? 0 : 1);             // wait for completion, then exit
    }
}
```
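As the comment on TokenizerMapper notes, Writable tells the framework how to serialize and deserialize a type, and WritableComparable adds compareTo so the framework can sort map output keys. Purely for illustration, here is a minimal sketch of a custom key type; WordLengthKey is a made-up name and is not used by the WordCount job above:

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

// Hypothetical key: a word plus its length, sorted by length and then alphabetically.
public class WordLengthKey implements WritableComparable<WordLengthKey> {

    private String word = "";
    private int length;

    public void set(String w) {
        word = w;
        length = w.length();
    }

    @Override
    public void write(DataOutput out) throws IOException {   // serialize
        out.writeUTF(word);
        out.writeInt(length);
    }

    @Override
    public void readFields(DataInput in) throws IOException { // deserialize
        word = in.readUTF();
        length = in.readInt();
    }

    @Override
    public int compareTo(WordLengthKey other) {                // used by the framework to sort keys
        int c = Integer.compare(length, other.length);
        return c != 0 ? c : word.compareTo(other.word);
    }

    @Override
    public String toString() {
        return word + ":" + length;
    }
}
```

Any type used as a map output key has to implement WritableComparable in exactly this way; Text and IntWritable are simply built-in implementations of it.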
The program logs every Map call and every Reduce call. Running it over two small files, f1 and f2, produces the following:

```
f1 ==> Map Result
Map <key>0</key>
Map <value>ycl ycl is ycl good</key>
Map <word>ycl</word>
Map <word>ycl</word>
Map <word>is</word>
Map <word>ycl</word>
Map <word>good</word>

f1 ==> Reduce Result
Reduce <key>good</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
Reduce <sum>1</sum>
Reduce <key>is</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
Reduce <sum>1</sum>
Reduce <key>ycl</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
Reduce <sum>3</sum>

f2 ==> Map Result
Map <key>0</key>
Map <value>hello ycl hello lg</key>
Map <word>hello</word>
Map <word>ycl</word>
Map <word>hello</word>
Map <word>lg</word>

f2 ==> Reduce Result
Reduce <key>hello</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
Reduce <sum>2</sum>
Reduce <key>lg</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
Reduce <sum>1</sum>
Reduce <key>ycl</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
Reduce <sum>1</sum>

f1,f2 ==> Reduce Result
Reduce <key>good</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>1</sum>
Reduce <key>hello</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>2</sum>
Reduce <key>is</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>1</sum>
Reduce <key>lg</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>1</sum>
Reduce <key>ycl</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>4</sum>
```

From this you can work out how the steps are executed: with two input files, the files are mapped one at a time, each file's map output gets an aggregation pass of its own, and one overall reduce runs at the end. Those per-file passes only show up as "Reduce" lines because IntSumReducer is also registered as the combiner (job.setCombinerClass(IntSumReducer.class)), so it pre-aggregates each map task's output locally; the last pass, over f1 and f2 together, is the actual reduce.
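If you want the log itself to make that distinction visible, one option (a sketch, not part of the original post) is to register a separate combiner class that logs with a "Combine" prefix instead of reusing IntSumReducer. It would be one more nested static class inside WordCount, reusing the imports and the loger field already defined there:

```java
// Hypothetical combiner: same logic as IntSumReducer, but logs "Combine" instead of "Reduce"
// so the per-map-task combine pass and the final reduce pass can be told apart in the output.
// Add as another nested static class inside WordCount.
public static class LoggingCombiner
        extends Reducer<Text, IntWritable, Text, IntWritable> {

    private final IntWritable result = new IntWritable();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();   // partial sum over this map task's output only
        }
        result.set(sum);
        loger.info("Combine <key>" + key + "</key> <sum>" + sum + "</sum>");
        context.write(key, result);
    }
}

// In main(), register it instead of reusing the reducer:
// job.setCombinerClass(LoggingCombiner.class);
```

With this in place, the per-file passes would print Combine lines and only the final pass over f1 and f2 would print Reduce lines, which makes the execution order easier to read off the log.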