Hadoop Learning 2: Some Uses of DistributedCache

 

Some uses of DistributedCache.

Debug code: WordCount2.java

package com.hadoop.test;

import java.io.IOException;
import java.net.URI;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

public class WordCount2 extends Configured implements Tool {
	
	static Logger log = Logger.getLogger(WordCount2.class);
	
	public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
		
		static enum Counters {
			INPUT_WORDS
		}

		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();

		private boolean caseSensitive = true;	// whether matching is case-sensitive
		private Set<String> patternsToSkip = new HashSet<String>();	// regular expressions to strip from each input line

		private long numRecords = 0;	// number of records processed so far
		private String inputFile;		

		public void configure(JobConf job) {
			caseSensitive = job.getBoolean("wordcount.case.sensitive", true);
			inputFile = job.get("map.input.file");
			
			log.info("caseSensitive:" + job.get("wordcount.case.sensitive")
						+ "		inputFile:" + inputFile 
						+ "		patterns:" + job.get("wordcount.skip.patterns"));

			if (job.getBoolean("wordcount.skip.patterns", false)) {
				log.info("传入参数wordcount.skip.patterns");
				Path[] patternsFiles = new Path[0];
				try {
//					patternsFiles[0] = DistributedCache.getCacheFiles(job);	// read the pattern file paths (passed via the configuration)
					
					URI[] uris = DistributedCache.getCacheFiles(job);
					patternsFiles = new Path[uris.length];
					for(int i = 0; i < uris.length; i++){
						Path path = new Path(uris[i].toString());
//						Path path = new Path("D:/patterns.txt");
						patternsFiles[i] = path;
					}
//					log.info(uris[0].toString());
					
//					patternsFiles = DistributedCache.getLocalCacheFiles(job);
//					log.info(patternsFiles.length);
				} catch (IOException ioe) {
					System.err.println("Caught exception while getting cached files: "
									+ StringUtils.stringifyException(ioe));
				}
				for (Path patternsFile : patternsFiles) {
					parseSkipFile(patternsFile);
				}
			}
		}
		
		// extract the regular expressions from the pattern file
		private void parseSkipFile(Path patternsFile) {
			log.info("extracting regular expressions from the pattern file");
			try {
//				BufferedReader fis = new BufferedReader(new FileReader(patternsFile.toString()));
//				BufferedReader fis = new BufferedReader(new FileReader("hdfs://192.168.100.228:9000/temp/p.dat"));
				String pattern = null;
//				while ((pattern = fis.readLine()) != null) {
//					log.info("pattern: " + pattern);
//					patternsToSkip.add(pattern);
//				}
				// read the pattern file directly from HDFS
				Configuration conf = new Configuration();
				FileSystem fs = FileSystem.get(patternsFile.toUri(), conf);
				FSDataInputStream hdfsInStream = fs.open(patternsFile);
				String s = "";
				while (s != null) {
					s = hdfsInStream.readLine();
					if (s != null) {
						System.out.println(s);
						patternsToSkip.add(s);
					}
				}
				hdfsInStream.close();
//				fs.close();
				log.info("pattern list: " + patternsToSkip);
			} catch (IOException ioe) {
				System.err.println("Caught exception while parsing the cached file '"
								+ patternsFile
								+ "' : "
								+ StringUtils.stringifyException(ioe));
			}
		}

		public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
			log.info("map 线程id:" + Thread.currentThread().getId());
			
			String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();

			for (String pattern : patternsToSkip) {
				line = line.replaceAll(pattern, "");
			}

			StringTokenizer tokenizer = new StringTokenizer(line);
			while (tokenizer.hasMoreTokens()) {
				word.set(tokenizer.nextToken());
				output.collect(word, one);
				reporter.incrCounter(Counters.INPUT_WORDS, 1);
			}

			if ((++numRecords % 100) == 0) {
				reporter.setStatus("Finished processing " + numRecords
						+ " records " + "from the input file: " + inputFile);
			}
		}
	}

	public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
		public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
			log.info("reduce 线程id:" + Thread.currentThread().getId());
			int sum = 0;
			while (values.hasNext()) {
				sum += values.next().get();
			}
			output.collect(key, new IntWritable(sum));
		}
	}

	public int run(String[] args) throws Exception {
		JobConf conf = new JobConf(getConf(), WordCount2.class);
		conf.setJobName("wordcount");

		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(IntWritable.class);

		conf.setMapperClass(Map.class);
		conf.setCombinerClass(Reduce.class);
		conf.setReducerClass(Reduce.class);

		conf.setInputFormat(TextInputFormat.class);
		conf.setOutputFormat(TextOutputFormat.class);

		// set the path of the pattern file
		DistributedCache.addCacheFile(new URI("/temp/p.dat"), conf);	// add an HDFS file path to the DistributedCache
		conf.setBoolean("wordcount.skip.patterns", true);
		
		
//		List<String> other_args = new ArrayList<String>();
//		for (int i = 0; i < args.length; ++i) {
//			if ("-skip".equals(args[i])) {
//				DistributedCache.addCacheFile(new Path(args[++i]).toUri(), conf);
//				conf.setBoolean("wordcount.skip.patterns", true);
//			} else {
//				other_args.add(args[i]);
//			}
//		}

		FileInputFormat.setInputPaths(conf, new Path("/temp/in2"));
		FileOutputFormat.setOutputPath(conf, new Path("/temp/out-" + String.valueOf(System.currentTimeMillis())));

		JobClient.runJob(conf);
		return 0;
	}

	public static void main(String[] args) throws Exception {
		int res = ToolRunner.run(new Configuration(), new WordCount2(), args);
		System.exit(res);
	}

}

 

The two key lines are:

DistributedCache.addCacheFile(new URI("/temp/p.dat"), conf); // add an HDFS file path to the DistributedCache

URI[] uris = DistributedCache.getCacheFiles(job); // read back the cached file URIs (passed via the job configuration)

These two calls illustrate the basic usage of DistributedCache.

With DistributedCache you can register a URI path before the job runs; in map or reduce (typically in configure()) you can then call DistributedCache.get*() to obtain the file behind that path, which can be a data file, a jar, and so on. DistributedCache can also add jars directly to the task classpath, which makes it convenient to use third-party libraries in a job.
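For the classpath feature, a minimal sketch (placed inside run() before the job is submitted; the jar path below is a made-up example and would have to already exist on HDFS):

// Sketch only: put a third-party jar on the task classpath via the DistributedCache.
// "/temp/lib/third-party.jar" is a hypothetical HDFS path used for illustration.
DistributedCache.addFileToClassPath(new Path("/temp/lib/third-party.jar"), conf);

// Plain files and archives are registered in a similar way:
// DistributedCache.addCacheFile(new URI("/temp/p.dat"), conf);
// DistributedCache.addCacheArchive(new URI("/temp/dict.zip"), conf);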

Output log:

12/02/09 17:28:50 INFO jvm.JvmMetrics: Initializing JVM Metrics with processName=JobTracker, sessionId=
12/02/09 17:28:51 INFO mapred.FileInputFormat: Total input paths to process : 2
12/02/09 17:28:51 INFO mapred.JobClient: Running job: job_local_0001
12/02/09 17:28:51 INFO mapred.FileInputFormat: Total input paths to process : 2
12/02/09 17:28:51 INFO mapred.MapTask: numReduceTasks: 1
12/02/09 17:28:51 INFO mapred.MapTask: io.sort.mb = 100
12/02/09 17:28:51 INFO mapred.MapTask: data buffer = 79691776/99614720
12/02/09 17:28:51 INFO mapred.MapTask: record buffer = 262144/327680
12/02/09 17:28:51 INFO test.WordCount2: caseSensitive:null		inputFile:hdfs://localhost:9000/temp/in2/t1.txt		patterns:true
12/02/09 17:28:51 INFO test.WordCount2: received parameter wordcount.skip.patterns
12/02/09 17:28:51 INFO test.WordCount2: extracting regular expressions from the pattern file
12/02/09 17:28:51 INFO test.WordCount2: pattern list: [\! , \, , \. , to ]
\. 
\, 
\! 
to 
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO mapred.MapTask: Starting flush of map output
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:51 INFO mapred.MapTask: Finished spill 0
12/02/09 17:28:51 INFO mapred.TaskRunner: Task:attempt_local_0001_m_000000_0 is done. And is in the process of commiting
12/02/09 17:28:51 INFO mapred.LocalJobRunner: hdfs://localhost:9000/temp/in2/t1.txt:0+52
12/02/09 17:28:51 INFO mapred.TaskRunner: Task 'attempt_local_0001_m_000000_0' done.
12/02/09 17:28:51 INFO mapred.MapTask: numReduceTasks: 1
12/02/09 17:28:51 INFO mapred.MapTask: io.sort.mb = 100
12/02/09 17:28:51 INFO mapred.MapTask: data buffer = 79691776/99614720
12/02/09 17:28:51 INFO mapred.MapTask: record buffer = 262144/327680
12/02/09 17:28:51 INFO test.WordCount2: caseSensitive:null		inputFile:hdfs://localhost:9000/temp/in2/t2.txt		patterns:true
12/02/09 17:28:51 INFO test.WordCount2: received parameter wordcount.skip.patterns
12/02/09 17:28:51 INFO test.WordCount2: extracting regular expressions from the pattern file
\. 
\, 
\! 
to 
12/02/09 17:28:51 INFO test.WordCount2: pattern list: [\! , \, , \. , to ]
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO test.WordCount2: map thread id:22
12/02/09 17:28:51 INFO mapred.MapTask: Starting flush of map output
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO mapred.MapTask: Finished spill 0
12/02/09 17:28:52 INFO mapred.TaskRunner: Task:attempt_local_0001_m_000001_0 is done. And is in the process of commiting
12/02/09 17:28:52 INFO mapred.LocalJobRunner: hdfs://localhost:9000/temp/in2/t2.txt:0+35
12/02/09 17:28:52 INFO mapred.TaskRunner: Task 'attempt_local_0001_m_000001_0' done.
12/02/09 17:28:52 INFO mapred.LocalJobRunner: 
12/02/09 17:28:52 INFO mapred.Merger: Merging 2 sorted segments
12/02/09 17:28:52 INFO mapred.Merger: Down to the last merge-pass, with 2 segments left of total size: 184 bytes
12/02/09 17:28:52 INFO mapred.LocalJobRunner: 
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO test.WordCount2: reduce thread id:22
12/02/09 17:28:52 INFO mapred.TaskRunner: Task:attempt_local_0001_r_000000_0 is done. And is in the process of commiting
12/02/09 17:28:52 INFO mapred.LocalJobRunner: 
12/02/09 17:28:52 INFO mapred.TaskRunner: Task attempt_local_0001_r_000000_0 is allowed to commit now
12/02/09 17:28:52 INFO mapred.FileOutputCommitter: Saved output of task 'attempt_local_0001_r_000000_0' to hdfs://localhost:9000/temp/out-1328779730906
12/02/09 17:28:52 INFO mapred.LocalJobRunner: reduce > reduce
12/02/09 17:28:52 INFO mapred.TaskRunner: Task 'attempt_local_0001_r_000000_0' done.
12/02/09 17:28:52 INFO mapred.JobClient:  map 100% reduce 100%
12/02/09 17:28:52 INFO mapred.JobClient: Job complete: job_local_0001
12/02/09 17:28:52 INFO mapred.JobClient: Counters: 16
12/02/09 17:28:52 INFO mapred.JobClient:   FileSystemCounters
12/02/09 17:28:52 INFO mapred.JobClient:     FILE_BYTES_READ=67623
12/02/09 17:28:52 INFO mapred.JobClient:     HDFS_BYTES_READ=63479
12/02/09 17:28:52 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=64858
12/02/09 17:28:52 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=131732
12/02/09 17:28:52 INFO mapred.JobClient:   com.hadoop.test.WordCount2$Map$Counters
12/02/09 17:28:52 INFO mapred.JobClient:     INPUT_WORDS=16
12/02/09 17:28:52 INFO mapred.JobClient:   Map-Reduce Framework
12/02/09 17:28:52 INFO mapred.JobClient:     Reduce input groups=16
12/02/09 17:28:52 INFO mapred.JobClient:     Combine output records=16
12/02/09 17:28:52 INFO mapred.JobClient:     Map input records=5
12/02/09 17:28:52 INFO mapred.JobClient:     Reduce shuffle bytes=0
12/02/09 17:28:52 INFO mapred.JobClient:     Reduce output records=16
12/02/09 17:28:52 INFO mapred.JobClient:     Spilled Records=32
12/02/09 17:28:52 INFO mapred.JobClient:     Map output bytes=148
12/02/09 17:28:52 INFO mapred.JobClient:     Map input bytes=87
12/02/09 17:28:52 INFO mapred.JobClient:     Combine input records=16
12/02/09 17:28:52 INFO mapred.JobClient:     Map output records=16
12/02/09 17:28:52 INFO mapred.JobClient:     Reduce input records=16

The test data consists of two files, so two map tasks were executed. As the log shows, the configuration is loaded once per task, i.e. the pattern file was read from HDFS twice.
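To avoid re-opening the pattern file on HDFS in every task, the commented-out DistributedCache.getLocalCacheFiles(job) call in configure() could be used instead: the framework copies each cached file onto the local disk of the task node, and the returned local paths can be read with ordinary file I/O. A minimal sketch of what parseSkipFile() might look like under that approach (untested here; it assumes configure() obtained the paths via getLocalCacheFiles and it needs java.io.BufferedReader / java.io.FileReader imports):

		// Sketch: read skip patterns from the node-local copy provided by the DistributedCache.
		private void parseSkipFile(Path patternsFile) {
			try {
				BufferedReader reader = new BufferedReader(new FileReader(patternsFile.toString()));
				try {
					String pattern;
					while ((pattern = reader.readLine()) != null) {
						patternsToSkip.add(pattern);
					}
				} finally {
					reader.close();
				}
			} catch (IOException ioe) {
				System.err.println("Caught exception while parsing the cached file '"
						+ patternsFile + "' : " + StringUtils.stringifyException(ioe));
			}
		}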
