
MapReduce Advanced Programming: Custom InputFormat

 

0. Sample test data (each record is: name, x, y, z)

ball, 3.5, 12.7, 9.0
car, 15, 23.76, 42.23
device, 0.0, 12.4, -67.1
 

1. Testing the Point3D InputFormat

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.io.Text;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver that tests the custom <code>Point3DInputFormat</code>.
 * 
 * @author chenwq
 */
public class TestPoint3DInputFormat {
	 /**
     * @param args
     * @throws IOException 
     * @throws ClassNotFoundException 
     * @throws InterruptedException 
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        // Remove any previous output so the job can write to args[1].
        FileSystem fs = FileSystem.get(URI.create(args[1]), conf);
        fs.delete(new Path(args[1]), true);
        job.setJobName("Test Point3DInputFormat");
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.setInputFormatClass(Point3DInputFormat.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Point3D.class);
        job.setMapperClass(Point3DMapper.class);
        // Map-only job: the mapper output is written out directly.
        job.setNumReduceTasks(0);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
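With zero reduce tasks, the mapper output is written directly by the default TextOutputFormat as the key, a tab, then Point3D.toString(). For the sample input above, the part-m-00000 file should therefore look roughly like this:

ball	3.5, 12.7, 9.0
car	15.0, 23.76, 42.23
device	0.0, 12.4, -67.1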

 

2. The custom type Point3D must implement the WritableComparable interface before Hadoop can transmit it between tasks

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

/**
 * Custom data type <code>Point3D</code>: a 3D point that Hadoop can
 * serialize, compare, and shuffle.
 * 
 * @author chenwq
 */
public class Point3D implements WritableComparable<Point3D> {
	public float x;
	public float y;
	public float z;

	public Point3D(float x, float y, float z) {
		this.x = x;
		this.y = y;
		this.z = z;
	}

	public Point3D() {
		this(0.0f, 0.0f, 0.0f);
	}

	public void set(float x, float y, float z) {
		this.x = x;
		this.y = y;
		this.z = z;
	}

	public void write(DataOutput out) throws IOException {
		out.writeFloat(x);
		out.writeFloat(y);
		out.writeFloat(z);
	}

	public void readFields(DataInput in) throws IOException {
		x = in.readFloat();
		y = in.readFloat();
		z = in.readFloat();
	}

	public String toString() {
		return Float.toString(x) + ", " + Float.toString(y) + ", "
				+ Float.toString(z);
	}

	public float distanceFromOrigin() {
		return (float) Math.sqrt(x * x + y * y + z * z);
	}

	public int compareTo(Point3D other) {
		// Order points by their distance from the origin.
		float myDistance = this.distanceFromOrigin();
		float otherDistance = other.distanceFromOrigin();

		return Float.compare(myDistance, otherDistance);
	}

	public boolean equals(Object o) {
		// Check the type before casting to avoid a ClassCastException.
		if (!(o instanceof Point3D)) {
			return false;
		}
		Point3D other = (Point3D) o;

		return this.x == other.x && this.y == other.y && this.z == other.z;
	}

	public int hashCode() {
		return Float.floatToIntBits(x) ^ Float.floatToIntBits(y)
				^ Float.floatToIntBits(z);
	}

}
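Before wiring Point3D into a job, a quick way to sanity-check its Writable methods is a round trip through plain Java streams. The following is a minimal standalone sketch (the Point3DWritableTest class is mine, not part of the original post):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class Point3DWritableTest {
	public static void main(String[] args) throws IOException {
		Point3D original = new Point3D(3.5f, 12.7f, 9.0f);

		// Serialize the point exactly as Hadoop would during a shuffle.
		ByteArrayOutputStream bytes = new ByteArrayOutputStream();
		original.write(new DataOutputStream(bytes));

		// Deserialize into a fresh instance and compare.
		Point3D copy = new Point3D();
		copy.readFields(new DataInputStream(
				new ByteArrayInputStream(bytes.toByteArray())));

		System.out.println(original.equals(copy)); // expected: true
	}
}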

3. The custom Point3DInputFormat class, which plugs the Point3D type into the MapReduce programming model

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.LineReader;

public class Point3DInputFormat extends FileInputFormat<Text, Point3D> {
    
    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        // Process each file in a single split so records are never cut in half.
        return false;
    }
    @Override
    public RecordReader<Text, Point3D> createRecordReader(InputSplit inputsplit,
            TaskAttemptContext context) throws IOException, InterruptedException {
        return new Point3DRecordReader();
    }
    public static class Point3DRecordReader extends RecordReader<Text, Point3D> {

        private LineReader in;
        private Text lineKey;
        private Point3D lineValue;
        private Text line;

        @Override
        public void close() throws IOException {
            // Release the underlying file stream.
            if (in != null) {
                in.close();
            }
        }

        @Override
        public Text getCurrentKey() throws IOException, InterruptedException {
            return lineKey;
        }

        @Override
        public Point3D getCurrentValue() throws IOException,
                InterruptedException {
            return lineValue;
        }

        @Override
        public float getProgress() throws IOException, InterruptedException {
            // Progress is not tracked in this simple test reader
            // (see the note after this class).
            return 0.0f;
        }

        @Override
        public void initialize(InputSplit input, TaskAttemptContext context)
                throws IOException, InterruptedException {
            FileSplit split = (FileSplit) input;
            Configuration job = context.getConfiguration();
            Path file = split.getPath();
            FileSystem fs = file.getFileSystem(job);

            // Open the file backing this split and wrap it in a line reader.
            FSDataInputStream filein = fs.open(file);
            in = new LineReader(filein, job);

            line = new Text();
            lineKey = new Text();
            lineValue = new Point3D();
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            int linesize = in.readLine(line);
            if (linesize == 0) {
                // End of the split: no more records.
                return false;
            }

            String[] pieces = line.toString().split(",");
            if (pieces.length != 4) {
                throw new IOException("Invalid record received");
            }

            // Try to parse the floating point components of the value.
            float fx, fy, fz;
            try {
                fx = Float.parseFloat(pieces[1].trim());
                fy = Float.parseFloat(pieces[2].trim());
                fz = Float.parseFloat(pieces[3].trim());
            } catch (NumberFormatException nfe) {
                throw new IOException("Error parsing floating point value in record");
            }

            lineKey.set(pieces[0]);
            lineValue.set(fx, fy, fz);

            return true;
        }
    }
}
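The reader above always reports zero progress, which is harmless for a small test. If progress reporting mattered, one possible refinement (an assumption of mine, not in the original code) would be to keep the opened stream and the split length as fields of the record reader and report the fraction of bytes consumed:

        // Hypothetical extra fields for Point3DRecordReader, assigned in initialize():
        //   private FSDataInputStream filein;  // the stream opened on the file
        //   private long splitLength;          // split.getLength()

        @Override
        public float getProgress() throws IOException, InterruptedException {
            // Fraction of the (non-splittable) file consumed so far.
            return splitLength == 0 ? 1.0f
                    : Math.min(1.0f, filein.getPos() / (float) splitLength);
        }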

 

4. Writing the Mapper class; since this only tests the InputFormat for the custom Point3D type, no Reducer is needed

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;


public class Point3DMapper extends Mapper<Text, Point3D, Text, Point3D> {
	// Identity mapper: pass each (name, point) record straight through.
	@Override
	protected void map(Text key, Point3D value, Context context)
			throws IOException, InterruptedException {
		context.write(key, value);
	}
}
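This test uses Point3D only as a value, but because it implements WritableComparable it could also serve as a map output key, in which case the shuffle would sort records by distance from the origin via compareTo. A hypothetical variant (the Point3DKeyMapper name is mine, not from the original post):

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Emit the point as the key so the framework sorts records by
// distance from the origin (Point3D.compareTo).
public class Point3DKeyMapper extends Mapper<Text, Point3D, Point3D, Text> {
	@Override
	protected void map(Text key, Point3D value, Context context)
			throws IOException, InterruptedException {
		context.write(value, key);
	}
}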
 