For the last couple of days I have been working on integrating Hadoop Streaming with HBase so that a C program can be run against HBase data. Yesterday, in Eclipse, I wrote my own TextTableInputFormat classes implementing Hadoop's InputFormat interface to read from HBase, and they worked fine when tested with a MapReduce job I wrote myself. The class below is the abstract base of that HBase InputFormat; it uses the old org.apache.hadoop.mapred API (the one Hadoop Streaming accepts for -inputformat) and emits Text keys and values.
package org.cp.hadoop.hbase.mapreduce;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableInputFormat;
import org.apache.hadoop.hbase.mapred.TableSplit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
public abstract class TextTableInputFormatBase implements
        InputFormat<Text, Text>, Configurable {

    final Log LOG = LogFactory.getLog(TextTableInputFormatBase.class);

    /** Holds the details for the internal scanner. */
    private Scan scan = null;
    /** The table to scan. */
    private HTable table = null;

    public InputSplit[] getSplits(JobConf job, int numSplits)
            throws IOException {
        if (table == null) {
            throw new IOException("No table was provided.");
        }
        Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
        if (keys == null || keys.getFirst() == null
                || keys.getFirst().length == 0) {
            throw new IOException("Expecting at least one region.");
        }
        int count = 0;
        InputSplit[] splits = new InputSplit[keys.getFirst().length];
        for (int i = 0; i < keys.getFirst().length; i++) {
            if (!includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
                continue;
            }
            String regionLocation = table.getRegionLocation(keys.getFirst()[i])
                    .getServerAddress().getHostname();
            byte[] startRow = scan.getStartRow();
            byte[] stopRow = scan.getStopRow();
            // determine if the given start and stop keys fall into the region
            if ((startRow.length == 0 || keys.getSecond()[i].length == 0
                    || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0)
                    && (stopRow.length == 0
                    || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) {
                byte[] splitStart = startRow.length == 0
                        || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0
                        ? keys.getFirst()[i] : startRow;
                byte[] splitStop = (stopRow.length == 0
                        || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0)
                        && keys.getSecond()[i].length > 0
                        ? keys.getSecond()[i] : stopRow;
                InputSplit split = new TableSplit(table.getTableName(),
                        splitStart, splitStop, regionLocation);
                splits[i] = split;
                if (LOG.isDebugEnabled()) {
                    LOG.debug("getSplits: split -> " + (count++) + " -> " + split);
                }
            }
        }
        return splits;
    }
    public RecordReader<Text, Text> getRecordReader(InputSplit split,
            JobConf job, Reporter reporter) throws IOException {
        if (table == null) {
            throw new IOException("Cannot create a record reader because of a"
                    + " previous error. Please look at the previous log lines from"
                    + " the task's full log for more details.");
        }
        TableInputFormat inputFormat = new TableInputFormat();
        return new TextTableRecordReader(
                inputFormat.getRecordReader(split, job, reporter));
    }
    protected boolean includeRegionInSplit(final byte[] startKey,
            final byte[] endKey) {
        return true;
    }

    protected HTable getHTable() {
        return this.table;
    }

    protected void setHTable(HTable table) {
        this.table = table;
    }

    public Scan getScan() {
        if (this.scan == null) {
            this.scan = new Scan();
        }
        return scan;
    }

    public void setScan(Scan scan) {
        this.scan = scan;
    }

    public abstract String formatRowResult(Result row);

    public class TextTableRecordReader implements RecordReader<Text, Text> {

        private RecordReader<ImmutableBytesWritable, Result> tableRecordReader;

        public TextTableRecordReader(
                RecordReader<ImmutableBytesWritable, Result> reader) {
            tableRecordReader = reader;
        }

        public void close() throws IOException {
            tableRecordReader.close();
        }

        public Text createKey() {
            return new Text("");
        }

        public Text createValue() {
            return new Text("");
        }

        public long getPos() throws IOException {
            return tableRecordReader.getPos();
        }

        public float getProgress() throws IOException {
            return tableRecordReader.getProgress();
        }

        public boolean next(Text key, Text value) throws IOException {
            Result row = new Result();
            boolean hasNext = tableRecordReader.next(
                    new ImmutableBytesWritable(key.getBytes()), row);
            if (hasNext) {
                key.set(row.getRow());
                value.set(formatRowResult(row));
            }
            return hasNext;
        }
    }
}
The class above reads HBase rows one at a time and turns each row into a Text key/value pair. The concrete subclass below, StringTableInputFormat, reads the table name and scan settings from the job configuration and formats each Result as plain text.
package org.cp.hadoop.hbase.mapreduce;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;
public class StringTableInputFormat extends TextTableInputFormatBase {

    /** Job parameter that specifies the input table. */
    public static final String INPUT_TABLE = "hbase.mapreduce.inputtable";
    /**
     * Base-64 encoded scanner. All other SCAN_ confs are ignored if this is
     * specified. See {@link TableMapReduceUtil#convertScanToString(Scan)} for
     * more details.
     */
    public static final String SCAN = "hbase.mapreduce.scan";
    /** Column family to scan. */
    public static final String SCAN_COLUMN_FAMILY = "hbase.mapreduce.scan.column.family";
    /** Space-delimited list of columns to scan. */
    public static final String SCAN_COLUMNS = "hbase.mapreduce.scan.columns";
    /** The timestamp used to filter columns with a specific timestamp. */
    public static final String SCAN_TIMESTAMP = "hbase.mapreduce.scan.timestamp";
    /**
     * The starting timestamp used to filter columns with a specific range of
     * versions.
     */
    public static final String SCAN_TIMERANGE_START = "hbase.mapreduce.scan.timerange.start";
    /**
     * The ending timestamp used to filter columns with a specific range of
     * versions.
     */
    public static final String SCAN_TIMERANGE_END = "hbase.mapreduce.scan.timerange.end";
    /** The maximum number of versions to return. */
    public static final String SCAN_MAXVERSIONS = "hbase.mapreduce.scan.maxversions";
    /** Set to false to disable server-side caching of blocks for this scan. */
    public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks";
    /** The number of rows for caching that will be passed to scanners. */
    public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows";

    /** The configuration. */
    private Configuration conf = null;

    @Override
    public Configuration getConf() {
        return conf;
    }
    Scan convertStringToScan(String base64) throws IOException {
        ByteArrayInputStream bis = new ByteArrayInputStream(Base64.decode(base64));
        DataInputStream dis = new DataInputStream(bis);
        Scan scan = new Scan();
        scan.readFields(dis);
        return scan;
    }

    @Override
    public void setConf(Configuration configuration) {
        this.conf = configuration;
        String tableName = conf.get(INPUT_TABLE);
        try {
            setHTable(new HTable(new Configuration(conf), tableName));
        } catch (Exception e) {
            LOG.error(StringUtils.stringifyException(e));
        }
        Scan scan = null;
        if (conf.get(SCAN) != null) {
            try {
                scan = this.convertStringToScan(conf.get(SCAN));
            } catch (IOException e) {
                LOG.error("An error occurred.", e);
            }
        } else {
            try {
                scan = new Scan();
                if (conf.get(SCAN_COLUMNS) != null) {
                    scan.addColumns(conf.get(SCAN_COLUMNS));
                }
                if (conf.get(SCAN_COLUMN_FAMILY) != null) {
                    scan.addFamily(Bytes.toBytes(conf.get(SCAN_COLUMN_FAMILY)));
                }
                if (conf.get(SCAN_TIMESTAMP) != null) {
                    scan.setTimeStamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
                }
                if (conf.get(SCAN_TIMERANGE_START) != null
                        && conf.get(SCAN_TIMERANGE_END) != null) {
                    scan.setTimeRange(
                            Long.parseLong(conf.get(SCAN_TIMERANGE_START)),
                            Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
                }
                if (conf.get(SCAN_MAXVERSIONS) != null) {
                    scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
                }
                if (conf.get(SCAN_CACHEDROWS) != null) {
                    scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
                }
                scan.setCacheBlocks(conf.getBoolean(SCAN_CACHEBLOCKS, false));
            } catch (Exception e) {
                LOG.error(StringUtils.stringifyException(e));
            }
        }
        setScan(scan);
    }

    @Override
    public String formatRowResult(Result row) {
        StringBuilder builder = new StringBuilder();
        for (KeyValue kv : row.list()) {
            if (builder.length() > 0) {
                builder.append("\n"); // separate cells so they do not run together
            }
            builder.append(new String(kv.getRow())).append(" ")
                    .append(new String(kv.getFamily())).append(":")
                    .append(new String(kv.getQualifier())).append(" ")
                    .append(kv.getTimestamp()).append(" ")
                    .append(new String(kv.getValue()));
        }
        return builder.toString();
    }
}
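With formatRowResult above, every cell of a Result is rendered as "rowkey family:qualifier timestamp value", one cell per line, and that text is what an external program eventually receives on stdin when this InputFormat drives a Hadoop Streaming job. As a rough illustration only (not from the original post; the file name, buffer sizes and output format are my own choices), a C program could split such a line back into its fields like this:

/* parse_cell.c - illustrative only: split one "rowkey family:qualifier
 * timestamp value" line, as produced by formatRowResult() above. */
#include <stdio.h>

int main(void) {
    char line[8192];
    while (fgets(line, sizeof(line), stdin) != NULL) {
        char row[1024], column[1024], value[4096];
        long long timestamp;
        /* the trailing %[^\n] keeps any spaces inside the cell value */
        if (sscanf(line, "%1023s %1023s %lld %4095[^\n]",
                   row, column, &timestamp, value) == 4) {
            printf("row=%s column=%s ts=%lld value=%s\n",
                   row, column, timestamp, value);
        }
    }
    return 0;
}

Compiling it with gcc -o parse_cell parse_cell.c and piping a hand-written sample line into it is an easy way to check the format before submitting a real job.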
On the Java side, the InputFormat can also be exercised directly with the following simple WordCount MapReduce job:
package com.cp.hbase.mapreduce;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.cp.hadoop.hbase.mapreduce.StringTableInputFormat;
public class WordCount {

    public static class TokenizerMapper extends
            Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    static String convertScanToString(Scan scan) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(out);
        scan.write(dos);
        return Base64.encodeBytes(out.toByteArray());
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes("author"), Bytes.toBytes("nickname"));
        scan.addColumn(Bytes.toBytes("article"), Bytes.toBytes("tags"));
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setInputFormatClass(StringTableInputFormat.class);
        HBaseConfiguration.addHbaseResources(job.getConfiguration());
        job.getConfiguration().set(TableInputFormat.INPUT_TABLE, "blog");
        job.getConfiguration().set(TableInputFormat.SCAN, convertScanToString(scan));
        // FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path("/home/cp/output"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
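Coming back to the goal from the first paragraph, running a C program through Hadoop Streaming needs nothing further on the Java side: the jar containing the two classes above goes on the job's classpath, StringTableInputFormat is passed to -inputformat, and the table (and, if needed, the scan) properties are supplied with -D. The mapper below is only a sketch of what the C side could look like (the file names, jar names and the wc_reducer program are placeholders I made up, not something from the post): it tokenizes whatever Streaming writes to its stdin and emits "word<TAB>1" pairs, mirroring the Java TokenizerMapper.

/* wc_mapper.c - sketch of a Streaming word-count mapper in C.
 *
 * A possible invocation, roughly (jar names and paths are placeholders):
 *   hadoop jar hadoop-streaming.jar \
 *       -D hbase.mapreduce.inputtable=blog \
 *       -libjars texttableinputformat.jar \
 *       -inputformat org.cp.hadoop.hbase.mapreduce.StringTableInputFormat \
 *       -input ignored -output /home/cp/output \
 *       -mapper wc_mapper -file wc_mapper \
 *       -reducer wc_reducer -file wc_reducer
 * Streaming normally requires -input, but with this InputFormat the splits
 * come from the HBase regions, so the path itself is never read; wc_reducer
 * (not shown) would just sum the counts for each word.
 */
#include <stdio.h>
#include <string.h>

int main(void) {
    char line[8192];
    /* Streaming writes "key<TAB>value" lines to stdin; split everything on
     * whitespace, like the StringTokenizer in the Java TokenizerMapper. */
    while (fgets(line, sizeof(line), stdin) != NULL) {
        char *saveptr = NULL;
        for (char *token = strtok_r(line, " \t\r\n", &saveptr);
                token != NULL;
                token = strtok_r(NULL, " \t\r\n", &saveptr)) {
            /* text before the first tab becomes the key Streaming sorts on */
            printf("%s\t1\n", token);
        }
    }
    return 0;
}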