Configuration files under etc/hadoop
1. vi core-site.xml
<?xml version="2.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://localhost:9000</value>
    </property>
</configuration>
2. vi mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>mapred.job.tracker</name>
        <value>localhost:9001</value>
    </property>
</configuration>
3. vi hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.name.dir</name>
        <value>/home/hdfs/name</value>
    </property>
    <property>
        <name>dfs.data.dir</name>
        <value>/home/hdfs/data</value>
    </property>
</configuration>
4. vi hadoop-env.sh
export JAVA_HOME=/opt/jdk1.8.0_65   # point JAVA_HOME at the installed JDK
5. Start Hadoop
The start scripts are under sbin:
./start-all.sh
5.1 jps — check the running daemons (there should be five):
15509 ResourceManager
14808 NameNode
15241 SecondaryNameNode
14974 DataNode
15679 NodeManager
6. Stop: ./stop-all.sh
HDFS shell commands
hadoop fs -mkdir /user/trunk
hadoop fs -ls /user
hadoop fs -lsr /user (recursive listing)
hadoop fs -put test.txt /user/trunk
hadoop fs -put test.txt . (copies into the current HDFS directory, which must be created first)
hadoop fs -get /user/trunk/test.txt . (copies into the current local directory)
hadoop fs -cat /user/trunk/test.txt
hadoop fs -tail /user/trunk/test.txt (shows the last 1 KB of the file)
hadoop fs -rm /user/trunk/test.txt
hadoop fs -help ls (shows the help for the ls command)
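The same operations are also available from Java through the FileSystem API. A minimal sketch (the class name and paths are only examples, and it assumes the single-node setup configured in core-site.xml above):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsShellEquivalents {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same NameNode address as fs.default.name in core-site.xml
        conf.set("fs.default.name", "hdfs://localhost:9000");
        FileSystem fs = FileSystem.get(conf);
        fs.mkdirs(new Path("/user/trunk"));                                              // hadoop fs -mkdir /user/trunk
        fs.copyFromLocalFile(new Path("test.txt"), new Path("/user/trunk/test.txt"));    // hadoop fs -put test.txt /user/trunk
        fs.copyToLocalFile(new Path("/user/trunk/test.txt"), new Path("test.copy.txt")); // hadoop fs -get /user/trunk/test.txt .
        fs.delete(new Path("/user/trunk/test.txt"), false);                              // hadoop fs -rm /user/trunk/test.txt
        fs.close();
    }
}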
Java code for the WordCount example
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordcountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // key is the byte offset of the line in the input file, value is the line itself
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split the line on spaces and emit (word, 1) for every token
        String[] words = value.toString().split(" ");
        for (String word : words) {
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordcountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // key is a word, values are all the 1s emitted for that word by the mappers
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        // Emit (word, total count)
        context.write(key, new IntWritable(sum));
    }
}
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(WordcountMapper.class);
        // The reducer also works as a combiner, since summing counts is associative and commutative
        job.setCombinerClass(WordcountReducer.class);
        job.setReducerClass(WordcountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // args[0] is the HDFS input path; args[1] is the output path and must not exist yet
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
Required library: hadoop-core-0.20.2.jar
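To package and run the job (a sketch — the jar name and HDFS paths below are only examples): compile the three classes with hadoop-core-0.20.2.jar on the classpath, bundle them into wordcount.jar, and submit:
hadoop jar wordcount.jar WordCount /user/trunk /user/trunk/output
The output directory must not already exist; FileOutputFormat refuses to overwrite an existing directory.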