The task: given order records of the form (order ID, product), compute for every pair of products how many orders contain both, and report that pair count alongside each product's individual occurrence count. The tables below illustrate the input, the intermediate outputs, and the final result; the counts in the statistics tables evidently come from a larger input than the two sample orders shown here. A small in-memory sketch of the same logic follows the tables.

Raw data

Order ID    Product
1           a
1           b
1           c
2           a
2           d
2           c

Single-product counts

Product    Occurrences
a          5
b          5
c          6
d          4
e          1

Product combinations (the pairs generated from a single order, here order 1)

a-b    1
a-c    1
b-c    1

Computed result (pair, pair count, product A, count of A, product B, count of B)

a-b    3    a    5    b    5
a-c    3    a    5    c    6
a-d    3    a    5    d    4
b-c    4    b    5    c    6
b-d    1    b    5    d    4
b-e    1    b    5    e    1
c-d    3    c    6    d    4
c-e    1    c    6    e    1
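Before looking at the MapReduce implementation, here is a minimal in-memory sketch of the same pair-counting logic, using only the two sample orders above (so the printed counts are smaller than the ones in the tables, which come from a larger input). The class name PairCountSketch is illustrative and not part of the original project.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

public class PairCountSketch {
    public static void main(String[] args) {
        // order id -> distinct products in that order; TreeSet keeps them sorted,
        // mirroring the TreeSet used later in DoubleReduce
        Map<Integer, Set<String>> orders = new TreeMap<Integer, Set<String>>();
        orders.put(1, new TreeSet<String>(Arrays.asList("a", "b", "c")));
        orders.put(2, new TreeSet<String>(Arrays.asList("a", "d", "c")));

        Map<String, Integer> singleCounts = new TreeMap<String, Integer>(); // product -> occurrences
        Map<String, Integer> pairCounts = new TreeMap<String, Integer>();   // "x-y" -> co-occurrences

        for (Set<String> items : orders.values()) {
            List<String> sorted = new ArrayList<String>(items);
            for (int i = 0; i < sorted.size(); i++) {
                String a = sorted.get(i);
                Integer c = singleCounts.get(a);
                singleCounts.put(a, c == null ? 1 : c + 1);
                // each unordered pair is emitted exactly once because the list is sorted
                for (int j = i + 1; j < sorted.size(); j++) {
                    String pair = a + "-" + sorted.get(j);
                    Integer p = pairCounts.get(pair);
                    pairCounts.put(pair, p == null ? 1 : p + 1);
                }
            }
        }

        System.out.println(singleCounts); // {a=2, b=1, c=2, d=1}
        System.out.println(pairCounts);   // {a-b=1, a-c=2, a-d=1, b-c=1, c-d=1}
    }
}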
Algorithm implementation

The pipeline consists of three chained MapReduce jobs. The "single" job counts how many times each product occurs, caches the counts in Redis (host nstorma), and also writes them to HDFS. The "double" job regroups the raw records by order ID and, for every order, emits each unordered product pair once. The "doubleRef" job sums the pair occurrences and joins each pair with the two individual product counts read back from Redis.
package com.anyec.join;

import java.io.IOException;
import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;

public class RefProducts {

    /** Job 1 mapper: for every input line "orderId product", emit (product, 1). */
    public static class SingleMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String ivalue = value.toString();
            String[] keyvalue = ivalue.split(" ");
            context.write(new Text(keyvalue[1]), new Text("1"));
        }
    }

    /** Job 1 reducer: sum each product's occurrences, cache the count in Redis
     *  (so that job 3 can look it up) and also write it to HDFS. */
    public static class SingleReduce extends Reducer<Text, Text, Text, Text> {
        JedisPool pool;
        Jedis jedis;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            pool = new JedisPool("nstorma"); // Redis host
            jedis = pool.getResource();
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            pool.returnResource(jedis); // pre-3.x Jedis API; newer versions use jedis.close()
            pool.destroy();
        }

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            int count = 0;
            for (Text v : values) {
                count = count + Integer.parseInt(v.toString());
            }
            jedis.set(key.toString(), count + "");
            context.write(key, new Text(count + ""));
        }
    }

    /** Job 2 mapper: re-emit the raw records keyed by order ID, i.e. (orderId, product). */
    public static class DoubleMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String ivalue = value.toString();
            String[] keyvalue = ivalue.split(" ");
            context.write(new Text(keyvalue[0]), new Text(keyvalue[1]));
        }
    }

    /** Job 2 reducer: for each order, collect the distinct products in a sorted set and
     *  emit every unordered pair exactly once as ("x-y", 1). */
    public static class DoubleReduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            Set<String> set = new TreeSet<String>();
            for (Text t : values) {
                set.add(t.toString());
            }
            int i = 0;
            for (String s : set) {
                int j = 0;
                for (String s2 : set) {
                    if (i < j) { // only the upper triangle, so "a-b" is emitted but "b-a" is not
                        context.write(new Text(s + "-" + s2), new Text("1"));
                    }
                    j++;
                }
                i++;
            }
        }
    }

    /** Job 3 mapper: reads job 2's output. TextOutputFormat separates key and value with
     *  a tab, so the line is split on "\t" (splitting on a space would fail here). */
    public static class DoubleRefMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String ivalue = value.toString();
            String[] keyvalue = ivalue.split("\t");
            context.write(new Text(keyvalue[0]), new Text(keyvalue[1]));
        }
    }

    /** Job 3 reducer: sum the pair occurrences and join the pair with the two individual
     *  product counts cached in Redis by SingleReduce. */
    public static class DoubleRefReduce extends Reducer<Text, Text, Text, Text> {
        JedisPool pool;
        Jedis jedis;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            pool = new JedisPool("nstorma");
            jedis = pool.getResource();
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            pool.returnResource(jedis);
            pool.destroy();
        }

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            int count = 0;
            for (Text t : values) {
                count = count + Integer.parseInt(t.toString());
            }
            String[] iv = key.toString().split("-");
            int a = Integer.parseInt(jedis.get(iv[0]));
            int b = Integer.parseInt(jedis.get(iv[1]));
            context.write(key, new Text(count + "\t" + iv[0] + "\t" + a + "\t" + iv[1] + "\t" + b));
        }
    }

    /** Unused placeholder for a fourth job (outpath4 below is reserved but never written). */
    public static class RefProductMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
        }
    }

    /** Unused placeholder for a fourth job. */
    public static class RefProductReduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
        }
    }

    public static void main(String[] args) throws IOException,
            ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        // System.setProperty("hadoop.home.dir", "D:/soft/hadoop-2.2.0");
        // System.setProperty("HADOOP_HOME", "D:/soft/hadoop-2.2.0");
        // System.setProperty("HADOOP_USER_NAME", "hadoop");
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        // if (otherArgs.length != 2) {
        //     System.err.println("Usage: com.anyec.join.RefProducts <in> <out>");
        //     System.exit(2);
        // }
        String basePath = conf.get("fs.defaultFS");
        conf.set("mapreduce.framework.name", "local"); // run locally; switch to "yarn" for a cluster
        // conf.set("mapreduce.framework.name", "yarn");
        // conf.set("yarn.resourcemanager.address", "master");
        // conf.set("mapred.remote.os", "Linux");
        conf.set("mapreduce.application.classpath",
                "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*");
        if (basePath.endsWith("/"))
            basePath = basePath + "mr";
        else
            basePath = basePath + "/" + "mr";
        String inpath = basePath + "/in/ref/";
        // String inpath = "file:///E:/stock/dpdata";
        String outpath = basePath + "/out/ref/single";        // job 1: per-product counts
        String outpath2 = basePath + "/out/ref/double";       // job 2: one line per pair occurrence
        String outpath3 = basePath + "/out/ref/doubleref";    // job 3: pair counts joined with product counts
        String outpath4 = basePath + "/out/ref/doublecount";  // reserved for the unimplemented fourth job
        FileSystem fs = FileSystem.get(conf);
        fs.delete(new Path(outpath), true);
        fs.delete(new Path(outpath2), true);
        fs.delete(new Path(outpath3), true);
        fs.delete(new Path(outpath4), true);
        // HdfsClient.rm(conf, outpath);
        // if (f.exists()) {
        //     com.anyec.common.DelOutPut.del(f);
        // }

        // Job 1: count each product's occurrences.
        Job job = new Job(conf, "single");
        job.setJarByClass(RefProducts.class);
        job.setUser("hadoop");
        job.setMapperClass(RefProducts.SingleMapper.class);
        job.setReducerClass(RefProducts.SingleReduce.class);
        // job.setNumReduceTasks(100);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // job.setOutputFormatClass(AlphabetOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(inpath));
        FileOutputFormat.setOutputPath(job, new Path(outpath));
        job.waitForCompletion(true);
        // System.exit(job.waitForCompletion(true) ? 0 : 1);

        // Job 2: emit every product pair per order.
        job = new Job(conf, "double");
        job.setJarByClass(RefProducts.class);
        job.setUser("hadoop");
        job.setMapperClass(RefProducts.DoubleMapper.class);
        job.setReducerClass(RefProducts.DoubleReduce.class);
        // job.setNumReduceTasks(100);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // job.setOutputFormatClass(AlphabetOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(inpath));
        FileOutputFormat.setOutputPath(job, new Path(outpath2));
        job.waitForCompletion(true);

        // Job 3: sum pair counts and join them with the per-product counts.
        job = new Job(conf, "doubleRef");
        job.setJarByClass(RefProducts.class);
        job.setUser("hadoop");
        job.setMapperClass(RefProducts.DoubleRefMapper.class);
        job.setReducerClass(RefProducts.DoubleRefReduce.class);
        // job.setNumReduceTasks(100);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // job.setOutputFormatClass(AlphabetOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(outpath2));
        FileOutputFormat.setOutputPath(job, new Path(outpath3));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
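A note on the design: the join between pair counts and individual product counts goes through Redis, so every reducer needs network access to the nstorma host and job 3 silently depends on the state left behind by job 1. If that external dependency is undesirable, one alternative is to load the "single" job's output directly from HDFS in setup(). The sketch below is an assumption-laden illustration, not part of the original code: the class name DoubleRefReduceNoRedis and the "ref.single.path" job property (which the driver would set to job 1's output directory) are hypothetical.

package com.anyec.join;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/** Hypothetical replacement for DoubleRefReduce that reads the "single" job's output
 *  from HDFS instead of Redis. */
public class DoubleRefReduceNoRedis extends Reducer<Text, Text, Text, Text> {

    private final Map<String, Integer> singleCounts = new HashMap<String, Integer>();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        Configuration conf = context.getConfiguration();
        Path singleDir = new Path(conf.get("ref.single.path")); // assumed job property
        FileSystem fs = singleDir.getFileSystem(conf);
        for (FileStatus status : fs.listStatus(singleDir)) {
            if (!status.getPath().getName().startsWith("part-")) {
                continue; // skip _SUCCESS and other non-data files
            }
            BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(status.getPath())));
            try {
                String line;
                while ((line = reader.readLine()) != null) {
                    String[] kv = line.split("\t"); // TextOutputFormat writes product<TAB>count
                    if (kv.length == 2) {
                        singleCounts.put(kv[0], Integer.valueOf(kv[1]));
                    }
                }
            } finally {
                reader.close();
            }
        }
    }

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        int count = 0;
        for (Text t : values) {
            count = count + Integer.parseInt(t.toString());
        }
        String[] iv = key.toString().split("-");
        Integer a = singleCounts.get(iv[0]);
        Integer b = singleCounts.get(iv[1]);
        context.write(key, new Text(count + "\t" + iv[0] + "\t" + a + "\t" + iv[1] + "\t" + b));
    }
}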