Writing a MapReduce program that reads data from HDFS and sorts it by month and sales volume
Sure. Here is a simple MapReduce program that totals the sales per month from data stored in HDFS and outputs the months ranked by total sales:
```java
import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SalesSort {

    public static class SalesMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        private final IntWritable sales = new IntWritable();
        private final Text month = new Text();
        private final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
        private final SimpleDateFormat monthFormat = new SimpleDateFormat("MM");

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Each input line looks like "2017-01-01,100": sales date, sales amount
            String[] fields = value.toString().split(",");
            if (fields.length < 2) {
                return; // skip malformed lines
            }
            Date date;
            try {
                date = dateFormat.parse(fields[0].trim());
            } catch (ParseException e) {
                return; // skip lines whose date cannot be parsed
            }
            int sale = Integer.parseInt(fields[1].trim());
            // Emit (month, sales) so the reducer can total the sales per month
            month.set(monthFormat.format(date));
            sales.set(sale);
            context.write(month, sales);
        }
    }

    public static class SalesReducer extends Reducer<Text, IntWritable, NullWritable, Text> {

        // Keeps the months with the largest totals, ordered by total sales ascending.
        // Note: the total is the map key, so two months with exactly the same total
        // would overwrite each other.
        private final TreeMap<Integer, String> salesMap = new TreeMap<Integer, String>();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // Sum all sales for this month
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            salesMap.put(sum, key.toString());
            // Keep only the 10 largest totals
            if (salesMap.size() > 10) {
                salesMap.remove(salesMap.firstKey());
            }
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            // Emit "month \t totalSales" in ascending order of total sales
            for (Map.Entry<Integer, String> entry : salesMap.entrySet()) {
                context.write(NullWritable.get(), new Text(entry.getValue() + "\t" + entry.getKey()));
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "Sales Sort");
        job.setJarByClass(SalesSort.class);
        job.setMapperClass(SalesMapper.class);
        job.setReducerClass(SalesReducer.class);
        // A single reducer is required so the top-10 ranking covers all months
        job.setNumReduceTasks(1);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
```
The input to this MapReduce program is a CSV file of sales data in the following format:
```
2017-01-01,100
2017-02-01,200
2017-01-02,150
2017-02-02,250
……
```
Each line contains two fields: the first is the sales date and the second is the sales amount.
In the Mapper, we parse the sales date down to its month, use the sales amount as the value, and emit one key-value pair per record.
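For the sample rows shown above, the emitted pairs would look like this (the month string is the key, the sales amount the value):
```
("01", 100)
("02", 200)
("01", 150)
("02", 250)
```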
In the Reducer, we use a TreeMap to keep the 10 months with the largest total sales. In the cleanup method, we write those months out in ascending order of total sales (a tie-tolerant variant is sketched below).
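One caveat: because the total itself is the TreeMap key, two months with exactly the same total would overwrite each other. A minimal sketch of a tie-tolerant reducer, here called `TieAwareSalesReducer` (a hypothetical name; it could be swapped in via `job.setReducerClass`), keeps a list of months per total:
```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical drop-in replacement for SalesReducer: months that share the same
// total are kept in a list instead of overwriting each other in the TreeMap.
public class TieAwareSalesReducer extends Reducer<Text, IntWritable, NullWritable, Text> {

    // total sales -> all months that reached that total, smallest total first
    private final TreeMap<Integer, List<String>> salesMap = new TreeMap<Integer, List<String>>();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        List<String> months = salesMap.get(sum);
        if (months == null) {
            months = new ArrayList<String>();
            salesMap.put(sum, months);
        }
        months.add(key.toString());
        if (salesMap.size() > 10) {
            salesMap.remove(salesMap.firstKey()); // drop the smallest total
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Still emits "month \t totalSales" in ascending order of total sales
        for (Map.Entry<Integer, List<String>> entry : salesMap.entrySet()) {
            for (String month : entry.getValue()) {
                context.write(NullWritable.get(), new Text(month + "\t" + entry.getKey()));
            }
        }
    }
}
```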
Finally, the results are written to a file in HDFS.
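Assuming the class has been packaged into a jar (the jar name and HDFS paths below are placeholders), the job could be submitted and its output inspected like this:
```
hadoop jar sales-sort.jar SalesSort /input/sales.csv /output/sales-sort
hdfs dfs -cat /output/sales-sort/part-r-00000
```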