Computing the Median and Standard Deviation with MapReduce

mac · 2025-11-14

To compute the median and standard deviation with MapReduce, first define a custom value type that implements the Writable interface, so both results can be stored and emitted together. Without further ado, here is the code:

package com.standard1;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Custom value type holding the two statistics. Note that the reducer
// below stores the median in the "ave" field.
public class IntPair implements Writable {
    private float ave = 0;
    private float deviation = 0;

    // Writable types need a public no-argument constructor, because
    // Hadoop creates instances by reflection during deserialization.
    public IntPair() {
    }

    public IntPair(float ave, float deviation) {
        this.ave = ave;
        this.deviation = deviation;
    }

    public float getAve() {
        return ave;
    }

    public float getDeviation() {
        return deviation;
    }

    public void setAve(float ave) {
        this.ave = ave;
    }

    public void setDeviation(float deviation) {
        this.deviation = deviation;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeFloat(ave);
        out.writeFloat(deviation);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Must read the fields in the same order write() emits them.
        ave = in.readFloat();
        deviation = in.readFloat();
    }

    @Override
    public String toString() {
        return "median: " + ave + "\t" + "stddev: " + deviation;
    }
}
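A quick way to sanity-check a custom Writable is to serialize an instance and read it back. The sketch below does exactly that; the class name RoundTrip and the use of the java.io stream classes are my own choices for illustration, not part of the original post:

package com.standard1;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RoundTrip {
    public static void main(String[] args) throws IOException {
        IntPair original = new IntPair(3.5f, 1.2f);

        // Serialize with write(), as Hadoop does when the type crosses the wire.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Deserialize into a fresh instance with readFields().
        IntPair copy = new IntPair();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy);  // prints: median: 3.5    stddev: 1.2
    }
}

If the printed values match what was written, write() and readFields() agree on the field order, which is the one invariant Hadoop's serialization depends on.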

That is the custom value type. Next comes the MapReduce program itself:

package com.standard1;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

public class Map1 {

    // Input lines are tab-separated; column 0 is the group key and
    // column 2 is the integer value to aggregate.
    public static class map1 extends Mapper<LongWritable, Text, IntWritable, IntWritable> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String[] list = line.split("\t");
            context.write(new IntWritable(Integer.parseInt(list[0])),
                    new IntWritable(Integer.parseInt(list[2])));
        }
    }

    public static class reduce1 extends Reducer<IntWritable, IntWritable, IntWritable, IntPair> {
        private IntPair outValue = new IntPair();

        @Override
        public void reduce(IntWritable key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            ArrayList<Float> li = new ArrayList<>();
            float sum = 0;
            int count = 0;
            for (IntWritable val : values) {
                li.add(Float.valueOf(val.get()));
                sum += val.get();
                count++;
            }
            float aver = sum / count;

            // Values arrive grouped by key but not sorted by value,
            // so sort before picking the middle element(s).
            Collections.sort(li);
            float median;
            if (count % 2 == 0) {
                // Even count: average of the two middle values.
                median = (li.get(count / 2 - 1) + li.get(count / 2)) / 2;
            } else {
                // Odd count: the single middle value.
                median = li.get(count / 2);
            }

            // Population standard deviation: sqrt of the mean squared deviation.
            float u = 0.0f;
            for (Float val : li) {
                u += (val - aver) * (val - aver);
            }
            outValue.setDeviation((float) Math.sqrt(u / count));
            outValue.setAve(median);  // the "ave" field carries the median here
            context.write(key, outValue);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(Map1.class);
        job.setMapperClass(map1.class);
        job.setReducerClass(reduce1.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntPair.class);

        // Running in local mode: delete the output directory if it exists,
        // since Hadoop refuses to write into an existing one.
        File file = new File("E:\\data1\\out");
        if (file.exists()) {
            FileUtils.deleteDirectory(file);
        }
        FileInputFormat.setInputPaths(job, new Path("E:\\data1"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\data1\\out"));

        // waitForCompletion may only be called once per job.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
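For reference, this is the input shape the job assumes: tab-separated lines where column 0 holds the group key, column 2 holds the integer value, and column 1 is ignored by the mapper. The rows below are made-up sample data (the post does not include its dataset):

1	a	10
1	b	20
1	c	30
2	d	5
2	e	7

For key 1 the reducer emits the median 20.0 and the population standard deviation sqrt(((10-20)^2 + (20-20)^2 + (30-20)^2) / 3) ≈ 8.16; for key 2 the even-count branch applies, giving the median (5+7)/2 = 6.0 and a standard deviation of 1.0.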

I only started working with MapReduce about a month ago, so some rough edges are inevitable. Please bear with me.
