JobSubmit.class

package cn.reduce;

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class JobSubmit {

    public static void main(String[] args)  {
        try {
         
        // Set a JVM system property so the Job client accesses HDFS with the "root" user identity
        System.setProperty("HADOOP_USER_NAME", "root");
        
    
        Configuration conf = new Configuration();
        //1. Set the default file system the job will access at runtime
        conf.set("fs.defaultFS", "hdfs://hadoop01:9000");
        //2. Set which framework the job runs on: submit it to YARN
        conf.set("mapreduce.framework.name", "yarn");
        
        //3. Set the hostname of the YARN ResourceManager
        conf.set("yarn.resourcemanager.hostname", "hadoop01");
        //4. When this submission client is run from Windows, this cross-platform submission parameter is required
        conf.set("mapreduce.app-submission.cross-platform", "true");
        Job job = Job.getInstance(conf);
        // Set the jar location; to run from Windows, package the job as a jar and place it at E:\hadoop\wc.jar
        job.setJar("E:\\hadoop\\wc.jar");
        //1. Wrap parameter: jar location; use this line instead when running on Linux
//        job.setJarByClass(JobSubmit.class);
        //2. Wrap parameters: the Mapper and Reducer implementation classes this job will call
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(ReduceMapper.class);
        //3. Wrap parameters: the key/value types of the data produced by the Mapper and the Reducer
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        Path output = new Path("/wordcount/output");
        // Delete the output directory if it already exists, otherwise the job submission will fail
        FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), conf, "root");
        if (fs.exists(output)) {
            fs.delete(output, true);
        }
        //4. Wrap parameters: the input dataset path and the final output path for this job
        FileInputFormat.setInputPaths(job, new Path("/wordcount/input"));
        FileOutputFormat.setOutputPath(job, output);
        //5. Wrap parameter: the number of reduce tasks to start
        job.setNumReduceTasks(2);
        //6. Submit the job to YARN and wait for it to complete
        boolean res = job.waitForCompletion(true);
        System.exit(res?0:1);
        }catch (Exception e) {
            e.printStackTrace();
        }
        
    }
}
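
A note on local testing: the driver above hard-codes a YARN/HDFS setup. If you only want to verify the mapper and reducer without a cluster, a minimal sketch like the following (not part of the original code; the e:/wctest paths are placeholders to adjust) runs the same job with Hadoop's local job runner against the local file system:

package cn.reduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LocalJobSubmit {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Run the MapReduce framework in-process instead of on YARN
        conf.set("mapreduce.framework.name", "local");
        // Read input and write output on the local file system instead of HDFS
        conf.set("fs.defaultFS", "file:///");

        Job job = Job.getInstance(conf);
        job.setJarByClass(LocalJobSubmit.class);
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(ReduceMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Placeholder local paths -- adjust to your machine; the output directory must not exist yet
        FileInputFormat.setInputPaths(job, new Path("e:/wctest/input"));
        FileOutputFormat.setOutputPath(job, new Path("e:/wctest/output"));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}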

ReduceMapper.class

package cn.reduce;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Text, IntWritable are the key/value types produced by WordcountMapper
 * @author hgz
 *
 */
public class ReduceMapper extends Reducer<Text, IntWritable, Text, IntWritable>{
    
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values,
            Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
        // Sum all counts for this word, then emit the total once after the loop
        Iterator<IntWritable> iterator = values.iterator();
        int count = 0;
        while (iterator.hasNext()) {
            IntWritable next = iterator.next();
            count += next.get();
        }
        context.write(key, new IntWritable(count));
    }
}
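
Because word counts are just summed, the aggregation is associative and commutative, so this same Reducer can also be registered as a combiner to pre-aggregate counts on the map side and cut down shuffle traffic. The extra driver line below is a suggestion, not part of the original post:

        // In JobSubmit, next to job.setReducerClass(ReduceMapper.class):
        // reuse the reducer as a map-side combiner (safe here because summing is associative)
        job.setCombinerClass(ReduceMapper.class);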

WordcountMapper.class

package cn.reduce;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * KEYIN is the type of the key the map task reads: the starting byte offset of a line (Long).
 * VALUEIN is the type of the value the map task reads: the content of the line (String).
 * KEYOUT is the key type of the key/value results returned by the user-defined map method;
 * in the wordcount logic we return the word (String).
 * VALUEOUT is the value type of the results returned by the user-defined map method;
 * in the wordcount logic we return an integer count.
 *
 * In MapReduce, the data produced by map must be shipped to reduce, so it has to use Hadoop's
 * serializable types: Long -> LongWritable, String -> Text, Integer -> IntWritable, Float -> FloatWritable
 *
 * @author hgz
 *
 */
public class WordcountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    

    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
            throws IOException, InterruptedException {
        // Split the line into words and emit (word, 1) for each one
        String line = value.toString();
        String[] split = line.split(" ");
        for (String newKey : split) {
            context.write(new Text(newKey), new IntWritable(1));
        }
    }
}
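
One caveat on the split: line.split(" ") only breaks on single spaces, so tabs or consecutive spaces produce empty-string "words" that get counted. A more tolerant variant of the map body (a sketch, not from the original article) splits on any run of whitespace and skips blanks:

        String line = value.toString();
        // split on any run of whitespace instead of a single literal space
        for (String word : line.split("\\s+")) {
            if (!word.isEmpty()) {
                context.write(new Text(word), new IntWritable(1));
            }
        }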