ETL数据清洗(只保留字段数大于11的行,其余过滤掉)

2)需求分析

需要在Map阶段对输入的数据根据规则进行过滤清洗。

3)实现代码

(1)编写LogMapper类

package com.atguigu.mapreduce.weblog;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;

import org.apache.hadoop.io.NullWritable;

import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Mapper;

 

public class LogMapper extends Mapper<LongWritable, Text, Text, NullWritable>{

  

   Text k = new Text();

  

   @Override

   protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

     

      // 1获取1行数据

      String line = value.toString();

     

      // 2解析日志

      boolean result = parseLog(line,context);

     

      // 3日志不合法退出

      if (!result) {

          return;

      }

     

      // 4设置key

      k.set(line);

     

      // 5写出数据

      context.write(k, NullWritable.get());

   }

 

   // 2解析日志

   private boolean parseLog(String line, Context context) {

 

      // 1截取

      String[] fields = line.split(" ");

     

      // 2日志长度大于11的为合法

      if (fields.length > 11) {

 

          //系统计数器

          context.getCounter("map", "true").increment(1);

          return true;

      }else {

          context.getCounter("map", "false").increment(1);

          return false;

      }

   }

}

(2)编写LogDriver类

package com.atguigu.mapreduce.weblog;

import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.fs.Path;

import org.apache.hadoop.io.NullWritable;

import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Job;

import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

 

public class LogDriver {

 

   public static void main(String[] args) throws Exception {

 

//输入输出路径需要根据自己电脑上实际的输入输出路径设置

        args = new String[] { "e:/input/inputlog", "e:/output1" };

 

      // 1获取job信息

      Configuration conf = new Configuration();

      Job job = Job.getInstance(conf);

 

      // 2加载jar

      job.setJarByClass(LogDriver.class);

 

      // 3关联map

      job.setMapperClass(LogMapper.class);

 

      // 4设置最终输出类型

      job.setOutputKeyClass(Text.class);

      job.setOutputValueClass(NullWritable.class);

 

      //设置reducetask个数为0

      job.setNumReduceTasks(0);

 

      // 5设置输入和输出路径

      FileInputFormat.setInputPaths(job, new Path(args[0]));

      FileOutputFormat.setOutputPath(job, new Path(args[1]));

 

      // 6提交

      job.waitForCompletion(true);

   }

}


版权声明:本文为weixin_56765170原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。