mapreduce

罗祥 2019-04-21 19:23:13 +08:00
parent df4c084334
commit 32c50d684e
15 changed files with 704 additions and 0 deletions


@@ -0,0 +1,55 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.heibaiying</groupId>
<artifactId>hadoop-word-count</artifactId>
<version>1.0</version>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
</plugins>
</build>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<hadoop.version>2.6.0-cdh5.15.2</hadoop.version>
</properties>
<!-- Configure the CDH repository -->
<repositories>
<repository>
<id>cloudera</id>
<url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
</repository>
</repositories>
<dependencies>
<!--Hadoop-client-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.8.1</version>
</dependency>
</dependencies>
</project>


@@ -0,0 +1,84 @@
package com.heibaiying;
import com.heibaiying.component.WordCountMapper;
import com.heibaiying.component.WordCountReducer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.net.URI;
/**
 * Assemble the job and submit it to the cluster for execution
 */
public class WordCountApp {
// For clarity the parameters are hard-coded here; in real development they can be passed in as external arguments
private static final String HDFS_URL = "hdfs://192.168.0.107:8020";
private static final String HADOOP_USER_NAME = "root";
public static void main(String[] args) throws Exception {
// The input path and output path are specified by external arguments
if (args.length < 2) {
System.out.println("Input and output paths are necessary!");
return;
}
// The Hadoop user name must be specified, otherwise creating directories on HDFS may throw a permission-denied exception
System.setProperty("HADOOP_USER_NAME", HADOOP_USER_NAME);
Configuration configuration = new Configuration();
// Specify the HDFS address
configuration.set("fs.defaultFS", HDFS_URL);
// Create a Job
Job job = Job.getInstance(configuration);
// Set the main class of the job
job.setJarByClass(WordCountApp.class);
// Set the Mapper and Reducer
job.setMapperClass(WordCountMapper.class);
job.setReducerClass(WordCountReducer.class);
// Set the key and value types of the Mapper output
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
// Set the key and value types of the Reducer output
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
// If the output directory already exists it must be deleted first, otherwise re-running the program throws an exception
FileSystem fileSystem = FileSystem.get(new URI(HDFS_URL), configuration, HADOOP_USER_NAME);
Path outputPath = new Path(args[1]);
if (fileSystem.exists(outputPath)) {
fileSystem.delete(outputPath, true);
}
// Set the input and output paths of the job
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, outputPath);
// Submit the job to the cluster and wait for it to finish; passing true prints the job's progress
boolean result = job.waitForCompletion(true);
// Close the FileSystem created above
fileSystem.close();
// Exit the JVM with a status code that reflects the job result
System.exit(result ? 0 : -1);
}
}


@@ -0,0 +1,87 @@
package com.heibaiying;
import com.heibaiying.component.WordCountMapper;
import com.heibaiying.component.WordCountReducer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.net.URI;
/**
 * Assemble the job (with a Combiner) and submit it to the cluster for execution
 */
public class WordCountCombinerApp {
// For clarity the parameters are hard-coded here; in real development they can be passed in as external arguments
private static final String HDFS_URL = "hdfs://192.168.0.107:8020";
private static final String HADOOP_USER_NAME = "root";
public static void main(String[] args) throws Exception {
// The input path and output path are specified by external arguments
if (args.length < 2) {
System.out.println("Input and output paths are necessary!");
return;
}
// The Hadoop user name must be specified, otherwise creating directories on HDFS may throw a permission-denied exception
System.setProperty("HADOOP_USER_NAME", HADOOP_USER_NAME);
Configuration configuration = new Configuration();
// Specify the HDFS address
configuration.set("fs.defaultFS", HDFS_URL);
// Create a Job
Job job = Job.getInstance(configuration);
// Set the main class of the job
job.setJarByClass(WordCountCombinerApp.class);
// Set the Mapper and Reducer
job.setMapperClass(WordCountMapper.class);
job.setReducerClass(WordCountReducer.class);
// Set the Combiner; the Reducer can be reused here because summing counts is associative and commutative
job.setCombinerClass(WordCountReducer.class);
// Set the key and value types of the Mapper output
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
// Set the key and value types of the Reducer output
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
// If the output directory already exists it must be deleted first, otherwise re-running the program throws an exception
FileSystem fileSystem = FileSystem.get(new URI(HDFS_URL), configuration, HADOOP_USER_NAME);
Path outputPath = new Path(args[1]);
if (fileSystem.exists(outputPath)) {
fileSystem.delete(outputPath, true);
}
// Set the input and output paths of the job
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, outputPath);
// Submit the job to the cluster and wait for it to finish; passing true prints the job's progress
boolean result = job.waitForCompletion(true);
// Close the FileSystem created above
fileSystem.close();
// Exit the JVM with a status code that reflects the job result
System.exit(result ? 0 : -1);
}
}


@@ -0,0 +1,95 @@
package com.heibaiying;
import com.heibaiying.component.CustomPartitioner;
import com.heibaiying.component.WordCountMapper;
import com.heibaiying.component.WordCountReducer;
import com.heibaiying.utils.WordCountDataUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.net.URI;
/**
 * Assemble the job (with a Combiner and a custom Partitioner) and submit it to the cluster for execution
 */
public class WordCountCombinerPartitionerApp {
// For clarity the parameters are hard-coded here; in real development they can be passed in as external arguments
private static final String HDFS_URL = "hdfs://192.168.0.107:8020";
private static final String HADOOP_USER_NAME = "root";
public static void main(String[] args) throws Exception {
// The input path and output path are specified by external arguments
if (args.length < 2) {
System.out.println("Input and output paths are necessary!");
return;
}
// The Hadoop user name must be specified, otherwise creating directories on HDFS may throw a permission-denied exception
System.setProperty("HADOOP_USER_NAME", HADOOP_USER_NAME);
Configuration configuration = new Configuration();
// Specify the HDFS address
configuration.set("fs.defaultFS", HDFS_URL);
// Create a Job
Job job = Job.getInstance(configuration);
// Set the main class of the job
job.setJarByClass(WordCountCombinerPartitionerApp.class);
// Set the Mapper and Reducer
job.setMapperClass(WordCountMapper.class);
job.setReducerClass(WordCountReducer.class);
// Set the Combiner; the Reducer can be reused here because summing counts is associative and commutative
job.setCombinerClass(WordCountReducer.class);
// Set the custom partitioning rule
job.setPartitionerClass(CustomPartitioner.class);
// Set the number of reduce tasks (one per word in WORD_LIST)
job.setNumReduceTasks(WordCountDataUtils.WORD_LIST.size());
// Set the key and value types of the Mapper output
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
// Set the key and value types of the Reducer output
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
// If the output directory already exists it must be deleted first, otherwise re-running the program throws an exception
FileSystem fileSystem = FileSystem.get(new URI(HDFS_URL), configuration, HADOOP_USER_NAME);
Path outputPath = new Path(args[1]);
if (fileSystem.exists(outputPath)) {
fileSystem.delete(outputPath, true);
}
// Set the input and output paths of the job
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, outputPath);
// Submit the job to the cluster and wait for it to finish; passing true prints the job's progress
boolean result = job.waitForCompletion(true);
// Close the FileSystem created above
fileSystem.close();
// Exit the JVM with a status code that reflects the job result
System.exit(result ? 0 : -1);
}
}


@@ -0,0 +1,16 @@
package com.heibaiying.component;
import com.heibaiying.utils.WordCountDataUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
/**
 * Custom Partitioner that partitions records by word
 */
public class CustomPartitioner extends Partitioner<Text, IntWritable> {
public int getPartition(Text text, IntWritable intWritable, int numPartitions) {
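// The partition index is the word's position in WORD_LIST, so each word is routed to its own reducer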
return WordCountDataUtils.WORD_LIST.indexOf(text.toString());
}
}


@@ -0,0 +1,23 @@
package com.heibaiying.component;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
 * Splits each line of input on the separator and emits a (word, 1) pair for every word
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
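// The input is tab-separated, matching the separator used by WordCountDataUtils when generating the sample data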
String[] words = value.toString().split("\t");
for (String word : words) {
context.write(new Text(word), new IntWritable(1));
}
}
}


@@ -0,0 +1,22 @@
package com.heibaiying.component;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
 * Performs the word-frequency count
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
@Override
protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
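// Sum all counts emitted for this word by the mappers (and by the combiner, when one is configured)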
int count = 0;
for (IntWritable value : values) {
count += value.get();
}
context.write(key, new IntWritable(count));
}
}


@@ -0,0 +1,91 @@
package com.heibaiying.utils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
/**
 * Generates mock data for the word-count examples
 */
public class WordCountDataUtils {
public static final List<String> WORD_LIST = Arrays.asList("Spark", "Hadoop", "HBase", "Storm", "Flink", "Hive");
/**
 * Generate mock word-frequency data
 *
 * @return the generated data
 */
private static String generateData() {
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 1000; i++) {
Collections.shuffle(WORD_LIST);
Random random = new Random();
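// Each generated line contains between 1 and WORD_LIST.size() tab-separated words taken from the shuffled list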
int endIndex = random.nextInt(WORD_LIST.size()) % (WORD_LIST.size()) + 1;
String line = StringUtils.join(WORD_LIST.toArray(), "\t", 0, endIndex);
builder.append(line).append("\n");
}
return builder.toString();
}
/**
 * Generate mock word-frequency data and write it to a local file
 *
 * @param outputPath output file path
 */
private static void generateDataToLocal(String outputPath) {
try {
java.nio.file.Path path = Paths.get(outputPath);
if (Files.exists(path)) {
Files.delete(path);
}
Files.write(path, generateData().getBytes(), StandardOpenOption.CREATE);
} catch (IOException e) {
e.printStackTrace();
}
}
/**
 * Generate mock word-frequency data and write it to HDFS
 *
 * @param hdfsUrl          HDFS address
 * @param user             Hadoop user name
 * @param outputPathString target path on HDFS
 */
private static void generateDataToHDFS(String hdfsUrl, String user, String outputPathString) {
FileSystem fileSystem = null;
try {
fileSystem = FileSystem.get(new URI(hdfsUrl), new Configuration(), user);
Path outputPath = new Path(outputPathString);
if (fileSystem.exists(outputPath)) {
fileSystem.delete(outputPath, true);
}
FSDataOutputStream out = fileSystem.create(outputPath);
out.write(generateData().getBytes());
out.flush();
out.close();
fileSystem.close();
} catch (Exception e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
//generateDataToLocal("input.txt");
generateDataToHDFS("hdfs://192.168.0.107:8020", "root", "/wordcount/input.txt");
}
}


@@ -0,0 +1,9 @@
log4j.rootLogger=INFO,CONSOLE
log4j.additivity.org.apache=false
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=INFO
log4j.appender.CONSOLE.layout.ConversionPattern=%d{yyyy-MM-dd HH\:mm\:ss} -%-4r [%t] %-5p %x - %m%n
log4j.appender.CONSOLE.Target=System.out
log4j.appender.CONSOLE.Encoding=UTF-8
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout


@@ -0,0 +1,42 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.heibaiying</groupId>
<artifactId>hdfs-java-api</artifactId>
<version>1.0</version>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<hadoop.version>2.6.0-cdh5.15.2</hadoop.version>
</properties>
<!-- Configure the CDH repository -->
<repositories>
<repository>
<id>cloudera</id>
<url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
</repository>
</repositories>
<dependencies>
<!--Hadoop-client-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>


@@ -0,0 +1,180 @@
package com.heibaiying.utils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URISyntaxException;
/**
 * HDFS utility class
 */
public class HdfsUtils {
private static final String HDFS_PATH = "hdfs://192.168.0.107:8020";
private static final String HDFS_USER = "root";
private static FileSystem fileSystem;
static {
try {
Configuration configuration = new Configuration();
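// A replication factor of 1 is suitable for a single-node test environment; production clusters normally keep the default of 3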
configuration.set("dfs.replication", "1");
fileSystem = FileSystem.get(new URI(HDFS_PATH), configuration, HDFS_USER);
} catch (IOException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
} catch (URISyntaxException e) {
e.printStackTrace();
}
}
public static FileSystem getFileSystem() {
return fileSystem;
}
/**
 * Create a directory, recursively if necessary
 *
 * @param path directory path
 * @return whether the creation succeeded
 */
public static boolean mkdir(String path) throws Exception {
return fileSystem.mkdirs(new Path(path));
}
/**
 * View the contents of a file
 *
 * @param path   file path
 * @param encode character encoding to use
 * @return the file contents as a string
 */
public static String text(String path, String encode) throws Exception {
FSDataInputStream inputStream = fileSystem.open(new Path(path));
return inputStreamToString(inputStream, encode);
}
/**
 * Create a file and write content to it
 *
 * @param path    file path
 * @param context content to write
 */
public static void createAndWrite(String path, String context) throws Exception {
FSDataOutputStream out = fileSystem.create(new Path(path));
out.writeUTF(context);
out.flush();
out.close();
}
/**
 * Rename a file
 *
 * @param oldPath old file path
 * @param newPath new file path
 * @return whether the rename succeeded
 */
public static boolean rename(String oldPath, String newPath) throws Exception {
return fileSystem.rename(new Path(oldPath), new Path(newPath));
}
/**
 * Copy a local file to HDFS
 *
 * @param localPath local file path
 * @param hdfsPath  target path on HDFS
 */
public static void copyFromLocalFile(String localPath, String hdfsPath) throws Exception {
fileSystem.copyFromLocalFile(new Path(localPath), new Path(hdfsPath));
}
/**
 * Download a file from HDFS
 *
 * @param hdfsPath  file path on HDFS
 * @param localPath local target path
 */
public static void copyToLocalFile(String hdfsPath, String localPath) throws Exception {
fileSystem.copyToLocalFile(new Path(hdfsPath), new Path(localPath));
}
/**
 * List the status of the files/directories under the given path
 *
 * @param path directory path
 * @return an array of file status information
 */
public static FileStatus[] listFiles(String path) throws Exception {
return fileSystem.listStatus(new Path(path));
}
/**
 * List the files under the given path, including their block locations
 *
 * @param path      directory or file path
 * @param recursive whether to descend into subdirectories
 * @return an iterator over the located file statuses
 */
public static RemoteIterator<LocatedFileStatus> listFilesRecursive(String path, boolean recursive) throws Exception {
return fileSystem.listFiles(new Path(path), recursive);
}
/**
 * View the block information of a file
 *
 * @param path file path
 * @return an array of block locations
 */
public static BlockLocation[] getFileBlockLocations(String path) throws Exception {
FileStatus fileStatus = fileSystem.getFileStatus(new Path(path));
return fileSystem.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
}
/**
 * Delete a file or directory
 *
 * @param path path to delete
 * @return whether the deletion succeeded
 */
public static boolean delete(String path) throws Exception {
return fileSystem.delete(new Path(path), true);
}
/**
 * Convert an input stream to a string using the specified encoding
 *
 * @param inputStream input stream
 * @param encode      character encoding; defaults to UTF-8 when empty
 * @return the stream contents as a string, or null if reading fails
 */
private static String inputStreamToString(InputStream inputStream, String encode) {
try {
if (encode == null || ("".equals(encode))) {
encode = "utf-8";
}
BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, encode));
StringBuilder builder = new StringBuilder();
String str = "";
while ((str = reader.readLine()) != null) {
builder.append(str).append("\n");
}
return builder.toString();
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
}

Binary file not shown (new image, 14 KiB)

Binary file not shown (new image, 26 KiB)

Binary file not shown (new image, 25 KiB)

Binary file not shown (new image, 50 KiB)