commit 45846d9573
parent 38ae95ae97
Author: luoxiang
Date: 2019-05-22 00:18:50 +08:00

3 changed files with 187 additions and 0 deletions
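Both examples depend on the external spark-streaming-flume artifact, which is not bundled with Spark itself. A minimal sbt sketch, assuming Spark 2.4.x (the exact version number is illustrative):

libraryDependencies += "org.apache.spark" %% "spark-streaming-flume" % "2.4.3"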

PullBasedWordCount.scala (new file)

@@ -0,0 +1,29 @@
package com.heibaiying.flume
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.flume.FlumeUtils
/**
 * @author : heibaiying
 * Pull-based approach: Spark pulls data from a custom Flume sink using a custom receiver.
 */
object PullBasedWordCount {
def main(args: Array[String]): Unit = {
val sparkConf = new SparkConf()
val ssc = new StreamingContext(sparkConf, Seconds(5))
// 1. Create the input stream by polling the Flume SparkSink on hadoop001:8888
val flumeStream = FlumeUtils.createPollingStream(ssc, "hadoop001", 8888)
// 2. Word count: decode each event body, split on spaces, count, and print
flumeStream.map(flumeEvent => new String(flumeEvent.event.getBody.array()).trim)
  .flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).print()
ssc.start()
ssc.awaitTermination()
}
}
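For the pull-based approach, the Flume agent itself must run the custom SparkSink from the spark-streaming-flume-sink artifact (that jar, plus its Scala dependencies, has to be on the agent's classpath). A minimal agent sketch, assuming a netcat test source on port 44444; the agent, source, channel, and sink names are illustrative:

a1.sources = s1
a1.channels = c1
a1.sinks = k1

a1.sources.s1.type = netcat
a1.sources.s1.bind = localhost
a1.sources.s1.port = 44444
a1.sources.s1.channels = c1

a1.channels.c1.type = memory

# Custom sink that buffers events until the Spark application polls them
a1.sinks.k1.type = org.apache.spark.streaming.flume.sink.SparkSink
a1.sinks.k1.hostname = hadoop001
a1.sinks.k1.port = 8888
a1.sinks.k1.channel = c1

Because Spark initiates the connection in this mode, the agent and the Spark application can be started in either order.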

PushBasedWordCount.scala (new file)

@@ -0,0 +1,29 @@
package com.heibaiying.flume
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.flume.FlumeUtils
/**
 * @author : heibaiying
 * Push-based approach: Flume pushes data to a receiver started by Spark.
 */
object PushBasedWordCount {
def main(args: Array[String]): Unit = {
val sparkConf = new SparkConf()
val ssc = new StreamingContext(sparkConf, Seconds(5))
// 1. Create the input stream; this starts an Avro-compatible receiver listening on hadoop001:8888
val flumeStream = FlumeUtils.createStream(ssc, "hadoop001", 8888)
// 2. Word count: decode each event body, split on spaces, count, and print
flumeStream.map(flumeEvent => new String(flumeEvent.event.getBody.array()).trim)
  .flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).print()
ssc.start()
ssc.awaitTermination()
}
}
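For the push-based approach, the agent uses Flume's standard avro sink to push events to the receiver that createStream starts, so the Spark application must already be running (with a worker on hadoop001) before the agent delivers data. A minimal agent sketch, again assuming a netcat test source and illustrative names:

a1.sources = s1
a1.channels = c1
a1.sinks = k1

a1.sources.s1.type = netcat
a1.sources.s1.bind = localhost
a1.sources.s1.port = 44444
a1.sources.s1.channels = c1

a1.channels.c1.type = memory

# Standard avro sink pointing at the Spark receiver
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop001
a1.sinks.k1.port = 8888
a1.sinks.k1.channel = c1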