import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.SparkContext
import org.apache.hadoop.hbase.client._

/**
 * Spark reading from and writing to HBase
 **/
object SparkOnHBase {

  def convertScanToString(scan: Scan) = {
    val proto = ProtobufUtil.toScan(scan)
    Base64.encodeBytes(proto.toByteArray)
  }
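
  // Note: TableInputFormat cannot take a Scan object directly; it reads a
  // Base64-encoded, protobuf-serialized Scan from the TableInputFormat.SCAN
  // property of the job configuration, which is what this helper produces.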

  def main(args: Array[String]) {
    val sc = new SparkContext("local", "SparkOnHBase")
    val conf = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.property.clientPort", "2181")
    conf.set("hbase.zookeeper.quorum", "master")

    // ======Save RDD to HBase========
    // step 1: JobConf setup
    val jobConf = new JobConf(conf, this.getClass)
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, "user")

    // step 2: rdd mapping to table
    // An HBase table's schema typically looks like:
    //   row   cf:col_1   cf:col_2
    // whereas in Spark we operate on RDDs of tuples such as (1,"lilei",14) and
    // (2,"hanmei",18). We therefore need to turn RDD[(uid:Int, name:String, age:Int)]
    // into RDD[(ImmutableBytesWritable, Put)], which is what convert below does.
    def convert(triple: (Int, String, Int)) = {
      val p = new Put(Bytes.toBytes(triple._1))
      p.addColumn(Bytes.toBytes("basic"), Bytes.toBytes("name"), Bytes.toBytes(triple._2))
      p.addColumn(Bytes.toBytes("basic"), Bytes.toBytes("age"), Bytes.toBytes(triple._3))
      (new ImmutableBytesWritable, p)
    }
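
    // Note: Bytes.toBytes on an Int yields a 4-byte big-endian row key, which
    // Bytes.toInt reverses on the read path below. The empty
    // ImmutableBytesWritable key is fine here: the mapred TableOutputFormat
    // writes only the Put value and ignores the key.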

    // step 3: read RDD data from somewhere and convert
    val rawData = List((1, "lilei", 14), (2, "hanmei", 18), (3, "someone", 38))
    val localData = sc.parallelize(rawData).map(convert)

    // step 4: use `saveAsHadoopDataset` to save RDD to HBase
    localData.saveAsHadoopDataset(jobConf)
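    // Note: saveAsHadoopDataset goes through the old `mapred` API, which is why
    // the write path uses the `mapred` TableOutputFormat plus a JobConf. The
    // target table ("user" with column family "basic") must already exist;
    // TableOutputFormat does not create it (see the creation sketch after the
    // listing).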
    // =================================

    // ======Load RDD from HBase========
    // use `newAPIHadoopRDD` to load RDD from HBase
    // Read rows straight out of HBase into an RDD[K, V] that Spark can operate on.
    // Set the table to query.
    conf.set(TableInputFormat.INPUT_TABLE, "user")
    // Add a filter: age greater than or equal to 18.
    val scan = new Scan()
    scan.setFilter(new SingleColumnValueFilter("basic".getBytes, "age".getBytes,
      CompareOp.GREATER_OR_EQUAL, Bytes.toBytes(18)))
    conf.set(TableInputFormat.SCAN, convertScanToString(scan))
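
    // Note: the filter compares the stored bytes with a BinaryComparator, which
    // matches numeric order here because non-negative ints share the same
    // big-endian encoding. By default rows missing "basic:age" also pass the
    // filter; SingleColumnValueFilter.setFilterIfMissing(true) would exclude them.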

    val usersRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    val count = usersRDD.count()
    println("Users RDD Count:" + count)
    usersRDD.cache()
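
    // A sketch of an alternative to the foreach below: map the
    // (ImmutableBytesWritable, Result) pairs into plain Scala tuples so the
    // usual RDD operations (filter, join, ...) apply.
    val users = usersRDD.map { case (_, result) =>
      (Bytes.toInt(result.getRow),
        Bytes.toString(result.getValue("basic".getBytes, "name".getBytes)),
        Bytes.toInt(result.getValue("basic".getBytes, "age".getBytes)))
    }
    users.take(10).foreach(println)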

    // Iterate over the results and print them.
    usersRDD.foreach { case (_, result) =>
      val key = Bytes.toInt(result.getRow)
      val name = Bytes.toString(result.getValue("basic".getBytes, "name".getBytes))
      val age = Bytes.toInt(result.getValue("basic".getBytes, "age".getBytes))
      println("Row key:" + key + " Name:" + name + " Age:" + age)
    }
    // =================================
  }
}
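
The listing assumes that the `user` table with column family `basic` already exists. Below is a minimal creation sketch, assuming the classic HBaseAdmin API from the HBase 0.98/1.x era this code targets (the hbase shell equivalent is `create 'user', 'basic'`):

import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.HBaseAdmin

object CreateUserTable {
  def main(args: Array[String]): Unit = {
    val conf = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.property.clientPort", "2181")
    conf.set("hbase.zookeeper.quorum", "master") // same quorum as the listing
    val admin = new HBaseAdmin(conf)
    try {
      // Create the table only if it is not already there.
      if (!admin.tableExists("user")) {
        val desc = new HTableDescriptor(TableName.valueOf("user"))
        desc.addFamily(new HColumnDescriptor("basic"))
        admin.createTable(desc)
      }
    } finally {
      admin.close()
    }
  }
}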