1. Prepare the data file employee.txt, one record per line in the form employeeId,name,departmentId (the employeeId field is left empty here):
,Gong Shaocheng,1
,Li Dachao,1
,Qiu Xin,1
,Cheng Jiangzhong,2
,Wo Binggang,3
Put the data into HDFS:
[root@jfp3- spark-studio]# hdfs dfs -put employee.txt /user/spark_studio
2. Start the Spark shell
[root@jfp3- spark-1.0.-bin-hadoop2]# ./bin/spark-shell --master spark://192.168.0.71:7077
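Once the shell comes up, the pre-built SparkContext sc is already connected to the standalone master. As a quick sanity check (illustrative, not part of the original session), the master URL can be printed from the REPL and should match the URL passed on the command line:
// Quick check that the shell is talking to the intended cluster.
// sc is the SparkContext created by spark-shell itself.
println(sc.master)    // expected: spark://192.168.0.71:7077
println(sc.appName)   // expected: Spark shell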
3. Write the script
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
import sqlContext._

case class Employee(employeeId: String, name: String, departmentId: Int)

// Create an RDD of Employee objects and register it as a table.
val employees = sc.textFile("hdfs://jfp3-1:8020/user/spark_studio/employee.txt").map(_.split(",")).map(p => Employee(p(0), p(1), p(2).trim.toInt))
employees.registerAsTable("employee")

// SQL statements can be run by using the sql methods provided by sqlContext.
val fsis = sql("SELECT name FROM employee WHERE departmentId = 1")

// The results of SQL queries are SchemaRDDs and support all the normal RDD operations.
// The columns of a row in the result can be accessed by ordinal.
fsis.map(t => "Name: " + t(0)).collect().foreach(println)
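The comments above note that the SchemaRDD returned by sql() supports all the normal RDD operations and that columns are accessed by ordinal. A minimal sketch of what that looks like on the same employee table (the query and variable name are illustrative, not part of the original script):
// The result of sql() is an RDD of Row objects, so ordinary RDD
// operations such as count, map and filter apply directly.
val deptOne = sql("SELECT name, departmentId FROM employee WHERE departmentId = 1")
println("rows: " + deptOne.count())
// Columns are accessed by ordinal: t(0) is name, t(1) is departmentId.
deptOne.map(t => t(0) + " works in department " + t(1)).collect().foreach(println)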
4. Run it
scala> val sqlContext = new org.apache.spark.sql.SQLContext(sc)
sqlContext: org.apache.spark.sql.SQLContext = org.apache.spark.sql.SQLContext@...

scala> import sqlContext._
import sqlContext._

scala> case class Employee(employeeId: String, name: String, departmentId: Int)
defined class Employee

scala> val employees = sc.textFile("hdfs://jfp3-1:8020/user/spark_studio/employee.txt").map(_.split(",")).map(p => Employee(p(0), p(1), p(2).trim.toInt))
INFO MemoryStore: Block broadcast_0 stored as values to memory (estimated size 135.5 KB, free 294.8 MB)
employees: org.apache.spark.rdd.RDD[Employee] = MappedRDD[...] at map at <console>

scala> employees.registerAsTable("employee")

scala> val fsis = sql("SELECT name FROM employee WHERE departmentId = 1")
fsis: org.apache.spark.sql.SchemaRDD =
SchemaRDD[...] at RDD at SchemaRDD.scala
== Query Plan ==
Project [name#...]
Filter (departmentId#... = 1)
ExistingRdd [employeeId#...,name#...,departmentId#...], MapPartitionsRDD[...] at mapPartitions at basicOperators.scala

scala> fsis.map(t => "Name: " + t(0)).collect().foreach(println)
...
INFO DAGScheduler: Stage (collect at <console>) finished in 1.284 s
INFO SparkContext: Job finished: collect at <console>, took 1.386154401 s
Name: Gong Shaocheng
Name: Li Dachao
Name: Qiu Xin
5. Save the data in Parquet format and run SQL on it
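The transcript below only shows the Parquet file being read back; the step that writes it was not captured. With the Spark 1.0 API it was presumably something along these lines (the path is the one used in the read step; the call itself is an assumption, not part of the recorded session):
// Presumed write step (not in the recorded session): with import sqlContext._
// in scope, the RDD[Employee] is implicitly converted to a SchemaRDD,
// which can be written out as a Parquet file.
employees.saveAsParquetFile("hdfs://jfp3-1:8020/user/spark_studio/employee.parquet")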
scala> val parquetFile = sqlContext.parquetFile("hdfs://jfp3-1:8020/user/spark_studio/employee.parquet")
parquetFile: org.apache.spark.sql.SchemaRDD =
SchemaRDD[...] at RDD at SchemaRDD.scala
== Query Plan ==
ParquetTableScan [employeeId#...,name#...,departmentId#...], (ParquetRelation hdfs://jfp3-1:8020/user/spark_studio/employee.parquet), None

scala> parquetFile.registerAsTable("parquetFile")

scala> val telcos = sql("SELECT name FROM parquetFile WHERE departmentId = 3")
INFO MemoryStore: Block broadcast_1 stored as values to memory (estimated size 176.3 KB, free 294.6 MB)
telcos: org.apache.spark.sql.SchemaRDD =
SchemaRDD[...] at RDD at SchemaRDD.scala
== Query Plan ==
Project [name#...]
Filter (departmentId#... = 3)
ParquetTableScan [name#...,departmentId#...], (ParquetRelation hdfs://jfp3-1:8020/user/spark_studio/employee.parquet), None

scala> telcos.collect().foreach(println)
...
INFO DAGScheduler: Stage (collect at <console>) finished in 2.177 s
INFO SparkContext: Job finished: collect at <console>, took 2.210887848 s
[Wo Binggang]
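The bracketed line above is the default string form of a result Row. To print it in the same "Name: ..." style as step 4, the row can be mapped to a string first (illustrative, not part of the original session):
// Access the single projected column by ordinal and format it,
// just as in step 4.
telcos.map(t => "Name: " + t(0)).collect().foreach(println)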
6. DSL syntax support
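The transcript jumps straight to collecting `all`, so its definition is missing. Judging from the query plan in the log below (a projection on name over a filter on departmentId >= some threshold), it was presumably built with the language-integrated DSL roughly as follows (the threshold value of 1 is an assumption):
// Presumed DSL definition of `all` (not captured in the transcript).
// Symbols such as 'departmentId and 'name refer to columns, and
// where/select build the same plan as the equivalent SQL statement.
val all = employees.where('departmentId >= 1).select('name)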
scala> all.collect().foreach(println)
...
Project [name#...]
Filter (departmentId#... >= ...)
ExistingRdd [employeeId#...,name#...,departmentId#...], MapPartitionsRDD[...] at mapPartitions at basicOperators.scala
...
INFO DAGScheduler: Stage (collect at <console>) finished in 0.039 s
INFO SparkContext: Job finished: collect at <console>, took 0.052556716 s
[Gong Shaocheng]
[Li Dachao]
[Qiu Xin]
[Cheng Jiangzhong]
[Wo Binggang]
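Filters can also be chained in the DSL, which reads naturally for range conditions. A small illustrative sketch on the same data (not part of the original session; the bounds are arbitrary):
// Chain two where clauses to express a range on departmentId,
// then project the name column, just like the single-filter query above.
val middle = employees.where('departmentId >= 1).where('departmentId <= 2).select('name)
middle.collect().foreach(println)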