Basic.scala
package dataframe

import org.apache.spark.sql.SparkSession

//
// Create a DataFrame based on an RDD of case class objects and perform some basic
// DataFrame operations. The DataFrame can instead be created more directly from
// the standard building blocks -- an RDD[Row] and a schema -- see
// FromRowsAndSchema.scala for how to do that; a minimal sketch of that approach
// also appears at the end of this file.
//
object Basic {
  case class Cust(id: Integer, name: String, sales: Double, discount: Double, state: String)

  def main(args: Array[String]): Unit = {
    val spark =
      SparkSession.builder()
        .appName("DataFrame-Basic")
        .master("local[4]")
        .getOrCreate()

    import spark.implicits._

    // create a sequence of case class objects
    // (we defined the case class above)
    val custs = Seq(
      Cust(1, "Widget Co", 120000.00, 0.00, "AZ"),
      Cust(2, "Acme Widgets", 410500.00, 500.00, "CA"),
      Cust(3, "Widgetry", 410500.00, 200.00, "CA"),
      Cust(4, "Widgets R Us", 410500.00, 0.0, "CA"),
      Cust(5, "Ye Olde Widgete", 500.00, 0.0, "MA")
    )

    // make it an RDD and convert to a DataFrame
    val customerDF = spark.sparkContext.parallelize(custs, 4).toDF()
println("*** toString() just gives you the schema")
println(customerDF.toString())
println("*** It's better to use printSchema()")
customerDF.printSchema()
println("*** show() gives you neatly formatted data")
customerDF.show()
println("*** use select() to choose one column")
customerDF.select("id").show()
println("*** use select() for multiple columns")
customerDF.select("sales", "state").show()
println("*** use filter() to choose rows")
customerDF.filter($"state".equalTo("CA")).show()
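
    // the same filter, written with the === operator on Column -- the more
    // common idiom in Spark Scala code
    customerDF.filter($"state" === "CA").show()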

    // shut the session down cleanly
    spark.stop()
  }
}
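
// A minimal sketch of the more direct construction mentioned in the header
// comment: building a DataFrame from an RDD[Row] and an explicit schema.
// The object name and its sample rows are illustrative only -- see
// FromRowsAndSchema.scala for the full example.
object FromRowsAndSchemaSketch {
  import org.apache.spark.sql.Row
  import org.apache.spark.sql.types._

  def main(args: Array[String]): Unit = {
    val spark =
      SparkSession.builder()
        .appName("DataFrame-FromRowsAndSchema-Sketch")
        .master("local[4]")
        .getOrCreate()

    // declare the schema explicitly instead of deriving it from a case class
    val schema = StructType(Seq(
      StructField("id", IntegerType, nullable = true),
      StructField("name", StringType, nullable = true),
      StructField("sales", DoubleType, nullable = true),
      StructField("discount", DoubleType, nullable = true),
      StructField("state", StringType, nullable = true)
    ))

    // rows carry no type information of their own; the schema supplies it
    val rows = Seq(
      Row(1, "Widget Co", 120000.00, 0.00, "AZ"),
      Row(2, "Acme Widgets", 410500.00, 500.00, "CA")
    )

    // createDataFrame takes an RDD[Row] plus a StructType schema
    val df = spark.createDataFrame(spark.sparkContext.parallelize(rows), schema)
    df.printSchema()
    df.show()

    spark.stop()
  }
}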