
Spark 2: Loading and Saving Files, and Converting Data Files into DataFrames
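First, upload the sample data file to HDFS and list the target directory to confirm the upload: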

hadoop fs -put /home/wangxiao/data/ml/Affairs.csv /datafile/wangxiao/

hadoop fs -ls -R /datafile

drwxr-xr-x   - wangxiao supergroup          0 2016-10-15 10:46 /datafile/wangxiao

-rw-r--r--   3 wangxiao supergroup      16755 2016-10-15 10:46 /datafile/wangxiao/Affairs.csv

-rw-r--r--   3 wangxiao supergroup      16755 2016-10-13 21:48 /datafile/wangxiao/Affairs.txt

// affairs: frequency of extramarital affairs during the past year
// gender: gender
// age: age
// yearsmarried: number of years married
// children: whether the couple has children
// religiousness: degree of religiousness (5-point scale; 1 = anti-religious, 5 = very religious)
// education: level of education
// occupation: occupation (Gordon's 7-category classification, reverse numbered)
// rating: self-rating of the marriage (5-point scale; 1 = very unhappy, 5 = very happy)

0,male,37,10,no,3,18,7,4
0,female,27,4,no,4,14,6,4
0,female,32,15,yes,1,12,1,4
0,male,57,15,yes,5,18,6,5
0,male,22,0.75,no,2,17,6,3
0,female,32,1.5,no,2,17,5,5
0,female,22,0.75,no,2,12,1,3
0,male,57,15,yes,2,14,4,4
0,female,32,15,yes,4,16,1,2
0,male,22,1.5,no,4,14,4,5
0,male,37,15,yes,2,20,7,2
0,male,27,4,yes,4,18,6,4
0,male,47,15,yes,5,17,6,4
0,female,22,1.5,no,2,17,5,4
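The Scala program below reads the CSV into a DataFrame, assigns column names, runs a SQL subquery against a temporary view, and writes a column selection back to HDFS: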

import org.apache.spark.sql.SparkSession

import org.apache.spark.sql.DataFrame

import org.apache.spark.rdd.RDD

object ML1 {
  def main(args: Array[String]) {

  val spark = SparkSession.builder()
    .appName("Spark SQL basic example")
    .config("spark.some.config.option", "some-value")
    .getOrCreate()

    // For implicit conversions like converting RDDs to DataFrames    

  import spark.implicits._        

  // Create a DataFrame

  // val data1:DataFrame=spark.read.csv("hdfs://ns1/datafile/wangxiao/Affairs.csv")        

  val data2: DataFrame = spark.read.format("csv").load("hdfs://ns1/datafile/wangxiao/Affairs.csv")

  val df = data2.toDF("affairs", "gender", "age", "yearsmarried", "children",
    "religiousness", "education", "occupation", "rating")

  df.printSchema()
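  // NOTE (addition, not from the original post): format("csv").load(...) reads every
  // column as a string, so printSchema() above reports all fields as string. Two
  // sketched ways to obtain typed columns -- let Spark infer them, or cast explicitly
  // (the val names `typed` and `casted` are illustrative):
  val typed = spark.read.option("inferSchema", "true")
    .csv("hdfs://ns1/datafile/wangxiao/Affairs.csv")
    .toDF("affairs", "gender", "age", "yearsmarried", "children",
          "religiousness", "education", "occupation", "rating")
  typed.printSchema()
  val casted = df.withColumn("affairs", $"affairs".cast("int"))
    .withColumn("age", $"age".cast("int"))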

  // Specify column names and types

  /*
  case class Affairs(affairs: Int, gender: String, age: Int,
                     yearsmarried: Double, children: String, religiousness: Int,
                     education: Double, occupation: Double, rating: Int)

  val res = data2.map { r =>
    Affairs(r(0).toString.toInt, r(1).toString, r(2).toString.toInt,
            r(3).toString.toDouble, r(4).toString, r(5).toString.toInt,
            r(6).toString.toDouble, r(7).toString.toDouble, r(8).toString.toInt)
  }
  res.printSchema()
  */
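  // A hedged alternative (addition, not from the original post): supply an explicit
  // schema when reading, so the DataFrame carries the intended types from the start.
  // Field names and types follow the column description at the top of this post.
  import org.apache.spark.sql.types._
  val schema = StructType(Seq(
    StructField("affairs", IntegerType),
    StructField("gender", StringType),
    StructField("age", IntegerType),
    StructField("yearsmarried", DoubleType),
    StructField("children", StringType),
    StructField("religiousness", IntegerType),
    StructField("education", DoubleType),
    StructField("occupation", DoubleType),
    StructField("rating", IntegerType)))
  val dfTyped = spark.read.schema(schema).csv("hdfs://ns1/datafile/wangxiao/Affairs.csv")
  dfTyped.printSchema()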

  /*
  // Create an RDD
  val data: RDD[String] = spark.sparkContext.textFile("hdfs://ns1/datafile/wangxiao/Affairs.txt")

  case class Affairs(affairs: Int, gender: String, age: Int,
                     yearsmarried: Double, children: String, religiousness: Int,
                     education: Double, occupation: Double, rating: Int)

  // Convert the RDD to a DataFrame
  val df = data.map(_.split(" ")).map { line =>
    Affairs(line(0).toInt, line(1).trim, line(2).toInt,
            line(3).toDouble, line(4).trim, line(5).toInt,
            line(6).toDouble, line(7).toDouble, line(8).toInt)
  }.toDF()
  */
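  // The same RDD-to-DataFrame conversion, sketched for the comma-separated file
  // (addition; assumes Affairs.csv has no header row, matching the sample above).
  // Mapping to a tuple avoids the case class, so toDF works via spark.implicits._:
  val rawRdd: RDD[String] = spark.sparkContext.textFile("hdfs://ns1/datafile/wangxiao/Affairs.csv")
  val dfFromRdd = rawRdd.map(_.split(","))
    .map(a => (a(0).toInt, a(1), a(2).toInt, a(3).toDouble, a(4),
               a(5).toInt, a(6).toDouble, a(7).toDouble, a(8).toInt))
    .toDF("affairs", "gender", "age", "yearsmarried", "children",
          "religiousness", "education", "occupation", "rating")
  dfFromRdd.printSchema()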

  // Create a temporary view
  df.createOrReplaceTempView("Affairs")

  // Subquery

  //val df1 = spark.sql("SELECT * FROM Affairs WHERE age BETWEEN 20 AND 25")    

  val df1 = spark.sql("SELECT gender, age, rating FROM (SELECT * FROM Affairs WHERE age BETWEEN 20 AND 25) t")

  df1.show  
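  // Equivalent query via the DataFrame API (addition, not from the original post).
  // `age` was loaded as a string here, so cast it before the range comparison:
  val df2 = df.filter($"age".cast("int").between(20, 25))
    .select("gender", "age", "rating")
  df2.show()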

  // Save the DataFrame to a file

  df.select("gender", "age","education").write.format("csv").save("hdfs://ns1/datafile/wangxiao/data123.csv")
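  // NOTE (addition, not from the original post): save() writes a directory of part
  // files rather than a single CSV, and fails if the target path already exists.
  // A sketch with an explicit save mode and a header row (the output path below is
  // hypothetical):
  df.select("gender", "age", "education")
    .write.mode("overwrite").option("header", "true")
    .csv("hdfs://ns1/datafile/wangxiao/data123_header.csv")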

  }
}

hadoop fs -ls -R /datafile
drwxr-xr-x   - wangxiao supergroup          0 2016-10-15 11:43 /datafile/wangxiao
-rw-r--r--   3 wangxiao supergroup      16755 2016-10-15 10:46 /datafile/wangxiao/Affairs.csv
-rw-r--r--   3 wangxiao supergroup      16755 2016-10-13 21:48 /datafile/wangxiao/Affairs.txt
drwxr-xr-x   - wangxiao supergroup          0 2016-10-15 11:43 /datafile/wangxiao/data123.csv
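Note that data123.csv shows up as a directory (drwxr-xr-x): Spark writes its output as a directory of part files. To load it back, point spark.read at the directory (a minimal sketch, assuming the same session):

val saved = spark.read.csv("hdfs://ns1/datafile/wangxiao/data123.csv")
saved.show(5)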


Original post: http://www.cnblogs.com/wwxbi/p/5963031.html