spark操作hive方式(scala)

spark操作hive方式(scala)

第一種方式:

/**
 * Approach 1: query Hive directly over JDBC (HiveServer2 on port 10000).
 *
 * Loads the Hive JDBC driver, opens a connection, runs a 2-row sample query
 * against `diagbot.ord_lis_trend`, and prints the first column of each row.
 *
 * Fix vs. original: the Connection, Statement, and ResultSet were never
 * closed, leaking the JDBC resources on every call. Each is now released
 * in a `finally` block (innermost-out), so they close even if the query throws.
 */
def operatorHive: Unit = {
  // Register the HiveServer2 JDBC driver with DriverManager.
  Class.forName("org.apache.hive.jdbc.HiveDriver")
  val url = "jdbc:hive2://192.168.2.xxx:10000"
  // NOTE(review): credentials are hard-coded here; move to configuration in real code.
  val connection: Connection = DriverManager.getConnection(url, "root", "[email protected]")
  try {
    val createStatement: Statement = connection.createStatement()
    try {
      val query: ResultSet =
        createStatement.executeQuery("select * from diagbot.ord_lis_trend limit 2")
      try {
        // Print the first column of each returned row.
        while (query.next()) {
          println(query.getString(1))
        }
      } finally query.close()
    } finally createStatement.close()
  } finally connection.close()
}

第二種方式:

/**
 * Approach 2: query Hive through a SparkSession with Hive support enabled.
 *
 * Builds a Hive-enabled SparkSession, runs a query against
 * `janggan.diagnosismedication`, and prints the row count and the second
 * column name. A commented-out example shows how the result could be
 * appended to a MySQL table via JDBC.
 *
 * Fixes vs. original: extraction-mangled one-line body reformatted;
 * deprecated procedure syntax `def main(args: Array[String]) { ... }`
 * replaced with the explicit `: Unit =` form.
 */
object SparkOperaterHive {
  val sparkConf: SparkConf = new SparkConf().setAppName(SparkOperaterHive.getClass.getSimpleName)
  val sparkSession: SparkSession =
    SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()
  val sc: SparkContext = sparkSession.sparkContext
  val sqlContext: SQLContext = sparkSession.sqlContext

  def main(args: Array[String]): Unit = {
    import sparkSession.implicits._
    val sql1: DataFrame = sparkSession.sql("select * from janggan.diagnosismedication")

    // JDBC connection properties for the (optional) MySQL sink below.
    // NOTE(review): credentials are hard-coded; move to configuration in real code.
    val properties: Properties = new Properties()
    properties.put("user", "root")
    properties.put("password", "[email protected]")
    properties.put("driver", "com.mysql.jdbc.Driver")
    // NOTE(review): `url` is not defined anywhere in this snippet — a MySQL JDBC
    // URL must be supplied before uncommenting this write.
    // sql1.write.mode(SaveMode.Append).jdbc(url,"doc_info_hive",properties)

    println("總數為:" + sql1.count())
    // Prints the name of the second column (index 1) of the result schema.
    println("sddhdj" + sql1.columns(1))
    sparkSession.stop()
  }
}