import java.util.Properties

import org.apache.spark.sql.{SaveMode, SparkSession}

object CreateDataFrameFromJDBC {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()

    // JDBC connection properties for the local MySQL instance
    val properties = new Properties()
    properties.setProperty("driver", "com.mysql.jdbc.Driver")
    properties.setProperty("user", "root")
    properties.setProperty("password", "root")
    val url = "jdbc:mysql://localhost:3306/bigdata?characterEncoding=UTF-8"

    // Alternatively, read an existing table back into a DataFrame:
    // val df = spark.read.jdbc(url, "tb_user", properties)

    // Read the CSV: treat the first row as the header and infer column types
    val df = spark.read.option("header", true).option("inferSchema", true).csv("bigdata\\user.csv")

    // Append the data to tb_user1; Spark creates the table if it does not exist
    df.write.mode(SaveMode.Append).jdbc(url, "tb_user1", properties)

    df.printSchema()
    df.show()

    spark.stop()
  }
}
val df = spark.read.option("header", true).option("inferSchema", true).csv("bigdata\\user.csv")
Read the CSV file: the header option makes Spark take the first row as the column names, and inferSchema makes it infer each column's type from the data.
The write then goes straight into the database over JDBC in append mode.
There is no need to create the target table beforehand; Spark creates tb_user1 automatically.
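To confirm the append landed, the table can be read back with the same url and properties defined in the code above; a minimal sketch (the variable name written is just for illustration):

// Read tb_user1 back as a DataFrame to verify the rows were written
val written = spark.read.jdbc(url, "tb_user1", properties)
written.show()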
Sample bigdata\user.csv:
id,name,age,province,score
1,张飞,21,北京,80.0
2,关羽,23,北京,82.0
3,赵云,20,上海,88.6
4,刘备,26,上海,83.0
5,曹操,30,深圳,90.0
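If the extra pass over the file that inferSchema requires is a concern, the schema can also be declared explicitly to match the header above; a sketch under that assumption (userSchema and dfWithSchema are illustrative names, not from the original code):

import org.apache.spark.sql.types._

// Explicit schema matching the columns in user.csv
val userSchema = StructType(Seq(
  StructField("id", IntegerType, nullable = false),
  StructField("name", StringType),
  StructField("age", IntegerType),
  StructField("province", StringType),
  StructField("score", DoubleType)
))

// header is still read so the names line up; no inference pass is needed
val dfWithSchema = spark.read
  .option("header", true)
  .schema(userSchema)
  .csv("bigdata\\user.csv")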