DataFrames in Spark can be saved to different file formats with the DataFrameWriter API, exposed as DataFrame.write. Spark supports text, CSV, JSON, Parquet, and ORC out of the box (plus Avro via the external spark-avro module), and by default it saves in Parquet format. You can pass compression options while saving the output, and mode() controls whether Spark appends to, overwrites, ignores, or errors out on an existing path.
Here are simple examples of writing DataFrames to various output formats.
scala> val data = Seq(("Amir", 45), ("Varun", 25), ("Sneha",30)).toDF("Name","Age")
data: org.apache.spark.sql.DataFrame = [Name: string, Age: int]
The text data source supports only a single string column, so Name and Age are first concatenated into one column with concat_ws (which needs the functions import in spark-shell):
scala> import org.apache.spark.sql.functions._
scala> val cdata = data.select(concat_ws(",",$"Name",$"Age").as("NewCol"))
cdata: org.apache.spark.sql.DataFrame = [NewCol: string]
scala> cdata.show
+--------+
| NewCol|
+--------+
| Amir,45|
|Varun,25|
|Sneha,30|
+--------+
scala> cdata.write.text("/results/textdata")
scala> cdata.write.text("/results/textdata1/")
scala> cdata.write.format("text").option("compression", "gzip").save("/results/txtcompress")
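Besides gzip, the text writer's compression option accepts the usual codecs (none, bzip2, deflate, lz4, snappy). A quick sketch with an illustrative output path:
scala> cdata.write.option("compression", "bzip2").text("/results/txtbzip2/")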
scala> val df = Seq(("Amir", 45), ("Varun", 25), ("Sneha",30)).toDF("Name","Age")
df: org.apache.spark.sql.DataFrame = [Name: string, Age: int]
scala> df.write.option("header","true").csv("/results/csvout/")
scala> df.write.format("csv").option("header","true").save("/results/csvout1/")
scala> df.write.save("/results/prqout/")
scala> df.write.format("parquet").save("/results/prqout1/")
scala> df.write.parquet("/results/prqout2/")
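Note that the plain df.write.save("/results/prqout/") call above already produced Parquet, because Parquet is the default format (configurable via spark.sql.sources.default). Parquet output is snappy-compressed by default; to pick another codec, for example gzip (path illustrative):
scala> df.write.option("compression","gzip").parquet("/results/prqgzip/")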
scala> df.write.format("json").save("/results/jsondata/")
scala> df.write.json("/results/jsondata1/")
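The JSON writer emits one JSON object per line (the JSON Lines layout), so for the DataFrame above each part file would contain records of the form:
{"Name":"Amir","Age":45}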
scala> df.write.format("avro").save("/results/avrodata/")
scala> df.write.avro("/results/avrodata1/")
<console>:26: error: value avro is not a member of org.apache.spark.sql.DataFrameWriter[org.apache.spark.sql.Row]
df.write.avro("/results/avrodata1/")
^
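The shorthand fails because DataFrameWriter has no built-in avro() method; that method was added implicitly by the old Databricks spark-avro library (import com.databricks.spark.avro._). From Spark 2.4 onwards, Avro support lives in the external spark-avro module, so even format("avro") works only when the shell is launched with that package on the classpath, roughly like this (the version is a placeholder for your Spark build):
bin/spark-shell --packages org.apache.spark:spark-avro_2.12:<spark-version>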
scala> df.write.format("orc").save("/results/orcdata/")
scala> df.write.orc("/results/orcdata1/")
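Each of these calls writes a directory containing one part file per partition plus a _SUCCESS marker, not a single file. If one output file is needed, the DataFrame can be collapsed to a single partition first; a sketch with an illustrative path:
scala> df.coalesce(1).write.option("header","true").csv("/results/csvsingle/")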
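Finally, mode() decides what happens when the target path already exists: append adds new files, overwrite replaces the existing output, ignore skips the write, and errorifexists (the default) fails the job. A minimal sketch, reusing the paths from above:
scala> df.write.mode("append").parquet("/results/prqout2/")
scala> df.write.mode("overwrite").format("json").save("/results/jsondata/")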