
Commit f508e1b

scala-steward authored and nightscape committed
Reformat with scalafmt 3.8.5
Executed command: scalafmt --non-interactive
1 parent 349d6c8 commit f508e1b

6 files changed: +20 -26 lines changed

build.mill

Lines changed: 1 addition & 3 deletions
@@ -75,9 +75,7 @@ trait SparkModule extends Cross.Module2[String, String] with SbtModule with Sona
 
   def publishArtifacts: T[PublishModule.PublishData] = Task {
     val publishData = super.publishArtifacts()
-    publishData.copy(
-      payload = publishData.payload.filterNot { case (ref, name) => ref.toString.contains("jar.dest") }
-    )
+    publishData.copy(payload = publishData.payload.filterNot { case (ref, name) => ref.toString.contains("jar.dest") })
   }
 
   override def sonatypeCentralReadTimeout: T[Int] = 600000
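
Aside from line wrapping, the only substance in this hunk is the filterNot over the publish payload, which drops artifacts whose path contains "jar.dest". Below is a minimal, self-contained sketch of that filtering idea, using plain string tuples as hypothetical stand-ins for Mill's payload pairs (names and paths are illustrative, not the real Mill API):

// Simplified sketch: plain (path, artifact-name) tuples instead of Mill's payload type.
object PayloadFilterSketch extends App {
  val payload: Seq[(String, String)] = Seq(
    ("out/spark-excel/jar.dest/out.jar", "spark-excel.jar"),              // would be filtered out
    ("out/spark-excel/assembly.dest/out.jar", "spark-excel-assembly.jar") // kept
  )

  // Same shape as the filterNot in publishArtifacts: drop entries whose path mentions "jar.dest".
  val kept = payload.filterNot { case (ref, _) => ref.contains("jar.dest") }

  kept.foreach { case (ref, name) => println(s"$name <- $ref") }
}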

src/test/scala/dev/mauch/spark/excel/v2/DataFrameWriterApiComplianceSuite.scala

Lines changed: 1 addition & 4 deletions
@@ -23,10 +23,7 @@ import org.scalatest.wordspec.AnyWordSpec
 class DataFrameWriterApiComplianceSuite extends AnyWordSpec with DataFrameSuiteBase with LocalFileTestingUtilities {
 
   private def simpleDf = {
-    val data = Seq(
-      ("foo", "bar", "1"),
-      ("baz", "bang", "2")
-    )
+    val data = Seq(("foo", "bar", "1"), ("baz", "bang", "2"))
     spark.createDataFrame(data).toDF("col1", "col2", "col3")
   }
 
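
The condensed Seq literal above is just the test fixture for a tiny DataFrame. For reference, a standalone sketch of the same pattern, assuming a local SparkSession outside the test harness:

import org.apache.spark.sql.SparkSession

object SimpleDfSketch extends App {
  // Local session purely for this sketch; the suite itself gets spark from DataFrameSuiteBase.
  val spark = SparkSession.builder().master("local[*]").appName("simpleDf-sketch").getOrCreate()

  val data = Seq(("foo", "bar", "1"), ("baz", "bang", "2"))
  val df = spark.createDataFrame(data).toDF("col1", "col2", "col3")

  df.show() // two rows, three string columns
  spark.stop()
}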

src/test/scala/dev/mauch/spark/excel/v2/GlobPartitionAndFileNameSuite.scala

Lines changed: 1 addition & 2 deletions
@@ -25,8 +25,7 @@ import org.scalatest.funsuite.AnyFunSuite
   *
   * #52. input_file_name returns empty string https://github.dev/mauch/spark-excel/issues/52
   *
-  * #74. Allow reading multiple files specified as a list OR by a pattern
-  * https://github.dev/mauch/spark-excel/issues/74
+  * #74. Allow reading multiple files specified as a list OR by a pattern https://github.dev/mauch/spark-excel/issues/74
   *
   * #97. Reading multiple files https://github.dev/mauch/spark-excel/issues/97
   */
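
The scaladoc lists what this suite exercises: reading several files via a list or glob pattern and recovering each row's source file. A hedged sketch of those two behaviours with the spark-excel V2 "excel" format follows; the paths and option values are illustrative, and spark-excel must be on the classpath:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.input_file_name

object GlobReadSketch extends App {
  val spark = SparkSession.builder().master("local[*]").appName("glob-read-sketch").getOrCreate()

  val df = spark.read
    .format("excel")                              // spark-excel V2 data source
    .option("header", "true")                     // assumes the sheets carry a header row
    .load("/tmp/reports/*.xlsx")                  // glob pattern; hypothetical location
    .withColumn("source_file", input_file_name()) // per-row origin, as discussed in issue #52

  df.select("source_file").distinct().show(truncate = false)
  spark.stop()
}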

src/test/scala/dev/mauch/spark/excel/v2/KeepUndefinedRowsSuite.scala

Lines changed: 2 additions & 2 deletions
@@ -72,8 +72,8 @@ object KeepUndefinedRowsSuite {
     Row("C", "7", "8")
   ).asJava
 
-  /** Issue: https://github.dev/mauch/spark-excel/issues/162 Spark-excel still infers to Double-Type, however, user
-    * can provide custom scheme and Spark-excel should load to IntegerType or LongType accordingly
+  /** Issue: https://github.dev/mauch/spark-excel/issues/162 Spark-excel still infers to Double-Type, however, user can
+    * provide custom scheme and Spark-excel should load to IntegerType or LongType accordingly
     */
   val userDefined_Issue162 = StructType(
     List(
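
The reflowed comment describes the workaround for issue #162: pass an explicit schema so numeric columns load as IntegerType/LongType instead of the inferred DoubleType. A sketch of that idea; the file path, column names, and options are illustrative:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructField, StructType}

object CustomSchemaSketch extends App {
  val spark = SparkSession.builder().master("local[*]").appName("custom-schema-sketch").getOrCreate()

  // User-provided schema, analogous to userDefined_Issue162 above.
  val userDefined = StructType(
    List(
      StructField("id", IntegerType, nullable = true),
      StructField("count", LongType, nullable = true),
      StructField("label", StringType, nullable = true)
    )
  )

  val df = spark.read
    .format("excel")
    .option("header", "true")
    .schema(userDefined) // bypass inference so numeric cells arrive as Int/Long
    .load("/tmp/issue_162.xlsx")

  df.printSchema()
  spark.stop()
}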

src/test/scala/dev/mauch/spark/excel/v2/ManyPartitionReadSuite.scala

Lines changed: 12 additions & 12 deletions
@@ -21,7 +21,7 @@ import org.apache.spark.sql._
 import org.apache.spark.sql.functions.col
 import org.apache.spark.sql.types.IntegerType
 import org.scalatest.wordspec.AnyWordSpec
-import org.apache.spark.sql.types.{StructType, StructField, StringType}
+import org.apache.spark.sql.types.{StringType, StructField, StructType}
 
 class ManyPartitionReadSuite extends AnyWordSpec with DataFrameSuiteBase with LocalFileTestingUtilities {
 
@@ -51,9 +51,9 @@ class ManyPartitionReadSuite extends AnyWordSpec with DataFrameSuiteBase with Lo
       // Each col1 value has multiple rows (around 10-11 rows each)
      val rowsPerPartition = if (col1 == 1) 8 else if (col1 == 2) 16 else 11
      (0 until rowsPerPartition).map { i =>
-        val index = (col1 - 1) * 11 + i + 1234 // Starting from 1234 as in original data
+        val index = (col1 - 1) * 11 + i + 1234 // Starting from 1234 as in original data
        Row(
-          Integer.valueOf(col1), // Make it nullable Integer
+          Integer.valueOf(col1), // Make it nullable Integer
          s"fubar_$index",
          s"bazbang_${index + 77000}",
          s"barfang_${index + 237708}",
@@ -63,18 +63,18 @@ class ManyPartitionReadSuite extends AnyWordSpec with DataFrameSuiteBase with Lo
      }
 
      // Define schema explicitly to match expected nullability
-      val schema = StructType(Array(
-        StructField("col1", IntegerType, nullable = true),
-        StructField("col2", StringType, nullable = true),
-        StructField("col3", StringType, nullable = true),
-        StructField("col4", StringType, nullable = true),
-        StructField("col5", StringType, nullable = true)
-      ))
+      val schema = StructType(
+        Array(
+          StructField("col1", IntegerType, nullable = true),
+          StructField("col2", StringType, nullable = true),
+          StructField("col3", StringType, nullable = true),
+          StructField("col4", StringType, nullable = true),
+          StructField("col5", StringType, nullable = true)
+        )
+      )
 
      val dfInput = spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
 
-
-
      val dfFinal = dfInput.union(dfInput)
 
      val dfWriter = dfFinal.write
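
The hunks above build Rows with Integer.valueOf (boxed, nullable integers) and pair them with an explicit StructType before calling spark.createDataFrame(rdd, schema). A self-contained sketch of that pattern, with illustrative values and a reduced column set:

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object ExplicitSchemaSketch extends App {
  val spark = SparkSession.builder().master("local[*]").appName("explicit-schema-sketch").getOrCreate()

  val rows = Seq(
    Row(Integer.valueOf(1), "fubar_1234"),
    Row(null, "fubar_1235") // allowed because col1 is declared nullable below
  )

  val schema = StructType(
    Array(
      StructField("col1", IntegerType, nullable = true),
      StructField("col2", StringType, nullable = true)
    )
  )

  // Explicit schema avoids relying on inference for types and nullability.
  val df = spark.createDataFrame(spark.sparkContext.parallelize(rows), schema)
  df.show()
  spark.stop()
}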

src/test/scala/dev/mauch/spark/excel/v2/RowNumberColumnSuite.scala

Lines changed: 3 additions & 3 deletions
@@ -24,9 +24,9 @@ import org.scalatest.funsuite.AnyFunSuite
 import java.util
 import scala.jdk.CollectionConverters._
 
-/** Related issues: #40 Allow reading only a subset of rows https://github.dev/mauch/spark-excel/issues/40 #59 Rows
-  * are returned in incorrect order on cluster https://github.dev/mauch/spark-excel/issues/59 #115 Add excel row
-  * number column https://github.dev/mauch/spark-excel/issues/115
+/** Related issues: #40 Allow reading only a subset of rows https://github.dev/mauch/spark-excel/issues/40 #59 Rows are
+  * returned in incorrect order on cluster https://github.dev/mauch/spark-excel/issues/59 #115 Add excel row number
+  * column https://github.dev/mauch/spark-excel/issues/115
   */
 object RowNumberColumnSuite {
 
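
The scaladoc references issue #115, adding an Excel row number column on read. A hedged sketch of how that is typically wired up with spark-excel V2; the option name columnNameOfRowNumber is assumed here rather than taken from this commit, and the path is illustrative:

import org.apache.spark.sql.SparkSession

object RowNumberSketch extends App {
  val spark = SparkSession.builder().master("local[*]").appName("row-number-sketch").getOrCreate()

  val df = spark.read
    .format("excel")
    .option("header", "true")
    .option("columnNameOfRowNumber", "RowNumber") // assumed option name; surfaces the sheet row index
    .load("/tmp/issue_115.xlsx")

  df.select("RowNumber").show()
  spark.stop()
}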
