
Commit

Update Scala and Java format (pingcap#1476)
birdstorm authored Jun 10, 2020
1 parent 0ddf044 commit 17b41a1
Showing 421 changed files with 9,843 additions and 8,761 deletions.
21 changes: 18 additions & 3 deletions .ci/build.groovy
@@ -22,22 +22,37 @@ def call(ghprbActualCommit, ghprbPullId, ghprbPullTitle, ghprbPullLink, ghprbPul
}
}

stage('Build') {
stage('Format') {
dir("go/src/github.com/pingcap/tispark") {
sh """
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export LANGUAGE=en_US.UTF-8
cp -R /home/jenkins/agent/git/tispark/. ./
git checkout -f ${ghprbActualCommit}
mvn clean install -Dmaven.test.skip=true
mvn mvn-scalafmt_2.11:format -Dscalafmt.skip=false
mvn com.coveo:fmt-maven-plugin:format
git diff --quiet
formatted="\$?"
if [[ "\${formatted}" -eq 1 ]]
then
echo "code format error"
echo "code format error, please run the following commands:"
echo " mvn mvn-scalafmt_2.11:format -Dscalafmt.skip=false"
echo " mvn com.coveo:fmt-maven-plugin:format"
exit 1
fi
"""
}
}

stage('Build') {
dir("go/src/github.com/pingcap/tispark") {
sh """
git checkout -f ${ghprbActualCommit}
mvn clean package -Dmaven.test.skip=true
"""
}
}
}
}

6 changes: 2 additions & 4 deletions .ci/integration_test.groovy
@@ -199,8 +199,7 @@ def call(ghprbActualCommit, ghprbCommentBody, ghprbPullId, ghprbPullTitle, ghprb
if [ ! "\$(ls -A /maven/.m2/repository)" ]; then curl -sL \$archive_url | tar -zx -C /maven || true; fi
"""
sh """
export MAVEN_OPTS="-Xmx6G -XX:MaxPermSize=512M -XX:ReservedCodeCacheSize=51M"
mvn compile ${MVN_PROFILE}
export MAVEN_OPTS="-Xmx6G -XX:MaxPermSize=512M"
mvn test ${MVN_PROFILE} -Dtest=moo ${mvnStr}
"""
}
@@ -216,9 +215,8 @@ def call(ghprbActualCommit, ghprbCommentBody, ghprbPullId, ghprbPullTitle, ghprb
if [ ! "\$(ls -A /maven/.m2/repository)" ]; then curl -sL \$archive_url | tar -zx -C /maven || true; fi
"""
sh """
export MAVEN_OPTS="-Xmx6G -XX:MaxPermSize=512M -XX:ReservedCodeCacheSize=512M"
export MAVEN_OPTS="-Xmx6G -XX:MaxPermSize=512M"
mvn test ${MVN_PROFILE} -am -pl tikv-client
mvn test ${MVN_PROFILE} -Dtest=moo -DwildcardSuites=com.pingcap.tispark.datasource.DataSourceWithoutExtensionsSuite,org.apache.spark.sql.IssueTestSuite -DfailIfNoTests=false
"""
unstash "CODECOV_TOKEN"
sh 'curl -s https://codecov.io/bash | bash -s - -t @CODECOV_TOKEN'
15 changes: 13 additions & 2 deletions core-test/pom.xml
@@ -221,15 +221,26 @@
<artifactId>maven-clean-plugin</artifactId>
<version>2.4.1</version>
</plugin>
<!-- Scalatest runs all Scala tests -->
<plugin>
<groupId>org.scalatest</groupId>
<artifactId>scalatest-maven-plugin</artifactId>
<version>2.0.0</version>
<!-- Note config is repeated in surefire config -->
<configuration>
<reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
<junitxml>.</junitxml>
<filereports>WDF TestSuite.txt</filereports>
<argLine>-Dfile.encoding=UTF-8 -Duser.timezone=GMT+8 -Dio.netty.leakDetection.level=paranoid</argLine>
<filereports>SparkTestSuite.txt</filereports>
<argLine>-ea -Xmx6g -Xss4m -XX:ReservedCodeCacheSize=${CodeCacheSize} -Dfile.encoding=UTF-8 -Duser.timezone=GMT+8 -Dio.netty.tryReflectionSetAccessible=true</argLine>
<stderr/>
<environmentVariables>
<JAVA_HOME>${test.java.home}</JAVA_HOME>
</environmentVariables>
<systemProperties>
<log4j.configuration>file:src/test/resources/log4j.properties</log4j.configuration>
</systemProperties>
<tagsToExclude>${test.exclude.tags}</tagsToExclude>
<tagsToInclude>${test.include.tags}</tagsToInclude>
</configuration>
<executions>
<execution>
42 changes: 23 additions & 19 deletions core/pom.xml
@@ -262,45 +262,49 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>true</skipTests>
</configuration>
</plugin>
<!-- Scala Format Plug-in -->
<plugin>
<groupId>org.antipathy</groupId>
<artifactId>mvn-scalafmt_${scala.binary.version}</artifactId>
<version>0.10_1.5.1</version>
<version>1.0.3</version>
<configuration>
<skipSources>${scalafmt.skip}</skipSources>
<skipTestSources>${scalafmt.skip}</skipTestSources>
<sourceDirectories> <!-- (Optional) Paths to source-directories. Overrides ${project.build.sourceDirectory} -->
<sourceDirectory>${project.basedir}/src/main/scala</sourceDirectory>
</sourceDirectories>
<testSourceDirectories> <!-- (Optional) Paths to test-source-directories. Overrides ${project.build.testSourceDirectory} -->
<param>${project.basedir}/src/test/scala</param>
</testSourceDirectories>
<configLocation>${project.parent.basedir}/scalafmt.conf</configLocation>
</configuration>
<executions>
<execution>
<phase>validate</phase>
<goals>
<goal>format</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>true</skipTests>
</configuration>
</plugin>
<!-- Scalatest runs all Scala tests -->
<plugin>
<groupId>org.scalatest</groupId>
<artifactId>scalatest-maven-plugin</artifactId>
<version>2.0.0</version>
<!-- Note config is repeated in surefire config -->
<configuration>
<reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
<junitxml>.</junitxml>
<filereports>WDF TestSuite.txt</filereports>
<argLine>-Dfile.encoding=UTF-8 -Duser.timezone=GMT+8 -Dio.netty.leakDetection.level=paranoid</argLine>
<filereports>SparkTestSuite.txt</filereports>
<argLine>-ea -Xmx6g -Xss4m -XX:ReservedCodeCacheSize=${CodeCacheSize} -Dfile.encoding=UTF-8 -Duser.timezone=GMT+8 -Dio.netty.leakDetection.level=paranoid -Dio.netty.tryReflectionSetAccessible=true</argLine>
<stderr/>
<environmentVariables>
<JAVA_HOME>${test.java.home}</JAVA_HOME>
</environmentVariables>
<systemProperties>
<log4j.configuration>file:src/test/resources/log4j.properties</log4j.configuration>
</systemProperties>
<tagsToExclude>${test.exclude.tags}</tagsToExclude>
<tagsToInclude>${test.include.tags}</tagsToInclude>
</configuration>
<executions>
<execution>
4 changes: 3 additions & 1 deletion core/scripts/version.sh
@@ -24,4 +24,6 @@ echo '
package com.pingcap.tispark
object TiSparkVersion {
val version: String = "Release Version: '${TiSparkReleaseVersion}'\\nGit Commit Hash: '${TiSparkGitHash}'\\nGit Branch: '${TiSparkGitBranch}'\\nUTC Build Time: '${TiSparkBuildTS}'" }' > ${TISPARK_HOME}/core/src/main/scala/com/pingcap/tispark/TiSparkVersion.scala
val version: String =
"Release Version: '${TiSparkReleaseVersion}'\\nGit Commit Hash: '${TiSparkGitHash}'\\nGit Branch: '${TiSparkGitBranch}'\\nUTC Build Time: '${TiSparkBuildTS}'"
}' > ${TISPARK_HOME}/core/src/main/scala/com/pingcap/tispark/TiSparkVersion.scala
TiColumnVectorAdapter.java
@@ -22,10 +22,9 @@
import org.apache.spark.unsafe.types.UTF8String;

public class TiColumnVectorAdapter extends ColumnVector {
private TiColumnVector tiColumnVector;
/**
* Sets up the data type of this column vector.
*/
private final TiColumnVector tiColumnVector;

/** Sets up the data type of this column vector. */
public TiColumnVectorAdapter(TiColumnVector tiColumnVector) {
super(TypeMapping.toSparkType(tiColumnVector.dataType()));
this.tiColumnVector = tiColumnVector;
@@ -37,27 +36,19 @@ public TiColumnVectorAdapter(TiColumnVector tiColumnVector) {
* <p>This overwrites `AutoCloseable.close` to remove the `throws` clause, as column vector is
* in-memory and we don't expect any exception to happen during closing.
*/
public void close() {
public void close() {}

}

/**
* Returns true if this column vector contains any null values.
*/
/** Returns true if this column vector contains any null values. */
public boolean hasNull() {
return tiColumnVector.hasNull();
}

/**
* Returns the number of nulls in this column vector.
*/
/** Returns the number of nulls in this column vector. */
public int numNulls() {
return tiColumnVector.numNulls();
}

/**
* Returns whether the value at rowId is NULL.
*/
/** Returns whether the value at rowId is NULL. */
public boolean isNullAt(int rowId) {
return tiColumnVector.isNullAt(rowId);
}
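
The adapter above simply forwards each Spark ColumnVector call to the wrapped TiColumnVector. A minimal usage sketch, not part of this commit — it assumes the sketch lives alongside TiColumnVectorAdapter so TiColumnVector resolves without an extra import, and that the caller already knows the row count of the decoded chunk:

import org.apache.spark.sql.vectorized.ColumnVector;

// Hedged sketch: read a decoded TiColumnVector through the Spark ColumnVector API.
final class AdapterReadSketch {
  static long sumNonNullLongs(TiColumnVector tiVector, int numRows) {
    long sum = 0L;
    try (ColumnVector adapted = new TiColumnVectorAdapter(tiVector)) {
      for (int rowId = 0; rowId < numRows; rowId++) {
        if (!adapted.isNullAt(rowId)) {   // delegates to tiVector.isNullAt(rowId), as shown above
          sum += adapted.getLong(rowId);  // the other getters are expected to delegate the same way
        }
      }
    }                                     // close() is a no-op, per the override above
    return sum;
  }
}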
@@ -121,13 +112,13 @@ public double getDouble(int rowId) {
/**
* Returns the array type value for rowId. If the slot for rowId is null, it should return null.
*
* To support array type, implementations must construct an {@link ColumnarArray} and return it in
* this method. {@link ColumnarArray} requires a {@link ColumnVector} that stores the data of all
* the elements of all the arrays in this vector, and an offset and length which points to a range
* in that {@link ColumnVector}, and the range represents the array for rowId. Implementations are
* free to decide where to put the data vector and offsets and lengths. For example, we can use
* the first child vector as the data vector, and store offsets and lengths in 2 int arrays in
* this vector.
* <p>To support array type, implementations must construct an {@link ColumnarArray} and return it
* in this method. {@link ColumnarArray} requires a {@link ColumnVector} that stores the data of
* all the elements of all the arrays in this vector, and an offset and length which points to a
* range in that {@link ColumnVector}, and the range represents the array for rowId.
* Implementations are free to decide where to put the data vector and offsets and lengths. For
* example, we can use the first child vector as the data vector, and store offsets and lengths in
* 2 int arrays in this vector.
*/
@Override
public ColumnarArray getArray(int rowId) {
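
The Javadoc above states the contract for array support rather than giving an implementation (the adapter's own method body is collapsed in the diff). As a rough, hypothetical illustration of that contract — assuming a vector that keeps all element data in its first child and per-row offsets/lengths in two int arrays; none of this is TiSpark code:

// Hypothetical override inside such a vector, illustrating the ColumnarArray contract.
@Override
public ColumnarArray getArray(int rowId) {
  if (isNullAt(rowId)) {
    return null;
  }
  // getChild(0) is assumed to hold the flattened elements of every array in this vector.
  return new ColumnarArray(getChild(0), offsets[rowId], lengths[rowId]);
}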
@@ -137,14 +128,14 @@ public ColumnarArray getArray(int rowId) {
/**
* Returns the map type value for rowId. If the slot for rowId is null, it should return null.
*
* In Spark, map type value is basically a key data array and a value data array. A key from the
* key array with a index and a value from the value array with the same index contribute to an
* entry of this map type value.
* <p>In Spark, map type value is basically a key data array and a value data array. A key from
* the key array with a index and a value from the value array with the same index contribute to
* an entry of this map type value.
*
* To support map type, implementations must construct a {@link ColumnarMap} and return it in this
* method. {@link ColumnarMap} requires a {@link ColumnVector} that stores the data of all the
* keys of all the maps in this vector, and another {@link ColumnVector} that stores the data of
* all the values of all the maps in this vector, and a pair of offset and length which specify
* <p>To support map type, implementations must construct a {@link ColumnarMap} and return it in
* this method. {@link ColumnarMap} requires a {@link ColumnVector} that stores the data of all
* the keys of all the maps in this vector, and another {@link ColumnVector} that stores the data
* of all the values of all the maps in this vector, and a pair of offset and length which specify
* the range of the key/value array that belongs to the map type value at rowId.
*/
@Override
@@ -153,11 +144,10 @@ public ColumnarMap getMap(int ordinal) {
}
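
Similarly for maps — a hypothetical override matching the ColumnarMap contract described above (keysVector, valuesVector, offsets and lengths are assumed fields of an imaginary implementation, not TiSpark code):

@Override
public ColumnarMap getMap(int rowId) {
  if (isNullAt(rowId)) {
    return null;
  }
  // keysVector/valuesVector hold the flattened keys and values of every map in this vector.
  return new ColumnarMap(keysVector, valuesVector, offsets[rowId], lengths[rowId]);
}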

/**
* Returns the decimal type value for rowId. If the slot for rowId is null, it should return
* null.
* Returns the decimal type value for rowId. If the slot for rowId is null, it should return null.
*/
public Decimal getDecimal(int rowId, int precision, int scale) {
return Decimal.apply(tiColumnVector.getDecimal(rowId, precision, scale));
return Decimal.apply(tiColumnVector.getDecimal(rowId, precision, scale));
}

/**
@@ -176,11 +166,9 @@ public byte[] getBinary(int rowId) {
return tiColumnVector.getBinary(rowId);
}

/**
* @return child [[ColumnVector]] at the given ordinal.
*/
/** @return child [[ColumnVector]] at the given ordinal. */
@Override
protected ColumnVector getChild(int ordinal) {
throw new UnsupportedOperationException("TiColumnVectorAdapter is not supported this method");
}
}
}
TiColumnarBatchHelper.java
@@ -17,14 +17,12 @@

import org.apache.spark.sql.vectorized.ColumnarBatch;

/**
* A helper class to create {@link ColumnarBatch} from {@link TiChunk}
*/
/** A helper class to create {@link ColumnarBatch} from {@link TiChunk} */
public final class TiColumnarBatchHelper {
public static ColumnarBatch createColumnarBatch(TiChunk chunk) {
int colLen = chunk.numOfCols();
TiColumnVectorAdapter[] columns = new TiColumnVectorAdapter[colLen];
for(int i = 0; i < colLen; i++) {
for (int i = 0; i < colLen; i++) {
columns[i] = new TiColumnVectorAdapter(chunk.column(i));
}
ColumnarBatch batch = new ColumnarBatch(columns);
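
For context, a hedged sketch of how this helper might be consumed — not from this commit, and it assumes createColumnarBatch also sets the batch's row count from the chunk (that part of the method is collapsed above) and that the sketch sits in the same package so TiChunk and TiColumnarBatchHelper resolve:

import java.util.Iterator;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.vectorized.ColumnarBatch;

// Hedged usage sketch: turn a decoded TiChunk into a ColumnarBatch and walk its rows.
final class BatchHelperUsageSketch {
  static long countRows(TiChunk chunk) {
    ColumnarBatch batch = TiColumnarBatchHelper.createColumnarBatch(chunk);
    long count = 0L;
    Iterator<InternalRow> rows = batch.rowIterator();
    while (rows.hasNext()) {
      InternalRow row = rows.next(); // per-column access: row.getLong(0), row.getUTF8String(1), ...
      count++;
    }
    return count;
  }
}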
37 changes: 28 additions & 9 deletions core/src/main/java/com/pingcap/tikv/datatype/TypeMapping.java
@@ -1,3 +1,18 @@
/*
* Copyright 2020 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.pingcap.tikv.datatype;

import static com.pingcap.tikv.types.MySQLType.TypeLonglong;
@@ -22,7 +37,9 @@ public class TypeMapping {
private static final int MAX_PRECISION = 38;

private static boolean isStringType(DataType type) {
return type instanceof EnumType || type instanceof JsonType || type instanceof SetType
return type instanceof EnumType
|| type instanceof JsonType
|| type instanceof SetType
|| type instanceof StringType;
}

@@ -31,15 +48,17 @@ public static org.apache.spark.sql.types.DataType toSparkType(DataType type) {
return DataTypes.DateType;
}

if(type instanceof AbstractDateTimeType) {
if (type instanceof AbstractDateTimeType) {
return DataTypes.TimestampType;
}

if (type instanceof DecimalType) {
int len = (int) type.getLength();
if (len > MAX_PRECISION) {
logger.warning(
"Decimal precision exceeding MAX_PRECISION=" + MAX_PRECISION + ", "
"Decimal precision exceeding MAX_PRECISION="
+ MAX_PRECISION
+ ", "
+ "value will be truncated");
len = MAX_PRECISION;
}
@@ -52,15 +71,15 @@ public static org.apache.spark.sql.types.DataType toSparkType(DataType type) {
}

if (type instanceof RealType) {
switch (type.getType()){
switch (type.getType()) {
case TypeFloat:
return DataTypes.FloatType;
case TypeDouble:
return DataTypes.DoubleType;
}
}

if(type instanceof BytesType) {
if (type instanceof BytesType) {
return DataTypes.BinaryType;
}

@@ -72,11 +91,11 @@ public static org.apache.spark.sql.types.DataType toSparkType(DataType type) {
return DataTypes.LongType;
}

if(type instanceof TimeType) {
if (type instanceof TimeType) {
return DataTypes.LongType;
}

throw new UnsupportedOperationException(String.format("found unsupported type %s",
type.getClass().getCanonicalName()));
throw new UnsupportedOperationException(
String.format("found unsupported type %s", type.getClass().getCanonicalName()));
}
}
}
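
The decimal branch of TypeMapping.toSparkType above clamps TiDB precision to Spark's maximum of 38 and logs a warning that wider values will be truncated. A small worked illustration of that rule — hypothetical helper and values, not the exact TypeMapping code (the remainder of that branch is collapsed in the diff):

import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DataTypes;

// Worked illustration of the precision clamp in TypeMapping.toSparkType.
final class DecimalClampSketch {
  static DataType clampedDecimal(int tidbPrecision, int scale) {
    int precision = Math.min(tidbPrecision, 38); // 38 plays the role of MAX_PRECISION above
    return DataTypes.createDecimalType(precision, scale);
  }
  // clampedDecimal(65, 10) yields DecimalType(38,10): a TiDB DECIMAL(65,10) loses digits, hence the warning.
}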
3 changes: 2 additions & 1 deletion core/src/main/scala/com/pingcap/tispark/TiConfigConst.scala
@@ -31,7 +31,8 @@ object TiConfigConst {
val CODEC_FORMAT: String = "spark.tispark.coprocess.codec_format"
val UNSUPPORTED_PUSHDOWN_EXPR: String = "spark.tispark.plan.unsupported_pushdown_exprs"
val CHUNK_BATCH_SIZE: String = "spark.tispark.coprocessor.chunk_batch_size"
val REGION_INDEX_SCAN_DOWNGRADE_THRESHOLD: String = "spark.tispark.plan.downgrade.index_threshold"
val REGION_INDEX_SCAN_DOWNGRADE_THRESHOLD: String =
"spark.tispark.plan.downgrade.index_threshold"
val UNSUPPORTED_TYPES: String = "spark.tispark.type.unsupported_mysql_types"
val ENABLE_AUTO_LOAD_STATISTICS: String = "spark.tispark.statistics.auto_load"
val CACHE_EXPIRE_AFTER_ACCESS: String = "spark.tispark.statistics.expire_after_access"
(Diffs for the remaining changed files are not shown.)
