[CORE] Avoid formatted comments from being messed by non-spotless linters (especially IDE linters) (#7989)
zhztheplayer authored Nov 20, 2024
1 parent 90428b9 commit 9315835
Showing 4 changed files with 12 additions and 14 deletions.
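The fix swaps Spotless's `// spotless:off` / `// spotless:on` toggles for scalafmt's `// format: off` / `// format: on` directives, which scalafmt itself honors, so IDE formatters that delegate to scalafmt leave the region alone and hand-aligned comment blocks survive a reformat. A minimal sketch of the pattern (the class and the diagram are illustrative, not taken from this patch):

```scala
// format: off
/**
 * A hand-aligned diagram that a reflowing formatter would otherwise mangle:
 *
 *   ColumnarExchange     <- columnar output of the previous stage
 *          |
 *   ColumnarToRow        <- transition inserted on fallback
 *          |
 *       Project
 */
// format: on
class IllustrativePlanDoc // hypothetical placeholder so the snippet compiles
```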
File 1 of 4 — defines ColumnarCachedBatchSerializer:

```diff
@@ -46,7 +46,7 @@ case class CachedColumnarBatch(
     bytes: Array[Byte])
   extends CachedBatch {}
 
-// spotless:off
+// format: off
 /**
  * Feature:
  * 1. This serializer supports column pruning
@@ -75,7 +75,7 @@ case class CachedColumnarBatch(
  * - Deserializer DefaultCachedBatch -> InternalRow (unsupport ColumnarToRow)
  *   -> Convert DefaultCachedBatch to InternalRow using vanilla Spark serializer
  */
-// spotless:on
+// format: on
 class ColumnarCachedBatchSerializer extends CachedBatchSerializer with SQLConfHelper with Logging {
   private lazy val rowBasedCachedBatchSerializer = new DefaultCachedBatchSerializer
 
```
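For the row-based deserialization path named in the feature list above (DefaultCachedBatch to InternalRow via the vanilla Spark serializer), the hunk's rowBasedCachedBatchSerializer field suggests plain delegation. A hedged sketch of that shape, assuming Spark 3.x's CachedBatchSerializer API; the wrapper class and method name are hypothetical:

```scala
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.columnar.CachedBatch
import org.apache.spark.sql.execution.columnar.DefaultCachedBatchSerializer
import org.apache.spark.sql.internal.SQLConf

// Hypothetical wrapper: hand row-based deserialization of cached batches
// back to vanilla Spark when a columnar read path is not supported.
class RowFallbackSketch {
  private lazy val rowBased = new DefaultCachedBatchSerializer

  def toInternalRows(
      input: RDD[CachedBatch],
      cacheAttributes: Seq[Attribute],
      selectedAttributes: Seq[Attribute],
      conf: SQLConf): RDD[InternalRow] =
    rowBased.convertCachedBatchToInternalRow(input, cacheAttributes, selectedAttributes, conf)
}
```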
File 2 of 4 — defines ExpandFallbackPolicy:

```diff
@@ -31,7 +31,8 @@ import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
 import org.apache.spark.sql.execution.command.ExecutedCommandExec
 import org.apache.spark.sql.execution.exchange.Exchange
 
-// spotless:off
+
+// format: off
 /**
  * Note, this rule should only fallback to row-based plan if there is no harm.
  * The follow case should be handled carefully
@@ -64,7 +65,7 @@ import org.apache.spark.sql.execution.exchange.Exchange
  * @param isAdaptiveContext If is inside AQE
  * @param originalPlan The vanilla SparkPlan without apply gluten transform rules
  */
-// spotless:on
+// format: on
 case class ExpandFallbackPolicy(isAdaptiveContext: Boolean, originalPlan: SparkPlan)
   extends Rule[SparkPlan] {
   import ExpandFallbackPolicy._
@@ -106,14 +107,13 @@ case class ExpandFallbackPolicy(isAdaptiveContext: Boolean, originalPlan: SparkPlan)
     transitionCost
   }
 
+  // format: off
   /**
    * When making a stage fall back, it's possible that we need a ColumnarToRow to adapt to last
    * stage's columnar output. So we need to evaluate the cost, i.e., the number of required
    * ColumnarToRow between entirely fallback stage and last stage(s). Thus, we can avoid possible
    * performance degradation caused by fallback policy.
    *
-   * spotless:off
-   *
    * Spark plan before applying fallback policy:
    *
    * ColumnarExchange
@@ -136,9 +136,8 @@ case class ExpandFallbackPolicy(isAdaptiveContext: Boolean, originalPlan: SparkPlan)
    * Project
    *
    * So by considering the cost, the fallback policy will not be applied.
-   *
-   * spotless:on
    */
+  // format: on
   private def countStageFallbackTransitionCost(plan: SparkPlan): Int = {
     var stageFallbackTransitionCost = 0
 
```
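The scaladoc above weighs fallback by the number of ColumnarToRow transitions a plan would need. A stripped-down sketch of that counting idea; this is not the real countStageFallbackTransitionCost, which also accounts for query-stage boundaries under AQE:

```scala
import org.apache.spark.sql.execution.{ColumnarToRowExec, SparkPlan}

// Tally ColumnarToRowExec nodes in a plan; comparing this cost with and
// without the hypothetical fallback approximates the policy's decision.
def countColumnarToRowTransitions(plan: SparkPlan): Int =
  plan.collect { case c: ColumnarToRowExec => c }.size
```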
File 3 of 4 — defines GlutenExplainUtils:

```diff
@@ -147,15 +147,13 @@ object GlutenExplainUtils extends AdaptiveSparkPlanHelper {
     }
   }
 
-  // spotless:off
-  // scalastyle:off
+  // format: off
   /**
    * Given a input physical plan, performs the following tasks.
    * 1. Generates the explain output for the input plan excluding the subquery plans. 2. Generates
    * the explain output for each subquery referenced in the plan.
    */
-  // scalastyle:on
-  // spotless:on
+  // format: on
   def processPlan[T <: QueryPlan[T]](
       plan: T,
       append: String => Unit,
```
File 4 of 4 — defines GlutenImplicits:

```diff
@@ -36,7 +36,8 @@ import org.apache.spark.sql.internal.SQLConf
 import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
 
-// spotless:off
+
+// format: off
 /**
  * A helper class to get the Gluten fallback summary from a Spark [[Dataset]].
  *
@@ -52,7 +53,7 @@ import scala.collection.mutable.ArrayBuffer
  *   df.fallbackSummary
  * }}}
  */
-// spotless:on
+// format: on
 object GlutenImplicits {
 
   case class FallbackSummary(
```
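The df.fallbackSummary call in the scaladoc works through Scala's implicit-enrichment pattern: importing the implicits adds the method to any Dataset. A minimal sketch of that shape (the enrichment-class name and the FallbackSummary fields here are hypothetical):

```scala
import org.apache.spark.sql.Dataset

object GlutenImplicitsSketch {
  // Hypothetical fields; the real FallbackSummary carries more detail.
  case class FallbackSummary(numGlutenNodes: Int, numFallbackNodes: Int)

  implicit class DatasetOps[T](private val ds: Dataset[T]) extends AnyVal {
    // A real implementation would walk ds's executed plan and tally
    // Gluten nodes versus fallen-back vanilla nodes; elided here.
    def fallbackSummary: FallbackSummary = FallbackSummary(0, 0)
  }
}

// Usage, mirroring the scaladoc's example:
//   import GlutenImplicitsSketch._
//   df.fallbackSummary
```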
