diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala
index ffe30b6dd..e012e5e1d 100644
--- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala
+++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpReplyToSinkStage.scala
@@ -28,7 +28,7 @@ import scala.concurrent.{ Future, Promise }
  * the queue named in the replyTo options of the message instead of from settings declared at construction.
  */
 @InternalApi
-private[amqp] final class AmqpReplyToSinkStage(settings: AmqpReplyToSinkSettings)
+private[amqp] final class AmqpReplyToSinkStage(replyToSinkSettings: AmqpReplyToSinkSettings)
     extends GraphStageWithMaterializedValue[SinkShape[WriteMessage], Future[Done]] { stage =>

   val in = Inlet[WriteMessage]("AmqpReplyToSink.in")
@@ -41,7 +41,7 @@ private[amqp] final class AmqpReplyToSinkStage(settings: AmqpReplyToSinkSettings
   override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {
     val streamCompletion = Promise[Done]()
     (new GraphStageLogic(shape) with AmqpConnectorLogic {
-        override val settings = stage.settings
+        override val settings: AmqpReplyToSinkSettings = stage.replyToSinkSettings

         override def whenConnected(): Unit = pull(in)
diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala
index 621d97b7d..289ff27eb 100644
--- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala
+++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpRpcFlowStage.scala
@@ -37,7 +37,8 @@ import scala.util.Success
  * can be overridden per message by including `expectedReplies` in the header of the [[pekko.stream.connectors.amqp.WriteMessage]]
  */
 @InternalApi
-private[amqp] final class AmqpRpcFlowStage(settings: AmqpWriteSettings, bufferSize: Int, responsesPerMessage: Int = 1)
+private[amqp] final class AmqpRpcFlowStage(writeSettings: AmqpWriteSettings, bufferSize: Int,
+    responsesPerMessage: Int = 1)
     extends GraphStageWithMaterializedValue[FlowShape[WriteMessage, CommittableReadResult], Future[String]] {
   stage =>
@@ -53,7 +54,7 @@ private[amqp] final class AmqpRpcFlowStage(settings: AmqpWriteSettings, bufferSi
     val streamCompletion = Promise[String]()
     (new GraphStageLogic(shape) with AmqpConnectorLogic {
-        override val settings = stage.settings
+        override val settings: AmqpWriteSettings = stage.writeSettings
         private val exchange = settings.exchange.getOrElse("")
         private val routingKey = settings.routingKey.getOrElse("")
         private val queue = mutable.Queue[CommittableReadResult]()
diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSimpleFlowStage.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSimpleFlowStage.scala
index 69b772d3a..5d24b4447 100644
--- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSimpleFlowStage.scala
+++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/impl/AmqpSimpleFlowStage.scala
@@ -32,7 +32,7 @@ import scala.concurrent.{ Future, Promise }
  * instead of complete [[WriteResult]] (possibly it would be less confusing for users), but [[WriteResult]] is used
  * for consistency with other variants and to make the flow ready for any possible future [[WriteResult]] extensions.
  */
-@InternalApi private[amqp] final class AmqpSimpleFlowStage[T](settings: AmqpWriteSettings)
+@InternalApi private[amqp] final class AmqpSimpleFlowStage[T](writeSettings: AmqpWriteSettings)
     extends GraphStageWithMaterializedValue[FlowShape[(WriteMessage, T), (WriteResult, T)], Future[Done]] { stage =>

   private val in: Inlet[(WriteMessage, T)] = Inlet(Logging.simpleName(this) + ".in")
@@ -45,7 +45,7 @@ import scala.concurrent.{ Future, Promise }
   override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {
     val streamCompletion = Promise[Done]()
-    (new AbstractAmqpFlowStageLogic[T](settings, streamCompletion, shape) {
+    (new AbstractAmqpFlowStageLogic[T](writeSettings, streamCompletion, shape) {

       override def publish(message: WriteMessage, passThrough: T): Unit = {
         log.debug("Publishing message {}.", message)
diff --git a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpSource.scala b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpSource.scala
index 5e2fa8d16..9cca3ac32 100644
--- a/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpSource.scala
+++ b/amqp/src/main/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpSource.scala
@@ -20,8 +20,10 @@ import pekko.stream.connectors.amqp.impl
 import pekko.stream.connectors.amqp.{ AmqpSourceSettings, ReadResult }
 import pekko.stream.scaladsl.Source

+import scala.concurrent.ExecutionContext
+
 object AmqpSource {
-  private implicit val executionContext = ExecutionContexts.parasitic
+  private implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic

   /**
    * Scala API: Convenience for "at-most once delivery" semantics. Each message is acked to RabbitMQ
diff --git a/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala b/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala
index 6eabe9944..dab24b6a3 100644
--- a/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala
+++ b/amqp/src/test/scala/docs/scaladsl/AmqpDocsSpec.scala
@@ -33,7 +33,7 @@ import scala.collection.immutable
  */
 class AmqpDocsSpec extends AmqpSpec {

-  override implicit val patienceConfig = PatienceConfig(10.seconds)
+  override implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds)

   val businessLogic: CommittableReadResult => Future[CommittableReadResult] = Future.successful(_)
diff --git a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpSpec.scala b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpSpec.scala
index e472ef917..f51e5a3b2 100644
--- a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpSpec.scala
+++ b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/AmqpSpec.scala
@@ -22,10 +22,12 @@ import org.scalatest.concurrent.ScalaFutures
 import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpec

+import scala.concurrent.ExecutionContext
+
 abstract class AmqpSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with ScalaFutures with LogCapturing {

-  implicit val system = ActorSystem(this.getClass.getSimpleName)
-  implicit val executionContext = ExecutionContexts.parasitic
+  implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName)
+  implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic

   override protected def afterAll(): Unit =
     system.terminate()
diff --git a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala
index 3240c4b5d..23be429ca 100644
--- a/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala
+++ b/amqp/src/test/scala/org/apache/pekko/stream/connectors/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala
@@ -15,7 +15,6 @@ package org.apache.pekko.stream.connectors.amqp.scaladsl

 import java.util.concurrent.ExecutorService
 import java.util.concurrent.atomic.AtomicInteger
-
 import org.apache.pekko
 import pekko.actor.ActorSystem
 import pekko.dispatch.ExecutionContexts
@@ -33,7 +32,7 @@ import com.rabbitmq.client.{ AddressResolver, Connection, ConnectionFactory, Shu
 import org.scalatest.concurrent.ScalaFutures
 import org.scalatest.BeforeAndAfterEach

-import scala.concurrent.Future
+import scala.concurrent.{ ExecutionContext, Future }
 import scala.concurrent.duration._
 import scala.util.control.NonFatal
 import org.scalatest.matchers.should.Matchers
@@ -50,8 +49,8 @@ class AmqpGraphStageLogicConnectionShutdownSpec
     with BeforeAndAfterEach
     with LogCapturing {

-  override implicit val patienceConfig = PatienceConfig(10.seconds)
-  private implicit val executionContext = ExecutionContexts.parasitic
+  override implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds)
+  private implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic

   val shutdownsAdded = new AtomicInteger()
   val shutdownsRemoved = new AtomicInteger()
@@ -76,7 +75,7 @@ class AmqpGraphStageLogicConnectionShutdownSpec
     "registers and unregisters a single connection shutdown hook per graph" in {
       // actor system is within this test as it has to be shut down in order
       // to verify graph stage termination
-      implicit val system = ActorSystem(this.getClass.getSimpleName + System.currentTimeMillis())
+      implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName + System.currentTimeMillis())

       val connectionFactory = new ConnectionFactory() {
         override def newConnection(es: ExecutorService, ar: AddressResolver, name: String): Connection =
diff --git a/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala b/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala
index 38e70da6c..517831f51 100644
--- a/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala
+++ b/aws-event-bridge/src/test/scala/docs/scaladsl/EventBridgePublisherSpec.scala
@@ -28,7 +28,7 @@ import scala.concurrent.duration._

 class EventBridgePublisherSpec extends AnyFlatSpec with Matchers with ScalaFutures with IntegrationTestContext {

-  implicit val defaultPatience =
+  implicit val defaultPatience: PatienceConfig =
     PatienceConfig(timeout = 15.seconds, interval = 100.millis)

   "EventBridge Publisher sink" should "send PutEventsEntry message" in {
diff --git a/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala b/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala
index 1dd83c99a..4cad3b954 100644
--- a/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala
+++ b/awslambda/src/test/scala/docs/scaladsl/AwsLambdaFlowSpec.scala
@@ -14,7 +14,6 @@ package docs.scaladsl

 import java.util.concurrent.CompletableFuture
-
 import org.apache.pekko
 import pekko.actor.ActorSystem
 import pekko.stream.connectors.awslambda.scaladsl.AwsLambdaFlow
@@ -36,7 +35,7 @@ import software.amazon.awssdk.core.SdkBytes
 import software.amazon.awssdk.services.lambda.LambdaAsyncClient
 import software.amazon.awssdk.services.lambda.model.{ InvokeRequest, InvokeResponse }

-import scala.concurrent.Await
+import scala.concurrent.{ Await, ExecutionContext }
 import scala.concurrent.duration._

 class AwsLambdaFlowSpec
@@ -49,9 +48,9 @@ class AwsLambdaFlowSpec
     with MockitoSugar
     with LogCapturing {

-  implicit val ec = system.dispatcher
+  implicit val ec: ExecutionContext = system.dispatcher

-  implicit val awsLambdaClient = mock[LambdaAsyncClient]
+  implicit val awsLambdaClient: LambdaAsyncClient = mock[LambdaAsyncClient]

   override protected def afterEach(): Unit = {
     reset(awsLambdaClient)
diff --git a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala
index ecca1b8b3..ee5d7d9ee 100644
--- a/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala
+++ b/azure-storage-queue/src/main/scala/org/apache/pekko/stream/connectors/azure/storagequeue/javadsl/AzureQueueSink.scala
@@ -51,7 +51,7 @@ object AzureQueueWithTimeoutsSink {
    * of a [[com.microsoft.azure.storage.queue.CloudQueueMessage]] a [[MessageWithTimeouts]].
    */
   def create(cloudQueue: Supplier[CloudQueue]): Sink[MessageWithTimeouts, CompletionStage[Done]] =
-    AzureQueueSink.fromFunction { input: MessageWithTimeouts =>
+    AzureQueueSink.fromFunction[MessageWithTimeouts] { input =>
       AzureQueueSinkFunctions
         .addMessage(() => cloudQueue.get)(input.message, input.timeToLive, input.initialVisibility)
     }
diff --git a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala
index 1421620ea..a805f5e4f 100644
--- a/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala
+++ b/cassandra/src/main/scala/org/apache/pekko/stream/connectors/cassandra/javadsl/CassandraSession.scala
@@ -83,7 +83,7 @@ final class CassandraSession(@InternalApi private[pekko] val delegate: scaladsl.
       onClose: java.lang.Runnable) =
     this(system.classicSystem, sessionProvider, executionContext, log, metricsCategory, init, onClose)

-  implicit private val ec = delegate.ec
+  implicit private val ec: ExecutionContext = delegate.ec

   /**
    * Closes the underlying Cassandra session.
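Note: the churn above is almost entirely mechanical Scala 3 preparation. Implicit definitions must now declare their result type, and the stage constructor parameters were renamed (`settings` → `replyToSinkSettings`/`writeSettings`) so they no longer share a name with the `override val settings` member each stage's connector logic provides. A minimal sketch of the type-annotation rule (names are illustrative, not from the codebase):

```scala
import java.util.concurrent.Executors
import scala.concurrent.ExecutionContext

object ImplicitTypesSketch {
  // Scala 2 happily infers the type here; Scala 3 (and 2.13 with -Xsource:3)
  // rejects an implicit definition without an explicit result type.
  implicit val ec: ExecutionContext =
    ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2))
}
```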
diff --git a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala
index d2113e753..01d67fd96 100644
--- a/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala
+++ b/cassandra/src/test/scala/org/apache/pekko/stream/connectors/cassandra/scaladsl/CassandraLifecycle.scala
@@ -15,7 +15,6 @@ package org.apache.pekko.stream.connectors.cassandra.scaladsl

 import java.util.concurrent.CompletionStage
 import java.util.concurrent.atomic.AtomicInteger
-
 import org.apache.pekko
 import pekko.Done
 import pekko.testkit.TestKitBase
@@ -27,7 +26,7 @@ import org.scalatest.concurrent.{ PatienceConfiguration, ScalaFutures }

 import scala.collection.immutable
 import scala.concurrent.duration._
-import scala.concurrent.{ Await, Future }
+import scala.concurrent.{ Await, ExecutionContext, Future }
 import scala.util.control.NonFatal

 trait CassandraLifecycleBase {
@@ -71,7 +70,7 @@ trait CassandraLifecycleBase {
     executeCql(lifecycleSession, statements.asScala.toList).asJava

   def withSchemaMetadataDisabled(block: => Future[Done]): Future[Done] = {
-    implicit val ec = lifecycleSession.ec
+    implicit val ec: ExecutionContext = lifecycleSession.ec
     lifecycleSession.underlying().flatMap { cqlSession =>
       cqlSession.setSchemaMetadataEnabled(false)
       val blockResult =
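The recurring `scala.concurrent.ExecutionContext` imports in these files exist only so that implicits like `lifecycleSession.ec` and `ExecutionContexts.parasitic` can be written with their type spelled out. A sketch using the standard library's equivalent of the parasitic context (available since Scala 2.13):

```scala
import scala.concurrent.{ ExecutionContext, Future }

object ParasiticSketch {
  // Explicitly typed, as the migration requires. The parasitic context runs
  // callbacks on the thread that completes the future, which suits the cheap
  // map/flatMap adapters these connectors use it for.
  implicit val ec: ExecutionContext = ExecutionContext.parasitic

  def firstHost(lookup: Future[Seq[String]]): Future[Option[String]] =
    lookup.map(_.headOption)
}
```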
diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala
index ebc0f08a8..6a35d171a 100644
--- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala
+++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/CouchbaseFlow.scala
@@ -29,6 +29,8 @@ import pekko.stream.connectors.couchbase.{
 import pekko.stream.scaladsl.Flow
 import com.couchbase.client.java.document.{ Document, JsonDocument }

+import scala.concurrent.ExecutionContext
+
 /**
  * Scala API: Factory methods for Couchbase flows.
  */
@@ -104,7 +106,7 @@ object CouchbaseFlow {
     val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
     Flow[T]
       .mapAsync(writeSettings.parallelism)(doc => {
-        implicit val executor = materializer.system.dispatcher
+        implicit val executor: ExecutionContext = materializer.system.dispatcher
         session
           .flatMap(_.upsertDoc(doc, writeSettings))
           .map(_ => CouchbaseWriteSuccess(doc))
@@ -157,7 +159,7 @@ object CouchbaseFlow {
     val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
     Flow[T]
       .mapAsync(writeSettings.parallelism)(doc => {
-        implicit val executor = materializer.system.dispatcher
+        implicit val executor: ExecutionContext = materializer.system.dispatcher
         session
           .flatMap(_.replaceDoc(doc, writeSettings))
           .map(_ => CouchbaseWriteSuccess(doc))
@@ -179,7 +181,7 @@ object CouchbaseFlow {
     val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
     Flow[String]
       .mapAsync(writeSettings.parallelism)(id => {
-        implicit val executor = materializer.system.dispatcher
+        implicit val executor: ExecutionContext = materializer.system.dispatcher
         session
           .flatMap(_.remove(id, writeSettings))
           .map(_ => id)
@@ -198,7 +200,7 @@ object CouchbaseFlow {
     val session = CouchbaseSessionRegistry(materializer.system).sessionFor(sessionSettings, bucketName)
     Flow[String]
       .mapAsync(writeSettings.parallelism)(id => {
-        implicit val executor = materializer.system.dispatcher
+        implicit val executor: ExecutionContext = materializer.system.dispatcher
         session
           .flatMap(_.remove(id, writeSettings))
           .map(_ => CouchbaseDeleteSuccess(id))
diff --git a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala
index 57d2ca805..19a323589 100644
--- a/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala
+++ b/couchbase/src/main/scala/org/apache/pekko/stream/connectors/couchbase/scaladsl/DiscoverySupport.scala
@@ -25,7 +25,7 @@ import pekko.util.FutureConverters._
 import com.typesafe.config.Config

 import scala.collection.immutable
-import scala.concurrent.Future
+import scala.concurrent.{ ExecutionContext, Future }
 import scala.concurrent.duration.FiniteDuration

 /**
@@ -39,7 +39,7 @@ sealed class DiscoverySupport private {
   private def readNodes(
       serviceName: String,
       lookupTimeout: FiniteDuration)(implicit system: ClassicActorSystemProvider): Future[immutable.Seq[String]] = {
-    implicit val ec = system.classicSystem.dispatcher
+    implicit val ec: ExecutionContext = system.classicSystem.dispatcher
     val discovery = Discovery(system).discovery
     discovery.lookup(serviceName, lookupTimeout).map { resolved =>
       resolved.addresses.map(_.host)
@@ -63,7 +63,7 @@ sealed class DiscoverySupport private {
   def nodes(
       config: Config)(
       implicit system: ClassicActorSystemProvider): CouchbaseSessionSettings => Future[CouchbaseSessionSettings] = {
-    implicit val ec = system.classicSystem.dispatcher
+    implicit val ec: ExecutionContext = system.classicSystem.dispatcher
     settings =>
       readNodes(config)
         .map { nodes =>
diff --git a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala
index b661296ee..25bcc4fb9 100644
--- a/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala
+++ b/csv-bench/src/main/scala/org/apache/pekko/stream/connectors/csv/scaladsl/CsvBench.scala
@@ -5,7 +5,6 @@
 package org.apache.pekko.stream.connectors.csv.scaladsl

 import java.util.concurrent.TimeUnit
-
 import org.apache.pekko
 import pekko.NotUsed
 import pekko.actor.ActorSystem
@@ -15,7 +14,7 @@ import pekko.util.ByteString
 import org.openjdk.jmh.annotations._
 import org.openjdk.jmh.infra.Blackhole

-import scala.concurrent.Await
+import scala.concurrent.{ Await, ExecutionContext }
 import scala.concurrent.duration.Duration

 /**
@@ -50,9 +49,9 @@ import scala.concurrent.duration.Duration
 @State(Scope.Benchmark)
 class CsvBench {

-  implicit val system = ActorSystem()
-  implicit val executionContext = system.dispatcher
-  implicit val mat = ActorMaterializer()
+  implicit val system: ActorSystem = ActorSystem()
+  implicit val executionContext: ExecutionContext = system.dispatcher
+  implicit val mat: ActorMaterializer = ActorMaterializer()

   /**
    * Size of [[ByteString]] chunks in bytes.
diff --git a/csv/src/test/scala/docs/scaladsl/CsvSpec.scala b/csv/src/test/scala/docs/scaladsl/CsvSpec.scala
index 276d543ae..41daa7816 100644
--- a/csv/src/test/scala/docs/scaladsl/CsvSpec.scala
+++ b/csv/src/test/scala/docs/scaladsl/CsvSpec.scala
@@ -30,7 +30,7 @@ abstract class CsvSpec
     with ScalaFutures
     with LogCapturing {

-  implicit val system = ActorSystem(this.getClass.getSimpleName)
+  implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName)

   override protected def afterAll(): Unit =
     TestKit.shutdownActorSystem(system)
diff --git a/doc-examples/src/test/scala/org/apache/pekko/stream/connectors/eip/scaladsl/PassThroughExamples.scala b/doc-examples/src/test/scala/org/apache/pekko/stream/connectors/eip/scaladsl/PassThroughExamples.scala
index 50e805258..50bdd127b 100644
--- a/doc-examples/src/test/scala/org/apache/pekko/stream/connectors/eip/scaladsl/PassThroughExamples.scala
+++ b/doc-examples/src/test/scala/org/apache/pekko/stream/connectors/eip/scaladsl/PassThroughExamples.scala
@@ -29,7 +29,7 @@ import org.scalatest.wordspec.AnyWordSpec

 class PassThroughExamples extends AnyWordSpec with BeforeAndAfterAll with Matchers with ScalaFutures {

-  implicit val system = ActorSystem("Test")
+  implicit val system: ActorSystem = ActorSystem("Test")

   "PassThroughFlow" should {
     " original message is maintained " in {
@@ -103,7 +103,7 @@ object PassThroughFlow {
 //#PassThrough

 object PassThroughFlowKafkaCommitExample {
-  implicit val system = ActorSystem("Test")
+  implicit val system: ActorSystem = ActorSystem("Test")

   def dummy(): Unit = {
     // #passThroughKafkaFlow
diff --git a/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala b/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala
index a948391be..475fc019c 100644
--- a/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala
+++ b/dynamodb/src/test/scala/docs/scaladsl/ExampleSpec.scala
@@ -17,15 +17,14 @@
 import java.net.URI

 import org.apache.pekko
 import pekko.NotUsed
+import pekko.actor.ActorSystem
 import pekko.stream.connectors.testkit.scaladsl.LogCapturing
 import pekko.stream.scaladsl.{ FlowWithContext, SourceWithContext }

 import scala.util.{ Failure, Success, Try }

 //#init-client
-import org.apache.pekko.actor.ActorSystem

 //#init-client
-import org.apache.pekko
 import pekko.stream.connectors.dynamodb.DynamoDbOp._
 import pekko.stream.connectors.dynamodb.scaladsl._
 import pekko.stream.scaladsl.{ Sink, Source }
diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala
index 7ec576a4f..33b8c8032 100644
--- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala
+++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchConnectorBehaviour.scala
@@ -364,7 +364,7 @@ trait ElasticsearchConnectorBehaviour {
       val indexName = "sink7"
       val createBooks = Source(books)
-        .map { book: (String, Book) =>
+        .map { (book: (String, Book)) =>
           WriteMessage.createUpsertMessage(id = book._1, source = book._2)
         }
         .via(
@@ -391,7 +391,7 @@ trait ElasticsearchConnectorBehaviour {
       // Update sink7/_doc with the second dataset
       val upserts = Source(updatedBooks)
-        .map { book: (String, JsObject) =>
+        .map { (book: (String, JsObject)) =>
           WriteMessage.createUpsertMessage(id = book._1, source = book._2)
         }
         .via(
@@ -453,7 +453,7 @@ trait ElasticsearchConnectorBehaviour {
     "read and write document-version if configured to do so" in {
       case class VersionTestDoc(id: String, name: String, value: Int)
-      implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc)
+      implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc.apply)

       val indexName = "version-test-scala"
       val typeName = "_doc"
diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala
index 4236234bc..2c67de274 100644
--- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala
+++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchSpecUtils.scala
@@ -45,7 +45,7 @@ trait ElasticsearchSpecUtils { this: AnyWordSpec with ScalaFutures =>

   case class Book(title: String, shouldSkip: Option[Boolean] = None, price: Int = 10)

-  implicit val format: JsonFormat[Book] = jsonFormat3(Book)
+  implicit val format: JsonFormat[Book] = jsonFormat3(Book.apply)
   // #define-class

   def register(connectionSettings: ElasticsearchConnectionSettings,
diff --git a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala
index 3c9fd4132..52b27dc11 100644
--- a/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala
+++ b/elasticsearch/src/test/scala/docs/scaladsl/ElasticsearchV5Spec.scala
@@ -64,7 +64,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
           constructElasticsearchParams("source", "_doc", ApiVersion.V5),
           query = """{"match_all": {}}""",
           settings = baseSourceSettings)
-        .map { message: ReadResult[spray.json.JsObject] =>
+        .map { (message: ReadResult[spray.json.JsObject]) =>
           val book: Book = jsonReader[Book].read(message.source)
           WriteMessage.createIndexMessage(message.id, book)
         }
@@ -97,7 +97,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
           constructElasticsearchParams("source", "_doc", ApiVersion.V5),
           query = """{"match_all": {}}""",
           settings = baseSourceSettings)
-        .map { message: ReadResult[Book] =>
+        .map { (message: ReadResult[Book]) =>
           WriteMessage.createIndexMessage(message.id, message.source)
         }
         .runWith(
@@ -129,7 +129,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
           constructElasticsearchParams("source", "_doc", ApiVersion.V5),
           query = """{"match_all": {}}""",
           settings = baseSourceSettings)
-        .map { message: ReadResult[Book] =>
+        .map { (message: ReadResult[Book]) =>
           WriteMessage.createIndexMessage(message.id, message.source)
         }
         .via(
@@ -200,7 +200,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
       val indexName = "sink6"
       val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka
-        .map { kafkaMessage: KafkaMessage =>
+        .map { (kafkaMessage: KafkaMessage) =>
           val book = kafkaMessage.book
           val id = book.title
@@ -251,7 +251,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
       val indexName = "sink6-bulk"
       val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka
-        .map { kafkaMessage: KafkaMessage =>
+        .map { (kafkaMessage: KafkaMessage) =>
           val book = kafkaMessage.book
           val id = book.title
@@ -306,7 +306,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
       val indexName = "sink6-nop"
       val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka
-        .map { kafkaMessage: KafkaMessage =>
+        .map { (kafkaMessage: KafkaMessage) =>
           val book = kafkaMessage.book
           val id = book.title
@@ -363,7 +363,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
       register(connectionSettings, indexName, "dummy", 10) // need to create index else exception in reading below

       val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka
-        .map { kafkaMessage: KafkaMessage =>
+        .map { (kafkaMessage: KafkaMessage) =>
           val book = kafkaMessage.book
           val id = book.title
@@ -447,7 +447,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
           constructElasticsearchParams("source", "_doc", ApiVersion.V5),
           query = """{"match_all": {}}""",
           settings = baseSourceSettings)
-        .map { message: ReadResult[Book] =>
+        .map { (message: ReadResult[Book]) =>
           WriteMessage
             .createIndexMessage(message.id, message.source)
             .withIndexName(customIndexName) // Setting the index-name to use for this document
@@ -480,7 +480,7 @@ class ElasticsearchV5Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt
       case class TestDoc(id: String, a: String, b: Option[String], c: String)

       // #custom-search-params
-      implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc)
+      implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply)

       val indexName = "custom-search-params-test-scala"
       val typeName = "_doc"
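The `.map { x: T => ... }` → `.map { (x: T) => ... }` rewrites, repeated through the rest of the Elasticsearch and Opensearch specs, follow Scala 3's rule that a type-ascribed lambda parameter must be parenthesized. A self-contained sketch (`KafkaMessage` stands in for the spec's fixture class):

```scala
object LambdaParensSketch {
  final case class KafkaMessage(payload: String)

  val messages = List(KafkaMessage("a"), KafkaMessage("b"))

  // Scala 2 also accepted: messages.map { m: KafkaMessage => m.payload }
  // Scala 3 requires the parentheses:
  val payloads: List[String] = messages.map((m: KafkaMessage) => m.payload)
}
```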
constructElasticsearchParams("source", "_doc", ApiVersion.V7), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .via( @@ -187,7 +187,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -237,7 +237,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6-bulk" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -291,7 +291,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt val indexName = "sink6-nop" val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -347,7 +347,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt register(connectionSettings, indexName, "dummy", 10) // need to create index else exception in reading below val kafkaToEs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -428,7 +428,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt constructElasticsearchParams("source", "_doc", ApiVersion.V7), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage .createIndexMessage(message.id, message.source) .withIndexName(customIndexName) // Setting the index-name to use for this document @@ -458,7 +458,7 @@ class ElasticsearchV7Spec extends ElasticsearchSpecBase with ElasticsearchSpecUt case class TestDoc(id: String, a: String, b: Option[String], c: String) - implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc) + implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply) val indexName = "custom-search-params-test-scala" val typeName = "_doc" diff --git a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala index 72ca863c0..77c513ee1 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchConnectorBehaviour.scala @@ -364,7 +364,7 @@ trait OpensearchConnectorBehaviour { val indexName = "sink7" val createBooks = Source(books) - .map { book: (String, Book) => + .map { (book: (String, Book)) => WriteMessage.createUpsertMessage(id = book._1, source = book._2) } .via( @@ -391,7 +391,7 @@ trait OpensearchConnectorBehaviour { // Update sink7/_doc with the second dataset val upserts = Source(updatedBooks) - .map { book: (String, JsObject) => + .map { (book: (String, JsObject)) => WriteMessage.createUpsertMessage(id = book._1, source = book._2) } .via( @@ -453,7 +453,7 @@ trait OpensearchConnectorBehaviour { "read and write document-version if 
configured to do so" in { case class VersionTestDoc(id: String, name: String, value: Int) - implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc) + implicit val formatVersionTestDoc: JsonFormat[VersionTestDoc] = jsonFormat3(VersionTestDoc.apply) val indexName = "version-test-scala" val typeName = "_doc" diff --git a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala index b202588e0..3ba59ee55 100644 --- a/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala +++ b/elasticsearch/src/test/scala/docs/scaladsl/OpensearchV1Spec.scala @@ -65,7 +65,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils constructElasticsearchParams("source", "_doc", OpensearchApiVersion.V1), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[spray.json.JsObject] => + .map { (message: ReadResult[spray.json.JsObject]) => val book: Book = jsonReader[Book].read(message.source) WriteMessage.createIndexMessage(message.id, book) } @@ -100,7 +100,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils constructElasticsearchParams("source", "_doc", OpensearchApiVersion.V1), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .runWith( @@ -133,7 +133,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils constructElasticsearchParams("source", "_doc", OpensearchApiVersion.V1), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage.createIndexMessage(message.id, message.source) } .via( @@ -205,7 +205,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils val indexName = "sink6" val kafkaToOs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -255,7 +255,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils val indexName = "sink6-bulk" val kafkaToOs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -309,7 +309,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils val indexName = "sink6-nop" val kafkaToOs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -365,7 +365,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils register(connectionSettings, indexName, "dummy", 10) // need to create index else exception in reading below val kafkaToOs = Source(messagesFromKafka) // Assume we get this from Kafka - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title @@ -449,7 +449,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils constructElasticsearchParams("source", "_doc", OpensearchApiVersion.V1), query = """{"match_all": {}}""", settings = baseSourceSettings) - .map { 
message: ReadResult[Book] => + .map { (message: ReadResult[Book]) => WriteMessage .createIndexMessage(message.id, message.source) .withIndexName(customIndexName) // Setting the index-name to use for this document @@ -480,7 +480,7 @@ class OpensearchV1Spec extends ElasticsearchSpecBase with ElasticsearchSpecUtils case class TestDoc(id: String, a: String, b: Option[String], c: String) - implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc) + implicit val formatVersionTestDoc: JsonFormat[TestDoc] = jsonFormat4(TestDoc.apply) val indexName = "custom-search-params-test-scala" val typeName = "_doc" diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala index 84465b0f2..75f0ffe74 100644 --- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala +++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala @@ -16,12 +16,10 @@ package org.apache.pekko.stream.connectors.file.javadsl import java.nio.file.{ Path, StandardOpenOption } import java.util.Optional import java.util.concurrent.CompletionStage - import org.apache.pekko import pekko.Done import pekko.stream.javadsl import pekko.stream.scaladsl -import pekko.stream.javadsl.Sink import pekko.util.ByteString import pekko.japi.function import pekko.util.ccompat.JavaConverters._ @@ -42,11 +40,11 @@ object LogRotatorSink { */ def createFromFunction( triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[Path]]]) - : javadsl.Sink[ByteString, CompletionStage[Done]] = - new Sink( - pekko.stream.connectors.file.scaladsl - .LogRotatorSink(asScala(triggerGeneratorCreator)) - .toCompletionStage()) + : javadsl.Sink[ByteString, CompletionStage[Done]] = { + val logRotatorSink = new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl + .LogRotatorSink(asScala(triggerGeneratorCreator))) + new javadsl.Sink(logRotatorSink.toCompletionStage()) + } /** * Sink directing the incoming `ByteString`s to new files whenever `triggerGenerator` returns a value. @@ -56,11 +54,12 @@ object LogRotatorSink { */ def createFromFunctionAndOptions( triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[Path]]], - fileOpenOptions: java.util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[Done]] = - new Sink( - pekko.stream.connectors.file.scaladsl - .LogRotatorSink(asScala(triggerGeneratorCreator), fileOpenOptions.asScala.toSet) - .toCompletionStage()) + fileOpenOptions: java.util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[Done]] = { + + val logRotatorSink = new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl + .LogRotatorSink(asScala(triggerGeneratorCreator), fileOpenOptions.asScala.toSet)) + new javadsl.Sink(logRotatorSink.toCompletionStage()) + } /** * Sink directing the incoming `ByteString`s to a new `Sink` created by `sinkFactory` whenever `triggerGenerator` returns a value. 
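The `jsonFormat3(Book)` → `jsonFormat3(Book.apply)` rewrites have a related cause: in Scala 3 the synthetic companion of a case class no longer extends `FunctionN`, so it cannot be passed directly where spray-json expects a constructor function. A sketch mirroring the `Book` fixture used above:

```scala
import spray.json._

object BookProtocolSketch extends DefaultJsonProtocol {
  final case class Book(title: String, shouldSkip: Option[Boolean] = None, price: Int = 10)

  // Scala 2: jsonFormat3(Book) compiled via the companion's Function3 parent.
  // Scala 3: the companion is not a function, so the method reference is explicit.
  implicit val format: JsonFormat[Book] = jsonFormat3(Book.apply)
}
```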
diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala
index 84465b0f2..75f0ffe74 100644
--- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala
+++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/javadsl/LogRotatorSink.scala
@@ -16,12 +16,10 @@ package org.apache.pekko.stream.connectors.file.javadsl
 import java.nio.file.{ Path, StandardOpenOption }
 import java.util.Optional
 import java.util.concurrent.CompletionStage
-
 import org.apache.pekko
 import pekko.Done
 import pekko.stream.javadsl
 import pekko.stream.scaladsl
-import pekko.stream.javadsl.Sink
 import pekko.util.ByteString
 import pekko.japi.function
 import pekko.util.ccompat.JavaConverters._
@@ -42,11 +40,11 @@ object LogRotatorSink {
    */
   def createFromFunction(
       triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[Path]]])
-      : javadsl.Sink[ByteString, CompletionStage[Done]] =
-    new Sink(
-      pekko.stream.connectors.file.scaladsl
-        .LogRotatorSink(asScala(triggerGeneratorCreator))
-        .toCompletionStage())
+      : javadsl.Sink[ByteString, CompletionStage[Done]] = {
+    val logRotatorSink = new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl
+      .LogRotatorSink(asScala(triggerGeneratorCreator)))
+    new javadsl.Sink(logRotatorSink.toCompletionStage())
+  }

   /**
    * Sink directing the incoming `ByteString`s to new files whenever `triggerGenerator` returns a value.
@@ -56,11 +54,12 @@ object LogRotatorSink {
    */
   def createFromFunctionAndOptions(
       triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[Path]]],
-      fileOpenOptions: java.util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[Done]] =
-    new Sink(
-      pekko.stream.connectors.file.scaladsl
-        .LogRotatorSink(asScala(triggerGeneratorCreator), fileOpenOptions.asScala.toSet)
-        .toCompletionStage())
+      fileOpenOptions: java.util.Set[StandardOpenOption]): javadsl.Sink[ByteString, CompletionStage[Done]] = {
+
+    val logRotatorSink = new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl
+      .LogRotatorSink(asScala(triggerGeneratorCreator), fileOpenOptions.asScala.toSet))
+    new javadsl.Sink(logRotatorSink.toCompletionStage())
+  }

   /**
    * Sink directing the incoming `ByteString`s to a new `Sink` created by `sinkFactory` whenever `triggerGenerator` returns a value.
@@ -72,14 +71,14 @@ object LogRotatorSink {
    */
   def withSinkFactory[C, R](
       triggerGeneratorCreator: function.Creator[function.Function[ByteString, Optional[C]]],
-      sinkFactory: function.Function[C, Sink[ByteString, CompletionStage[R]]])
+      sinkFactory: function.Function[C, javadsl.Sink[ByteString, CompletionStage[R]]])
       : javadsl.Sink[ByteString, CompletionStage[Done]] = {
     val t: C => scaladsl.Sink[ByteString, Future[R]] = path =>
       sinkFactory.apply(path).asScala.mapMaterializedValue(_.asScala)
-    new Sink(
-      pekko.stream.connectors.file.scaladsl.LogRotatorSink
-        .withSinkFactory(asScala[C](triggerGeneratorCreator), t)
-        .toCompletionStage())
+    val logRotatorSink =
+      new scaladsl.SinkToCompletionStage[ByteString, Done](pekko.stream.connectors.file.scaladsl.LogRotatorSink
+        .withSinkFactory(asScala[C](triggerGeneratorCreator), t))
+    new javadsl.Sink(logRotatorSink.toCompletionStage())
   }

   private def asScala[C](
diff --git a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala
index 7550936db..fcb19d0b0 100644
--- a/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala
+++ b/file/src/main/scala/org/apache/pekko/stream/connectors/file/scaladsl/LogRotatorSink.scala
@@ -167,7 +167,7 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T =>
   override def postStop(): Unit =
     promise.completeWith {
-      implicit val ec = materializer.executionContext
+      implicit val ec: ExecutionContext = materializer.executionContext
       Future
         .sequence(sinkCompletions)
         .map(_ => Done)(pekko.dispatch.ExecutionContexts.parasitic)
diff --git a/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala b/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala
index fb2e3570b..24f744e83 100644
--- a/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala
+++ b/file/src/test/scala/docs/scaladsl/LogRotatorSinkSpec.scala
@@ -95,7 +95,7 @@ class LogRotatorSinkSpec
     "complete when consuming an empty source" in assertAllStagesStopped {
       val triggerCreator: () => ByteString => Option[Path] = () => {
-        element: ByteString => fail("trigger creator should not be called")
+        (element: ByteString) => fail("trigger creator should not be called")
       }

       val rotatorSink: Sink[ByteString, Future[Done]] =
@@ -112,7 +112,7 @@ class LogRotatorSinkSpec
       val fileSizeTriggerCreator: () => ByteString => Option[Path] = () => {
         val max = 10 * 1024 * 1024
         var size: Long = max
-        element: ByteString =>
+        (element: ByteString) =>
           if (size + element.size > max) {
             val path = Files.createTempFile("out-", ".log")
             size = element.size
diff --git a/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/CommonFtpStageSpec.scala b/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/CommonFtpStageSpec.scala
index 1859f83a1..177503d8c 100644
--- a/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/CommonFtpStageSpec.scala
+++ b/ftp/src/test/scala/org/apache/pekko/stream/connectors/ftp/CommonFtpStageSpec.scala
@@ -20,8 +20,9 @@ import java.nio.file.{ Files, Paths }
 import java.time.Instant
 import java.util.concurrent.TimeUnit
 import org.apache.pekko
-import pekko.stream.{ IOOperationIncompleteException, IOResult }
+import pekko.stream.{ IOOperationIncompleteException, IOResult, Materializer }
 import BaseSftpSupport.{ CLIENT_PRIVATE_KEY_PASSPHRASE => ClientPrivateKeyPassphrase }
+import pekko.actor.ActorSystem
 import pekko.stream.scaladsl.{ Keep, Sink, Source }
 import pekko.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped
 import pekko.stream.testkit.scaladsl.TestSink
@@ -30,7 +31,7 @@ import org.scalatest.concurrent.Eventually
 import org.scalatest.time.{ Millis, Seconds, Span }

 import scala.collection.immutable
-import scala.concurrent.{ Await, ExecutionContextExecutor }
+import scala.concurrent.{ Await, ExecutionContext, ExecutionContextExecutor }
 import scala.concurrent.duration._
 import scala.util.Random
@@ -85,14 +86,14 @@ final class UnconfirmedReadsSftpSourceSpec extends BaseSftpSpec with CommonFtpSt

 trait CommonFtpStageSpec extends BaseSpec with Eventually {

-  implicit val system = getSystem
-  implicit val mat = getMaterializer
-  implicit val defaultPatience =
+  implicit val system: ActorSystem = getSystem
+  implicit val mat: Materializer = getMaterializer
+  implicit val defaultPatience: PatienceConfig =
     PatienceConfig(timeout = Span(30, Seconds), interval = Span(600, Millis))

   "FtpBrowserSource" should {
     "complete with a failed Future, when the credentials supplied were wrong" in assertAllStagesStopped {
-      implicit val ec = system.getDispatcher
+      implicit val ec: ExecutionContext = system.getDispatcher
       listFilesWithWrongCredentials("")
         .toMat(Sink.seq)(Keep.right)
         .run()
diff --git a/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala b/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala
index 48532d0be..46dca6113 100644
--- a/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala
+++ b/geode/src/test/scala/docs/scaladsl/GeodeBaseSpec.scala
@@ -30,7 +30,7 @@ import org.scalatest.wordspec.AnyWordSpec

 class GeodeBaseSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with LogCapturing {

-  implicit val system = ActorSystem("test")
+  implicit val system: ActorSystem = ActorSystem("test")

   // #region
   val personsRegionSettings: RegionSettings[Int, Person] = RegionSettings("persons", (p: Person) => p.id)
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala
index 8a9a0d51e..426f1dd8d 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/BigQueryException.scala
@@ -77,5 +77,5 @@ object BigQueryException {
   }

   private final case class ErrorResponse(error: Option[ErrorProto])
-  private implicit val errorResponseFormat: RootJsonFormat[ErrorResponse] = jsonFormat1(ErrorResponse)
+  private implicit val errorResponseFormat: RootJsonFormat[ErrorResponse] = jsonFormat1(ErrorResponse.apply)
 }
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala
index 08998f071..fa0451534 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/model/TableDataJsonProtocol.scala
@@ -24,7 +24,6 @@ import com.fasterxml.jackson.annotation._
 import spray.json.{ JsonFormat, RootJsonFormat, RootJsonReader, RootJsonWriter }

 import java.{ lang, util }
-
 import scala.annotation.nowarn
 import scala.annotation.unchecked.uncheckedVariance
 import scala.collection.immutable.Seq
@@ -166,8 +165,8 @@ object TableDataInsertAllRequest {

   implicit def writer[T](
       implicit writer: BigQueryRootJsonWriter[T]): RootJsonWriter[TableDataInsertAllRequest[T]] = {
-    implicit val format = lift(writer)
-    implicit val rowFormat = jsonFormat2(Row[T])
+    implicit val format: RootJsonFormat[T] = lift(writer)
+    implicit val rowFormat: RootJsonFormat[Row[T]] = jsonFormat2(Row[T])
     jsonFormat4(TableDataInsertAllRequest[T])
   }
 }
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala
index feea44fab..b130f5f3e 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala
@@ -28,7 +28,7 @@ import pekko.stream.connectors.googlecloud.bigquery.{ BigQueryEndpoints, BigQuer
 import pekko.stream.scaladsl.Source
 import pekko.{ Done, NotUsed }

-import scala.concurrent.Future
+import scala.concurrent.{ ExecutionContext, Future }

 private[scaladsl] trait BigQueryDatasets { this: BigQueryRest =>
@@ -102,7 +102,7 @@ private[scaladsl] trait BigQueryDatasets { this: BigQueryRest =>
       settings: GoogleSettings): Future[Dataset] = {
     import BigQueryException._
     import SprayJsonSupport._
-    implicit val ec = ExecutionContexts.parasitic
+    implicit val ec: ExecutionContext = ExecutionContexts.parasitic
     val uri = BigQueryEndpoints.datasets(settings.projectId)
     Marshal(dataset).to[RequestEntity].flatMap { entity =>
       val request = HttpRequest(POST, uri, entity = entity)
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala
index 6ae985019..166e1037f 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryJobs.scala
@@ -43,7 +43,7 @@ import pekko.stream.scaladsl.{ Flow, GraphDSL, Keep, Sink }
 import pekko.util.ByteString

 import scala.annotation.nowarn
-import scala.concurrent.Future
+import scala.concurrent.{ ExecutionContext, Future }

 private[scaladsl] trait BigQueryJobs { this: BigQueryRest =>
@@ -114,7 +114,7 @@ private[scaladsl] trait BigQueryJobs { this: BigQueryRest =>
       .fromMaterializer { (mat, attr) =>
         import SprayJsonSupport._
         import mat.executionContext
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)
         val BigQuerySettings(loadJobPerTableQuota) = BigQueryAttributes.resolveSettings(mat, attr)

         val job = Job(
@@ -168,8 +168,8 @@ private[scaladsl] trait BigQueryJobs { this: BigQueryRest =>
     Sink
       .fromMaterializer { (mat, attr) =>
         import BigQueryException._
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
-        implicit val ec = ExecutionContexts.parasitic
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val ec: ExecutionContext = ExecutionContexts.parasitic
         val uri = BigQueryMediaEndpoints.jobs(settings.projectId).withQuery(Query("uploadType" -> "resumable"))
         Sink
           .lazyFutureSink { () =>
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueries.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueries.scala
index a86072b98..be092ef82 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueries.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueries.scala
@@ -14,6 +14,7 @@
 package org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl

 import org.apache.pekko
+import pekko.actor.ActorSystem
 import pekko.NotUsed
 import pekko.dispatch.ExecutionContexts
 import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
@@ -23,14 +24,14 @@ import pekko.http.scaladsl.model.Uri.Query
 import pekko.http.scaladsl.model.{ HttpRequest, RequestEntity }
 import pekko.http.scaladsl.unmarshalling.FromEntityUnmarshaller
 import pekko.stream.RestartSettings
-import pekko.stream.connectors.google.GoogleAttributes
+import pekko.stream.connectors.google.{ GoogleAttributes, GoogleSettings }
 import pekko.stream.connectors.google.implicits._
 import pekko.stream.connectors.googlecloud.bigquery.model.JobReference
 import pekko.stream.connectors.googlecloud.bigquery.model.{ QueryRequest, QueryResponse }
 import pekko.stream.connectors.googlecloud.bigquery.{ BigQueryEndpoints, BigQueryException }
 import pekko.stream.scaladsl.{ Keep, RestartSource, Sink, Source }

-import scala.concurrent.Future
+import scala.concurrent.{ ExecutionContext, Future }
 import scala.concurrent.duration.FiniteDuration
 import scala.util.{ Failure, Success }
@@ -70,9 +71,9 @@ private[scaladsl] trait BigQueryQueries { this: BigQueryRest =>
       .fromMaterializer { (mat, attr) =>
         import BigQueryException._
         import SprayJsonSupport._
-        implicit val system = mat.system
-        implicit val ec = ExecutionContexts.parasitic
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val system: ActorSystem = mat.system
+        implicit val ec: ExecutionContext = ExecutionContexts.parasitic
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)

         Source.lazyFutureSource { () =>
           for {
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala
index 676e400b7..29b9cb4f8 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTableData.scala
@@ -14,6 +14,7 @@
 package org.apache.pekko.stream.connectors.googlecloud.bigquery.scaladsl

 import org.apache.pekko
+import pekko.actor.ActorSystem
 import pekko.NotUsed
 import pekko.dispatch.ExecutionContexts
 import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
@@ -22,7 +23,7 @@ import pekko.http.scaladsl.model.HttpMethods.POST
 import pekko.http.scaladsl.model.Uri.Query
 import pekko.http.scaladsl.model.{ HttpRequest, RequestEntity }
 import pekko.http.scaladsl.unmarshalling.{ FromEntityUnmarshaller, FromResponseUnmarshaller }
-import pekko.stream.connectors.google.GoogleAttributes
+import pekko.stream.connectors.google.{ GoogleAttributes, GoogleSettings }
 import pekko.stream.connectors.google.http.GoogleHttp
 import pekko.stream.connectors.google.implicits._
 import pekko.stream.connectors.googlecloud.bigquery.model.{
@@ -131,9 +132,9 @@ private[scaladsl] trait BigQueryTableData { this: BigQueryRest =>
       .fromMaterializer { (mat, attr) =>
         import BigQueryException._
         import SprayJsonSupport._
-        implicit val system = mat.system
+        implicit val system: ActorSystem = mat.system
         implicit val ec = ExecutionContexts.parasitic
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)

         val uri = BigQueryEndpoints.tableDataInsertAll(settings.projectId, datasetId, tableId)
         val request = HttpRequest(POST, uri)
diff --git a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTables.scala b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTables.scala
index ccde83332..2938d077c 100644
--- a/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTables.scala
+++ b/google-cloud-bigquery/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryTables.scala
@@ -29,7 +29,7 @@ import pekko.stream.connectors.googlecloud.bigquery.scaladsl.schema.TableSchemaW
 import pekko.stream.connectors.googlecloud.bigquery.{ BigQueryEndpoints, BigQueryException }
 import pekko.stream.scaladsl.{ Keep, Sink, Source }

-import scala.concurrent.Future
+import scala.concurrent.{ ExecutionContext, Future }

 private[scaladsl] trait BigQueryTables { this: BigQueryRest =>
@@ -94,7 +94,7 @@ private[scaladsl] trait BigQueryTables { this: BigQueryRest =>
       settings: GoogleSettings): Future[Table] = {
     import BigQueryException._
     import SprayJsonSupport._
-    implicit val ec = ExecutionContexts.parasitic
+    implicit val ec: ExecutionContext = ExecutionContexts.parasitic
     val projectId = table.tableReference.projectId.getOrElse(settings.projectId)
     val datasetId = table.tableReference.datasetId
     val uri = BigQueryEndpoints.tables(projectId, datasetId)
diff --git a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala
index 2c1c549b3..7051651d9 100644
--- a/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala
+++ b/google-cloud-bigquery/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/bigquery/scaladsl/BigQueryQueriesSpec.scala
@@ -50,7 +50,7 @@ class BigQueryQueriesSpec
       jsonFormat10(QueryResponse[T])
   }

-  implicit val settings = GoogleSettings().copy(credentials = NoCredentials("", ""))
+  implicit val settings: GoogleSettings = GoogleSettings().copy(credentials = NoCredentials("", ""))

   val jobId = "jobId"
   val pageToken = "pageToken"
diff --git a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala
index ea402dbd5..38eba06e4 100644
--- a/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala
+++ b/google-cloud-pub-sub-grpc/src/test/scala/docs/scaladsl/IntegrationSpec.scala
@@ -50,7 +50,7 @@ class IntegrationSpec
     with OptionValues
     with LogCapturing {

-  implicit val system = ActorSystem("IntegrationSpec")
+  implicit val system: ActorSystem = ActorSystem("IntegrationSpec")

   implicit val defaultPatience =
     PatienceConfig(timeout = 15.seconds, interval = 50.millis)
diff --git a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala
index 7ec28d3e9..696406f89 100644
--- a/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala
+++ b/google-cloud-pub-sub/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApi.scala
@@ -23,7 +23,7 @@ import pekko.http.scaladsl.marshalling.Marshal
 import pekko.http.scaladsl.model.HttpMethods.POST
 import pekko.http.scaladsl.model._
 import pekko.http.scaladsl.unmarshalling.{ FromResponseUnmarshaller, Unmarshal, Unmarshaller }
-import pekko.stream.connectors.google.GoogleAttributes
+import pekko.stream.connectors.google.{ GoogleAttributes, GoogleSettings, RequestSettings }
 import pekko.stream.connectors.google.http.GoogleHttp
 import pekko.stream.connectors.google.implicits._
 import pekko.stream.connectors.googlecloud.pubsub._
@@ -142,7 +142,7 @@ private[pubsub] trait PubSubApi {
       AcknowledgeRequest(json.asJsObject.fields("ackIds").convertTo[immutable.Seq[String]]: _*)
     def write(ar: AcknowledgeRequest): JsValue = JsObject("ackIds" -> ar.ackIds.toJson)
   }
-  private implicit val pullRequestFormat = DefaultJsonProtocol.jsonFormat2(PullRequest)
+  private implicit val pullRequestFormat = DefaultJsonProtocol.jsonFormat2(PullRequest.apply)

   private def scheme: String = if (isEmulated) "http" else "https"
@@ -150,8 +150,8 @@ private[pubsub] trait PubSubApi {
     Flow
       .fromMaterializer { (mat, attr) =>
         import mat.executionContext
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
-        implicit val requestSettings = settings.requestSettings
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val requestSettings: RequestSettings = settings.requestSettings
         val url: Uri = Uri.from(
           scheme = scheme,
@@ -188,8 +188,8 @@ private[pubsub] trait PubSubApi {
     Flow
       .fromMaterializer { (mat, attr) =>
         import mat.executionContext
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
-        implicit val requestSettings = settings.requestSettings
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val requestSettings: RequestSettings = settings.requestSettings
         val url: Uri = Uri.from(
           scheme = scheme,
@@ -239,8 +239,8 @@ private[pubsub] trait PubSubApi {
     Flow
       .fromMaterializer { (mat, attr) =>
         import mat.executionContext
-        implicit val system = mat.system
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val system: ActorSystem = mat.system
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)
         val url: Uri = s"/v1/projects/${settings.projectId}/topics/$topic:publish"
         FlowWithContext[PublishRequest, T]
           .mapAsync(parallelism) { request =>
b/google-cloud-pub-sub/src/test/scala/docs/scaladsl/ExampleUsage.scala @@ -31,7 +31,7 @@ import scala.concurrent.{ Future, Promise } class ExampleUsage { // #init-system - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() val config = PubSubConfig() val topic = "topic1" val subscription = "subscription1" diff --git a/google-cloud-pub-sub/src/test/scala/docs/scaladsl/IntegrationSpec.scala b/google-cloud-pub-sub/src/test/scala/docs/scaladsl/IntegrationSpec.scala index 3a2cc47e1..f7a1d2fab 100644 --- a/google-cloud-pub-sub/src/test/scala/docs/scaladsl/IntegrationSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/docs/scaladsl/IntegrationSpec.scala @@ -45,7 +45,7 @@ class IntegrationSpec with OptionValues with LogCapturing { - private implicit val system = ActorSystem("IntegrationSpec") + private implicit val system: ActorSystem = ActorSystem("IntegrationSpec") override def afterAll(): Unit = TestKit.shutdownActorSystem(system) diff --git a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala index afd6daf47..03c179ca5 100644 --- a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/GooglePubSubSpec.scala @@ -42,10 +42,10 @@ class GooglePubSubSpec with LogCapturing with BeforeAndAfterAll { - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 5.seconds, interval = 100.millis) - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() override protected def afterAll(): Unit = { TestKit.shutdownActorSystem(system) diff --git a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala index 78dd49392..d5bbb9e7e 100644 --- a/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala +++ b/google-cloud-pub-sub/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/pubsub/impl/PubSubApiSpec.scala @@ -53,7 +53,7 @@ class NoopTrustManager extends X509TrustManager { class PubSubApiSpec extends AnyFlatSpec with BeforeAndAfterAll with ScalaFutures with Matchers with LogCapturing { - implicit val system = ActorSystem( + implicit val system: ActorSystem = ActorSystem( "PubSubApiSpec", ConfigFactory .parseString( diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala index aab63a46c..5c20c3931 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/Formats.scala @@ -26,13 +26,15 @@ import scala.util.Try object Formats extends DefaultJsonProtocol { private final case class CustomerEncryption(encryptionAlgorithm: String, keySha256: String) - private implicit val customerEncryptionJsonFormat = jsonFormat2(CustomerEncryption) + + private implicit val customerEncryptionJsonFormat: 
RootJsonFormat[CustomerEncryption] = + jsonFormat2(CustomerEncryption.apply) private final case class Owner(entity: String, entityId: Option[String]) - private implicit val OwnerJsonFormat = jsonFormat2(Owner) + private implicit val OwnerJsonFormat: RootJsonFormat[Owner] = jsonFormat2(Owner.apply) private final case class ProjectTeam(projectNumber: String, team: String) - private implicit val ProjectTeamJsonFormat = jsonFormat2(ProjectTeam) + private implicit val ProjectTeamJsonFormat: RootJsonFormat[ProjectTeam] = jsonFormat2(ProjectTeam.apply) private final case class ObjectAccessControls(kind: String, id: String, @@ -129,7 +131,7 @@ object Formats extends DefaultJsonProtocol { prefixes: Option[List[String]], items: Option[List[StorageObjectJson]]) - private implicit val bucketInfoJsonFormat = jsonFormat6(BucketInfoJson) + private implicit val bucketInfoJsonFormat: RootJsonFormat[BucketInfoJson] = jsonFormat6(BucketInfoJson.apply) /** * Google API rewrite response object @@ -144,7 +146,8 @@ object Formats extends DefaultJsonProtocol { rewriteToken: Option[String], resource: Option[StorageObjectJson]) - private implicit val rewriteResponseFormat = jsonFormat6(RewriteResponseJson) + private implicit val rewriteResponseFormat: RootJsonFormat[RewriteResponseJson] = + jsonFormat6(RewriteResponseJson.apply) /** * Google API bucket response object @@ -159,7 +162,7 @@ object Formats extends DefaultJsonProtocol { selfLink: String, etag: String) - implicit val bucketInfoFormat = jsonFormat2(BucketInfo) + implicit val bucketInfoFormat: RootJsonFormat[BucketInfo] = jsonFormat2(BucketInfo.apply) implicit object BucketListResultReads extends RootJsonReader[BucketListResult] { override def read(json: JsValue): BucketListResult = { @@ -172,7 +175,7 @@ object Formats extends DefaultJsonProtocol { } } - private implicit val bucketListResultJsonReads = jsonFormat4(BucketListResultJson) + private implicit val bucketListResultJsonReads = jsonFormat4(BucketListResultJson.apply) implicit object RewriteResponseReads extends RootJsonReader[RewriteResponse] { override def read(json: JsValue): RewriteResponse = { diff --git a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala index 53319fc93..0ba705a5c 100644 --- a/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala +++ b/google-cloud-storage/src/main/scala/org/apache/pekko/stream/connectors/googlecloud/storage/impl/GCStorageStream.scala @@ -37,7 +37,7 @@ import pekko.{ Done, NotUsed } import spray.json._ import scala.annotation.nowarn -import scala.concurrent.Future +import scala.concurrent.{ ExecutionContext, Future } @InternalApi private[storage] object GCStorageStream { @@ -55,7 +55,7 @@ import scala.concurrent.Future val uri = Uri(gcsSettings.endpointUrl) .withPath(Path(gcsSettings.basePath) / "b") .withQuery(Query("project" -> settings.projectId)) - implicit val ec = parasitic + implicit val ec: ExecutionContext = parasitic val request = Marshal(BucketInfo(bucketName, location)).to[RequestEntity].map { entity => HttpRequest(POST, uri, entity = entity) } @@ -142,7 +142,7 @@ import scala.concurrent.Future metadata: Option[Map[String, String]] = None): Sink[ByteString, Future[StorageObject]] = Sink .fromMaterializer { (mat, attr) => - implicit val settings = { + implicit val settings: GoogleSettings = { val s = 
resolveSettings(mat, attr) s.copy(requestSettings = s.requestSettings.copy(uploadChunkSize = chunkSize)) } @@ -226,7 +226,7 @@ import scala.concurrent.Future private def makeRequestSource[T: FromResponseUnmarshaller](request: Future[HttpRequest]): Source[T, NotUsed] = Source .fromMaterializer { (mat, attr) => - implicit val settings = resolveSettings(mat, attr) + implicit val settings: GoogleSettings = resolveSettings(mat, attr) Source.lazyFuture { () => request.flatMap { request => GoogleHttp()(mat.system).singleAuthenticatedRequest[T](request) diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExtSpec.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExtSpec.scala index e93147b73..547941298 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExtSpec.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCSExtSpec.scala @@ -31,7 +31,7 @@ class GCSExtSpec extends AnyFlatSpec with Matchers with LogCapturing { "pekko.connectors.google.cloud-storage.endpoint-url" -> endpointUrl, "pekko.connectors.google.cloud-storage.base-path" -> basePath).asJava) - implicit val system = ActorSystem.create("gcs", config) + implicit val system: ActorSystem = ActorSystem.create("gcs", config) val ext = GCSExt(system) ext.settings.endpointUrl shouldBe endpointUrl diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExtSpec.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExtSpec.scala index ee8de4f20..a17a715c9 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExtSpec.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/GCStorageExtSpec.scala @@ -42,7 +42,7 @@ class GCStorageExtSpec extends AnyFlatSpec with Matchers with LogCapturing { "pekko.connectors.google.cloud.storage.base-path" -> basePath, "pekko.connectors.google.cloud.storage.token-url" -> tokenUrl, "pekko.connectors.google.cloud.storage.token-scope" -> tokenScope).asJava) - implicit val system = ActorSystem.create("gcStorage", config) + implicit val system: ActorSystem = ActorSystem.create("gcStorage", config) @nowarn("msg=deprecated") val ext = GCStorageExt(system) diff --git a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala index ea391e491..eb046720a 100644 --- a/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala +++ b/google-cloud-storage/src/test/scala/org/apache/pekko/stream/connectors/googlecloud/storage/WithMaterializerGlobal.scala @@ -19,7 +19,7 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } -import scala.concurrent.Await +import scala.concurrent.{ Await, ExecutionContext } import scala.concurrent.duration._ trait WithMaterializerGlobal @@ -30,7 +30,7 @@ trait WithMaterializerGlobal with IntegrationPatience with Matchers { implicit val actorSystem = ActorSystem("test") - implicit val ec = actorSystem.dispatcher + implicit val ec: 
   override protected def afterAll(): Unit = {
     super.afterAll()
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala
index 7560af7c9..20fbf7004 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/PaginatedRequest.scala
@@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google
 import org.apache.pekko
+import pekko.actor.ActorSystem
 import pekko.annotation.InternalApi
 import pekko.dispatch.ExecutionContexts
 import pekko.http.scaladsl.model.HttpMethods.GET
@@ -51,8 +52,8 @@ private[connectors] object PaginatedRequest {
     Source
       .fromMaterializer { (mat, attr) =>
-        implicit val system = mat.system
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val system: ActorSystem = mat.system
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)
         val requestWithPageToken = addPageToken(request, query)
         Source.unfoldAsync[Either[Done, Option[String]], Out](Right(initialPageToken)) {
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala
index 5da81e3fb..f50c921b9 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/ResumableUpload.scala
@@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google
 import org.apache.pekko
+import pekko.actor.ActorSystem
 import pekko.NotUsed
 import pekko.annotation.InternalApi
 import pekko.http.scaladsl.model.HttpMethods.{ POST, PUT }
@@ -56,7 +57,7 @@ private[connectors] object ResumableUpload {
       .fromMaterializer { (mat, attr) =>
         import mat.executionContext
         implicit val materializer = mat
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)
         val uploadChunkSize = settings.requestSettings.uploadChunkSize
         val in = Flow[ByteString]
@@ -95,7 +96,7 @@ private[connectors] object ResumableUpload {
   private def initiateSession(request: HttpRequest)(implicit mat: Materializer,
       settings: GoogleSettings): Future[Uri] = {
-    implicit val system = mat.system
+    implicit val system: ActorSystem = mat.system
     import implicits._
     implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse =>
@@ -111,7 +112,7 @@ private[connectors] object ResumableUpload {
   private def uploadChunk[T: FromResponseUnmarshaller](
       request: HttpRequest)(implicit mat: Materializer): Flow[Either[T, MaybeLast[Chunk]], Try[Option[T]], NotUsed] = {
-    implicit val system = mat.system
+    implicit val system: ActorSystem = mat.system
     val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse =>
       response.status match {
@@ -146,7 +147,7 @@ private[connectors] object ResumableUpload {
       request: HttpRequest, chunk: Future[MaybeLast[Chunk]])(
       implicit mat: Materializer, settings: GoogleSettings): Future[Either[T, MaybeLast[Chunk]]] = {
-    implicit val system = mat.system
+    implicit val system: ActorSystem = mat.system
     import implicits._
     implicit val um = Unmarshaller.withMaterializer { implicit ec => implicit mat => response: HttpResponse =>
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleComputeMetadata.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleComputeMetadata.scala
index da0e06726..b205a11cb 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleComputeMetadata.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleComputeMetadata.scala
@@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google.auth
 import org.apache.pekko
+import pekko.actor.ActorSystem
 import pekko.annotation.InternalApi
 import pekko.http.scaladsl.Http
 import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
@@ -42,7 +43,7 @@ private[auth] object GoogleComputeMetadata {
       clock: Clock): Future[AccessToken] = {
     import SprayJsonSupport._
     import mat.executionContext
-    implicit val system = mat.system
+    implicit val system: ActorSystem = mat.system
     for {
       response <- Http().singleRequest(tokenRequest)
       token <- Unmarshal(response.entity).to[AccessToken]
@@ -52,7 +53,7 @@ private[auth] object GoogleComputeMetadata {
   def getProjectId()(
       implicit mat: Materializer): Future[String] = {
     import mat.executionContext
-    implicit val system = mat.system
+    implicit val system: ActorSystem = mat.system
     for {
       response <- Http().singleRequest(projectIdRequest)
       projectId <- Unmarshal(response.entity).to[String]
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala
index 615e7959d..4099f91c1 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2.scala
@@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google.auth
 import org.apache.pekko
+import pekko.actor.ActorSystem
 import pekko.annotation.InternalApi
 import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
 import pekko.http.scaladsl.model.HttpMethods.POST
@@ -42,7 +43,7 @@ private[auth] object GoogleOAuth2 {
     import GoogleOAuth2Exception._
     import SprayJsonSupport._
     import implicits._
-    implicit val system = mat.system
+    implicit val system: ActorSystem = mat.system
     try {
       val entity = FormData(
@@ -71,5 +72,5 @@ private[auth] object GoogleOAuth2 {
   }
   final case class JwtClaimContent(scope: String)
-  implicit val jwtClaimContentFormat: JsonFormat[JwtClaimContent] = jsonFormat1(JwtClaimContent)
+  implicit val jwtClaimContentFormat: JsonFormat[JwtClaimContent] = jsonFormat1(JwtClaimContent.apply)
 }
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala
index 5f2c78b5c..bbd9e848d 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Credentials.scala
@@ -38,7 +38,7 @@ private[auth] final class GoogleOAuth2Credentials(credentials: OAuth2Credentials
     Await.result(requestMetadata, Duration.Inf)
   override def getRequestMetadata(uri: URI, executor: Executor, callback: RequestMetadataCallback): Unit = {
-    implicit val ec = ExecutionContext.fromExecutor(executor)
+    implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(executor)
     requestMetadata.onComplete {
       case Success(metadata) => callback.onSuccess(metadata)
       case Failure(ex) => callback.onFailure(ex)
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala
index 9c7e373d5..ae9690008 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Exception.scala
@@ -28,7 +28,8 @@ private[google] object GoogleOAuth2Exception {
   private val internalFailure = "internal_failure"
   private final case class OAuth2ErrorResponse(error: Option[String], error_description: Option[String])
-  private implicit val oAuth2ErrorResponseFormat: RootJsonFormat[OAuth2ErrorResponse] = jsonFormat2(OAuth2ErrorResponse)
+  private implicit val oAuth2ErrorResponseFormat: RootJsonFormat[OAuth2ErrorResponse] =
+    jsonFormat2(OAuth2ErrorResponse.apply)
   implicit val unmarshaller: FromResponseUnmarshaller[Throwable] = Unmarshaller
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala
index 2914286e9..369365415 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/ServiceAccountCredentials.scala
@@ -57,7 +57,7 @@ private[connectors] object ServiceAccountCredentials {
   final case class ServiceAccountCredentialsFile(project_id: String, client_email: String, private_key: String)
   implicit val serviceAccountCredentialsFormat: RootJsonFormat[ServiceAccountCredentialsFile] = jsonFormat3(
-    ServiceAccountCredentialsFile)
+    ServiceAccountCredentialsFile.apply)
 }
 @InternalApi
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala
index b41549dcb..be1f71ea9 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessCredentials.scala
@@ -61,7 +61,7 @@ private[connectors] object UserAccessCredentials {
       refresh_token: String,
       quota_project_id: String)
   implicit val userAccessCredentialsFormat: RootJsonFormat[UserAccessCredentialsFile] = jsonFormat4(
-    UserAccessCredentialsFile)
+    UserAccessCredentialsFile.apply)
 }
 @InternalApi
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessMetadata.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessMetadata.scala
index 56e834d8e..c3efbfcce 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessMetadata.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/auth/UserAccessMetadata.scala
@@ -14,6 +14,7 @@ package org.apache.pekko.stream.connectors.google.auth
 import org.apache.pekko
+import pekko.actor.ActorSystem
 import pekko.annotation.InternalApi
 import pekko.http.scaladsl.Http
 import pekko.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
@@ -45,7 +46,7 @@ private[auth] object UserAccessMetadata {
       clock: Clock): Future[AccessToken] = {
     import SprayJsonSupport._
     import mat.executionContext
-    implicit val system = mat.system
+    implicit val system: ActorSystem = mat.system
     for {
       response <- Http().singleRequest(tokenRequest(clientId, clientSecret, refreshToken))
       token <- Unmarshal(response.entity).to[AccessToken]
diff --git a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala
index f5326563d..d536f8b00 100644
--- a/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala
+++ b/google-common/src/main/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttp.scala
@@ -76,7 +76,7 @@ private[connectors] final class GoogleHttp private (val http: HttpExt) extends A
   def singleAuthenticatedRequest[T](request: HttpRequest)(
       implicit settings: GoogleSettings,
       um: FromResponseUnmarshaller[T]): Future[T] = Retry(settings.requestSettings.retrySettings) {
-    implicit val requestSettings = settings.requestSettings
+    implicit val requestSettings: RequestSettings = settings.requestSettings
     addAuth(request).flatMap(singleRequest(_))(ExecutionContexts.parasitic)
   }
@@ -110,7 +110,7 @@ private[connectors] final class GoogleHttp private (val http: HttpExt) extends A
       parallelism: Int = 1): FlowWithContext[HttpRequest, Ctx, Try[T], Ctx, Future[HostConnectionPool]] =
     FlowWithContext.fromTuples {
       Flow.fromMaterializer { (mat, attr) =>
-        implicit val settings = GoogleAttributes.resolveSettings(mat, attr)
+        implicit val settings: GoogleSettings = GoogleAttributes.resolveSettings(mat, attr)
         val p = if (port == -1) if (https) 443 else 80 else port
         val uriFlow = FlowWithContext[HttpRequest, Ctx].map(addStandardQuery)
@@ -163,7 +163,7 @@ private[connectors] final class GoogleHttp private (val http: HttpExt) extends A
             .fold(settings.requestSettings.queryString)(_.concat(settings.requestSettings.`&queryString`)))))
   private def addAuth(request: HttpRequest)(implicit settings: GoogleSettings): Future[HttpRequest] = {
-    implicit val requestSettings = settings.requestSettings
+    implicit val requestSettings: RequestSettings = settings.requestSettings
     settings.credentials
       .get()
       .map { token =>
diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/PaginatedRequestSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/PaginatedRequestSpec.scala
index d58c54de0..5d64b5b2a 100644
--- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/PaginatedRequestSpec.scala
+++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/PaginatedRequestSpec.scala
@@ -43,7 +43,7 @@ class PaginatedRequestSpec
     super.afterAll()
   }
-  implicit val patience = PatienceConfig(remainingOrDefault)
+  implicit val patience: PatienceConfig = PatienceConfig(remainingOrDefault)
   implicit val paginated: Paginated[JsValue] = _.asJsObject.fields.get("pageToken").flatMap {
     case JsString(value) => Some(value)
     case _ => None
diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala
index 15ded38bf..819834bb8 100644
--- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala
+++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/ResumableUploadSpec.scala
@@ -40,7 +40,7 @@ class ResumableUploadSpec
     with ScalaFutures
     with HoverflySupport {
-  implicit val patience = PatienceConfig(remainingOrDefault)
+  implicit val patience: PatienceConfig = PatienceConfig(remainingOrDefault)
   override def afterAll(): Unit = {
     TestKit.shutdownActorSystem(system)
diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala
index 08c067da5..1bd56b790 100644
--- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala
+++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/GoogleOAuth2Spec.scala
@@ -15,7 +15,7 @@ package org.apache.pekko.stream.connectors.google.auth
 import org.apache.pekko
 import pekko.actor.ActorSystem
-import pekko.stream.connectors.google.{ GoogleSettings, HoverflySupport }
+import pekko.stream.connectors.google.{ GoogleSettings, HoverflySupport, RequestSettings }
 import pekko.testkit.TestKit
 import io.specto.hoverfly.junit.core.SimulationSource.dsl
 import io.specto.hoverfly.junit.core.model.RequestFieldMatcher.newRegexMatcher
@@ -42,10 +42,10 @@ class GoogleOAuth2Spec
     TestKit.shutdownActorSystem(system)
     super.afterAll()
   }
-  implicit val defaultPatience = PatienceConfig(remainingOrDefault)
+  implicit val defaultPatience: PatienceConfig = PatienceConfig(remainingOrDefault)
   implicit val executionContext: ExecutionContext = system.dispatcher
-  implicit val settings = GoogleSettings(system)
+  implicit val settings: GoogleSettings = GoogleSettings(system)
   implicit val clock = Clock.systemUTC()
   lazy val privateKey = {
@@ -73,7 +73,7 @@ class GoogleOAuth2Spec
               success("""{"access_token": "token", "token_type": "String", "expires_in": 3600}""",
                 "application/json"))))
-      implicit val settings = GoogleSettings().requestSettings
+      implicit val settings: RequestSettings = GoogleSettings().requestSettings
       GoogleOAuth2.getAccessToken("email", privateKey, scopes).futureValue should matchPattern {
         case AccessToken("token", exp) if exp > (System.currentTimeMillis / 1000L + 3000L) =>
       }
diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala
index 5816d906e..7d7e2342a 100644
--- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala
+++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/auth/OAuth2CredentialsSpec.scala
@@ -43,7 +43,8 @@ class OAuth2CredentialsSpec
   }
   import system.dispatcher
-  implicit val settings = GoogleSettings().requestSettings
+
+  implicit val settings: RequestSettings = GoogleSettings().requestSettings
   implicit val clock = Clock.systemUTC()
   final object AccessTokenProvider {
diff --git a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala
index aa75e9cc4..56a75389c 100644
--- a/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala
+++ b/google-common/src/test/scala/org/apache/pekko/stream/connectors/google/http/GoogleHttpSpec.scala
@@ -76,7 +76,7 @@ class GoogleHttpSpec
     http
   }
-  implicit val settings = GoogleSettings().requestSettings
+  implicit val settings: RequestSettings = GoogleSettings().requestSettings
   "GoogleHttp" must {
@@ -163,7 +163,7 @@ class GoogleHttpSpec
       when(credentials.get()(any[ExecutionContext], any[RequestSettings])).thenReturn(
         Future.failed(GoogleOAuth2Exception(ErrorInfo())),
         Future.failed(new AnotherException))
-      implicit val settingsWithMockedCredentials = GoogleSettings().copy(credentials = credentials)
+      implicit val settingsWithMockedCredentials: GoogleSettings = GoogleSettings().copy(credentials = credentials)
       val http = mockHttp
       when(
diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmFlows.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmFlows.scala
index 9349cc71b..61070294c 100644
--- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmFlows.scala
+++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmFlows.scala
@@ -37,7 +37,7 @@ private[fcm] object FcmFlows {
   private[fcm] def fcmWithData[T](conf: FcmSettings): Flow[(FcmNotification, T), (FcmResponse, T), NotUsed] =
     Flow
      .fromMaterializer { (mat, attr) =>
-        implicit val settings = resolveSettings(conf)(mat, attr)
+        implicit val settings: GoogleSettings = resolveSettings(conf)(mat, attr)
         val sender = new FcmSender()
         Flow[(FcmNotification, T)].mapAsync(conf.maxConcurrentConnections) {
           case (notification, data) =>
@@ -54,7 +54,7 @@ private[fcm] object FcmFlows {
   private[fcm] def fcm(conf: FcmSettings): Flow[FcmNotification, FcmResponse, NotUsed] =
     Flow
      .fromMaterializer { (mat, attr) =>
-        implicit val settings = resolveSettings(conf)(mat, attr)
+        implicit val settings: GoogleSettings = resolveSettings(conf)(mat, attr)
         val sender = new FcmSender()
         Flow[FcmNotification].mapAsync(conf.maxConcurrentConnections) { notification =>
           sender.send(Http(mat.system), FcmSend(conf.isTest, notification))(mat, settings)
diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala
index 1fd2d9753..4e1768204 100644
--- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala
+++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/impl/FcmJsonSupport.scala
@@ -86,11 +86,13 @@ private[fcm] object FcmJsonSupport extends DefaultJsonProtocol with SprayJsonSup
   }
   // app -> google
-  implicit val webPushNotificationJsonFormat: RootJsonFormat[WebPushNotification] = jsonFormat3(WebPushNotification)
+  implicit val webPushNotificationJsonFormat: RootJsonFormat[WebPushNotification] =
+    jsonFormat3(WebPushNotification.apply)
   implicit val webPushConfigJsonFormat: RootJsonFormat[WebPushConfig] = jsonFormat3(WebPushConfig.apply)
-  implicit val androidNotificationJsonFormat: RootJsonFormat[AndroidNotification] = jsonFormat11(AndroidNotification)
+  implicit val androidNotificationJsonFormat: RootJsonFormat[AndroidNotification] =
+    jsonFormat11(AndroidNotification.apply)
   implicit val androidConfigJsonFormat: RootJsonFormat[AndroidConfig] = jsonFormat6(AndroidConfig.apply)
-  implicit val basicNotificationJsonFormat: RootJsonFormat[BasicNotification] = jsonFormat2(BasicNotification)
+  implicit val basicNotificationJsonFormat: RootJsonFormat[BasicNotification] = jsonFormat2(BasicNotification.apply)
   implicit val sendableFcmNotificationJsonFormat: RootJsonFormat[FcmNotification] = jsonFormat8(FcmNotification.apply)
-  implicit val fcmSendJsonFormat: RootJsonFormat[FcmSend] = jsonFormat2(FcmSend)
+  implicit val fcmSendJsonFormat: RootJsonFormat[FcmSend] = jsonFormat2(FcmSend.apply)
 }
diff --git a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala
index 1046cdba7..fdc9008b4 100644
--- a/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala
+++ b/google-fcm/src/main/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmJsonSupport.scala
@@ -249,10 +249,10 @@ private[fcm] object FcmJsonSupport extends DefaultJsonProtocol with SprayJsonSup
   implicit val fcmOptionsJsonFormat: RootJsonFormat[FcmOptions] = jsonFormat1(FcmOptions.apply)
   implicit val apnsFcmOptionsJsonFormat: RootJsonFormat[ApnsFcmOptions] = jsonFormat2(ApnsFcmOptions.apply)
   implicit val webPushFcmOptionsJsonFormat: RootJsonFormat[WebPushFcmOptions] = jsonFormat2(WebPushFcmOptions.apply)
-  implicit val androidColorJsonFormat: RootJsonFormat[Color] = jsonFormat4(Color)
+  implicit val androidColorJsonFormat: RootJsonFormat[Color] = jsonFormat4(Color.apply)
   implicit val androidLightSettingsJsonFormat: RootJsonFormat[LightSettings] = jsonFormat3(LightSettings.apply)
   implicit val androidConfigJsonFormat: RootJsonFormat[AndroidConfig] = jsonFormat8(AndroidConfig.apply)
   implicit val basicNotificationJsonFormat: RootJsonFormat[BasicNotification] = jsonFormat3(BasicNotification.apply)
   implicit val mainFcmNotificationJsonFormat: RootJsonFormat[FcmNotification] = jsonFormat9(FcmNotification.apply)
-  implicit val fcmSendJsonFormat: RootJsonFormat[FcmSend] = jsonFormat2(FcmSend)
+  implicit val fcmSendJsonFormat: RootJsonFormat[FcmSend] = jsonFormat2(FcmSend.apply)
 }
diff --git a/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala b/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala
index 66a605dcf..8b8e46a51 100644
--- a/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala
+++ b/google-fcm/src/test/scala/docs/scaladsl/FcmExamples.scala
@@ -13,9 +13,9 @@
 package docs.scaladsl
-import org.apache.pekko.actor.ActorSystem
 //#imports
 import org.apache.pekko
+import pekko.actor.ActorSystem
 import pekko.stream.connectors.google.firebase.fcm.FcmSettings
 import pekko.stream.connectors.google.firebase.fcm.v1.models._
 import pekko.stream.connectors.google.firebase.fcm.v1.scaladsl.GoogleFcm
@@ -28,7 +28,7 @@ import scala.concurrent.Future
 class FcmExamples {
-  implicit val system = ActorSystem()
+  implicit val system: ActorSystem = ActorSystem()
   // #simple-send
   val fcmConfig = FcmSettings()
diff --git a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala
index 16dff540f..a2d4d4922 100644
--- a/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala
+++ b/google-fcm/src/test/scala/org/apache/pekko/stream/connectors/google/firebase/fcm/v1/impl/FcmSenderSpec.scala
@@ -52,13 +52,13 @@ class FcmSenderSpec
   override def afterAll(): Unit = TestKit.shutdownActorSystem(system)
-  implicit val defaultPatience =
+  implicit val defaultPatience: PatienceConfig =
     PatienceConfig(timeout = 2.seconds, interval = 50.millis)
   implicit val executionContext: ExecutionContext = system.dispatcher
   implicit val conf = FcmSettings()
-  implicit val settings = GoogleSettings().copy(projectId = "projectId")
+  implicit val settings: GoogleSettings = GoogleSettings().copy(projectId = "projectId")
   "FcmSender" should {
diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala
index 43dc29854..b18579e45 100644
--- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala
+++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseFlowStage.scala
@@ -36,7 +36,7 @@ private[hbase] class HBaseFlowStage[A](settings: HTableSettings[A]) extends Grap
   override protected def logSource = classOf[HBaseFlowStage[A]]
-  implicit val connection = connect(settings.conf)
+  implicit val connection: Connection = connect(settings.conf)
   lazy val table: Table = getOrCreateTable(settings.tableName, settings.columnFamilies).get
diff --git a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala
index e9935121a..7c8a30bcc 100644
--- a/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala
+++ b/hbase/src/main/scala/org/apache/pekko/stream/connectors/hbase/impl/HBaseSourceStage.scala
@@ -17,7 +17,7 @@ import org.apache.pekko
 import pekko.stream.{ Attributes, Outlet, SourceShape }
 import pekko.stream.connectors.hbase.HTableSettings
 import pekko.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging }
-import org.apache.hadoop.hbase.client.{ Result, Scan, Table }
+import org.apache.hadoop.hbase.client.{ Connection, Result, Scan, Table }
 import scala.util.control.NonFatal
@@ -40,7 +40,7 @@ private[hbase] final class HBaseSourceLogic[A](scan: Scan,
     with StageLogging
     with HBaseCapabilities {
-  implicit val connection = connect(settings.conf)
+  implicit val connection: Connection = connect(settings.conf)
   lazy val table: Table = getOrCreateTable(settings.tableName, settings.columnFamilies).get
   private var results: java.util.Iterator[Result] = null
diff --git a/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala b/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala
index 06c4ed2a0..a6a867107 100644
--- a/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala
+++ b/hbase/src/test/scala/docs/scaladsl/HBaseStageSpec.scala
@@ -41,7 +41,7 @@ class HBaseStageSpec
     with BeforeAndAfterAll
     with LogCapturing {
-  implicit val defaultPatience =
+  implicit val defaultPatience: PatienceConfig =
     PatienceConfig(timeout = 5.seconds, interval = 500.millis)
   // #create-converter-put
diff --git a/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala b/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala
index 3eca2dbd6..3587f53e4 100644
--- a/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala
+++ b/hdfs/src/test/scala/docs/scaladsl/HdfsWriterSpec.scala
@@ -253,7 +253,7 @@ class HdfsWriterSpec
         committedOffsets = committedOffsets :+ offset
       val resF = Source(messagesFromKafka)
-        .map { kafkaMessage: KafkaMessage =>
+        .map { (kafkaMessage: KafkaMessage) =>
           val book = kafkaMessage.book
           // Transform message so that we can write to hdfs
           HdfsWriteMessage(ByteString(book.title), kafkaMessage.offset)
diff --git a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala
a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala index a511e46a5..e2c2273f8 100644 --- a/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala +++ b/huawei-push-kit/src/main/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitJsonSupport.scala @@ -227,5 +227,5 @@ private[pushkit] object PushKitJsonSupport extends DefaultJsonProtocol with Spra implicit val webNotificationJsonFormat: RootJsonFormat[WebNotification] = jsonFormat14(WebNotification.apply) implicit val pushKitNotificationJsonFormat: RootJsonFormat[PushKitNotification] = jsonFormat8( PushKitNotification.apply) - implicit val pushKitSendJsonFormat: RootJsonFormat[PushKitSend] = jsonFormat2(PushKitSend) + implicit val pushKitSendJsonFormat: RootJsonFormat[PushKitSend] = jsonFormat2(PushKitSend.apply) } diff --git a/huawei-push-kit/src/test/scala/docs/scaladsl/PushKitExamples.scala b/huawei-push-kit/src/test/scala/docs/scaladsl/PushKitExamples.scala index 6350b7bec..7a6d0f5e9 100644 --- a/huawei-push-kit/src/test/scala/docs/scaladsl/PushKitExamples.scala +++ b/huawei-push-kit/src/test/scala/docs/scaladsl/PushKitExamples.scala @@ -13,9 +13,9 @@ package docs.scaladsl -import org.apache.pekko.actor.ActorSystem //#imports import org.apache.pekko +import pekko.actor.ActorSystem import pekko.stream.connectors.huawei.pushkit._ import pekko.stream.connectors.huawei.pushkit.scaladsl.HmsPushKit import pekko.stream.connectors.huawei.pushkit.models.AndroidConfig @@ -38,7 +38,7 @@ import scala.concurrent.Future class PushKitExamples { - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() // #simple-send val config = HmsSettings() diff --git a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala index 5ff7206bc..99f46463b 100644 --- a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala +++ b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/HmsTokenApiSpec.scala @@ -48,7 +48,7 @@ class HmsTokenApiSpec override def afterAll() = TestKit.shutdownActorSystem(system) - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 2.seconds, interval = 50.millis) val config = HmsSettings() diff --git a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala index d047ca0f5..cc83e885f 100644 --- a/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala +++ b/huawei-push-kit/src/test/scala/org/apache/pekko/stream/connectors/huawei/pushkit/impl/PushKitSenderSpec.scala @@ -50,7 +50,7 @@ class PushKitSenderSpec override def afterAll() = TestKit.shutdownActorSystem(system) - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 6.seconds, interval = 50.millis) implicit val executionContext: ExecutionContext = system.dispatcher diff --git a/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala 
b/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala index 8178b2c69..246443dba 100644 --- a/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/FlowSpec.scala @@ -43,7 +43,7 @@ class FlowSpec with ScalaFutures with LogCapturing { - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() final val DatabaseName = this.getClass.getSimpleName @@ -102,7 +102,7 @@ class FlowSpec committedOffsets = committedOffsets :+ offset val f1 = Source(messagesFromKafka) - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val cpu = kafkaMessage.cpu println("hostname: " + cpu.getHostname) @@ -111,7 +111,7 @@ class FlowSpec .groupedWithin(10, 50.millis) .via( InfluxDbFlow.typedWithPassThrough(classOf[InfluxDbFlowCpu])) - .map { messages: Seq[InfluxDbWriteResult[InfluxDbFlowCpu, KafkaOffset]] => + .map { (messages: Seq[InfluxDbWriteResult[InfluxDbFlowCpu, KafkaOffset]]) => messages.foreach { message => commitToKafka(message.writeMessage.passThrough) } diff --git a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala index 3072b825e..35c86547d 100644 --- a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSourceSpec.scala @@ -39,7 +39,7 @@ class InfluxDbSourceSpec final val DatabaseName = "InfluxDbSourceSpec" - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() implicit var influxDB: InfluxDB = _ diff --git a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala index 61d35d8f6..4de4d3817 100644 --- a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala @@ -41,7 +41,7 @@ class InfluxDbSpec with ScalaFutures with LogCapturing { - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() final val DatabaseName = this.getClass.getSimpleName @@ -81,7 +81,7 @@ class InfluxDbSpec // #run-typed val f1 = InfluxDbSource .typed(classOf[InfluxDbSpecCpu], InfluxDbReadSettings(), influxDB, query) - .map { cpu: InfluxDbSpecCpu => + .map { (cpu: InfluxDbSpecCpu) => { val clonedCpu = cpu.cloneAt(cpu.getTime.plusSeconds(60000)) List(InfluxDbWriteMessage(clonedCpu)) diff --git a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala index 85891ae3a..2209d120d 100644 --- a/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala +++ b/ironmq/src/main/scala/org/apache/pekko/stream/connectors/ironmq/impl/IronMqPullStage.scala @@ -125,12 +125,12 @@ private[ironmq] final class IronMqPullStage(queueName: String, settings: IronMqS buffer = buffer.tail } - private val updateBuffer = getAsyncCallback { xs: List[ReservedMessage] => + private val updateBuffer = getAsyncCallback { (xs: List[ReservedMessage]) => buffer = buffer ::: xs deliveryMessages() } - private val updateFetching = getAsyncCallback { x: Boolean => + private val updateFetching = getAsyncCallback { (x: Boolean) => fetching = x } } diff --git a/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSpec.scala b/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSpec.scala index 763e7d57b..f0a930975 100644 --- 
a/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSpec.scala +++ b/ironmq/src/test/scala/org/apache/pekko/stream/connectors/ironmq/IronMqSpec.scala @@ -40,7 +40,7 @@ abstract class IronMqSpec override implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = 15.seconds, interval = 1.second) val DefaultActorSystemTerminateTimeout: Duration = 10.seconds - private implicit val ec = ExecutionContext.global + private implicit val ec: ExecutionContext = ExecutionContext.global private var mutableIronMqClient = Option.empty[IronMqClient] private var mutableConfig = Option.empty[Config] diff --git a/jms/src/test/scala/docs/scaladsl/JmsBufferedAckConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsBufferedAckConnectorsSpec.scala index 01eb1cc6b..5c33cf710 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsBufferedAckConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsBufferedAckConnectorsSpec.scala @@ -35,7 +35,7 @@ import scala.util.{ Failure, Success } class JmsBufferedAckConnectorsSpec extends JmsSharedServerSpec { - override implicit val patienceConfig = PatienceConfig(2.minutes) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(2.minutes) "The JMS Ack Connectors" should { "publish and consume strings through a queue" in withConnectionFactory() { connectionFactory => diff --git a/jms/src/test/scala/docs/scaladsl/JmsIbmmqConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsIbmmqConnectorsSpec.scala index 5184eb5dc..7da5909b3 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsIbmmqConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsIbmmqConnectorsSpec.scala @@ -26,7 +26,7 @@ import scala.concurrent.duration._ import scala.concurrent.Future class JmsIbmmqConnectorsSpec extends JmsSpec { - override implicit val patienceConfig = PatienceConfig(2.minutes) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(2.minutes) "The JMS Ibmmq Connectors" should { val queueConnectionFactory = { diff --git a/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala index 5b8d74933..4c61c2481 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsTxConnectorsSpec.scala @@ -35,7 +35,7 @@ class JmsTxConnectorsSpec extends JmsSharedServerSpec { private final val log = LoggerFactory.getLogger(classOf[JmsTxConnectorsSpec]) - override implicit val patienceConfig = PatienceConfig(2.minutes) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(2.minutes) "The JMS Transactional Connectors" should { "publish and consume strings through a queue" in withConnectionFactory() { connectionFactory => diff --git a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala index f5511597a..24c698119 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/JmsSpec.scala @@ -37,7 +37,7 @@ abstract class JmsSpec with MockitoSugar with LogCapturing { - implicit val system = ActorSystem(this.getClass.getSimpleName) + implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName) val consumerConfig = system.settings.config.getConfig(JmsConsumerSettings.configPath) val producerConfig = system.settings.config.getConfig(JmsProducerSettings.configPath) diff --git 
a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsAckConnectorsSpec.scala b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsAckConnectorsSpec.scala index 2f2c5b4a7..10339609b 100644 --- a/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsAckConnectorsSpec.scala +++ b/jms/src/test/scala/org/apache/pekko/stream/connectors/jms/scaladsl/JmsAckConnectorsSpec.scala @@ -33,7 +33,7 @@ import scala.util.{ Failure, Success } class JmsAckConnectorsSpec extends JmsSpec { - override implicit val patienceConfig = PatienceConfig(2.minutes) + override implicit val patienceConfig: PatienceConfig = PatienceConfig(2.minutes) "The JMS Ack Connectors" should { "publish and consume strings through a queue" in withConnectionFactory() { connectionFactory => diff --git a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala index e8b641806..1716bfd87 100644 --- a/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala +++ b/kinesis/src/main/scala/org/apache/pekko/stream/connectors/kinesis/ShardIterator.scala @@ -40,7 +40,7 @@ object ShardIterator { override final val shardIteratorType: ShardIteratorType = ShardIteratorType.TRIM_HORIZON } - case class AtTimestamp private (value: Instant) extends ShardIterator { + case class AtTimestamp private[kinesis] (value: Instant) extends ShardIterator { override final val timestamp: Option[Instant] = Some(value) override final val startingSequenceNumber: Option[String] = None diff --git a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSchedulerSourceSpec.scala b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSchedulerSourceSpec.scala index b23a28f50..bca357c36 100644 --- a/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSchedulerSourceSpec.scala +++ b/kinesis/src/test/scala/org/apache/pekko/stream/connectors/kinesis/KinesisSchedulerSourceSpec.scala @@ -269,7 +269,7 @@ class KinesisSchedulerSourceSpec var recordProcessor: ShardRecordProcessor = _ var otherRecordProcessor: ShardRecordProcessor = _ - private val schedulerBuilder = { x: ShardRecordProcessorFactory => + private val schedulerBuilder = { (x: ShardRecordProcessorFactory) => recordProcessor = x.shardRecordProcessor() otherRecordProcessor = x.shardRecordProcessor() semaphore.release() @@ -334,12 +334,12 @@ class KinesisSchedulerSourceSpec var latestRecord: KinesisClientRecord = _ val allRecordsPushed: Future[Unit] = Future { for (i <- 1 to 3) { - val record = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) - when(record.sequenceNumber).thenReturn("1") - when(record.subSequenceNumber).thenReturn(i.toLong) + val clientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) + when(clientRecord.sequenceNumber).thenReturn("1") + when(clientRecord.subSequenceNumber).thenReturn(i.toLong) sourceProbe.sendNext( new CommittableRecord( - record, + clientRecord, new BatchData(null, null, false, 0), new ShardProcessorData( "shard-1", @@ -349,7 +349,7 @@ class KinesisSchedulerSourceSpec override def forceCheckpoint(): Unit = checkpointer(record) }) - latestRecord = record + latestRecord = clientRecord } } @@ -374,11 +374,11 @@ class KinesisSchedulerSourceSpec val allRecordsPushed: Future[Unit] = Future { for (i <- 1 to 3) { - val record = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) - 
when(record.sequenceNumber).thenReturn(i.toString) + val clientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) + when(clientRecord.sequenceNumber).thenReturn(i.toString) sourceProbe.sendNext( new CommittableRecord( - record, + clientRecord, new BatchData(null, null, false, 0), new ShardProcessorData( "shard-1", @@ -388,14 +388,14 @@ class KinesisSchedulerSourceSpec override def forceCheckpoint(): Unit = checkpointerShard1(record) }) - latestRecordShard1 = record + latestRecordShard1 = clientRecord } for (i <- 1 to 3) { - val record = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) - when(record.sequenceNumber).thenReturn(i.toString) + val clientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) + when(clientRecord.sequenceNumber).thenReturn(i.toString) sourceProbe.sendNext( new CommittableRecord( - record, + clientRecord, new BatchData(null, null, false, 0), new ShardProcessorData( "shard-2", @@ -405,7 +405,7 @@ class KinesisSchedulerSourceSpec override def forceCheckpoint(): Unit = checkpointerShard2(record) }) - latestRecordShard2 = record + latestRecordShard2 = clientRecord } } @@ -422,12 +422,12 @@ class KinesisSchedulerSourceSpec } "fail with Exception if checkpoint action fails" in new KinesisSchedulerCheckpointContext { - val record: KinesisClientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) - when(record.sequenceNumber).thenReturn("1") + val clientRecord: KinesisClientRecord = org.mockito.Mockito.mock(classOf[KinesisClientRecord]) + when(clientRecord.sequenceNumber).thenReturn("1") val checkpointer: KinesisClientRecord => Unit = org.mockito.Mockito.mock(classOf[KinesisClientRecord => Unit]) val committableRecord: CommittableRecord = new CommittableRecord( - record, + clientRecord, new BatchData(null, null, false, 0), new ShardProcessorData( "shard-1", @@ -439,7 +439,7 @@ class KinesisSchedulerSourceSpec sourceProbe.sendNext(committableRecord) val failure = new RuntimeException() - when(checkpointer.apply(record)).thenThrow(failure) + when(checkpointer.apply(clientRecord)).thenThrow(failure) sinkProbe.request(1) diff --git a/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala b/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala index 09af15528..7804296e9 100644 --- a/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala +++ b/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala @@ -52,11 +52,11 @@ class MongoSinkSpec fromRegistries(fromProviders(classOf[Number], classOf[DomainObject]), DEFAULT_CODEC_REGISTRY): @nowarn( "msg=match may not be exhaustive") - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() + implicit val defaultPatience: PatienceConfig = + PatienceConfig(timeout = 5.seconds, interval = 50.millis) override protected def beforeAll(): Unit = { - implicit val patienceConfig = - PatienceConfig(timeout = 2.seconds, interval = 100.millis) Source.fromPublisher(db.drop()).runWith(Sink.headOption).futureValue } @@ -69,9 +69,6 @@ class MongoSinkSpec db.getCollection("domainObjectsSink", classOf[DomainObject]).withCodecRegistry(codecRegistry) private val domainObjectsDocumentColl = db.getCollection("domainObjectsSink") - implicit val defaultPatience = - PatienceConfig(timeout = 5.seconds, interval = 50.millis) - override def afterEach(): Unit = { Source.fromPublisher(numbersDocumentColl.deleteMany(new Document())).runWith(Sink.head).futureValue Source.fromPublisher(domainObjectsDocumentColl.deleteMany(new Document())).runWith(Sink.head).futureValue diff --git 
a/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala b/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala index 1e390cbb1..8037a3dff 100644 --- a/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala +++ b/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala @@ -43,7 +43,7 @@ class MongoSourceSpec with LogCapturing { // #init-system - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() // #init-system override protected def beforeAll(): Unit = @@ -74,7 +74,7 @@ class MongoSourceSpec private val numbersDocumentColl = db.getCollection("numbers") - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 5.seconds, interval = 50.millis) override def afterEach(): Unit = diff --git a/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala b/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala index efce44f7a..f4dff41f9 100644 --- a/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala +++ b/orientdb/src/test/scala/docs/scaladsl/OrientDbSpec.scala @@ -159,7 +159,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with // Copy source to sink1 through ODocument stream val f1 = OrientDbSource( sourceClass, - OrientDbSourceSettings(oDatabase)).map { message: OrientDbReadResult[ODocument] => + OrientDbSourceSettings(oDatabase)).map { (message: OrientDbReadResult[ODocument]) => OrientDbWriteMessage(message.oDocument) } .groupedWithin(10, 50.millis) @@ -173,7 +173,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with // #run-odocument val result: Future[immutable.Seq[String]] = OrientDbSource( sink4, - OrientDbSourceSettings(oDatabase)).map { message: OrientDbReadResult[ODocument] => + OrientDbSourceSettings(oDatabase)).map { (message: OrientDbReadResult[ODocument]) => message.oDocument.field[String]("book_title") } .runWith(Sink.seq) @@ -197,7 +197,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with val f1 = OrientDbSource( sourceClass, - OrientDbSourceSettings(oDatabase)).map { message: OrientDbReadResult[ODocument] => + OrientDbSourceSettings(oDatabase)).map { (message: OrientDbReadResult[ODocument]) => OrientDbWriteMessage(message.oDocument) } .groupedWithin(10, 50.millis) @@ -231,7 +231,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with // #run-typed val streamCompletion: Future[Done] = OrientDbSource .typed(sourceClass, OrientDbSourceSettings(oDatabase), classOf[OrientDbTest.source1]) - .map { m: OrientDbReadResult[OrientDbTest.source1] => + .map { (m: OrientDbReadResult[OrientDbTest.source1]) => val db: ODatabaseDocumentTx = oDatabase.acquire db.setDatabaseOwner(new OObjectDatabaseTx(db)) ODatabaseRecordThreadLocal.instance.set(db) @@ -267,7 +267,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with committedOffsets = committedOffsets :+ offset val f1 = Source(messagesFromKafka) - .map { kafkaMessage: KafkaMessage => + .map { (kafkaMessage: KafkaMessage) => val book = kafkaMessage.book val id = book.title println("title: " + book.title) @@ -279,7 +279,7 @@ class OrientDbSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with OrientDbFlow.createWithPassThrough( sink7, OrientDbWriteSettings(oDatabase))) - .map { messages: Seq[OrientDbWriteMessage[ODocument, KafkaOffset]] => + .map { (messages: Seq[OrientDbWriteMessage[ODocument, KafkaOffset]]) => messages.foreach { message => commitToKafka(message.passThrough) } 
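The hunks above repeat the two source-level rewrites this patch applies across the tree: Scala 3 requires an implicit definition to declare its result type explicitly, and it requires parentheses around a lambda parameter that carries a type ascription. A minimal sketch of both rules, using hypothetical names rather than anything from the patch:

    import scala.concurrent.ExecutionContext

    object Scala3MigrationSketch {
      // Scala 2 accepted `implicit val ec = ExecutionContext.global`, but
      // Scala 3 rejects an implicit definition without an explicit result
      // type, hence the ascriptions added throughout this patch.
      implicit val ec: ExecutionContext = ExecutionContext.global

      // Scala 2 accepted `xs.map { i: Int => i + 1 }`; Scala 3 requires the
      // type-ascribed parameter to be parenthesised.
      val incremented: List[Int] = List(1, 2).map { (i: Int) => i + 1 }
    }

Both rewritten forms also compile under Scala 2.12 and 2.13, which is what allows a single source tree to cross-build.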
diff --git a/pravega/src/test/scala/docs/scaladsl/PravegaReadWriteDocs.scala b/pravega/src/test/scala/docs/scaladsl/PravegaReadWriteDocs.scala index 7bf6095c4..6be579e03 100644 --- a/pravega/src/test/scala/docs/scaladsl/PravegaReadWriteDocs.scala +++ b/pravega/src/test/scala/docs/scaladsl/PravegaReadWriteDocs.scala @@ -18,6 +18,8 @@ import pekko.actor.ActorSystem import pekko.stream.connectors.pravega.{ PravegaEvent, ReaderSettingsBuilder, + TableReaderSettingsBuilder, + TableWriterSettings, TableWriterSettingsBuilder, WriterSettingsBuilder } @@ -27,21 +29,21 @@ import io.pravega.client.stream.Serializer import io.pravega.client.stream.impl.UTF8StringSerializer import java.nio.ByteBuffer -import pekko.stream.connectors.pravega.TableReaderSettingsBuilder import pekko.stream.connectors.pravega.scaladsl.PravegaTable import pekko.stream.connectors.pravega.scaladsl.Pravega + import scala.util.Using import io.pravega.client.tables.TableKey class PravegaReadWriteDocs { - implicit val system = ActorSystem("PravegaDocs") + implicit val system: ActorSystem = ActorSystem("PravegaDocs") val serializer = new UTF8StringSerializer implicit def personSerialiser: Serializer[Person] = ??? - implicit val intSerializer = new Serializer[Int] { + implicit val intSerializer: Serializer[Int] = new Serializer[Int] { override def serialize(value: Int): ByteBuffer = { val buff = ByteBuffer.allocate(4).putInt(value) buff.position(0) @@ -89,7 +91,7 @@ class PravegaReadWriteDocs { Pravega .source(readerGroup, readerSettings) - .to(Sink.foreach { event: PravegaEvent[String] => + .to(Sink.foreach { (event: PravegaEvent[String]) => val message: String = event.message processMessage(message) }) @@ -99,7 +101,7 @@ class PravegaReadWriteDocs { } - implicit val tablewriterSettings = TableWriterSettingsBuilder[Int, Person]() + implicit val tablewriterSettings: TableWriterSettings[Int, Person] = TableWriterSettingsBuilder[Int, Person]() .withKeyExtractor(id => new TableKey(intSerializer.serialize(id))) .build() diff --git a/pravega/src/test/scala/docs/scaladsl/PravegaSettingsSpec.scala b/pravega/src/test/scala/docs/scaladsl/PravegaSettingsSpec.scala index fa0362dbc..b109878c9 100644 --- a/pravega/src/test/scala/docs/scaladsl/PravegaSettingsSpec.scala +++ b/pravega/src/test/scala/docs/scaladsl/PravegaSettingsSpec.scala @@ -31,9 +31,9 @@ import io.pravega.client.tables.TableKey class PravegaSettingsSpec extends PravegaBaseSpec with Matchers { - implicit val serializer = new UTF8StringSerializer + implicit val serializer: UTF8StringSerializer = new UTF8StringSerializer - implicit val intSerializer = new Serializer[Int] { + implicit val intSerializer: Serializer[Int] = new Serializer[Int] { override def serialize(value: Int): ByteBuffer = { val buff = ByteBuffer.allocate(4).putInt(value) buff.position(0) diff --git a/pravega/src/test/scala/docs/scaladsl/Serializers.scala b/pravega/src/test/scala/docs/scaladsl/Serializers.scala index 20edef785..6eb33c596 100644 --- a/pravega/src/test/scala/docs/scaladsl/Serializers.scala +++ b/pravega/src/test/scala/docs/scaladsl/Serializers.scala @@ -19,9 +19,9 @@ import io.pravega.client.stream.impl.UTF8StringSerializer object Serializers { - implicit val stringSerializer = new UTF8StringSerializer() + implicit val stringSerializer: UTF8StringSerializer = new UTF8StringSerializer() - implicit val personSerializer = new Serializer[Person] { + implicit val personSerializer: Serializer[Person] = new Serializer[Person] { def serialize(x: Person): ByteBuffer = { val name = x.firstname.getBytes("UTF-8") 
val buff = ByteBuffer.allocate(4 + name.length).putInt(x.id) @@ -38,7 +38,7 @@ object Serializers { } - implicit val intSerializer = new Serializer[Int] { + implicit val intSerializer: Serializer[Int] = new Serializer[Int] { override def serialize(value: Int): ByteBuffer = { val buff = ByteBuffer.allocate(4).putInt(value) buff.position(0) diff --git a/project/Common.scala b/project/Common.scala index 8dd890f1a..2f020dff0 100644 --- a/project/Common.scala +++ b/project/Common.scala @@ -30,6 +30,8 @@ object Common extends AutoPlugin { override def requires = JvmPlugin && HeaderPlugin && ApacheSonatypePlugin && DynVerPlugin + val isScala3 = Def.setting(scalaBinaryVersion.value == "3") + override def globalSettings = Seq( scmInfo := Some(ScmInfo(url("https://github.com/apache/incubator-pekko-connectors"), "git@github.com:apache/incubator-pekko-connectors.git")), @@ -43,6 +45,12 @@ object Common extends AutoPlugin { // Ignore unused keys which affect documentation excludeLintKeys ++= Set(scmInfo, projectInfoVersion, autoAPIMappings)) + val packagesToSkip = "org.apache.pekko.pattern:" + // for some reason Scaladoc creates this + "org.mongodb.scala:" + // this one is a mystery as well + // excluding generated grpc classes, except the model ones (com.google.pubsub) + "com.google.api:com.google.cloud:com.google.iam:com.google.logging:" + + "com.google.longrunning:com.google.protobuf:com.google.rpc:com.google.type" + override lazy val projectSettings = Dependencies.Common ++ Seq( projectInfoVersion := (if (isSnapshot.value) "snapshot" else version.value), crossVersion := CrossVersion.binary, @@ -64,13 +72,8 @@ object Common extends AutoPlugin { "-doc-version", version.value, "-sourcepath", - (ThisBuild / baseDirectory).value.toString, - "-skip-packages", - "org.apache.pekko.pattern:" + // for some reason Scaladoc creates this - "org.mongodb.scala:" + // this one is a mystery as well - // excluding generated grpc classes, except the model ones (com.google.pubsub) - "com.google.api:com.google.cloud:com.google.iam:com.google.logging:" + - "com.google.longrunning:com.google.protobuf:com.google.rpc:com.google.type"), + (ThisBuild / baseDirectory).value.toString), + Compile / doc / scalacOptions := scalacOptions.value, Compile / doc / scalacOptions ++= Seq( "-doc-source-url", { @@ -79,6 +82,13 @@ object Common extends AutoPlugin { }, "-doc-canonical-base-url", "https://pekko.apache.org/api/pekko-connectors/current/"), + Compile / doc / scalacOptions ++= { + if (isScala3.value) { + Seq("-skip-packages:" + packagesToSkip) + } else { + Seq("-skip-packages", packagesToSkip) + } + }, Compile / doc / scalacOptions -= "-Werror", compile / javacOptions ++= Seq( "-Xlint:cast", diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 6540baa4b..1ab9ff117 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -16,7 +16,8 @@ object Dependencies { val Scala213 = "2.13.10" // update even in link-validator.conf val Scala212 = "2.12.17" - val ScalaVersions = Seq(Scala213, Scala212) + val Scala3 = "3.3.0" + val ScalaVersions = Seq(Scala213, Scala212, Scala3) val PekkoVersion = "0.0.0+26669-ec5b6764-SNAPSHOT" val PekkoBinaryVersion = "current" @@ -62,7 +63,7 @@ object Dependencies { val testkit = Seq( libraryDependencies := Seq( - "org.scala-lang.modules" %% "scala-collection-compat" % "2.2.0", + "org.scala-lang.modules" %% "scala-collection-compat" % "2.10.0", "org.apache.pekko" %% "pekko-stream" % PekkoVersion, "org.apache.pekko" %% "pekko-stream-testkit" % PekkoVersion, 
"org.apache.pekko" %% "pekko-slf4j" % PekkoVersion, @@ -88,6 +89,7 @@ object Dependencies { "com.fasterxml.jackson.core" % "jackson-databind" % JacksonDatabindVersion) val Amqp = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.rabbitmq" % "amqp-client" % "5.14.2" // APLv2 ) ++ Mockito) @@ -104,6 +106,7 @@ object Dependencies { ExclusionRule("software.amazon.awssdk", "netty-nio-client"))) ++ Mockito) val AzureStorageQueue = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.microsoft.azure" % "azure-storage" % "8.0.0" // ApacheV2 )) @@ -113,6 +116,7 @@ object Dependencies { val CassandraDriverVersionInDocs = "4.15" val Cassandra = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( ("com.datastax.oss" % "java-driver-core" % CassandraDriverVersion) .exclude("com.github.spotbugs", "spotbugs-annotations") @@ -121,6 +125,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion % Provided)) val Couchbase = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.couchbase.client" % "java-client" % CouchbaseVersion, // ApacheV2 "io.reactivex" % "rxjava-reactive-streams" % "1.2.1", // ApacheV2 @@ -151,6 +156,7 @@ object Dependencies { )) val Elasticsearch = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -162,17 +168,19 @@ object Dependencies { )) val AvroParquet = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.parquet" % "parquet-avro" % "1.10.1", // Apache2 ("org.apache.hadoop" % "hadoop-client" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 ("org.apache.hadoop" % "hadoop-common" % "3.2.1" % Test).exclude("log4j", "log4j"), // Apache2 "com.sksamuel.avro4s" %% "avro4s-core" % "4.1.1" % Test, "org.scalacheck" %% "scalacheck" % scalaCheckVersion % Test, - "org.specs2" %% "specs2-core" % "4.8.3" % Test, // MIT like: https://github.com/etorreborre/specs2/blob/master/LICENSE.txt + "org.specs2" %% "specs2-core" % "4.20.0" % Test, // MIT like: https://github.com/etorreborre/specs2/blob/master/LICENSE.txt "org.slf4j" % "log4j-over-slf4j" % log4jOverSlf4jVersion % Test // MIT like: http://www.slf4j.org/license.html )) val Ftp = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "commons-net" % "commons-net" % "3.8.0", // ApacheV2 "com.hierynomus" % "sshj" % "0.33.0" // ApacheV2 @@ -182,6 +190,7 @@ object Dependencies { val GeodeVersionForDocs = "115" val Geode = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq("geode-core", "geode-cq") .map("org.apache.geode" % _ % GeodeVersion) ++ @@ -190,6 +199,7 @@ object Dependencies { "org.apache.logging.log4j" % "log4j-to-slf4j" % "2.17.1" % Test) ++ JacksonDatabindDependencies) val GoogleCommon = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -199,6 +209,7 @@ object Dependencies { ) ++ Mockito) val GoogleBigQuery = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-jackson" % PekkoHttpVersion % Provided, @@ -209,6 +220,7 @@ object Dependencies { "io.specto" % "hoverfly-java" % hoverflyVersion % Test // ApacheV2 ) ++ Mockito) val GoogleBigQueryStorage = Seq( + crossScalaVersions -= Scala3, // see Pekko gRPC version in 
plugins.sbt libraryDependencies ++= Seq( // https://github.com/googleapis/java-bigquerystorage/tree/master/proto-google-cloud-bigquerystorage-v1 @@ -224,6 +236,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion) ++ Mockito) val GooglePubSub = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -231,6 +244,7 @@ object Dependencies { ) ++ Mockito) val GooglePubSubGrpc = Seq( + crossScalaVersions -= Scala3, // see Pekko gRPC version in plugins.sbt libraryDependencies ++= Seq( // https://github.com/googleapis/java-pubsub/tree/master/proto-google-cloud-pubsub-v1/ @@ -241,11 +255,13 @@ object Dependencies { "org.apache.pekko" %% "pekko-discovery" % PekkoVersion)) val GoogleFcm = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion) ++ Mockito) val GoogleStorage = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -271,6 +287,7 @@ object Dependencies { val HadoopVersion = "3.2.1" val Hdfs = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( ("org.apache.hadoop" % "hadoop-client" % HadoopVersion).exclude("log4j", "log4j").exclude("org.slf4j", "slf4j-log4j12"), // ApacheV2 @@ -285,6 +302,7 @@ object Dependencies { )) val HuaweiPushKit = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-spray-json" % PekkoHttpVersion, @@ -297,6 +315,7 @@ object Dependencies { )) val IronMq = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.mdedetrich" %% "pekko-stream-circe" % "0.0.0+94-dbf3173f-SNAPSHOT", // ApacheV2 @@ -304,6 +323,7 @@ object Dependencies { )) val Jms = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "javax.jms" % "jms" % "1.1" % Provided, // CDDL + GPLv2 "com.ibm.mq" % "com.ibm.mq.allclient" % "9.2.5.0" % Test, // IBM International Program License Agreement https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqdev/maven/licenses/L-APIG-AZYF2E/LI_en.html @@ -316,6 +336,7 @@ object Dependencies { "https://repository.jboss.org/nexus/content/groups/public")) +: externalResolvers.value) val JsonStreaming = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.github.jsurfer" % "jsurfer-jackson" % "1.6.0" // MIT ) ++ JacksonDatabindDependencies) @@ -341,6 +362,7 @@ object Dependencies { )) val MongoDb = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.mongodb.scala" %% "mongo-scala-driver" % "4.4.0" // ApacheV2 )) @@ -351,6 +373,7 @@ object Dependencies { )) val MqttStreaming = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-actor-typed" % PekkoVersion, // ApacheV2 "org.apache.pekko" %% "pekko-actor-testkit-typed" % PekkoVersion % Test, // ApacheV2 @@ -382,6 +405,7 @@ object Dependencies { )) val S3 = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "org.apache.pekko" %% "pekko-http" % PekkoHttpVersion, "org.apache.pekko" %% "pekko-http-xml" % PekkoHttpVersion, @@ -408,6 +432,7 @@ object Dependencies { val SlickVersion = "3.3.3" val 
Slick = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.typesafe.slick" %% "slick" % SlickVersion, // BSD 2-clause "Simplified" License "com.typesafe.slick" %% "slick-hikaricp" % SlickVersion, // BSD 2-clause "Simplified" License @@ -468,6 +493,7 @@ object Dependencies { "org.apache.pekko" %% "pekko-http-testkit" % PekkoHttpVersion % Test)) val UnixDomainSocket = Seq( + crossScalaVersions -= Scala3, libraryDependencies ++= Seq( "com.github.jnr" % "jffi" % "1.3.1", // classifier "complete", // Is the classifier needed anymore? "com.github.jnr" % "jnr-unixsocket" % "0.38.5" // BSD/ApacheV2/CPL/MIT as per https://github.com/akka/alpakka/issues/620#issuecomment-348727265 diff --git a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala index e21449b4a..461fbe92f 100644 --- a/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala +++ b/reference/src/main/scala/org/apache/pekko/stream/connectors/reference/Resource.scala @@ -116,7 +116,7 @@ object ResourceSettings { * there is only one instance of the resource instantiated per Actor System. */ final class ResourceExt private (sys: ExtendedActorSystem) extends Extension { - implicit val resource = Resource(ResourceSettings()(sys)) + implicit val resource: Resource = Resource(ResourceSettings()(sys)) sys.registerOnTermination(resource.cleanup()) } diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala index db019dcd2..f5d851291 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/HttpRequestsSpec.scala @@ -14,7 +14,6 @@ package org.apache.pekko.stream.connectors.s3.impl import java.util.UUID - import org.apache.pekko import pekko.actor.ActorSystem import pekko.http.scaladsl.Http @@ -33,6 +32,8 @@ import software.amazon.awssdk.auth.credentials._ import software.amazon.awssdk.regions.Region import software.amazon.awssdk.regions.providers._ +import scala.concurrent.ExecutionContext + class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with IntegrationPatience with LogCapturing { // test fixtures @@ -55,7 +56,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with val multipartUpload = MultipartUpload("test-bucket", "testKey", "uploadId") it should "initiate multipart upload when the region is us-east-1" in { - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val req = HttpRequests.initiateMultipartUploadRequest( @@ -74,7 +75,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with other regions" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val req = HttpRequests.initiateMultipartUploadRequest( @@ -93,21 +94,22 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "throw an error if path-style access is false and the bucket name contains non-LDH characters" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1) + implicit val settings: S3Settings = getSettings(s3Region = Region.EU_WEST_1) assertThrows[IllegalUriException]( 
HttpRequests.getDownloadRequest(S3Location("invalid_bucket_name", "image-1024@2x"))) } it should "throw an error if the key uses `..`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1) + implicit val settings: S3Settings = getSettings(s3Region = Region.EU_WEST_1) assertThrows[IllegalUriException]( HttpRequests.getDownloadRequest(S3Location("validbucket", "../other-bucket/image-1024@2x"))) } it should "throw an error when using `..` with path-style access" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) assertThrows[IllegalUriException]( HttpRequests.getDownloadRequest(S3Location("invalid/../bucket_name", "image-1024@2x"))) @@ -120,7 +122,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with path-style access in region us-east-1" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_1).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.initiateMultipartUploadRequest( @@ -133,7 +136,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with path-style access in region us-east-1" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_1).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.getDownloadRequest(location) @@ -143,7 +147,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with path-style access in other regions" in { - implicit val settings = getSettings(s3Region = Region.US_WEST_2).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_WEST_2).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.initiateMultipartUploadRequest( @@ -156,7 +161,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with path-style access in other regions" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.getDownloadRequest(location) @@ -166,7 +172,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests via configured `endpointUrl`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") val req = HttpRequests.getDownloadRequest(location) @@ -180,7 +187,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with // into an object at path /[empty string]/... 
// added this test because of a tricky uri building issue // in case of pathStyleAccess = false - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val location = S3Location("bucket", "/test/foo.txt") @@ -193,7 +200,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with it should "support download requests with keys ending with /" in { // object with a slash at the end of the filename should be accessible - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val location = S3Location("bucket", "/test//") @@ -205,7 +212,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with keys containing spaces" in { - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val location = S3Location("bucket", "test folder/test file.txt") @@ -217,7 +224,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with keys containing plus" in { - implicit val settings = getSettings() + implicit val settings: S3Settings = getSettings() val location = S3Location("bucket", "test folder/1 + 2 = 3") val req = HttpRequests.getDownloadRequest(location) @@ -227,7 +234,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support download requests with keys containing spaces with path-style access in other regions" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) val location = S3Location("bucket", "test folder/test file.txt") @@ -239,7 +247,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "add versionId query parameter when provided" in { - implicit val settings = getSettings().withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = getSettings().withAccessStyle(AccessStyle.PathAccessStyle) val location = S3Location("bucket", "test/foo.txt") val versionId = "123456" @@ -253,7 +261,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support multipart init upload requests via configured `endpointUrl`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") val req = HttpRequests.initiateMultipartUploadRequest( @@ -267,7 +276,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support multipart upload part requests via configured `endpointUrl`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") val req = HttpRequests.uploadPartRequest(multipartUpload, 1, MemoryChunk(ByteString.empty)) @@ -278,7 +288,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly multipart upload part request with customer keys server side encryption" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) 
+ implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withAccessStyle(AccessStyle.PathAccessStyle) val myKey = "my-key" val md5Key = "md5-key" val s3Headers = ServerSideEncryption.customerKeys(myKey).withMd5(md5Key).headersFor(UploadPart) @@ -290,8 +301,9 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "support multipart upload complete requests via configured `endpointUrl`" in { - implicit val settings = getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") - implicit val executionContext = scala.concurrent.ExecutionContext.global + implicit val settings: S3Settings = + getSettings(s3Region = Region.EU_WEST_1).withEndpointUrl("http://localhost:8080") + implicit val executionContext: ExecutionContext = ExecutionContext.global val req = HttpRequests.completeMultipartUploadRequest(multipartUpload, (1, "part") :: Nil, Nil).futureValue @@ -302,7 +314,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with AES-256 server side encryption" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val s3Headers = ServerSideEncryption.aes256().headersFor(InitiateMultipartUpload) val req = HttpRequests.initiateMultipartUploadRequest(location, contentType, s3Headers) @@ -310,7 +322,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with aws:kms server side encryption" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val testArn = "arn:aws:kms:my-region:my-account-id:key/my-key-id" val s3Headers = ServerSideEncryption.kms(testArn).headersFor(InitiateMultipartUpload) val req = HttpRequests.initiateMultipartUploadRequest(location, contentType, s3Headers) @@ -320,7 +332,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with customer keys encryption" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val myKey = "my-key" val md5Key = "md5-key" val s3Headers = ServerSideEncryption.customerKeys(myKey).withMd5(md5Key).headersFor(InitiateMultipartUpload) @@ -332,7 +344,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with custom s3 storage class" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val s3Headers = S3Headers().withStorageClass(StorageClass.ReducedRedundancy).headers val req = HttpRequests.initiateMultipartUploadRequest(location, contentType, s3Headers) @@ -340,7 +352,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "initiate multipart upload with custom s3 headers" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2) + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2) val s3Headers = S3Headers().withCustomHeaders(Map("Cache-Control" -> "no-cache")).headers val req = HttpRequests.initiateMultipartUploadRequest(location, contentType, s3Headers) @@ -348,7 +360,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with 
ScalaFutures with } it should "properly construct the list bucket request with no prefix, continuation token or delimiter passed" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.listBucket(location.bucket) @@ -357,7 +370,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly construct the list bucket request with a prefix and token passed" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.listBucket(location.bucket, Some("random/prefix"), Some("randomToken")) @@ -368,7 +382,8 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly construct the list bucket request with a delimiter and token passed" in { - implicit val settings = getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) + implicit val settings: S3Settings = + getSettings(s3Region = Region.US_EAST_2).withAccessStyle(AccessStyle.PathAccessStyle) val req = HttpRequests.listBucket(location.bucket, delimiter = Some("/"), continuationToken = Some("randomToken")) @@ -377,7 +392,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly construct the list bucket request when using api version 1" in { - implicit val settings = + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2, listBucketApiVersion = ApiVersion.ListBucketVersion1) .withAccessStyle(AccessStyle.PathAccessStyle) @@ -388,7 +403,7 @@ class HttpRequestsSpec extends AnyFlatSpec with Matchers with ScalaFutures with } it should "properly construct the list bucket request when using api version set to 1 and a continuation token" in { - implicit val settings = + implicit val settings: S3Settings = getSettings(s3Region = Region.US_EAST_2, listBucketApiVersion = ApiVersion.ListBucketVersion1) .withAccessStyle(AccessStyle.PathAccessStyle) diff --git a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/S3StreamSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/S3StreamSpec.scala index c665056e5..5c24dbc7f 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/S3StreamSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/S3StreamSpec.scala @@ -62,7 +62,7 @@ class S3StreamSpec(_system: ActorSystem) } val location = S3Location("test-bucket", "test-key") - implicit val settings = + implicit val settings: S3Settings = S3Settings(MemoryBufferType, credentialsProvider, regionProvider, ApiVersion.ListBucketVersion2) val result: HttpRequest = S3Stream.invokePrivate(requestHeaders(getDownloadRequest(location), None)) @@ -86,7 +86,7 @@ class S3StreamSpec(_system: ActorSystem) val location = S3Location("test-bucket", "test-key") val range = ByteRange(1, 4) - implicit val settings = + implicit val settings: S3Settings = S3Settings(MemoryBufferType, credentialsProvider, regionProvider, ApiVersion.ListBucketVersion2) val result: HttpRequest = S3Stream.invokePrivate(requestHeaders(getDownloadRequest(location), Some(range))) diff --git 
a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala index 0ae1be6a6..75621b3bd 100644 --- a/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala +++ b/s3/src/test/scala/org/apache/pekko/stream/connectors/s3/impl/auth/SignerSpec.scala @@ -41,7 +41,7 @@ class SignerSpec(_system: ActorSystem) with LogCapturing { def this() = this(ActorSystem("SignerSpec")) - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(2, Seconds), interval = Span(5, Millis)) val credentials = StaticCredentialsProvider.create( diff --git a/slick/src/test/scala/docs/scaladsl/DocSnippets.scala b/slick/src/test/scala/docs/scaladsl/DocSnippets.scala index b0112d3ae..fe20fd3fd 100644 --- a/slick/src/test/scala/docs/scaladsl/DocSnippets.scala +++ b/slick/src/test/scala/docs/scaladsl/DocSnippets.scala @@ -16,6 +16,8 @@ package docs.scaladsl import org.apache.pekko import pekko.Done import pekko.actor.ActorSystem + +import scala.concurrent.ExecutionContext //#important-imports import org.apache.pekko import pekko.stream.connectors.slick.scaladsl._ @@ -26,8 +28,8 @@ import slick.jdbc.GetResult import scala.concurrent.Future object SlickSourceWithPlainSQLQueryExample extends App { - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #source-example implicit val session = SlickSession.forConfig("slick-h2") @@ -62,8 +64,8 @@ object SlickSourceWithPlainSQLQueryExample extends App { } object SlickSourceWithTypedQueryExample extends App { - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #source-with-typed-query implicit val session = SlickSession.forConfig("slick-h2") @@ -94,8 +96,8 @@ object SlickSourceWithTypedQueryExample extends App { } object SlickSinkExample extends App { - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #sink-example implicit val session = SlickSession.forConfig("slick-h2") @@ -125,8 +127,8 @@ object SlickSinkExample extends App { } object SlickFlowExample extends App { - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #flow-example implicit val session = SlickSession.forConfig("slick-h2") @@ -168,8 +170,8 @@ object SlickFlowWithPassThroughExample extends App { def map[B](f: A => B): KafkaMessage[B] = KafkaMessage(f(msg), offset) } - implicit val system = ActorSystem() - implicit val ec = system.dispatcher + implicit val system: ActorSystem = ActorSystem() + implicit val ec: ExecutionContext = system.dispatcher // #flowWithPassThrough-example implicit val session = SlickSession.forConfig("slick-h2") diff --git a/slick/src/test/scala/docs/scaladsl/SlickSpec.scala b/slick/src/test/scala/docs/scaladsl/SlickSpec.scala index 428f4d995..6563dba1f 100644 --- a/slick/src/test/scala/docs/scaladsl/SlickSpec.scala +++ b/slick/src/test/scala/docs/scaladsl/SlickSpec.scala @@ -27,7 +27,7 @@ import slick.dbio.DBIOAction import slick.jdbc.{ GetResult, JdbcProfile } import 
scala.concurrent.duration._ -import scala.concurrent.{ Await, Future } +import scala.concurrent.{ Await, ExecutionContext, Future } import org.scalatest.matchers.must.Matchers import org.scalatest.wordspec.AnyWordSpec @@ -43,7 +43,7 @@ class SlickSpec with Matchers with LogCapturing { // #init-mat - implicit val system = ActorSystem() + implicit val system: ActorSystem = ActorSystem() // #init-mat // #init-session @@ -59,9 +59,9 @@ class SlickSpec def * = (id, name) } - implicit val ec = system.dispatcher - implicit val defaultPatience = PatienceConfig(timeout = 3.seconds, interval = 50.millis) - implicit val getUserResult = GetResult(r => User(r.nextInt(), r.nextString())) + implicit val ec: ExecutionContext = system.dispatcher + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 3.seconds, interval = 50.millis) + implicit val getUserResult: GetResult[User] = GetResult(r => User(r.nextInt(), r.nextString())) val users = (1 to 40).map(i => User(i, s"Name$i")).toSet diff --git a/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala b/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala index efea2cc2a..9fc774f7d 100644 --- a/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala +++ b/sns/src/test/scala/docs/scaladsl/SnsPublisherSpec.scala @@ -34,7 +34,7 @@ class SnsPublisherSpec with IntegrationTestContext with LogCapturing { - implicit val defaultPatience = + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = 15.seconds, interval = 100.millis) "SNS Publisher sink" should "send string message" in { diff --git a/solr/src/test/scala/docs/scaladsl/SolrSpec.scala b/solr/src/test/scala/docs/scaladsl/SolrSpec.scala index 3d8b80169..f38d9e1cb 100644 --- a/solr/src/test/scala/docs/scaladsl/SolrSpec.scala +++ b/solr/src/test/scala/docs/scaladsl/SolrSpec.scala @@ -90,7 +90,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #run-document val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) val doc: SolrInputDocument = bookToDoc(book) WriteMessage.createUpsertMessage(doc) @@ -138,7 +138,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #run-bean val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val title = tuple.getString("title") WriteMessage.createUpsertMessage(BookBean(title)) } @@ -179,7 +179,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #run-typed val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) WriteMessage.createUpsertMessage(book) } @@ -223,7 +223,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #typeds-flow val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) WriteMessage.createUpsertMessage(book) } @@ -294,7 +294,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #kafka-example // Note: This code mimics Pekko Connectors Kafka APIs val copyCollection = kafkaConsumerSource - .map { kafkaMessage: CommittableMessage => + .map { (kafkaMessage: CommittableMessage) => val book = kafkaMessage.book // Transform message so that we can write to solr 
WriteMessage.createUpsertMessage(book).withPassThrough(kafkaMessage.committableOffset) @@ -340,7 +340,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) val doc: SolrInputDocument = bookToDoc(book) WriteMessage.createUpsertMessage(doc) @@ -360,7 +360,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #delete-documents val deleteDocuments = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val id = tuple.fields.get("title").toString WriteMessage.createDeleteMessage[SolrInputDocument](id) } @@ -393,7 +393,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val upsertCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) .copy(comment = "Written by good authors.") val doc: SolrInputDocument = bookToDoc(book) @@ -414,7 +414,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #update-atomically-documents val updateCollection = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val id = tuple.fields.get("title").toString val comment = tuple.fields.get("comment").toString WriteMessage.createUpdateMessage[SolrInputDocument]( @@ -461,7 +461,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) WriteMessage.createUpsertMessage(book) } @@ -482,7 +482,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val deleteElements = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val title = tuple.fields.get("title").toString WriteMessage.createDeleteMessage[Book](title) } @@ -512,7 +512,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple).copy(comment = "Written by good authors.", routerOpt = Some("router-value")) WriteMessage.createUpsertMessage(book) @@ -534,7 +534,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val updateCollection = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => WriteMessage .createUpdateMessage[Book]( idField = "title", @@ -581,7 +581,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca val copyCollection = SolrSource .fromTupleStream(stream) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val book: Book = tupleToBook(tuple) val doc: SolrInputDocument = bookToDoc(book) WriteMessage.createUpsertMessage(doc) @@ -601,7 +601,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #delete-documents-query val deleteByQuery = SolrSource .fromTupleStream(stream2) - .map { tuple: Tuple => + .map { (tuple: Tuple) => val title = tuple.fields.get("title").toString WriteMessage.createDeleteByQueryMessage[SolrInputDocument]( s"""title:"$title" """) @@ -657,7 +657,7 @@ class SolrSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with Sca // #kafka-example-PT // Note: This code mimics 
Pekko Connectors Kafka APIs val copyCollection = kafkaConsumerSource - .map { offset: CommittableOffset => + .map { (offset: CommittableOffset) => // Transform message so that we can write to solr WriteMessage.createPassThrough(offset).withSource(new SolrInputDocument()) } diff --git a/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala b/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala index c830d77c6..2751a01e2 100644 --- a/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala +++ b/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala @@ -248,7 +248,7 @@ class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with it should "fail if any of the messages in the batch request failed" in { val messages = for (i <- 0 until 10) yield Message.builder().body(s"Message - $i").build() - implicit val mockAwsSqsClient = mock[SqsAsyncClient] + implicit val mockAwsSqsClient: SqsAsyncClient = mock[SqsAsyncClient] when(mockAwsSqsClient.deleteMessageBatch(any[DeleteMessageBatchRequest])) .thenReturn(CompletableFuture.completedFuture { @@ -269,7 +269,7 @@ class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with it should "fail if the batch request failed" in { val messages = for (i <- 0 until 10) yield Message.builder().body(s"Message - $i").build() - implicit val mockAwsSqsClient = mock[SqsAsyncClient] + implicit val mockAwsSqsClient: SqsAsyncClient = mock[SqsAsyncClient] when(mockAwsSqsClient.deleteMessageBatch(any[DeleteMessageBatchRequest])) .thenReturn( @@ -289,7 +289,7 @@ class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with it should "fail if the client invocation failed" in { val messages = for (i <- 0 until 10) yield Message.builder().body(s"Message - $i").build() - implicit val mockAwsSqsClient = mock[SqsAsyncClient] + implicit val mockAwsSqsClient: SqsAsyncClient = mock[SqsAsyncClient] when( mockAwsSqsClient.deleteMessageBatch(any[DeleteMessageBatchRequest])).thenThrow(new RuntimeException("error")) @@ -357,7 +357,7 @@ class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with it should "ignore batch of messages" in { val messages = for (i <- 0 until 10) yield Message.builder().body(s"Message - $i").build() - implicit val mockAwsSqsClient = mock[SqsAsyncClient] + implicit val mockAwsSqsClient: SqsAsyncClient = mock[SqsAsyncClient] val future = // #batch-ignore diff --git a/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala b/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala index f93f1b684..8677c6d25 100644 --- a/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala +++ b/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala @@ -256,7 +256,7 @@ class SqsSourceSpec extends AnyFlatSpec with ScalaFutures with Matchers with Def val customClient: SdkAsyncHttpClient = PekkoHttpClient.builder().withActorSystem(system).build() // #init-custom-client - implicit val customSqsClient = SqsAsyncClient + implicit val customSqsClient: SqsAsyncClient = SqsAsyncClient .builder() .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("x", "x"))) // #init-custom-client diff --git a/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala b/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala index ad410bdbb..2502ff912 100644 --- a/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala +++ b/sse/src/test/scala/docs/scaladsl/EventSourceSpec.scala @@ -32,7 +32,7 @@ import org.scalatest.BeforeAndAfterAll import scala.collection.immutable import scala.concurrent.duration.DurationInt -import scala.concurrent.{ 
Await, Future } +import scala.concurrent.{ Await, ExecutionContext, Future } //#event-source import org.apache.pekko import pekko.http.scaladsl.Http @@ -91,7 +91,7 @@ object EventSourceSpec { import Server._ import context.dispatcher - private implicit val sys = context.system + private implicit val sys: ActorSystem = context.system context.system.scheduler.scheduleOnce(1.second, self, Bind) @@ -147,8 +147,8 @@ object EventSourceSpec { final class EventSourceSpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll { import EventSourceSpec._ - private implicit val system = ActorSystem() - private implicit val ec = system.dispatcher + private implicit val system: ActorSystem = ActorSystem() + private implicit val ec: ExecutionContext = system.dispatcher "EventSource" should { "communicate correctly with an instable HTTP server" in { diff --git a/udp/src/test/scala/docs/scaladsl/UdpSpec.scala b/udp/src/test/scala/docs/scaladsl/UdpSpec.scala index 9cdcb3bb1..8f58affa6 100644 --- a/udp/src/test/scala/docs/scaladsl/UdpSpec.scala +++ b/udp/src/test/scala/docs/scaladsl/UdpSpec.scala @@ -41,8 +41,8 @@ class UdpSpec with BeforeAndAfterAll with LogCapturing { - implicit val mat = Materializer(system) - implicit val pat = PatienceConfig(3.seconds, 50.millis) + implicit val mat: Materializer = Materializer(system) + implicit val pat: PatienceConfig = PatienceConfig(3.seconds, 50.millis) // #bind-address val bindToLocal = new InetSocketAddress("localhost", 0) diff --git a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala index 4d6c7919a..7927d922b 100644 --- a/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala +++ b/unix-domain-socket/src/main/scala/org/apache/pekko/stream/connectors/unixdomainsocket/scaladsl/UnixDomainSocket.scala @@ -140,7 +140,7 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends UnixDomainSock backlog: Int = 128, halfClose: Boolean = false): Future[ServerBinding] = bind(path, backlog, halfClose) - .to(Sink.foreach { conn: IncomingConnection => + .to(Sink.foreach { (conn: IncomingConnection) => conn.flow.join(handler).run() }) .run()
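At the build level the patch relies on two patterns. Modules whose dependencies do not yet publish Scala 3 artifacts are dropped from the cross-build with crossScalaVersions -= Scala3, and the Scaladoc package-exclusion flag changes shape between compilers: Scala 2's scaladoc takes -skip-packages and its argument as two options, while Scala 3's scaladoc expects a single -skip-packages:... option. A sketch of both patterns for an sbt build, with a hypothetical module and package list:

    val Scala3 = "3.3.0"

    lazy val someConnector = project.settings(
      // Opt this module out of the Scala 3 cross-build until its
      // dependencies publish 3.x artifacts.
      crossScalaVersions -= Scala3,
      // Pass the package-exclusion list in the form each scaladoc expects.
      Compile / doc / scalacOptions ++= {
        val packagesToSkip = "com.example.internal:com.example.generated"
        if (scalaBinaryVersion.value == "3") Seq("-skip-packages:" + packagesToSkip)
        else Seq("-skip-packages", packagesToSkip)
      })

This inlines the check that project/Common.scala factors out as the isScala3 setting.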