From e193a2a74e3d492ec9c120fb348dc300f36e6f87 Mon Sep 17 00:00:00 2001 From: Steven Zhang <35498506+stevenpyzhang@users.noreply.github.com> Date: Mon, 9 Dec 2019 13:21:26 -0800 Subject: [PATCH 001/123] fix: UncaughtExceptionHandler not being set for Persistent Queries (#4087) --- .../java/io/confluent/ksql/query/KafkaStreamsBuilderImpl.java | 2 ++ .../src/main/java/io/confluent/ksql/query/QueryExecutor.java | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/KafkaStreamsBuilderImpl.java b/ksql-engine/src/main/java/io/confluent/ksql/query/KafkaStreamsBuilderImpl.java index e0f868963d76..da991b4481d8 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/query/KafkaStreamsBuilderImpl.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/query/KafkaStreamsBuilderImpl.java @@ -15,6 +15,7 @@ package io.confluent.ksql.query; +import io.confluent.ksql.util.KafkaStreamsUncaughtExceptionHandler; import java.util.Map; import java.util.Objects; import java.util.Properties; @@ -40,6 +41,7 @@ public BuildResult buildKafkaStreams( props.putAll(conf); final Topology topology = builder.build(props); final KafkaStreams kafkaStreams = new KafkaStreams(topology, props, clientSupplier); + kafkaStreams.setUncaughtExceptionHandler(new KafkaStreamsUncaughtExceptionHandler()); return new BuildResult(topology, kafkaStreams); } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java index 3bb8a2029196..44dce8de7184 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java @@ -43,7 +43,6 @@ import io.confluent.ksql.serde.GenericKeySerDe; import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.services.ServiceContext; -import io.confluent.ksql.util.KafkaStreamsUncaughtExceptionHandler; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlConstants; import io.confluent.ksql.util.KsqlException; @@ -163,8 +162,6 @@ public TransientQueryMetadata buildTransientQuery( final BuildResult built = kafkaStreamsBuilder.buildKafkaStreams(streamsBuilder, streamsProperties); - built.kafkaStreams.setUncaughtExceptionHandler(new KafkaStreamsUncaughtExceptionHandler()); - final LogicalSchema transientSchema = buildTransientQuerySchema(schema); return new TransientQueryMetadata( From d7ce6603512ac157d88c24525e42215a31ceb474 Mon Sep 17 00:00:00 2001 From: Nick Dearden Date: Mon, 9 Dec 2019 14:27:01 -0800 Subject: [PATCH 002/123] docs: intent for klip-16: Introduce K$ Dynamic Views (#4056) --- design-proposals/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/design-proposals/README.md b/design-proposals/README.md index de2f30c53834..e4c5aac05407 100644 --- a/design-proposals/README.md +++ b/design-proposals/README.md @@ -56,3 +56,4 @@ Next KLIP number: **14** | [KLIP-13: Introduce KSQL command to print connect worker properties to the console](klip-13-introduce-KSQL-command-to-print-connect-worker-properties-to-the-console.md) | Proposal | N/A | | [KLIP-14: ROWTIME as Pseudocolumn](klip-14-rowtime-as-pseudocolumn.md) | Proposal | N/A | | [KLIP-15: KSQLDB new API and Client(klip-15-new-api-and-client.md | Proposal | N/A | +| [KLIP-16: Introduce 'K$' dynamic views | Proposal | N/A | From 5553283c265aa74a1af55bc90557a2e7c45cdaa0 Mon Sep 17 00:00:00 2001 From: Victoria Xia Date: Mon, 9 Dec 2019 14:33:24 -0800 Subject: [PATCH 003/123] 
chore: issue regular LIST TOPICS from healthcheck, not extended (#4061) --- .../io/confluent/ksql/rest/healthcheck/HealthCheckAgent.java | 2 +- .../confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgent.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgent.java index aad550e098e4..8031d6446986 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgent.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgent.java @@ -36,7 +36,7 @@ public class HealthCheckAgent { private static final List DEFAULT_CHECKS = ImmutableList.of( new ExecuteStatementCheck(METASTORE_CHECK_NAME, "list streams; list tables; list queries;"), - new ExecuteStatementCheck(KAFKA_CHECK_NAME, "list topics extended;") + new ExecuteStatementCheck(KAFKA_CHECK_NAME, "list topics;") ); private final SimpleKsqlClient ksqlClient; diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java index a6a2ae847066..d194a4f4793f 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java @@ -104,7 +104,7 @@ public void shouldReturnUnhealthyIfMetastoreCheckFails() { @Test public void shouldReturnUnhealthyIfKafkaCheckFails() { // Given: - when(ksqlClient.makeKsqlRequest(SERVER_URI, "list topics extended;")) + when(ksqlClient.makeKsqlRequest(SERVER_URI, "list topics;")) .thenReturn(unSuccessfulResponse); // When: From e0db7285ba4c38e540c392c51bb2d472bf2ae395 Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Mon, 9 Dec 2019 15:09:44 -0800 Subject: [PATCH 004/123] docs: update example output from SHOW TOPICS (KSQL-2981) (#4089) --- docs/tutorials/basics-control-center.rst | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/docs/tutorials/basics-control-center.rst b/docs/tutorials/basics-control-center.rst index 0fc8e648df78..870bc7739202 100644 --- a/docs/tutorials/basics-control-center.rst +++ b/docs/tutorials/basics-control-center.rst @@ -67,26 +67,19 @@ Inspect Topics By Using KSQL in |c3-short| { "name": "pageviews", - "registered": false, "replicaInfo": [ 1 - ], - "consumerCount": 0, - "consumerGroupCount": 0 + ] }, { "name": "users", - "registered": false, "replicaInfo": [ 1 - ], - "consumerCount": 0, - "consumerGroupCount": 0 + ] } - The ``"registered": false`` indicator means that you haven't created a stream - or table on top of these topics, so you can't write streaming queries against - them yet. + To see the count of consumers and consumer groups, use the SHOW TOPICS + EXTENDED command. #. In the editing window, use the PRINT TOPIC statement to inspect the records in the ``users`` topic. Click **Run** to start the query. @@ -151,15 +144,11 @@ statements in KSQL Editor, just like you use them in the KSQL CLI. { "name": "pageviews", - "registered": true, "replicaInfo": [ 1 ] }, - The ``"registered": true`` indicator means that you have registered the topic - and you can write streaming queries against it. 
- Create a Table in the |c3-short| UI =================================== From 4c6caa67a54dcaa01007b702fd491f5069d9f674 Mon Sep 17 00:00:00 2001 From: Victoria Xia Date: Mon, 9 Dec 2019 16:32:38 -0800 Subject: [PATCH 005/123] docs: fix docker image names + remove monitoring interceptor docs (#4076) --- .../query-with-structured-data.md | 2 +- docs-md/faq.md | 4 +- .../install-ksqldb-with-docker.md | 119 +++--------------- .../installation/installing.md | 4 +- .../server-config/config-reference.md | 28 ++--- .../installation/server-config/index.md | 2 +- .../installation/server-config/security.md | 22 ---- docs-md/tutorials/basics-docker.md | 4 +- docs-md/tutorials/basics-local.md | 2 +- docs-md/tutorials/clickstream-docker.md | 2 +- 10 files changed, 39 insertions(+), 150 deletions(-) diff --git a/docs-md/developer-guide/query-with-structured-data.md b/docs-md/developer-guide/query-with-structured-data.md index e77e20bf6f57..17e0b65b4a68 100644 --- a/docs-md/developer-guide/query-with-structured-data.md +++ b/docs-md/developer-guide/query-with-structured-data.md @@ -114,7 +114,7 @@ Start the ksqlDB CLI: ```bash docker run --network tutorials_default --rm --interactive --tty \ - confluentinc/cp-ksql-cli:{{ site.release }} \ + confluentinc/ksqldb-cli:{{ site.release }} ksql \ http://ksql-server:8088 ``` diff --git a/docs-md/faq.md b/docs-md/faq.md index 6783c24c0703..37975e87af59 100644 --- a/docs-md/faq.md +++ b/docs-md/faq.md @@ -121,11 +121,11 @@ If you're running with Confluent CLI, use the `confluent stop` command: confluent stop KSQL ``` -If you're running ksqlDB in Docker containers, stop the `cp-ksql-server` +If you're running ksqlDB in Docker containers, stop the `ksqldb-server` container: ```bash -docker stop +docker stop ``` If you're running ksqlDB as a system service, use the `systemctl stop` diff --git a/docs-md/operate-and-deploy/installation/install-ksqldb-with-docker.md b/docs-md/operate-and-deploy/installation/install-ksqldb-with-docker.md index 2b27d3d31b42..a2bb69e1b0c6 100644 --- a/docs-md/operate-and-deploy/installation/install-ksqldb-with-docker.md +++ b/docs-md/operate-and-deploy/installation/install-ksqldb-with-docker.md @@ -8,8 +8,8 @@ keywords: ksqldb, docker, install You can deploy ksqlDB by using Docker containers. Starting with {{ site.cp }} 4.1.2, Confluent maintains images at [Docker Hub](https://hub.docker.com/u/confluentinc) -for [ksqlDB Server](https://hub.docker.com/r/confluentinc/cp-ksql-server/) and the -[ksqlDB command-line interface (CLI)](https://hub.docker.com/r/confluentinc/cp-ksql-cli/). +for [ksqlDB Server](https://hub.docker.com/r/confluentinc/ksqldb-server/) and the +[ksqlDB command-line interface (CLI)](https://hub.docker.com/r/confluentinc/ksqldb-cli/). ksqlDB runs separately from your {{ site.aktm }} cluster, so you specify the IP addresses of the cluster's bootstrap servers when you start a @@ -90,7 +90,7 @@ docker run -d \ -e KSQL_BOOTSTRAP_SERVERS=localhost:9092 \ -e KSQL_KSQL_SERVICE_ID=ksql_standalone_1_ \ -e KSQL_KSQL_QUERIES_FILE=/path/in/container/queries.sql \ - confluentinc/cp-ksql-server:{{ site.release }} + confluentinc/ksqldb-server:{{ site.release }} ``` TODO: Figure out how to style these @@ -108,50 +108,6 @@ TODO: Figure out how to style these : A file that specifies predefined SQL queries. -### ksqlDB Headless Server with Interceptors Settings (Production) - -{{ site.cp }} supports pluggable *interceptors* to examine and modify -incoming and outgoing records. 
Specify interceptor classes by assigning -the `KSQL_PRODUCER_INTERCEPTOR_CLASSES` and `KSQL_CONSUMER_INTERCEPTOR_CLASSES` -settings. For more info on interceptor classes, see -[Confluent Monitoring Interceptors](https://docs.confluent.io/current/control-center/installation/clients.html). - -Use the following command to run a headless, standalone ksqlDB Server with -the specified interceptor classes in a container: - -```bash -docker run -d \ - -v /path/on/host:/path/in/container/ \ - -e KSQL_BOOTSTRAP_SERVERS=localhost:9092 \ - -e KSQL_KSQL_SERVICE_ID=ksql_standalone_2_ \ - -e KSQL_PRODUCER_INTERCEPTOR_CLASSES=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor \ - -e KSQL_CONSUMER_INTERCEPTOR_CLASSES=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor \ - -e KSQL_KSQL_QUERIES_FILE=/path/in/container/queries.sql \ - confluentinc/cp-ksql-server:{{ site.release }} -``` - -`KSQL_BOOTSTRAP_SERVERS` - -: A list of hosts for establishing the initial connection to the Kafka - cluster. - -`KSQL_KSQL_SERVICE_ID` - -: The service ID of the ksqlDB server, which is used as the prefix for - the internal topics created by ksqlDB. - -`KSQL_KSQL_QUERIES_FILE` - -: A file that specifies predefined SQL queries. - -`KSQL_PRODUCER_INTERCEPTOR_CLASSES` - -: A list of fully qualified class names for producer interceptors. - -`KSQL_CONSUMER_INTERCEPTOR_CLASSES` - -: A list of fully qualified class names for consumer interceptors. - ### ksqlDB Interactive Server Settings (Development) Develop your ksqlDB applications by using the ksqlDB command-line interface @@ -165,7 +121,7 @@ docker run -d \ -e KSQL_BOOTSTRAP_SERVERS=localhost:9092 \ -e KSQL_LISTENERS=http://0.0.0.0:8088/ \ -e KSQL_KSQL_SERVICE_ID=ksql_service_2_ \ - confluentinc/cp-ksql-server:{{ site.release }} + confluentinc/ksqldb-server:{{ site.release }} ``` `KSQL_BOOTSTRAP_SERVERS` @@ -186,51 +142,6 @@ docker run -d \ In interactive mode, a ksqlDB CLI instance running outside of Docker can connect to the ksqlDB server running in Docker. -### ksqlDB Interactive Server with Interceptors Settings (Development) - -Run a ksqlDB Server with interceptors that enables manual interaction by -using the ksqlDB CLI: - -```bash -docker run -d \ - -p 127.0.0.1:8088:8088 \ - -e KSQL_BOOTSTRAP_SERVERS=localhost:9092 \ - -e KSQL_LISTENERS=http://0.0.0.0:8088/ \ - -e KSQL_KSQL_SERVICE_ID=ksql_service_3_ \ - -e KSQL_PRODUCER_INTERCEPTOR_CLASSES=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor \ - -e KSQL_CONSUMER_INTERCEPTOR_CLASSES=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor \ - confluentinc/cp-ksql-server:{{ site.release }} -``` - -`KSQL_BOOTSTRAP_SERVERS` - -: A list of hosts for establishing the initial connection to the Kafka - cluster. - -`KSQL_KSQL_SERVICE_ID` - -: The service ID of the ksqlDB server, which is used as the prefix for - the internal topics created by ksqlDB. - -`KSQL_LISTENERS` - -: A list of URIs, including the protocol, that the broker listens on. - If you are using IPv6, set to `http://[::]:8088`. - -`KSQL_PRODUCER_INTERCEPTOR_CLASSES` - -: A list of fully qualified class names for producer interceptors. - -`KSQL_CONSUMER_INTERCEPTOR_CLASSES` - -: A list of fully qualified class names for consumer interceptors. - -For more info on interceptor classes, see -[Confluent Monitoring Interceptors](https://docs.confluent.io/current/control-center/installation/clients.html). 
- -In interactive mode, a CLI instance running outside of Docker can -connect to the server running in Docker. - Connect ksqlDB Server to a Secure Kafka Cluster, Like Confluent Cloud =================================================================== @@ -250,7 +161,7 @@ docker run -d \ -e KSQL_SECURITY_PROTOCOL=SASL_SSL \ -e KSQL_SASL_MECHANISM=PLAIN \ -e KSQL_SASL_JAAS_CONFIG="org.apache.kafka.common.security.plain.PlainLoginModule required username=\"\" password=\"\";" \ - confluentinc/cp-ksql-server:{{ site.release }} + confluentinc/ksqldb-server:{{ site.release }} ``` `KSQL_BOOTSTRAP_SERVERS` @@ -312,7 +223,7 @@ docker run -d \ -v /path/on/host:/path/in/container/ \ -e KSQL_BOOTSTRAP_SERVERS=localhost:9092 \ -e KSQL_OPTS="-Dksql.service.id=ksql_service_3_ -Dksql.queries.file=/path/in/container/queries.sql" \ - confluentinc/cp-ksql-server:{{ site.release }} + confluentinc/ksqldb-server:{{ site.release }} ``` `KSQL_BOOTSTRAP_SERVERS` @@ -386,10 +297,10 @@ that's running in a different container. docker run -d -p 10.0.0.11:8088:8088 \ -e KSQL_BOOTSTRAP_SERVERS=localhost:9092 \ -e KSQL_OPTS="-Dksql.service.id=ksql_service_3_ -Dlisteners=http://0.0.0.0:8088/" \ - confluentinc/cp-ksql-server:{{ site.release }} + confluentinc/ksqldb-server:{{ site.release }} # Connect the ksqlDB CLI to the server. -docker run -it confluentinc/cp-ksql-cli http://10.0.0.11:8088 +docker run -it confluentinc/ksqldb-cli ksql http://10.0.0.11:8088 ``` `KSQL_BOOTSTRAP_SERVERS` @@ -417,7 +328,7 @@ ls /path/on/host/ksql-cli.properties docker run -it \ -v /path/on/host/:/path/in/container \ - confluentinc/cp-ksql-cli:{{ site.release }} http://10.0.0.11:8088 \ + confluentinc/ksqldb-cli:{{ site.release }} ksql http://10.0.0.11:8088 \ --config-file /path/in/container/ksql-cli.properties ``` @@ -427,7 +338,7 @@ Run a ksqlDB CLI instance in a container and connect to a remote ksqlDB Server host: ```bash -docker run -it confluentinc/cp-ksql-cli:{{ site.release }} \ +docker run -it confluentinc/ksqldb-cli:{{ site.release }} ksql \ http://ec2-blah.us-blah.compute.amazonaws.com:8080 ``` @@ -515,8 +426,8 @@ which is either `Entrypoint` or `Cmd`: ```bash {% raw %} -docker inspect --format='{{.Config.Entrypoint}}' confluentinc/cp-ksql-server:{{ site.release }} -docker inspect --format='{{.Config.Cmd}}' confluentinc/cp-ksql-server:{{ site.release }} +docker inspect --format='{{.Config.Entrypoint}}' confluentinc/ksqldb-server:{{ site.release }} +docker inspect --format='{{.Config.Cmd}}' confluentinc/ksqldb-server:{{ site.release }} {% endraw %} ``` @@ -538,7 +449,7 @@ a directory and downloads a tar archive into it. ```yaml ksql-server: - image: confluentinc/cp-ksql-server:{{ site.release }} + image: confluentinc/ksqldb-server:{{ site.release }} depends_on: - kafka environment: @@ -555,7 +466,7 @@ ksql-server: ``` After the `mkdir`, `cd`, `curl`, and `tar` commands run, the -`/etc/confluent/docker/run` command starts the `cp-ksql-server` image +`/etc/confluent/docker/run` command starts the `ksqldb-server` image with the specified settings. !!! note @@ -573,7 +484,7 @@ the environment to a desired state. 
```yaml ksql-cli: - image: confluentinc/cp-ksql-cli:{{ site.release }} + image: confluentinc/ksqldb-cli:{{ site.release }} depends_on: - ksql-server volumes: diff --git a/docs-md/operate-and-deploy/installation/installing.md b/docs-md/operate-and-deploy/installation/installing.md index 7b20d978c2ce..dcdcc524f223 100644 --- a/docs-md/operate-and-deploy/installation/installing.md +++ b/docs-md/operate-and-deploy/installation/installing.md @@ -19,7 +19,7 @@ Docker support You can deploy ksqlDB by using [Docker containers](install-ksqldb-with-docker.md). Starting with {{ site.cp }} 4.1.2, Confluent maintains images at -[Docker Hub](https://hub.docker.com/r/confluentinc/cp-ksql-server/). +[Docker Hub](https://hub.docker.com/r/confluentinc/ksqldb-server/). To start ksqlDB containers in configurations like "ksqlDB Headless Server" and "Interactive Server with Interceptors", see [Docker Configuration Parameters](https://docs.confluent.io/current/installation/docker/config-reference.html). @@ -156,7 +156,7 @@ After ksqlDB is started, your terminal should resemble this. Copyright 2017-2019 Confluent Inc. -CLI v{{ site.release }}, Server v{{ site.release }} located at http://ksqldb-server:8088 +CLI v{{ site.release }}, Server v{{ site.release }} located at http://localhost:8088 Having trouble? Type 'help' (case-insensitive) for a rundown of how things work! diff --git a/docs-md/operate-and-deploy/installation/server-config/config-reference.md b/docs-md/operate-and-deploy/installation/server-config/config-reference.md index 977b0fed7e1c..87b8f77fbab7 100644 --- a/docs-md/operate-and-deploy/installation/server-config/config-reference.md +++ b/docs-md/operate-and-deploy/installation/server-config/config-reference.md @@ -14,7 +14,7 @@ For more information on setting properties, see !!! tip Each property has a corresponding environment variable in the Docker image for - [ksqlDB Server](https://hub.docker.com/r/confluentinc/cp-ksql-server/). The + [ksqlDB Server](https://hub.docker.com/r/confluentinc/ksqldb-server/). The environment variable name is constructed from the configuration property name by converting to uppercase, replacing periods with underscores, and prepending with `KSQL_`. For example, the name of the `ksql.service.id` @@ -49,7 +49,7 @@ For more information, see [Kafka Consumer](https://docs.confluent.io/current/cli [AUTO_OFFSET_RESET_CONFIG](https://docs.confluent.io/{{ site.release }}/clients/javadocs/org/apache/kafka/clients/consumer/ConsumerConfig.html#AUTO_OFFSET_RESET_CONFIG). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_STREAMS_AUTO_OFFSET_RESET`. ### ksql.streams.bootstrap.servers @@ -69,7 +69,7 @@ and [BOOTSTRAP_SERVERS_CONFIG](https://docs.confluent.io/{{ site.release }}/streams/javadocs/org/apache/kafka/streams/StreamsConfig.html#BOOTSTRAP_SERVERS_CONFIG). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_STREAMS_BOOTSTRAP_SERVERS` or `KSQL_BOOTSTRAP_SERVERS`. For more information, see [Install ksqlDB with Docker](../install-ksqldb-with-docker.md). 
@@ -90,7 +90,7 @@ and [COMMIT_INTERVAL_MS_CONFIG](https://docs.confluent.io/{{ site.release }}/streams/javadocs/org/apache/kafka/streams/StreamsConfig.html#COMMIT_INTERVAL_MS_CONFIG), The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_STREAMS_COMMIT_INTERVAL_MS`. ### ksql.streams.cache.max.bytes.buffering @@ -109,7 +109,7 @@ and [CACHE_MAX_BYTES_BUFFERING_CONFIG](https://docs.confluent.io/{{ site.release }}/streams/javadocs/org/apache/kafka/streams/StreamsConfig.html#CACHE_MAX_BYTES_BUFFERING_CONFIG). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_STREAMS_CACHE_MAX_BYTES_BUFFERING`. ### ksql.streams.num.stream.threads @@ -120,7 +120,7 @@ information about the {{ site.kstreams }} threading model, see [Threading Model](https://docs.confluent.io/current/streams/architecture.html#threading-model). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_STREAMS_NUM_STREAM_THREADS`. ### ksql.output.topic.name.prefix @@ -134,7 +134,7 @@ interactive mode. For more information, see [Interactive ksqlDB clusters](security.md#interactive-ksqldb-clusters). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_OUTPUT_TOPIC_NAME_PREFIX`. ksqlDB Query Settings @@ -181,7 +181,7 @@ ksql.fail.on.production.error=false ``` The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_FAIL_ON_DESERIALIZATION_ERROR`. ### ksql.schema.registry.url @@ -191,7 +191,7 @@ over a secure connection, see [Configure ksqlDB for Secured {{ site.srlong }}](security.md#configure-ksqldb-for-https). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_SCHEMA_REGISTRY_URL`. ### ksql.service.id @@ -221,7 +221,7 @@ configured separately. For more information, see [Processing Log](../../../developer-guide/test-and-debug/processing-log.md). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_INTERNAL_TOPIC_REPLICAS`. ### ksql.sink.partitions (Deprecated) @@ -233,7 +233,7 @@ For more info see the WITH clause properties in [CREATE TABLE AS SELECT](../../../developer-guide/ksqldb-reference/create-table-as-select.md). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) is +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_SINK_PARTITIONS`. ### ksql.sink.replicas (Deprecated) @@ -251,7 +251,7 @@ documentation in the [function](../../../developer-guide/ksqldb-reference/scalar guide for details. 
The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_FUNCTIONS_SUBSTRING_LEGACY_ARGS`. ### ksql.persistence.wrap.single.values @@ -349,7 +349,7 @@ For an example, see [Non-interactive (Headless) ksqlDB Usage](index.md#non-interactive-headless-ksqldb-usage). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_KSQL_QUERIES_FILE`. ### listeners @@ -378,7 +378,7 @@ You can configure ksqlDB Server to use HTTPS. For more information, see [Configure ksqlDB for HTTPS](security.md#configure-ksqldb-for-https). The corresponding environment variable in the -[ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/) +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) is `KSQL_LISTENERS`. ### ksql.metrics.tags.custom diff --git a/docs-md/operate-and-deploy/installation/server-config/index.md b/docs-md/operate-and-deploy/installation/server-config/index.md index c94aa3b8c6d2..e009c6bbea06 100644 --- a/docs-md/operate-and-deploy/installation/server-config/index.md +++ b/docs-md/operate-and-deploy/installation/server-config/index.md @@ -33,7 +33,7 @@ configuration file and override specific properties as needed, using the !!! tip If you deploy {{ site.cp }} by using Docker containers, you can specify configuration parameters as environment variables to the - [ksqlDB Server image](https://hub.docker.com/r/confluentinc/cp-ksql-server/). + [ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/). For more information, see [Install ksqlDB with Docker](../install-ksqldb-with-docker.md). diff --git a/docs-md/operate-and-deploy/installation/server-config/security.md b/docs-md/operate-and-deploy/installation/server-config/security.md index ba3d21cc93bd..f91cae305b61 100644 --- a/docs-md/operate-and-deploy/installation/server-config/security.md +++ b/docs-md/operate-and-deploy/installation/server-config/security.md @@ -804,28 +804,6 @@ Consumer groups For more information about interactive and non-interactive queries, see [Non-interactive (Headless) ksqlDB Usage](index.md#non-interactive-headless-ksqldb-usage). -#### Configure Control Center Monitoring Interceptors - -This configuration enables SASL and SSL for the [monitoring -interceptors](https://docs.confluent.io/current/control-center/installation/clients.html) -that integrate ksqlDB with {{ site.c3short }}. 
- -```properties -# Confluent Monitoring Interceptors for Control Center streams monitoring -producer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor -consumer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor - -# Confluent Monitoring interceptors SASL / SSL config -confluent.monitoring.interceptor.security.protocol=SASL_SSL -confluent.monitoring.interceptor.ssl.truststore.location=/etc/kafka/secrets/kafka.client.truststore.jks -confluent.monitoring.interceptor.ssl.truststore.password=confluent -confluent.monitoring.interceptor.ssl.keystore.location=/etc/kafka/secrets/kafka.client.keystore.jks -confluent.monitoring.interceptor.ssl.keystore.password=confluent -confluent.monitoring.interceptor.ssl.key.password=confluent -confluent.monitoring.interceptor.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="ksql-user" password="ksql-user-secret"; -confluent.monitoring.interceptor.sasl.mechanism=PLAIN -``` - Next Steps ---------- diff --git a/docs-md/tutorials/basics-docker.md b/docs-md/tutorials/basics-docker.md index a039ff783c06..709247bc0b14 100644 --- a/docs-md/tutorials/basics-docker.md +++ b/docs-md/tutorials/basics-docker.md @@ -96,7 +96,7 @@ From the host machine, start ksqlDB CLI: ```bash docker run --network tutorials_default --rm --interactive --tty \ - confluentinc/cp-ksql-cli:{{ site.release }} \ + confluentinc/ksqldb-cli:{{ site.release }} ksql \ http://ksql-server:8088 ``` @@ -116,7 +116,7 @@ Your output should resemble: Copyright 2017-2019 Confluent Inc. -CLI v{{ site.release }}, Server v{{ site.release }} located at http://ksqldb-server:8088 +CLI v{{ site.release }}, Server v{{ site.release }} located at http://ksql-server:8088 Having trouble? Type 'help' (case-insensitive) for a rundown of how things work! diff --git a/docs-md/tutorials/basics-local.md b/docs-md/tutorials/basics-local.md index d7678bc69cc3..d957184b8f49 100644 --- a/docs-md/tutorials/basics-local.md +++ b/docs-md/tutorials/basics-local.md @@ -94,7 +94,7 @@ After KSQL is started, your terminal should resemble this. Copyright 2017-2019 Confluent Inc. -CLI v{{ site.release }}, Server v{{ site.release }} located at http://ksqldb-server:8088 +CLI v{{ site.release }}, Server v{{ site.release }} located at http://localhost:8088 Having trouble? Type 'help' (case-insensitive) for a rundown of how things work! diff --git a/docs-md/tutorials/clickstream-docker.md b/docs-md/tutorials/clickstream-docker.md index fa1c2fe3c74e..2d998863e071 100644 --- a/docs-md/tutorials/clickstream-docker.md +++ b/docs-md/tutorials/clickstream-docker.md @@ -185,7 +185,7 @@ Load the Streaming Data to KSQL Copyright 2017-2019 Confluent Inc. - CLI v{{ site.release }}, Server v{{ site.release }} located at http://ksqldb-server:8088 + CLI v{{ site.release }}, Server v{{ site.release }} located at http://ksql-server:8088 Having trouble? Type 'help' (case-insensitive) for a rundown of how things work! 
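[Editor's aside between patches] The behavior restored by patch 001 above — registering an uncaught exception handler on every `KafkaStreams` instance before it starts — is easy to reproduce outside ksqlDB. The sketch below is a minimal, hypothetical example against the pre-2.8 Kafka Streams API in use here (a plain `Thread.UncaughtExceptionHandler`); the application id, broker address, topic names, and the logging body are illustrative stand-ins, not ksqlDB's actual `KafkaStreamsUncaughtExceptionHandler`.

```java
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public final class UncaughtHandlerSketch {

  public static void main(final String[] args) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "uncaught-handler-sketch"); // hypothetical id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");       // hypothetical broker

    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic").to("output-topic"); // hypothetical topology

    final KafkaStreams streams = new KafkaStreams(builder.build(), props);

    // The key point of patch 001: register the handler in the one place that
    // builds every KafkaStreams instance, so it is set before start() for
    // persistent queries as well as transient ones.
    streams.setUncaughtExceptionHandler((thread, throwable) ->
        System.err.println("Uncaught exception on " + thread.getName() + ": " + throwable));

    streams.start();
  }
}
```

In the real fix, the call moves out of `QueryExecutor.buildTransientQuery` (which only transient queries pass through) into `KafkaStreamsBuilderImpl.buildKafkaStreams`, the common construction path — which is what guarantees persistent queries get the handler too.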
From 9a2bdec9ed8a3a4f5157bc1961fefe19fe5940b0 Mon Sep 17 00:00:00 2001 From: Victoria Xia Date: Mon, 9 Dec 2019 19:35:39 -0800 Subject: [PATCH 006/123] chore: update KSQL_PULL_QUERIES_ENABLE_CONFIG name (MINOR) (#4059) --- .../java/io/confluent/ksql/config/ImmutableProperties.java | 2 +- .../src/main/java/io/confluent/ksql/util/KsqlConfig.java | 4 ++-- ...er_rows_where_the_ARRAY_column_contains_a_specified_STRING | 2 +- ...where_the_STRUCT-_ARRAY_column_contains_a_specified_STRING | 2 +- .../arrayindex_-_select_the_first_element_of_an_Array | 2 +- .../arrayindex_-_select_the_last_element_of_an_Array_(-1) | 2 +- .../0_6_0-pre/asarray_-_construct_a_list_from_two_elements | 2 +- .../0_6_0-pre/asmap_-_create_map_from_key_value_lists | 2 +- .../expected_topology/0_6_0-pre/average-udaf_-_average_double | 2 +- .../expected_topology/0_6_0-pre/average-udaf_-_average_int | 2 +- .../expected_topology/0_6_0-pre/average-udaf_-_average_long | 2 +- .../0_6_0-pre/average_-_calculate_average_in_select | 2 +- .../0_6_0-pre/between_-_test_BETWEEN_with_array_dereference | 2 +- .../0_6_0-pre/between_-_test_BETWEEN_with_bigint | 2 +- .../0_6_0-pre/between_-_test_BETWEEN_with_floating_point | 2 +- .../0_6_0-pre/between_-_test_BETWEEN_with_integers | 2 +- .../between_-_test_BETWEEN_with_integers_and_expressions | 2 +- .../between_-_test_BETWEEN_with_integers_and_variable_values | 2 +- .../0_6_0-pre/between_-_test_BETWEEN_with_string_values | 2 +- .../between_-_test_BETWEEN_with_string_values_with_substring | 2 +- .../0_6_0-pre/between_-_test_NOT_BETWEEN_with_integers | 2 +- .../0_6_0-pre/case-expression_-_searched_case_expression | 2 +- ...ession_with_structs,_multiple_expression_and_the_same_type | 2 +- ...xpression_-_searched_case_returning_null_in_default_branch | 2 +- ...-expression_-_searched_case_returning_null_in_first_branch | 2 +- ...-expression_-_searched_case_returning_null_in_later_branch | 2 +- ...ssion_-_searched_case_with_arithmetic_expression_in_result | 2 +- .../case-expression_-_searched_case_with_null_in_when | 2 +- .../expected_topology/0_6_0-pre/cast_-_decimal_to_decimal | 2 +- .../expected_topology/0_6_0-pre/cast_-_decimal_to_other | 2 +- .../expected_topology/0_6_0-pre/cast_-_double_to_decimal | 2 +- .../expected_topology/0_6_0-pre/cast_-_integer_to_decimal | 2 +- .../test/resources/expected_topology/0_6_0-pre/cast_-_no_op | 2 +- .../resources/expected_topology/0_6_0-pre/cast_-_of_nulls | 2 +- .../expected_topology/0_6_0-pre/cast_-_string_to_decimal | 2 +- .../0_6_0-pre/collect-list_-_collect_list_bool_map | 2 +- .../0_6_0-pre/collect-list_-_collect_list_bool_map_table | 2 +- .../0_6_0-pre/collect-list_-_collect_list_double | 2 +- .../0_6_0-pre/collect-list_-_collect_list_double_table | 2 +- .../0_6_0-pre/collect-list_-_collect_list_int | 2 +- .../0_6_0-pre/collect-list_-_collect_list_int_table | 2 +- .../0_6_0-pre/collect-list_-_collect_list_long | 2 +- .../0_6_0-pre/collect-list_-_collect_list_long_table | 2 +- .../0_6_0-pre/collect-list_-_collect_list_string | 2 +- .../0_6_0-pre/collect-list_-_collect_list_string_table | 2 +- .../0_6_0-pre/collect-set_-_collect_set_bool_map | 2 +- .../0_6_0-pre/collect-set_-_collect_set_double | 2 +- .../expected_topology/0_6_0-pre/collect-set_-_collect_set_int | 2 +- .../0_6_0-pre/collect-set_-_collect_set_long | 2 +- .../0_6_0-pre/collect-set_-_collect_set_string | 2 +- .../0_6_0-pre/concat_-_concat_fields_using_'+'_operator | 2 +- .../0_6_0-pre/concat_-_concat_fields_using_CONCAT | 2 +- .../test/resources/expected_topology/0_6_0-pre/count_-_count | 2 
+- .../expected_topology/0_6_0-pre/count_-_count_literal | 2 +- .../resources/expected_topology/0_6_0-pre/count_-_count_star | 2 +- .../resources/expected_topology/0_6_0-pre/count_-_count_table | 2 +- .../expected_topology/0_6_0-pre/datestring_-_date_to_string | 2 +- .../expected_topology/0_6_0-pre/decimal_-_AVRO_in_out | 2 +- .../expected_topology/0_6_0-pre/decimal_-_DELIMITED_in_out | 2 +- .../0_6_0-pre/decimal_-_GEQ_-_decimal_decimal | 2 +- .../expected_topology/0_6_0-pre/decimal_-_JSON_in_out | 2 +- .../0_6_0-pre/decimal_-_LEQ_-_decimal_decimal | 2 +- .../resources/expected_topology/0_6_0-pre/decimal_-_addition | 2 +- .../expected_topology/0_6_0-pre/decimal_-_addition_3_columns | 2 +- .../0_6_0-pre/decimal_-_addition_with_double | 2 +- .../expected_topology/0_6_0-pre/decimal_-_addition_with_int | 2 +- .../resources/expected_topology/0_6_0-pre/decimal_-_division | 2 +- .../0_6_0-pre/decimal_-_equal_-_decimal_decimal | 2 +- .../0_6_0-pre/decimal_-_greater_than_-_decimal_decimal | 2 +- .../0_6_0-pre/decimal_-_is_distinct_-_decimal_decimal | 2 +- .../0_6_0-pre/decimal_-_less_than_-_decimal_decimal | 2 +- .../decimal_-_less_than_-_decimal_decimal_differing_scale | 2 +- .../0_6_0-pre/decimal_-_less_than_-_decimal_int | 2 +- .../test/resources/expected_topology/0_6_0-pre/decimal_-_mod | 2 +- .../expected_topology/0_6_0-pre/decimal_-_multiplication | 2 +- .../resources/expected_topology/0_6_0-pre/decimal_-_negation | 2 +- .../0_6_0-pre/decimal_-_not_equal_-_decimal_decimal | 2 +- .../expected_topology/0_6_0-pre/decimal_-_subtraction | 2 +- .../0_6_0-pre/delimited_-_select_delimited_value_format | 2 +- ...t_with_$_separated_values_using_custom_delimiter_character | 2 +- ...th_SPACE_separated_values_using_custom_delimiter_character | 2 +- ...with_TAB_separated_values_using_custom_delimiter_character | 2 +- ...e_separated_values_-_should_take_source_delimiter_for_sink | 2 +- ...leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema | 2 +- .../0_6_0-pre/elements_-_join_qualified_select_star_left | 2 +- .../0_6_0-pre/elements_-_join_qualified_select_star_right | 2 +- .../0_6_0-pre/elements_-_join_unqualified_select_star | 2 +- ...leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema | 2 +- .../0_6_0-pre/elements_-_non-join_qualified_select_star | 2 +- .../elements_-_validate_AVRO_uses_null_for_unknown_element | 2 +- .../0_6_0-pre/elements_-_validate_array_element_OK | 2 +- .../0_6_0-pre/elements_-_validate_bigint_elements_OK | 2 +- .../0_6_0-pre/elements_-_validate_boolean_elements_OK | 2 +- .../0_6_0-pre/elements_-_validate_double_elements_OK | 2 +- .../0_6_0-pre/elements_-_validate_int_elements_OK | 2 +- .../0_6_0-pre/elements_-_validate_map_element_OK | 2 +- .../0_6_0-pre/elements_-_validate_string_elements_OK | 2 +- .../0_6_0-pre/elements_-_validate_struct_element_OK | 2 +- .../0_6_0-pre/elements_-_validate_with_elements_OK | 2 +- .../0_6_0-pre/elements_-_validate_without_elements_OK | 2 +- .../0_6_0-pre/elt-field_-_ELT_should_undo_FIELD | 2 +- .../0_6_0-pre/elt-field_-_elect_the_second_parameter | 2 +- .../0_6_0-pre/elt-field_-_field_the_correct_parameter | 2 +- .../0_6_0-pre/explode_-_explode_array_with_values | 2 +- .../0_6_0-pre/explode_-_explode_different_types | 2 +- ...termediate_generated_column_names_don't_clash_with_aliases | 2 +- .../extract-json-field_-_concat_two_extracted_fields | 2 +- .../0_6_0-pre/extract-json-field_-_extract_JSON_array_field | 2 +- .../0_6_0-pre/geodistance_-_geo_distance_with_radius | 2 +- .../0_6_0-pre/geodistance_-_geo_distance_without_radius | 2 +- 
.../0_6_0-pre/group-by_-_ROWKEY_(stream-_table) | 2 +- .../group-by_-_ROWKEY_(stream-_table)_-_without_repartition | 2 +- .../0_6_0-pre/group-by_-_ROWTIME_(stream-_table) | 2 +- ..._-_UDAF_nested_in_UDF_in_select_expression_(stream-_table) | 2 +- ...y_-_UDAF_nested_in_UDF_in_select_expression_(table-_table) | 2 +- ..._-_UDF_nested_in_UDAF_in_select_expression_(stream-_table) | 2 +- ...ection_in-order_&_non-commutative_group_by_(stream-_table) | 2 +- ...jection_in-order_&_non-commutative_group_by_(table-_table) | 2 +- .../0_6_0-pre/group-by_-_constant_(stream-_table) | 2 +- .../0_6_0-pre/group-by_-_constant_(table-_table) | 2 +- .../0_6_0-pre/group-by_-_duplicate_fields_(stream-_table) | 2 +- .../0_6_0-pre/group-by_-_field_(stream-_table) | 2 +- .../0_6_0-pre/group-by_-_field_(stream-_table)_-_format | 2 +- ..._with_field_used_in_function_in_projection_(stream-_table) | 2 +- ...d_with_field_used_in_function_in_projection_(table-_table) | 2 +- .../0_6_0-pre/group-by_-_field_with_re-key_(stream-_table) | 2 +- .../group-by_-_field_with_re-key_(stream-_table)_-_format | 2 +- .../0_6_0-pre/group-by_-_field_with_re-key_(table-_table) | 2 +- .../0_6_0-pre/group-by_-_fields_(stream-_table) | 2 +- .../0_6_0-pre/group-by_-_fields_(stream-_table)_-_format | 2 +- .../0_6_0-pre/group-by_-_fields_(table-_table) | 2 +- .../0_6_0-pre/group-by_-_fields_(table-_table)_-_format | 2 +- .../0_6_0-pre/group-by_-_function_(stream-_table) | 2 +- .../0_6_0-pre/group-by_-_function_(table-_table) | 2 +- .../0_6_0-pre/group-by_-_json_field_(stream-_table) | 2 +- ...oup-by_-_missing_matching_projection_field_(stream-_table) | 2 +- ...roup-by_-_missing_matching_projection_field_(table-_table) | 2 +- .../group-by_-_string_concat_using_+_op_(stream-_table) | 2 +- .../group-by_-_string_concat_using_+_op_(table-_table) | 2 +- .../group-by_-_with_aggregate_arithmetic_(stream-_table) | 2 +- .../group-by_-_with_aggregate_arithmetic_(table-_table) | 2 +- ...ggregate_arithmetic_involving_source_field_(stream-_table) | 2 +- ...aggregate_arithmetic_involving_source_field_(table-_table) | 2 +- .../0_6_0-pre/group-by_-_with_constant_having_(stream-table) | 2 +- ...roup-by_-_with_constants_in_the_projection_(stream-_table) | 2 +- .../0_6_0-pre/group-by_-_with_groupings_(stream-_table) | 2 +- .../group-by_-_with_having_expression_(stream-_table) | 2 +- .../group-by_-_with_having_expression_(table-_table) | 2 +- ...th_having_expression_on_non-group-by_field_(stream-_table) | 2 +- ...roup-by_-_with_multiple_having_expressions_(stream-_table) | 2 +- .../0_6_0-pre/having_-_calculate_average_in_having | 2 +- .../expected_topology/0_6_0-pre/having_-_table_having | 2 +- .../0_6_0-pre/histogram_-_histogram_on_a_table | 2 +- .../expected_topology/0_6_0-pre/histogram_-_histogram_string | 2 +- .../expected_topology/0_6_0-pre/hopping-windows_-_count | 2 +- .../0_6_0-pre/hopping-windows_-_import_hopping_table | 2 +- .../expected_topology/0_6_0-pre/hopping-windows_-_max_hopping | 2 +- .../expected_topology/0_6_0-pre/hopping-windows_-_min_hopping | 2 +- .../0_6_0-pre/hopping-windows_-_topk_hopping | 2 +- .../0_6_0-pre/hopping-windows_-_topkdistinct_hopping | 2 +- .../0_6_0-pre/identifiers_-_aliased_join_source | 2 +- .../0_6_0-pre/identifiers_-_aliased_join_source_with_AS | 2 +- .../0_6_0-pre/identifiers_-_aliased_left_unaliased_right | 2 +- .../expected_topology/0_6_0-pre/identifiers_-_aliased_source | 2 +- .../0_6_0-pre/identifiers_-_aliased_source_with_AS | 2 +- ...identifiers_-_prefixed_wildcard_select_with_aliased_source | 2 +- 
.../0_6_0-pre/identifiers_-_unaliased_left_aliased_right | 2 +- .../identifiers_-_wildcard_select_with_aliased_source | 2 +- .../expected_topology/0_6_0-pre/initcap_-_do_initcap | 2 +- .../0_6_0-pre/insert-into_-_convert_formats__AVRO_to_JSON | 2 +- .../insert-into_-_convert_formats__DELIMITED_to_JSON | 2 +- .../0_6_0-pre/insert-into_-_convert_formats__JSON_to_AVRO | 2 +- .../expected_topology/0_6_0-pre/insert-into_-_simple | 2 +- .../0_6_0-pre/insert-into_-_with_custom_topic_name | 2 +- ...n-with-custom-timestamp_-_stream_stream_inner_join_with_ts | 2 +- ...mp_-_stream_stream_inner_join_with_ts_extractor_both_sides | 2 +- ...timestamp_-_stream_table_join_with_ts_extractor_both_sides | 2 +- ...oin-with-custom-timestamp_-_table_table_inner_join_with_ts | 2 +- ...tamp_-_table_table_inner_join_with_ts_extractor_both_sides | 2 +- .../0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria | 2 +- ..._using_ROWKEY_in_the_criteria_-_join_key_not_in_projection | 2 +- ...n_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection | 2 +- ...-_join_using_ROWKEY_in_the_criteria_-_no_source_key_fields | 2 +- ...sing_ROWKEY_in_the_criteria_-_right_join_key_in_projection | 2 +- ..._using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection | 2 +- .../0_6_0-pre/joins_-_multiple_join_keys_in_projection | 2 +- .../0_6_0-pre/joins_-_stream_stream_inner_join | 2 +- ...ns_-_stream_stream_inner_join_-_join_key_not_in_projection | 2 +- ..._-_stream_stream_inner_join_-_right_join_key_in_projection | 2 +- .../0_6_0-pre/joins_-_stream_stream_inner_join_all_fields | 2 +- ...oins_-_stream_stream_inner_join_all_left_fields_some_right | 2 +- ...oins_-_stream_stream_inner_join_all_right_fields_some_left | 2 +- ..._stream_inner_join_with_different_before_and_after_windows | 2 +- ...oins_-_stream_stream_inner_join_with_out_of_order_messages | 2 +- .../0_6_0-pre/joins_-_stream_stream_left_join | 2 +- ...s_-_stream_stream_left_join_-_both_join_keys_in_projection | 2 +- ...ins_-_stream_stream_left_join_-_join_key_not_in_projection | 2 +- .../0_6_0-pre/joins_-_stream_stream_left_join_-_rekey | 2 +- ...s_-_stream_stream_left_join_-_right_join_key_in_projection | 2 +- .../0_6_0-pre/joins_-_stream_stream_outer_join | 2 +- ..._-_stream_stream_outer_join_-_right_join_key_in_projection | 2 +- .../0_6_0-pre/joins_-_stream_table_inner_join | 2 +- ...ins_-_stream_table_inner_join_-_join_key_not_in_projection | 2 +- ...s_-_stream_table_inner_join_-_right_join_key_in_projection | 2 +- .../0_6_0-pre/joins_-_stream_table_left_join | 2 +- ...oins_-_stream_table_left_join_-_join_key_not_in_projection | 2 +- ...ns_-_stream_table_left_join_-_right_join_key_in_projection | 2 +- ...am_to_stream_unwrapped_single_field_value_schema_on_inputs | 2 +- ...m_unwrapped_single_field_value_schema_on_inputs_and_output | 2 +- ...ream_to_stream_wrapped_single_field_value_schema_on_inputs | 2 +- ...eam_to_table_unwrapped_single_field_value_schema_on_inputs | 2 +- ...e_unwrapped_single_field_value_schema_on_inputs_and_output | 2 +- ...le_when_neither_have_key_field_and_joining_by_table_ROWKEY | 2 +- ..._table_does_not_have_key_field_and_joining_by_table_ROWKEY | 2 +- ...tream_to_table_wrapped_single_field_value_schema_on_inputs | 2 +- .../expected_topology/0_6_0-pre/joins_-_table_join_pipeline | 2 +- .../0_6_0-pre/joins_-_table_table_inner_join | 2 +- ...oins_-_table_table_inner_join_-_join_key_not_in_projection | 2 +- ...ns_-_table_table_inner_join_-_right_join_key_in_projection | 2 +- .../0_6_0-pre/joins_-_table_table_join_with_where_clause | 2 +- 
.../expected_topology/0_6_0-pre/joins_-_table_table_left_join | 2 +- ...ins_-_table_table_left_join_-_both_join_keys_in_projection | 2 +- ...joins_-_table_table_left_join_-_join_key_not_in_projection | 2 +- ...ins_-_table_table_left_join_-_right_join_key_in_projection | 2 +- .../0_6_0-pre/joins_-_table_table_outer_join | 2 +- ...ns_-_table_table_outer_join_-_right_join_key_in_projection | 2 +- ...ble_to_table_unwrapped_single_field_value_schema_on_inputs | 2 +- ...e_unwrapped_single_field_value_schema_on_inputs_and_output | 2 +- ...table_to_table_wrapped_single_field_value_schema_on_inputs | 2 +- .../0_6_0-pre/joins_-_unqualified_join_criteria | 2 +- .../key-field_-_multiple_copies_of_key_field_in_projection | 2 +- ...___initially_null___group_by_(-)___key_in_value___aliasing | 2 +- ...initially_null___group_by_(-)___key_in_value___no_aliasing | 2 +- ...eam___initially_null___group_by_(-)___key_not_in_value___- | 2 +- ...ey-field_-_stream___initially_null___no_key_change___-___- | 2 +- ...ially_null___partition_by_(-)___key_in_value___no_aliasing | 2 +- ...ially_set___group_by_(different)___key_in_value___aliasing | 2 +- ...ly_set___group_by_(different)___key_in_value___no_aliasing | 2 +- ...nitially_set___group_by_(different)___key_not_in_value___- | 2 +- ..._initially_set___group_by_(same)___key_in_value___aliasing | 2 +- ...itially_set___group_by_(same)___key_in_value___no_aliasing | 2 +- ...m___initially_set___group_by_(same)___key_not_in_value___- | 2 +- ...lly_set___group_by_expression___key_in_value___no_aliasing | 2 +- ...ially_set___group_by_multiple___key_in_value___no_aliasing | 2 +- ...___initially_set___no_key_change___key_in_value___aliasing | 2 +- ..._set___no_key_change___key_in_value___aliasing_+_duplicate | 2 +- ...initially_set___no_key_change___key_in_value___no_aliasing | 2 +- ...eam___initially_set___no_key_change___key_not_in_value___- | 2 +- ...___initially_null___group_by_(-)___key_in_value___aliasing | 2 +- ...initially_null___group_by_(-)___key_in_value___no_aliasing | 2 +- ...key-field_-_table___initially_null___no_key_change___-___- | 2 +- ...ially_set___group_by_(different)___key_in_value___aliasing | 2 +- ...ly_set___group_by_(different)___key_in_value___no_aliasing | 2 +- ...nitially_set___group_by_(different)___key_not_in_value___- | 2 +- ..._initially_set___group_by_(same)___key_in_value___aliasing | 2 +- ...itially_set___group_by_(same)___key_in_value___no_aliasing | 2 +- ...e___initially_set___group_by_(same)___key_not_in_value___- | 2 +- ...___initially_set___no_key_change___key_in_value___aliasing | 2 +- ...initially_set___no_key_change___key_in_value___no_aliasing | 2 +- ...ble___initially_set___no_key_change___key_not_in_value___- | 2 +- .../key-field_-_using_full_source_name_in_projection | 2 +- .../0_6_0-pre/key-field_-_using_source_alias_in_projection | 2 +- .../expected_topology/0_6_0-pre/key-field_-_where_clause | 2 +- .../0_6_0-pre/key-field_-_where_clause_with_alias | 2 +- .../0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection | 2 +- .../0_6_0-pre/key-schemas_-_KEY_value_field_name | 2 +- .../0_6_0-pre/key-schemas_-_stream_explicit_STRING_ROWKEY | 2 +- .../0_6_0-pre/key-schemas_-_stream_implicit_STRING_ROWKEY | 2 +- .../0_6_0-pre/key-schemas_-_table_explicit_STRING_ROWKEY | 2 +- .../0_6_0-pre/key-schemas_-_table_implicit_STRING_ROWKEY | 2 +- .../0_6_0-pre/literals_-_BIGINT_literal_min_max | 2 +- .../expected_topology/0_6_0-pre/literals_-_BOOLEAN_literal | 2 +- .../0_6_0-pre/literals_-_DOUBLE_literal_min_max | 2 +- 
.../0_6_0-pre/literals_-_INT_literal_min_max | 2 +- .../src/test/resources/expected_topology/0_6_0-pre/math_-_abs | 2 +- .../0_6_0-pre/math_-_calculate_CEIL_function | 2 +- .../src/test/resources/expected_topology/0_6_0-pre/math_-_exp | 2 +- .../test/resources/expected_topology/0_6_0-pre/math_-_floor | 2 +- .../src/test/resources/expected_topology/0_6_0-pre/math_-_ln | 2 +- .../test/resources/expected_topology/0_6_0-pre/math_-_round | 2 +- .../0_6_0-pre/math_-_round_with_large_DECIMAL_values | 2 +- .../test/resources/expected_topology/0_6_0-pre/math_-_sign | 2 +- .../test/resources/expected_topology/0_6_0-pre/math_-_sqrt | 2 +- .../0_6_0-pre/max-group-by_-_max_decimal_group_by | 2 +- .../0_6_0-pre/max-group-by_-_max_double_group_by | 2 +- .../0_6_0-pre/max-group-by_-_max_integer_group_by | 2 +- .../0_6_0-pre/max-group-by_-_max_long_group_by | 2 +- .../0_6_0-pre/min-group-by_-_min_decimal_group_by | 2 +- .../0_6_0-pre/min-group-by_-_min_double_group_by | 2 +- .../0_6_0-pre/min-group-by_-_min_integer_group_by | 2 +- .../0_6_0-pre/min-group-by_-_min_long_group_by | 2 +- ...truct_-_complex_struct_select_array,_map,_map_value_struct | 2 +- ...complex-struct_-_complex_struct_select_array_and_map_items | 2 +- .../more-complex-struct_-_complex_struct_select_star | 2 +- .../0_6_0-pre/multiple-avro-maps_-_project_multiple_avro_maps | 2 +- .../0_6_0-pre/partition-by_-_aliased_key_field_-_same_name | 2 +- .../expected_topology/0_6_0-pre/partition-by_-_partition_by | 2 +- .../0_6_0-pre/partition-by_-_partition_by_ROWKEY | 2 +- .../0_6_0-pre/partition-by_-_partition_by_ROWTIME | 2 +- .../partition-by_-_partition_by_with_null_partition_by_value | 2 +- .../0_6_0-pre/partition-by_-_partition_by_with_null_value | 2 +- .../partition-by_-_partition_by_with_projection_select_all | 2 +- .../partition-by_-_partition_by_with_projection_select_some | 2 +- .../project-filter_-_CSAS_with_custom_Kafka_topic_name | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_BETWEEN | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_IS_DISTINCT_FROM | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_IS_NOT_DISTINCT_FROM | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_NOT_BETWEEN | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_NOT_NULL | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_NULL | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_like_pattern | 2 +- .../project-filter_-_Filter_on_like_pattern_without_wildcards | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_long_literal | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_not_like_pattern | 2 +- .../0_6_0-pre/project-filter_-_Filter_on_string_literal | 2 +- .../0_6_0-pre/project-filter_-_Json_Map_filter | 2 +- .../0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array | 2 +- .../0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array_2 | 2 +- .../0_6_0-pre/project-filter_-_Null_row_filter | 2 +- .../project-filter_-_Project_fields_with_reserved_name | 2 +- .../project-filter_-_Project_struct_fields_with_reserved_name | 2 +- ..._WHERE_with_many_comparisons._This_tests_the_fix_for_#1784 | 2 +- .../0_6_0-pre/project-filter_-_project_and_filter | 2 +- .../0_6_0-pre/project-filter_-_project_and_negative_filter | 2 +- .../project-filter_-_project_string_with_embedded_code | 2 +- .../quoted-identifiers_-_create_table_with_key_that_is_quoted | 2 +- ...uoted-identifiers_-_joins_using_fields_that_require_quotes | 2 +- ...quoted-identifiers_-_math_using_fields_that_require_quotes | 2 +- .../quoted-identifiers_-_sink_fields_that_require_quotes | 2 +- 
.../quoted-identifiers_-_source_fields_that_require_quotes | 2 +- .../quoted-identifiers_-_source_names_requiring_quotes | 2 +- .../quoted-identifiers_-_udf_using_fields_that_require_quotes | 2 +- .../resources/expected_topology/0_6_0-pre/replace_-_replace | 2 +- .../expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME | 2 +- .../0_6_0-pre/rowtime_-_test_ROWTIME_with_AND | 2 +- .../0_6_0-pre/rowtime_-_test_ROWTIME_with_BETWEEN | 2 +- .../0_6_0-pre/rowtime_-_test_ROWTIME_with_inexact_timestring | 2 +- .../0_6_0-pre/rowtime_-_test_ROWTIME_with_timezone | 2 +- .../serdes_-_deserialization_should_default_to_wrapped_values | 2 +- ..._deserialization_should_pick_up_value_wrapping_from_config | 2 +- .../0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value | 2 +- ...erdes_-_deserialize_anonymous_array_-_value_-_non-nullable | 2 +- ...rdes_-_deserialize_anonymous_array_-_value_-_with_coercion | 2 +- .../0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value | 2 +- .../serdes_-_deserialize_anonymous_map_-_value_-_non-nullable | 2 +- ...serdes_-_deserialize_anonymous_map_-_value_-_with_coercion | 2 +- .../serdes_-_deserialize_anonymous_primitive_-_value | 2 +- ..._-_deserialize_anonymous_primitive_-_value_-_with_coercion | 2 +- ...erdes_-_deserialize_anonymous_primitive_by_default_-_value | 2 +- .../0_6_0-pre/serdes_-_deserialize_nested_array_-_value | 2 +- .../0_6_0-pre/serdes_-_deserialize_nested_map_-_value | 2 +- .../0_6_0-pre/serdes_-_deserialize_nested_primitive_-_value | 2 +- .../serdes_-_serialization_should_default_to_wrapped_values | 2 +- ..._-_serialization_should_pick_up_value_wrapping_from_config | 2 +- .../0_6_0-pre/serdes_-_serialize_anonymous_array_-_value | 2 +- .../0_6_0-pre/serdes_-_serialize_anonymous_map_-_value | 2 +- .../0_6_0-pre/serdes_-_serialize_anonymous_primitive_-_value | 2 +- .../0_6_0-pre/serdes_-_serialize_anonymous_struct_-_value | 2 +- .../0_6_0-pre/serdes_-_serialize_nested_array_-_value | 2 +- .../0_6_0-pre/serdes_-_serialize_nested_map_-_value | 2 +- .../0_6_0-pre/serdes_-_serialize_nested_primitive_-_value | 2 +- .../0_6_0-pre/serdes_-_serialize_nested_struct_-_value | 2 +- .../0_6_0-pre/session-windows_-_import_session_stream | 2 +- .../0_6_0-pre/session-windows_-_import_session_table | 2 +- .../0_6_0-pre/session-windows_-_inherit_windowed_keys | 2 +- .../expected_topology/0_6_0-pre/session-windows_-_max_session | 2 +- .../simple-struct_-_simple_struct_read_struct_as_json_string | 2 +- .../0_6_0-pre/simple-struct_-_simple_struct_select_filter | 2 +- .../0_6_0-pre/simple-struct_-_simple_struct_select_filter_2 | 2 +- .../0_6_0-pre/simple-struct_-_simple_struct_select_filter_4 | 2 +- .../simple-struct_-_simple_struct_select_for_ambiguity | 2 +- .../0_6_0-pre/simple-struct_-_simple_struct_select_star | 2 +- .../0_6_0-pre/simple-struct_-_simple_struct_select_with_nulls | 2 +- .../0_6_0-pre/simple-struct_-_simples_struct_select_filter_3 | 2 +- ...he_sink_topic_if_default_partitions_and_replicas_were_set. 
| 2 +- ..._should_copy_partition_and_replica_count_from_source_topic | 2 +- ...c-naming__default_topic_name_is_stream_name,_in_upper-case | 2 +- ...-naming__use_prefixed_default_topic_name_when_property_set | 2 +- ..._sink-topic-naming__use_supplied_topic_name,_when_supplied | 2 +- .../0_6_0-pre/slice_-_sublist_for_list_of_lists | 2 +- .../0_6_0-pre/slice_-_sublist_for_list_of_maps | 2 +- .../0_6_0-pre/slice_-_sublist_for_string_list | 2 +- ...ge_by_commas_and_display_pos_0_and_2_of_the_returned_array | 2 +- .../split_-_split_a_message_by_using_the_'$$'_delimiter | 2 +- .../split_-_split_a_message_by_using_the_'.'_delimiter | 2 +- .../split_-_split_all_characters_by_using_the_''_delimiter | 2 +- .../expected_topology/0_6_0-pre/stringdate_-_string_to_date | 2 +- .../0_6_0-pre/stringtimestamp_-_string_to_timestamp | 2 +- .../0_6_0-pre/struct-udfs_-_Create_a_struct_from_a_string | 2 +- .../0_6_0-pre/struct-udfs_-_Extract_value_from_struct | 2 +- .../0_6_0-pre/substring_-_do_substring_with_just_pos | 2 +- .../0_6_0-pre/substring_-_do_substring_with_pos_and_length | 2 +- .../expected_topology/0_6_0-pre/substring_-_in_group_by | 2 +- ...substring_-_should_default_to_current_mode_for_new_queries | 2 +- .../resources/expected_topology/0_6_0-pre/sum_-_sum_decimal | 2 +- .../resources/expected_topology/0_6_0-pre/sum_-_sum_double | 2 +- .../expected_topology/0_6_0-pre/sum_-_sum_double_map | 2 +- .../test/resources/expected_topology/0_6_0-pre/sum_-_sum_int | 2 +- .../0_6_0-pre/sum_-_sum_int_left_join_of_table | 2 +- .../0_6_0-pre/sum_-_sum_list_of_doubles_into_a_single_double | 2 +- .../0_6_0-pre/sum_-_sum_list_of_ints_into_a_single_int | 2 +- .../0_6_0-pre/sum_-_sum_list_of_longs_into_a_single_long | 2 +- .../test/resources/expected_topology/0_6_0-pre/sum_-_sum_long | 2 +- .../0_6_0-pre/sum_-_sum_with_constant_double_arg | 2 +- .../0_6_0-pre/sum_-_sum_with_constant_int_arg | 2 +- .../0_6_0-pre/sum_-_sum_with_constant_long_arg | 2 +- .../0_6_0-pre/table-functions_-_multiple_table_functions | 2 +- .../table-functions_-_table_function_as_first_select | 2 +- .../0_6_0-pre/table-functions_-_table_function_as_last_select | 2 +- .../0_6_0-pre/table-functions_-_table_function_with_no_alias | 2 +- ...-functions_-_table_function_with_no_other_selected_columns | 2 +- ...table-functions_-_table_function_with_non_selected_columns | 2 +- .../table-functions_-_table_function_with_where_clause | 2 +- ...table-functions_-_table_functions_with_complex_expressions | 2 +- .../0_6_0-pre/table-functions_-_test_udtf_-_array_params | 2 +- .../0_6_0-pre/table-functions_-_test_udtf_-_map_params | 2 +- .../0_6_0-pre/table-functions_-_test_udtf_-_return_vals | 2 +- .../0_6_0-pre/table-functions_-_test_udtf_-_simple_params | 2 +- .../0_6_0-pre/test-custom-udaf_-_test_udaf_group_by | 2 +- .../0_6_0-pre/test-custom-udaf_-_test_udaf_on_a_table | 2 +- .../0_6_0-pre/test-custom-udaf_-_test_udaf_with_struct | 2 +- .../timestamp-extractor_-_KSQL_default_timestamp_extractor | 2 +- .../timestamp-extractor_-_KSQL_override_timestamp_extractor | 2 +- .../0_6_0-pre/timestamp-to-string_-_with_valid_zone | 2 +- .../0_6_0-pre/timestampformat_-_timestamp_format | 2 +- .../timestampformat_-_with_single_digit_ms_and_numeric_tz | 2 +- .../0_6_0-pre/topk-distinct_-_topk_distinct_decimal | 2 +- .../0_6_0-pre/topk-distinct_-_topk_distinct_integer | 2 +- .../0_6_0-pre/topk-distinct_-_topk_distinct_long | 2 +- .../0_6_0-pre/topk-distinct_-_topk_distinct_string | 2 +- .../expected_topology/0_6_0-pre/topk-group-by_-_topk_double | 2 +- 
 .../expected_topology/0_6_0-pre/topk-group-by_-_topk_integer | 2 +-
 .../expected_topology/0_6_0-pre/topk-group-by_-_topk_long | 2 +-
 .../expected_topology/0_6_0-pre/topk-group-by_-_topk_string | 2 +-
 .../0_6_0-pre/tumbling-windows_-_import_tumbling_table | 2 +-
 .../0_6_0-pre/tumbling-windows_-_inherit_windowed_keys | 2 +-
 .../0_6_0-pre/tumbling-windows_-_max_tumbling | 2 +-
 .../0_6_0-pre/tumbling-windows_-_min_tumbling | 2 +-
 .../0_6_0-pre/tumbling-windows_-_topk_tumbling | 2 +-
 .../0_6_0-pre/tumbling-windows_-_topkdistinct_tumbling | 2 +-
 ...hain_a_call_to_URL_EXTRACT_PARAMETER_with_URL_DECODE_PARAM | 2 +-
 .../url_-_decode_a_url_parameter_using_DECODE_URL_PARAM | 2 +-
 .../url_-_encode_a_url_parameter_using_ENCODE_URL_PARAM | 2 +-
 ...-_extract_a_fragment_from_a_URL_using_URL_EXTRACT_FRAGMENT | 2 +-
 .../url_-_extract_a_host_from_a_URL_using_URL_EXTRACT_HOST | 2 +-
 ...extract_a_parameter_from_a_URL_using_URL_EXTRACT_PARAMETER | 2 +-
 .../url_-_extract_a_path_from_a_URL_using_URL_EXTRACT_PATH | 2 +-
 .../url_-_extract_a_port_from_a_URL_using_URL_EXTRACT_PORT | 2 +-
 ...-_extract_a_protocol_from_a_URL_using_URL_EXTRACT_PROTOCOL | 2 +-
 .../url_-_extract_a_query_from_a_URL_using_URL_EXTRACT_QUERY | 2 +-
 .../0_6_0-pre/window-bounds_-_in_expressions | 2 +-
 .../expected_topology/0_6_0-pre/window-bounds_-_none | 2 +-
 .../expected_topology/0_6_0-pre/window-bounds_-_table_hopping | 2 +-
 .../expected_topology/0_6_0-pre/window-bounds_-_table_session | 2 +-
 .../0_6_0-pre/window-bounds_-_table_tumbling | 2 +-
 .../ksql/rest/server/execution/PullQueryExecutor.java | 4 ++--
 .../ksql/rest/server/execution/PullQueryExecutorTest.java | 2 +-
 457 files changed, 459 insertions(+), 459 deletions(-)

diff --git a/ksql-common/src/main/java/io/confluent/ksql/config/ImmutableProperties.java b/ksql-common/src/main/java/io/confluent/ksql/config/ImmutableProperties.java
index 3ad421b07cbe..9d0eaf931b11 100644
--- a/ksql-common/src/main/java/io/confluent/ksql/config/ImmutableProperties.java
+++ b/ksql-common/src/main/java/io/confluent/ksql/config/ImmutableProperties.java
@@ -29,7 +29,7 @@ public final class ImmutableProperties {
       .add(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)
       .add(KsqlConfig.KSQL_EXT_DIR)
       .add(KsqlConfig.KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_CONFIG)
-      .add(KsqlConfig.KSQL_QUERY_PULL_ENABLE_CONFIG)
+      .add(KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG)
       .add(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG)
       .addAll(KsqlConfig.SSL_CONFIG_NAMES)
       .build();
diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java
index c2efeb5ce7be..279436dccd0d 100644
--- a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java
+++ b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java
@@ -167,7 +167,7 @@ public class KsqlConfig extends AbstractConfig {
       + " which are secured with ACLs. Please enable only after careful consideration."
+ " If \"false\", KSQL pull queries will fail against a secure Kafka cluster"; - public static final String KSQL_QUERY_PULL_ENABLE_CONFIG = "ksql.query.pull.enable"; + public static final String KSQL_PULL_QUERIES_ENABLE_CONFIG = "ksql.pull.queries.enable"; public static final String KSQL_QUERY_PULL_ENABLE_DOC = "Config to enable or disable transient pull queries on a specific KSQL server."; public static final boolean KSQL_QUERY_PULL_ENABLE_DEFAULT = true; @@ -464,7 +464,7 @@ private static ConfigDef buildConfigDef(final ConfigGeneration generation) { Importance.LOW, METRIC_REPORTER_CLASSES_DOC ).define( - KSQL_QUERY_PULL_ENABLE_CONFIG, + KSQL_PULL_QUERIES_ENABLE_CONFIG, Type.BOOLEAN, KSQL_QUERY_PULL_ENABLE_DEFAULT, Importance.LOW, diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arraycontains_-_filter_rows_where_the_ARRAY_column_contains_a_specified_STRING b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arraycontains_-_filter_rows_where_the_ARRAY_column_contains_a_specified_STRING index 48537aa2e794..c539d2e04e8e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arraycontains_-_filter_rows_where_the_ARRAY_column_contains_a_specified_STRING +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arraycontains_-_filter_rows_where_the_ARRAY_column_contains_a_specified_STRING @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arraycontains_-_filter_rows_where_the_STRUCT-_ARRAY_column_contains_a_specified_STRING b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arraycontains_-_filter_rows_where_the_STRUCT-_ARRAY_column_contains_a_specified_STRING index b37b22f00042..514c9f94c932 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arraycontains_-_filter_rows_where_the_STRUCT-_ARRAY_column_contains_a_specified_STRING +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arraycontains_-_filter_rows_where_the_STRUCT-_ARRAY_column_contains_a_specified_STRING @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arrayindex_-_select_the_first_element_of_an_Array b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arrayindex_-_select_the_first_element_of_an_Array index 923c87149512..fdd762162314 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arrayindex_-_select_the_first_element_of_an_Array +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arrayindex_-_select_the_first_element_of_an_Array @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arrayindex_-_select_the_last_element_of_an_Array_(-1) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arrayindex_-_select_the_last_element_of_an_Array_(-1) index 923c87149512..fdd762162314 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arrayindex_-_select_the_last_element_of_an_Array_(-1) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/arrayindex_-_select_the_last_element_of_an_Array_(-1) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/asarray_-_construct_a_list_from_two_elements b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/asarray_-_construct_a_list_from_two_elements index 4f12cda3614d..ca507a7ca118 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/asarray_-_construct_a_list_from_two_elements +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/asarray_-_construct_a_list_from_two_elements @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/asmap_-_create_map_from_key_value_lists b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/asmap_-_create_map_from_key_value_lists index c137d8760151..9c6970eca5f0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/asmap_-_create_map_from_key_value_lists +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/asmap_-_create_map_from_key_value_lists @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : 
"localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_double b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_double index af53232d8c51..e91e5c40484c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_double +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_double @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_int b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_int index 78cb7e73e6a3..1359554d1f04 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_int +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_int @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_long b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_long index 167df1dcaa5d..9fac85eec88f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_long +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average-udaf_-_average_long @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average_-_calculate_average_in_select b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average_-_calculate_average_in_select index 8045d59047fe..df31f9d0be69 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average_-_calculate_average_in_select +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/average_-_calculate_average_in_select @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" 
: "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_array_dereference b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_array_dereference index 6402e8cff573..ab7626ea62e5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_array_dereference +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_array_dereference @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_bigint b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_bigint index 9e23b26b36d9..adced3a2fa0b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_bigint +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_bigint @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_floating_point b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_floating_point index f5f62fb13baf..791b4e31a6e1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_floating_point +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_floating_point @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers index f3b3745570fa..81865328ec64 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers_and_expressions b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers_and_expressions index b19bf16de8df..8b300a4c020a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers_and_expressions +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers_and_expressions @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers_and_variable_values b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers_and_variable_values index 21fa1430b214..56bad54bc682 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers_and_variable_values +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_integers_and_variable_values @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_string_values b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_string_values index 4e1846aba68f..605097de6b8f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_string_values +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_string_values @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : 
"auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_string_values_with_substring b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_string_values_with_substring index 4e1846aba68f..605097de6b8f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_string_values_with_substring +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_BETWEEN_with_string_values_with_substring @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_NOT_BETWEEN_with_integers b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_NOT_BETWEEN_with_integers index f3b3745570fa..81865328ec64 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_NOT_BETWEEN_with_integers +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/between_-_test_NOT_BETWEEN_with_integers @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_expression b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_expression index 197588eb397c..6d25027c320e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_expression +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_expression @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_expression_with_structs,_multiple_expression_and_the_same_type b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_expression_with_structs,_multiple_expression_and_the_same_type index 604e40fb2122..df59e2fcdb16 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_expression_with_structs,_multiple_expression_and_the_same_type +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_expression_with_structs,_multiple_expression_and_the_same_type @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_default_branch b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_default_branch index 7a755cedbf97..f4d9602f3f49 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_default_branch +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_default_branch @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_first_branch b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_first_branch index 7a755cedbf97..f4d9602f3f49 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_first_branch +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_first_branch @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_later_branch b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_later_branch index 7a755cedbf97..f4d9602f3f49 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_later_branch +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_returning_null_in_later_branch @@ -13,7 +13,7 @@ 
"ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_with_arithmetic_expression_in_result b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_with_arithmetic_expression_in_result index ad63e28398d1..b7d365ce613e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_with_arithmetic_expression_in_result +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_with_arithmetic_expression_in_result @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_with_null_in_when b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_with_null_in_when index 25a5e360633e..f64a961854e5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_with_null_in_when +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/case-expression_-_searched_case_with_null_in_when @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_decimal_to_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_decimal_to_decimal index 872a0640ba27..aecdd95424f2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_decimal_to_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_decimal_to_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_decimal_to_other b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_decimal_to_other index cd72355673a5..2ef88e988095 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_decimal_to_other +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_decimal_to_other @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_double_to_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_double_to_decimal index e4af79ec2ec3..60beb5a8910f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_double_to_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_double_to_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_integer_to_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_integer_to_decimal index e5a2ceaca7b7..4004be4a1a49 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_integer_to_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_integer_to_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_no_op b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_no_op index 487686246c63..3e2d8b732947 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_no_op +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_no_op @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", 
"ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_of_nulls b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_of_nulls index e35215c4462e..354f8ccd6626 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_of_nulls +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_of_nulls @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_string_to_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_string_to_decimal index c73acf84fa9c..d890de2fb1d1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_string_to_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/cast_-_string_to_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_bool_map b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_bool_map index c457166ef1e9..cc1c5d58348c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_bool_map +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_bool_map @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_bool_map_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_bool_map_table index d6983e052792..8b112ffd21d2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_bool_map_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_bool_map_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", 
"ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_double b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_double index 9039655b62ea..d3dafab2907d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_double +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_double @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_double_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_double_table index 4336ee1ecaf9..becc9b033643 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_double_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_double_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_int b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_int index da2d24ed79fe..4fc086f42719 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_int +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_int @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_int_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_int_table index 3dca097cb3cc..61a70da89043 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_int_table +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_int_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_long b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_long index 42983275cb65..20aaf8533015 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_long +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_long @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_long_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_long_table index 699639869914..a480051d3e11 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_long_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_long_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_string b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_string index 39a2b20b37eb..9ba6aa80ce0a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_string +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_string @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_string_table 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_string_table index 7c894fb32e14..87cc7afeaaf0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_string_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-list_-_collect_list_string_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_bool_map b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_bool_map index c457166ef1e9..cc1c5d58348c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_bool_map +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_bool_map @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_double b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_double index 9039655b62ea..d3dafab2907d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_double +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_double @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_int b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_int index da2d24ed79fe..4fc086f42719 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_int +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_int @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_long b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_long index 42983275cb65..20aaf8533015 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_long +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_long @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_string b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_string index 39a2b20b37eb..9ba6aa80ce0a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_string +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/collect-set_-_collect_set_string @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/concat_-_concat_fields_using_'+'_operator b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/concat_-_concat_fields_using_'+'_operator index c2b92880d573..7f06d72f5616 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/concat_-_concat_fields_using_'+'_operator +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/concat_-_concat_fields_using_'+'_operator @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/concat_-_concat_fields_using_CONCAT b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/concat_-_concat_fields_using_CONCAT index c2b92880d573..7f06d72f5616 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/concat_-_concat_fields_using_CONCAT +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/concat_-_concat_fields_using_CONCAT @@ -13,7 +13,7 @@ 
"ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count index cf3a0402f98c..16d68b411bf1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_literal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_literal index a198e4683561..16476e7808b3 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_literal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_literal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_star b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_star index 89379e765cc5..3f73e0b5c62d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_star +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_star @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_table index 0b6b334bdbdc..833214cba902 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/count_-_count_table @@ -13,7 +13,7 @@ 
"ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/datestring_-_date_to_string b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/datestring_-_date_to_string index 06958c68aa15..0c9da31ba44d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/datestring_-_date_to_string +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/datestring_-_date_to_string @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_AVRO_in_out b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_AVRO_in_out index 92820bfb2143..454319cd8f16 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_AVRO_in_out +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_AVRO_in_out @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_DELIMITED_in_out b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_DELIMITED_in_out index 92820bfb2143..454319cd8f16 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_DELIMITED_in_out +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_DELIMITED_in_out @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_GEQ_-_decimal_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_GEQ_-_decimal_decimal index 25c76045732d..499765417965 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_GEQ_-_decimal_decimal +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_GEQ_-_decimal_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_JSON_in_out b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_JSON_in_out index 92820bfb2143..454319cd8f16 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_JSON_in_out +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_JSON_in_out @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_LEQ_-_decimal_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_LEQ_-_decimal_decimal index 25c76045732d..499765417965 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_LEQ_-_decimal_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_LEQ_-_decimal_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition index fd4f54695d95..908162d17400 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_3_columns b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_3_columns index 880987077ffc..754dbef774e1 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_3_columns +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_3_columns @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_with_double b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_with_double index 25c3e07dcf22..7d19f52aebd7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_with_double +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_with_double @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_with_int b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_with_int index e6c8cf362398..3105685240a0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_with_int +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_addition_with_int @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_division b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_division index 1f5bf44edd93..7d665cb1b126 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_division +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_division @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_equal_-_decimal_decimal 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_equal_-_decimal_decimal index 25c76045732d..499765417965 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_equal_-_decimal_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_equal_-_decimal_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_greater_than_-_decimal_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_greater_than_-_decimal_decimal index 25c76045732d..499765417965 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_greater_than_-_decimal_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_greater_than_-_decimal_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_is_distinct_-_decimal_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_is_distinct_-_decimal_decimal index 25c76045732d..499765417965 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_is_distinct_-_decimal_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_is_distinct_-_decimal_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_decimal index 25c76045732d..499765417965 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_decimal_differing_scale b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_decimal_differing_scale index ec1c9cc0ee6d..7889e223fee1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_decimal_differing_scale +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_decimal_differing_scale @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_int b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_int index 36f4c354b9ed..c84b284bc7e1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_int +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_less_than_-_decimal_int @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_mod b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_mod index 36b7bdea3bfa..aea3691d64cd 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_mod +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_mod @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_multiplication b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_multiplication index 782b80ef0616..117662d5526d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_multiplication +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_multiplication @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", 
"ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_negation b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_negation index bf510b24ae5c..ef5ac994fd1e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_negation +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_negation @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_not_equal_-_decimal_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_not_equal_-_decimal_decimal index 25c76045732d..499765417965 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_not_equal_-_decimal_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_not_equal_-_decimal_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_subtraction b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_subtraction index fd4f54695d95..908162d17400 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_subtraction +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/decimal_-_subtraction @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format index 74f3435ce980..9fed9d6e408a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_$_separated_values_using_custom_delimiter_character b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_$_separated_values_using_custom_delimiter_character index 74f3435ce980..9fed9d6e408a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_$_separated_values_using_custom_delimiter_character +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_$_separated_values_using_custom_delimiter_character @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_SPACE_separated_values_using_custom_delimiter_character b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_SPACE_separated_values_using_custom_delimiter_character index 74f3435ce980..9fed9d6e408a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_SPACE_separated_values_using_custom_delimiter_character +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_SPACE_separated_values_using_custom_delimiter_character @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_TAB_separated_values_using_custom_delimiter_character b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_TAB_separated_values_using_custom_delimiter_character index 74f3435ce980..9fed9d6e408a 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_TAB_separated_values_using_custom_delimiter_character +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_TAB_separated_values_using_custom_delimiter_character @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_pipe_separated_values_-_should_take_source_delimiter_for_sink b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_pipe_separated_values_-_should_take_source_delimiter_for_sink index 74f3435ce980..9fed9d6e408a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_pipe_separated_values_-_should_take_source_delimiter_for_sink +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/delimited_-_select_delimited_value_format_with_pipe_separated_values_-_should_take_source_delimiter_for_sink @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema index bc91bf095675..c0a6803c2331 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_qualified_select_star_left b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_qualified_select_star_left index 949105048c59..88ff26f678e3 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_qualified_select_star_left +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_qualified_select_star_left @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_qualified_select_star_right b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_qualified_select_star_right index 9c159832503a..091b47762528 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_qualified_select_star_right +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_qualified_select_star_right @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_unqualified_select_star b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_unqualified_select_star index 71f22cf36ed7..c16398c984ec 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_unqualified_select_star +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_join_unqualified_select_star @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_non-join_leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_non-join_leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema index 674538170b42..06283f57b03a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_non-join_leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_non-join_leaves_aliased_ROWKEY_and_ROWTIME_in_output's_value_schema @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", 
"ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_non-join_qualified_select_star b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_non-join_qualified_select_star index 534280511d39..6a3ab1ba473f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_non-join_qualified_select_star +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_non-join_qualified_select_star @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_AVRO_uses_null_for_unknown_element b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_AVRO_uses_null_for_unknown_element index c0cddcd7e3af..f8151ea678df 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_AVRO_uses_null_for_unknown_element +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_AVRO_uses_null_for_unknown_element @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_array_element_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_array_element_OK index 29fd41efb504..d3a0f15898ea 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_array_element_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_array_element_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_bigint_elements_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_bigint_elements_OK index 4a7231d4c4c8..3df2cb48fa52 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_bigint_elements_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_bigint_elements_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_boolean_elements_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_boolean_elements_OK index 19d176979226..5ce60ce27188 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_boolean_elements_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_boolean_elements_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_double_elements_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_double_elements_OK index fe0f02745266..807de17ae6a0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_double_elements_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_double_elements_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_int_elements_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_int_elements_OK index fe9133d0b9b9..332312b068eb 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_int_elements_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_int_elements_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", 
"ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_map_element_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_map_element_OK index 1581fb1d7401..07c72165159b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_map_element_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_map_element_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_string_elements_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_string_elements_OK index 603e6279fef4..cd7cbda2d066 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_string_elements_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_string_elements_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_struct_element_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_struct_element_OK index 1e78c8d524fa..25667f5b322d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_struct_element_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_struct_element_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_with_elements_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_with_elements_OK index fe9133d0b9b9..332312b068eb 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_with_elements_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_with_elements_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" 
: "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_without_elements_OK b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_without_elements_OK index 5dc217321ba4..cf0727adbf93 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_without_elements_OK +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elements_-_validate_without_elements_OK @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_ELT_should_undo_FIELD b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_ELT_should_undo_FIELD index 3fc35bf586ff..e5b418df546f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_ELT_should_undo_FIELD +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_ELT_should_undo_FIELD @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_elect_the_second_parameter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_elect_the_second_parameter index 9b27f7ac26f0..a68bf9349b8e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_elect_the_second_parameter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_elect_the_second_parameter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_field_the_correct_parameter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_field_the_correct_parameter index e5296079a9f4..9d089af10eda 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_field_the_correct_parameter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/elt-field_-_field_the_correct_parameter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_explode_array_with_values b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_explode_array_with_values index e6f57b644e5e..30db3f5fd951 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_explode_array_with_values +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_explode_array_with_values @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_explode_different_types b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_explode_different_types index 406455805cd4..c9e5af7bdadc 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_explode_different_types +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_explode_different_types @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_udfs_with_table_functions_and_no_aliases,_verifies_intermediate_generated_column_names_don't_clash_with_aliases b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_udfs_with_table_functions_and_no_aliases,_verifies_intermediate_generated_column_names_don't_clash_with_aliases index 08cb4c038161..77453f5bc734 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_udfs_with_table_functions_and_no_aliases,_verifies_intermediate_generated_column_names_don't_clash_with_aliases +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/explode_-_udfs_with_table_functions_and_no_aliases,_verifies_intermediate_generated_column_names_don't_clash_with_aliases @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", 
"ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/extract-json-field_-_concat_two_extracted_fields b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/extract-json-field_-_concat_two_extracted_fields index 967dc3c5798f..755f116ef358 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/extract-json-field_-_concat_two_extracted_fields +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/extract-json-field_-_concat_two_extracted_fields @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/extract-json-field_-_extract_JSON_array_field b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/extract-json-field_-_extract_JSON_array_field index b1c79b2e7348..c41dee5ac7c2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/extract-json-field_-_extract_JSON_array_field +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/extract-json-field_-_extract_JSON_array_field @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/geodistance_-_geo_distance_with_radius b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/geodistance_-_geo_distance_with_radius index 9e60aaf8eafb..537c9332730e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/geodistance_-_geo_distance_with_radius +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/geodistance_-_geo_distance_with_radius @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/geodistance_-_geo_distance_without_radius 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/geodistance_-_geo_distance_without_radius index 413c1fc78526..52271abc8f62 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/geodistance_-_geo_distance_without_radius +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/geodistance_-_geo_distance_without_radius @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWKEY_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWKEY_(stream-_table) index 3fddd4a7a536..b4427ff5502a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWKEY_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWKEY_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWKEY_(stream-_table)_-_without_repartition b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWKEY_(stream-_table)_-_without_repartition index 3fddd4a7a536..b4427ff5502a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWKEY_(stream-_table)_-_without_repartition +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWKEY_(stream-_table)_-_without_repartition @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWTIME_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWTIME_(stream-_table) index b66daeb58fc6..6ead6b77e544 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWTIME_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_ROWTIME_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", 
"ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDAF_nested_in_UDF_in_select_expression_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDAF_nested_in_UDF_in_select_expression_(stream-_table) index 640833187dc4..a58549c3fc98 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDAF_nested_in_UDF_in_select_expression_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDAF_nested_in_UDF_in_select_expression_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDAF_nested_in_UDF_in_select_expression_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDAF_nested_in_UDF_in_select_expression_(table-_table) index 2b626ab564e3..1dfba7ad166e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDAF_nested_in_UDF_in_select_expression_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDAF_nested_in_UDF_in_select_expression_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDF_nested_in_UDAF_in_select_expression_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDF_nested_in_UDAF_in_select_expression_(stream-_table) index 93a27c0d1cf6..08f1f5566fee 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDF_nested_in_UDAF_in_select_expression_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_UDF_nested_in_UDAF_in_select_expression_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_arithmetic_binary_expression_with_projection_in-order_&_non-commutative_group_by_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_arithmetic_binary_expression_with_projection_in-order_&_non-commutative_group_by_(stream-_table) index 56c112caf6ae..ec6b561e2749 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_arithmetic_binary_expression_with_projection_in-order_&_non-commutative_group_by_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_arithmetic_binary_expression_with_projection_in-order_&_non-commutative_group_by_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_arithmetic_binary_expression_with_projection_in-order_&_non-commutative_group_by_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_arithmetic_binary_expression_with_projection_in-order_&_non-commutative_group_by_(table-_table) index be42ce3b9e2b..6842b747ac5b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_arithmetic_binary_expression_with_projection_in-order_&_non-commutative_group_by_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_arithmetic_binary_expression_with_projection_in-order_&_non-commutative_group_by_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_constant_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_constant_(stream-_table) index b66daeb58fc6..6ead6b77e544 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_constant_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_constant_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_constant_(table-_table) 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_constant_(table-_table) index dd55dc558f45..3ab4aa4e15b5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_constant_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_constant_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_duplicate_fields_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_duplicate_fields_(stream-_table) index 146956bd8483..cd1d0f3db491 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_duplicate_fields_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_duplicate_fields_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_(stream-_table) index fc12f0b14cd1..b43d4a8c101b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_(stream-_table)_-_format b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_(stream-_table)_-_format index fc12f0b14cd1..b43d4a8c101b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_(stream-_table)_-_format +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_(stream-_table)_-_format @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_field_used_in_function_in_projection_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_field_used_in_function_in_projection_(stream-_table) index 79d2fd77133b..69ec0fb9ac44 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_field_used_in_function_in_projection_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_field_used_in_function_in_projection_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_field_used_in_function_in_projection_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_field_used_in_function_in_projection_(table-_table) index eea9f1b3f080..401287d0cfc3 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_field_used_in_function_in_projection_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_field_used_in_function_in_projection_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(stream-_table) index ad4644a9c4d0..37bcb9e89618 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(stream-_table)_-_format 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(stream-_table)_-_format index ad4644a9c4d0..37bcb9e89618 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(stream-_table)_-_format +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(stream-_table)_-_format @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(table-_table) index ef0b1e5a82ae..8dafcd50c704 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_field_with_re-key_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(stream-_table) index a8fcbc19a6c0..6ebd1425c141 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(stream-_table)_-_format b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(stream-_table)_-_format index a8fcbc19a6c0..6ebd1425c141 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(stream-_table)_-_format +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(stream-_table)_-_format @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : 
"true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(table-_table) index 0e0490ada044..ef6f42236623 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(table-_table)_-_format b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(table-_table)_-_format index 0e0490ada044..ef6f42236623 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(table-_table)_-_format +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_fields_(table-_table)_-_format @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_function_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_function_(stream-_table) index 339fb430dbb0..27109db7f2cd 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_function_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_function_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_function_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_function_(table-_table) index eea9f1b3f080..401287d0cfc3 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_function_(table-_table) +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_function_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_json_field_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_json_field_(stream-_table) index 8c667e581348..51296e9a84a1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_json_field_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_json_field_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_missing_matching_projection_field_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_missing_matching_projection_field_(stream-_table) index 0f730e168010..f145fc9a36cb 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_missing_matching_projection_field_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_missing_matching_projection_field_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_missing_matching_projection_field_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_missing_matching_projection_field_(table-_table) index 069aee083880..574ee13c332d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_missing_matching_projection_field_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_missing_matching_projection_field_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", 
"ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_string_concat_using_+_op_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_string_concat_using_+_op_(stream-_table) index 6a4b72e6ef63..f233e7b368d1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_string_concat_using_+_op_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_string_concat_using_+_op_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_string_concat_using_+_op_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_string_concat_using_+_op_(table-_table) index 00e705c7597d..e0c7097da092 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_string_concat_using_+_op_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_string_concat_using_+_op_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_(stream-_table) index ad4644a9c4d0..37bcb9e89618 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_(table-_table) index ef0b1e5a82ae..8dafcd50c704 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_involving_source_field_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_involving_source_field_(stream-_table) index 72139d62ca56..0be431cc563f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_involving_source_field_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_involving_source_field_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_involving_source_field_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_involving_source_field_(table-_table) index 34a1abdd585b..d875687358c8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_involving_source_field_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_aggregate_arithmetic_involving_source_field_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_constant_having_(stream-table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_constant_having_(stream-table) index 0e540c4d0eed..f9febe260c8f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_constant_having_(stream-table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_constant_having_(stream-table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", 
"ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_constants_in_the_projection_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_constants_in_the_projection_(stream-_table) index 906b989cd352..c55ca404ff95 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_constants_in_the_projection_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_constants_in_the_projection_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_groupings_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_groupings_(stream-_table) index e090656e8a77..81322c2b269f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_groupings_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_groupings_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_(stream-_table) index ec679de4ab0b..24d816fc6fd7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_(table-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_(table-_table) index 7e673c059882..7bdc0ce749aa 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_(table-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_(table-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_on_non-group-by_field_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_on_non-group-by_field_(stream-_table) index aef5214d500d..d03c3b841a7b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_on_non-group-by_field_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_having_expression_on_non-group-by_field_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_multiple_having_expressions_(stream-_table) b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_multiple_having_expressions_(stream-_table) index f0c18d8b144b..37ee0f523e59 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_multiple_having_expressions_(stream-_table) +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/group-by_-_with_multiple_having_expressions_(stream-_table) @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/having_-_calculate_average_in_having b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/having_-_calculate_average_in_having index fe3d70dc6a68..e7b9d51677bd 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/having_-_calculate_average_in_having +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/having_-_calculate_average_in_having @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/having_-_table_having b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/having_-_table_having index 81633c93ebea..ab3cdb259c2b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/having_-_table_having +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/having_-_table_having @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/histogram_-_histogram_on_a_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/histogram_-_histogram_on_a_table index 7d01856f01bf..2498c85edfb9 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/histogram_-_histogram_on_a_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/histogram_-_histogram_on_a_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/histogram_-_histogram_string b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/histogram_-_histogram_string index 2baeb2e280da..6ad96386d29a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/histogram_-_histogram_string +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/histogram_-_histogram_string @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_count b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_count index 
81e905bd9945..0c5d0eca033b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_count +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_count @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_import_hopping_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_import_hopping_table index 79afb0999c39..b1f90fa87f69 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_import_hopping_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_import_hopping_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_max_hopping b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_max_hopping index f5b0aa0adf1c..8230782d3ef5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_max_hopping +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_max_hopping @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_min_hopping b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_min_hopping index f5b0aa0adf1c..8230782d3ef5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_min_hopping +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_min_hopping @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_topk_hopping b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_topk_hopping index 3f1cd78c2350..c1ea88269246 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_topk_hopping +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_topk_hopping @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_topkdistinct_hopping b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_topkdistinct_hopping index 3f1cd78c2350..c1ea88269246 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_topkdistinct_hopping +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/hopping-windows_-_topkdistinct_hopping @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source index 6e810ea52ad1..a03ebb608392 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source_with_AS b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source_with_AS index 6e810ea52ad1..a03ebb608392 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source_with_AS +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source_with_AS @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + 
"ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_left_unaliased_right b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_left_unaliased_right index c5a61eea92ab..075ff3076b05 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_left_unaliased_right +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_left_unaliased_right @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_source b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_source index d1bc8d618a9b..13e4a10e5bd0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_source +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_source @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_source_with_AS b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_source_with_AS index d1bc8d618a9b..13e4a10e5bd0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_source_with_AS +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_source_with_AS @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_prefixed_wildcard_select_with_aliased_source b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_prefixed_wildcard_select_with_aliased_source index d1bc8d618a9b..13e4a10e5bd0 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_prefixed_wildcard_select_with_aliased_source +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_prefixed_wildcard_select_with_aliased_source @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_unaliased_left_aliased_right b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_unaliased_left_aliased_right index 419b8d7e72f7..e5da6585faad 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_unaliased_left_aliased_right +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_unaliased_left_aliased_right @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_wildcard_select_with_aliased_source b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_wildcard_select_with_aliased_source index d1bc8d618a9b..13e4a10e5bd0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_wildcard_select_with_aliased_source +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_wildcard_select_with_aliased_source @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/initcap_-_do_initcap b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/initcap_-_do_initcap index d34a03d1fb93..157f2033632e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/initcap_-_do_initcap +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/initcap_-_do_initcap @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : 
"auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__AVRO_to_JSON b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__AVRO_to_JSON index 12e04b753393..8c4df3d9bd23 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__AVRO_to_JSON +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__AVRO_to_JSON @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__DELIMITED_to_JSON b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__DELIMITED_to_JSON index 78b05efc9ad6..caac3c62f056 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__DELIMITED_to_JSON +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__DELIMITED_to_JSON @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__JSON_to_AVRO b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__JSON_to_AVRO index 12e04b753393..8c4df3d9bd23 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__JSON_to_AVRO +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_convert_formats__JSON_to_AVRO @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_simple b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_simple index 78b05efc9ad6..caac3c62f056 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_simple +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_simple @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", 
"ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_with_custom_topic_name b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_with_custom_topic_name index ca8a7330c4c1..27266cff9949 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_with_custom_topic_name +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/insert-into_-_with_custom_topic_name @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts index 3f5fc000c67d..87736e88a55a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts_extractor_both_sides b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts_extractor_both_sides index 980be72b6b9b..99366f9414d5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts_extractor_both_sides +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts_extractor_both_sides @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", 
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_table_join_with_ts_extractor_both_sides b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_table_join_with_ts_extractor_both_sides index 4b8bcb26b3a4..975b69045c1b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_table_join_with_ts_extractor_both_sides +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_table_join_with_ts_extractor_both_sides @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_table_table_inner_join_with_ts b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_table_table_inner_join_with_ts index 1501b6ba60fb..777e57f76987 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_table_table_inner_join_with_ts +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_table_table_inner_join_with_ts @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_table_table_inner_join_with_ts_extractor_both_sides b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_table_table_inner_join_with_ts_extractor_both_sides index 0cf5c54f7943..0b4d5225b728 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_table_table_inner_join_with_ts_extractor_both_sides +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_table_table_inner_join_with_ts_extractor_both_sides @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria index 
index 5d58e1767475..2926b75846cb 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_join_key_not_in_projection
index 45ac08487b3c..d02a374b26e5 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_join_key_not_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_join_key_not_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection
index 2153d904faee..7d3efc72e749 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_no_source_key_fields b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_no_source_key_fields
index 97b569416aeb..fdabb61566d7 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_no_source_key_fields
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_no_source_key_fields
@@ -13,7 +13,7 @@
"ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_join_key_in_projection index bbb62cd156fa..f18d2ed6e9d8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_join_key_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_join_key_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection index 2153d904faee..7d3efc72e749 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_multiple_join_keys_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_multiple_join_keys_in_projection index 4d28e5e59dcc..5eb6633fb917 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_multiple_join_keys_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_multiple_join_keys_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" 
: "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join index 73024c73c8e2..93fd5415057f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_join_key_not_in_projection index 0e732a55c2a0..6e2332e1bd86 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_join_key_not_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_join_key_not_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_right_join_key_in_projection index 3f0bef933477..a594dfc27f4c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_right_join_key_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_right_join_key_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_fields b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_fields index 4e6d7750fdd8..d5b2afcd310b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_fields +++ 
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_fields
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_left_fields_some_right b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_left_fields_some_right
index 67c96930df5f..3a8c5d7dd970 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_left_fields_some_right
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_left_fields_some_right
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_right_fields_some_left b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_right_fields_some_left
index 70dd5d24d9f2..70b894706142 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_right_fields_some_left
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_right_fields_some_left
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_different_before_and_after_windows b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_different_before_and_after_windows
index 73024c73c8e2..93fd5415057f 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_different_before_and_after_windows
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_different_before_and_after_windows
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
"ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_out_of_order_messages b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_out_of_order_messages index 73024c73c8e2..93fd5415057f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_out_of_order_messages +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_out_of_order_messages @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join index 97b569416aeb..fdabb61566d7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_both_join_keys_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_both_join_keys_in_projection index c96d5f89233b..29c84ce6ae2f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_both_join_keys_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_both_join_keys_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection 
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection
index 478ad3a63f6c..8d3da68c7e0f 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_rekey b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_rekey
index 11635186ad0b..c6ff88c986fd 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_rekey
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_rekey
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_right_join_key_in_projection
index dc0297e2afe2..8cb1a944717a 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_right_join_key_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_right_join_key_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join
index e71f242a4a7d..90ada71ec074 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
: "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join_-_right_join_key_in_projection index e43a32b6753e..26e95ddea077 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join_-_right_join_key_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join_-_right_join_key_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join index 5d58e1767475..2926b75846cb 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_join_key_not_in_projection index 45ac08487b3c..d02a374b26e5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_join_key_not_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_join_key_not_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection 
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection
index bbb62cd156fa..f18d2ed6e9d8 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join
index a66917acce0a..804d772620e9 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_join_key_not_in_projection
index 979db1c867ab..608b6d192db9 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_join_key_not_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_join_key_not_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_right_join_key_in_projection
index 11884306beb9..9bab9545cfb6 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_right_join_key_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_right_join_key_in_projection
@@ -13,7 +13,7 @@
"1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_unwrapped_single_field_value_schema_on_inputs b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_unwrapped_single_field_value_schema_on_inputs index b5768972d891..52ff6274c715 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_unwrapped_single_field_value_schema_on_inputs +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_unwrapped_single_field_value_schema_on_inputs @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_unwrapped_single_field_value_schema_on_inputs_and_output b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_unwrapped_single_field_value_schema_on_inputs_and_output index 932c3600f309..d97e282967c0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_unwrapped_single_field_value_schema_on_inputs_and_output +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_unwrapped_single_field_value_schema_on_inputs_and_output @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_wrapped_single_field_value_schema_on_inputs b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_wrapped_single_field_value_schema_on_inputs index ace850075aed..55711252dac7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_wrapped_single_field_value_schema_on_inputs +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_stream_wrapped_single_field_value_schema_on_inputs @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_unwrapped_single_field_value_schema_on_inputs b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_unwrapped_single_field_value_schema_on_inputs index 9bfe46418262..c72e965d825f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_unwrapped_single_field_value_schema_on_inputs +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_unwrapped_single_field_value_schema_on_inputs @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_unwrapped_single_field_value_schema_on_inputs_and_output b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_unwrapped_single_field_value_schema_on_inputs_and_output index 51e7d8c64516..775317cefa73 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_unwrapped_single_field_value_schema_on_inputs_and_output +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_unwrapped_single_field_value_schema_on_inputs_and_output @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_neither_have_key_field_and_joining_by_table_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_neither_have_key_field_and_joining_by_table_ROWKEY index 9e5f0e54a2a5..a6aae02ab8c2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_neither_have_key_field_and_joining_by_table_ROWKEY +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_neither_have_key_field_and_joining_by_table_ROWKEY @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_table_does_not_have_key_field_and_joining_by_table_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_table_does_not_have_key_field_and_joining_by_table_ROWKEY
index a52638a938ad..445292d13f79 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_table_does_not_have_key_field_and_joining_by_table_ROWKEY
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_table_does_not_have_key_field_and_joining_by_table_ROWKEY
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_wrapped_single_field_value_schema_on_inputs b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_wrapped_single_field_value_schema_on_inputs
index 615d11a306b8..f0c3b6e0983c 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_wrapped_single_field_value_schema_on_inputs
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_wrapped_single_field_value_schema_on_inputs
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_join_pipeline b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_join_pipeline
index ba5452a959a2..a94ddbfdb4b7 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_join_pipeline
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_join_pipeline
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join
index 50fb69f6c7f5..d7c7943639b6 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join_-_join_key_not_in_projection
index e0ca2e5d8687..7fb27d67a6e6 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join_-_join_key_not_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join_-_join_key_not_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join_-_right_join_key_in_projection
index fb3f3dc3d2f2..d1d2757ff5b3 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join_-_right_join_key_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_inner_join_-_right_join_key_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_join_with_where_clause b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_join_with_where_clause
index 63d4c0919340..e5036b398cb2 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_join_with_where_clause
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_join_with_where_clause
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join index bcc4d582cfcd..b08122809885 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_both_join_keys_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_both_join_keys_in_projection index 61da0e442e47..ab8dce5f9526 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_both_join_keys_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_both_join_keys_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_join_key_not_in_projection index 6d3e73972b6a..4701ac9bdf31 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_join_key_not_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_join_key_not_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_right_join_key_in_projection index 7c0cf0cbe0ee..504b0021473c 100644 --- 
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_right_join_key_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_left_join_-_right_join_key_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_outer_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_outer_join
index ec6bb112f438..8f7ba46306de 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_outer_join
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_outer_join
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_outer_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_outer_join_-_right_join_key_in_projection
index 2aabb0b50d5f..b653cde34cff 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_outer_join_-_right_join_key_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_table_outer_join_-_right_join_key_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_unwrapped_single_field_value_schema_on_inputs b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_unwrapped_single_field_value_schema_on_inputs
index 8bb516ad5e89..efa69ac33d66 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_unwrapped_single_field_value_schema_on_inputs
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_unwrapped_single_field_value_schema_on_inputs
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
"ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_unwrapped_single_field_value_schema_on_inputs_and_output b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_unwrapped_single_field_value_schema_on_inputs_and_output index aa46aeaecf89..23afb48dade8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_unwrapped_single_field_value_schema_on_inputs_and_output +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_unwrapped_single_field_value_schema_on_inputs_and_output @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_wrapped_single_field_value_schema_on_inputs b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_wrapped_single_field_value_schema_on_inputs index 276a5aea79fb..772f3b90f81f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_wrapped_single_field_value_schema_on_inputs +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_table_to_table_wrapped_single_field_value_schema_on_inputs @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_unqualified_join_criteria b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_unqualified_join_criteria index f4bbf8697122..4b1e97a64a96 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_unqualified_join_criteria +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_unqualified_join_criteria @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_multiple_copies_of_key_field_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_multiple_copies_of_key_field_in_projection
index 82def22a6df6..cd02078d2301 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_multiple_copies_of_key_field_in_projection
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_multiple_copies_of_key_field_in_projection
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_in_value___aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_in_value___aliasing
index 0bfb8e3c9929..385f3796b47d 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_in_value___aliasing
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_in_value___aliasing
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_in_value___no_aliasing
index 2bd362362a4a..1a9caa635102 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_in_value___no_aliasing
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_in_value___no_aliasing
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_not_in_value___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_not_in_value___-
index 04e2fa6178fd..cee4f79e28ed 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_not_in_value___-
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___group_by_(-)___key_not_in_value___-
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___no_key_change___-___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___no_key_change___-___-
index d1bc8d618a9b..13e4a10e5bd0 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___no_key_change___-___-
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___no_key_change___-___-
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___partition_by_(-)___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___partition_by_(-)___key_in_value___no_aliasing
index 353ee254c64f..492381f393d1 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___partition_by_(-)___key_in_value___no_aliasing
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_null___partition_by_(-)___key_in_value___no_aliasing
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_in_value___aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_in_value___aliasing
index 0bfb8e3c9929..385f3796b47d 100644
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_in_value___aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_in_value___aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_in_value___no_aliasing index 861f96095c1d..f79d4c9986d8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_in_value___no_aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_not_in_value___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_not_in_value___- index 04e2fa6178fd..cee4f79e28ed 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_not_in_value___- +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(different)___key_not_in_value___- @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_in_value___aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_in_value___aliasing index 5a1659054962..8527d8d6a271 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_in_value___aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_in_value___aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_in_value___no_aliasing index 458264ea2c3a..407b28ab99bf 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_in_value___no_aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_not_in_value___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_not_in_value___- index 3e581d63517a..30b88a4d6032 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_not_in_value___- +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_(same)___key_not_in_value___- @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_expression___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_expression___key_in_value___no_aliasing index 409af6e926a7..0a100f61fd82 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_expression___key_in_value___no_aliasing +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_expression___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_multiple___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_multiple___key_in_value___no_aliasing index abfaf2e6dac7..8bae4c8e5f01 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_multiple___key_in_value___no_aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___group_by_multiple___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___aliasing index c5c4ac305853..970d53d73dbc 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___aliasing_+_duplicate b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___aliasing_+_duplicate index 5f2d82b05dde..4c544dfcbfb5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___aliasing_+_duplicate +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___aliasing_+_duplicate @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___no_aliasing index d1bc8d618a9b..13e4a10e5bd0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___no_aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_not_in_value___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_not_in_value___- index 140a48d68d5b..78924692cac5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_not_in_value___- +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_stream___initially_set___no_key_change___key_not_in_value___- @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___group_by_(-)___key_in_value___aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___group_by_(-)___key_in_value___aliasing index bbf20465eec1..702fd788bb2f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___group_by_(-)___key_in_value___aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___group_by_(-)___key_in_value___aliasing @@ -13,7 +13,7 @@ 
"ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___group_by_(-)___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___group_by_(-)___key_in_value___no_aliasing index b1a9a9d59d99..861682625a43 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___group_by_(-)___key_in_value___no_aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___group_by_(-)___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___no_key_change___-___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___no_key_change___-___- index 2e685a13338f..deddfce34b63 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___no_key_change___-___- +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_null___no_key_change___-___- @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_in_value___aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_in_value___aliasing index 94271f5aa7aa..3cc8f244a1d7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_in_value___aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_in_value___aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", 
"ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_in_value___no_aliasing index cf05412ec0f1..b634afd81796 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_in_value___no_aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_not_in_value___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_not_in_value___- index 5f75273d5b19..7f482569784e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_not_in_value___- +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(different)___key_not_in_value___- @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_in_value___aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_in_value___aliasing index 94271f5aa7aa..3cc8f244a1d7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_in_value___aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_in_value___aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", 
"ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_in_value___no_aliasing index 789a096180c9..26d4ff6cdacd 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_in_value___no_aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_not_in_value___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_not_in_value___- index 5f75273d5b19..7f482569784e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_not_in_value___- +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___group_by_(same)___key_not_in_value___- @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_in_value___aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_in_value___aliasing index 891ba1e47833..db7f3f4326a1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_in_value___aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_in_value___aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_in_value___no_aliasing b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_in_value___no_aliasing index bb5a014b3642..35249a338a2d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_in_value___no_aliasing +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_in_value___no_aliasing @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_not_in_value___- b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_not_in_value___- index 8febe53a726e..8ecc7321c874 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_not_in_value___- +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_table___initially_set___no_key_change___key_not_in_value___- @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_using_full_source_name_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_using_full_source_name_in_projection index c17d00a7c347..94d0338db964 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_using_full_source_name_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_using_full_source_name_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_using_source_alias_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_using_source_alias_in_projection index c17d00a7c347..94d0338db964 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_using_source_alias_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_using_source_alias_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_clause b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_clause index c17d00a7c347..94d0338db964 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_clause +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_clause @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_clause_with_alias b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_clause_with_alias index 736812630ae5..77a24f633c0a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_clause_with_alias +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_clause_with_alias @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection index b73c467256d7..eada382bc1f6 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : 
"localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_KEY_value_field_name b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_KEY_value_field_name index ccce916231df..16d9d69acc27 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_KEY_value_field_name +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_KEY_value_field_name @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_stream_explicit_STRING_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_stream_explicit_STRING_ROWKEY index 3f407fbe0a48..2116e0089ab4 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_stream_explicit_STRING_ROWKEY +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_stream_explicit_STRING_ROWKEY @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_stream_implicit_STRING_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_stream_implicit_STRING_ROWKEY index 3f407fbe0a48..2116e0089ab4 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_stream_implicit_STRING_ROWKEY +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_stream_implicit_STRING_ROWKEY @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_table_explicit_STRING_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_table_explicit_STRING_ROWKEY index 760a4b1c5672..59b4a6605f74 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_table_explicit_STRING_ROWKEY +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_table_explicit_STRING_ROWKEY @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", 
"ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_table_implicit_STRING_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_table_implicit_STRING_ROWKEY index 760a4b1c5672..59b4a6605f74 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_table_implicit_STRING_ROWKEY +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-schemas_-_table_implicit_STRING_ROWKEY @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_BIGINT_literal_min_max b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_BIGINT_literal_min_max index 2fdd6ce3dcff..9d53c0113007 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_BIGINT_literal_min_max +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_BIGINT_literal_min_max @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_BOOLEAN_literal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_BOOLEAN_literal index 910ade370d78..f227bb3affa5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_BOOLEAN_literal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_BOOLEAN_literal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_DOUBLE_literal_min_max b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_DOUBLE_literal_min_max index 36bd528ad702..4808af42c1ea 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_DOUBLE_literal_min_max +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_DOUBLE_literal_min_max @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_INT_literal_min_max b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_INT_literal_min_max index d1c4f319ef0c..1fe31d3bc537 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_INT_literal_min_max +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/literals_-_INT_literal_min_max @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_abs b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_abs index b937bed08b3f..7f516667f915 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_abs +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_abs @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_calculate_CEIL_function b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_calculate_CEIL_function index b937bed08b3f..7f516667f915 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_calculate_CEIL_function +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_calculate_CEIL_function @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_exp 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_exp index 09c47f6368a9..de0cdfe4617d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_exp +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_exp @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_floor b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_floor index b937bed08b3f..7f516667f915 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_floor +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_floor @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_ln b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_ln index 09c47f6368a9..de0cdfe4617d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_ln +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_ln @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_round b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_round index df588d59a6a1..028631991371 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_round +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_round @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_round_with_large_DECIMAL_values 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_round_with_large_DECIMAL_values index bb179fdcd571..6ab7a768192c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_round_with_large_DECIMAL_values +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_round_with_large_DECIMAL_values @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_sign b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_sign index b5abbcf72660..63bd555aa11b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_sign +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_sign @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_sqrt b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_sqrt index 09c47f6368a9..de0cdfe4617d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_sqrt +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/math_-_sqrt @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_decimal_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_decimal_group_by index e5c546f9361a..24503deb4796 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_decimal_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_decimal_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_double_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_double_group_by index 5fa24dcb88dd..78762203e129 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_double_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_double_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_integer_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_integer_group_by index 7959d80f818e..e3ba153dd1af 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_integer_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_integer_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_long_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_long_group_by index 3d667e3a58fb..f769a5c05d92 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_long_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/max-group-by_-_max_long_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_decimal_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_decimal_group_by index ba930a151824..3c883deeed4a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_decimal_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_decimal_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", 
"ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_double_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_double_group_by index c41c75f73163..4e24c90bc73f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_double_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_double_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_integer_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_integer_group_by index c39a77f03b9c..78e9e6f331c3 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_integer_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_integer_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_long_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_long_group_by index 87af22fe9830..c9eefa201513 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_long_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/min-group-by_-_min_long_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_array,_map,_map_value_struct b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_array,_map,_map_value_struct index 72276dbe131f..b817880473b8 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_array,_map,_map_value_struct +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_array,_map,_map_value_struct @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_array_and_map_items b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_array_and_map_items index fab08df94caf..be671667300c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_array_and_map_items +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_array_and_map_items @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_star b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_star index d11ad0ba58de..2a60c19f2ba5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_star +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/more-complex-struct_-_complex_struct_select_star @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/multiple-avro-maps_-_project_multiple_avro_maps b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/multiple-avro-maps_-_project_multiple_avro_maps index be8a213b32f1..cbd8ed927d5b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/multiple-avro-maps_-_project_multiple_avro_maps +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/multiple-avro-maps_-_project_multiple_avro_maps @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + 
"ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_aliased_key_field_-_same_name b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_aliased_key_field_-_same_name index d387d3e8bc68..b06252e8a2a3 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_aliased_key_field_-_same_name +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_aliased_key_field_-_same_name @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by index 65a9c08c516f..57a5b8cb8fc7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_ROWKEY index 1e57a8b412be..489eec49c94b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_ROWKEY +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_ROWKEY @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_ROWTIME b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_ROWTIME index ab27c11a68b0..e16a5bdc538e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_ROWTIME +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_ROWTIME @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_null_partition_by_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_null_partition_by_value index bc4c57bf9b5c..1bb37b657192 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_null_partition_by_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_null_partition_by_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_null_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_null_value index bc4c57bf9b5c..1bb37b657192 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_null_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_null_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_projection_select_all b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_projection_select_all index 6205fe2f848d..e39e3eaa603f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_projection_select_all +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_projection_select_all @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", 
"ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_projection_select_some b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_projection_select_some index bc4c57bf9b5c..1bb37b657192 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_projection_select_some +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/partition-by_-_partition_by_with_projection_select_some @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_CSAS_with_custom_Kafka_topic_name b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_CSAS_with_custom_Kafka_topic_name index 4d4186c6a2b0..0ccf5eb3a4c5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_CSAS_with_custom_Kafka_topic_name +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_CSAS_with_custom_Kafka_topic_name @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_BETWEEN b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_BETWEEN index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_BETWEEN +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_BETWEEN @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_IS_DISTINCT_FROM b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_IS_DISTINCT_FROM index 17406e7b5285..3536a3f1061e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_IS_DISTINCT_FROM +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_IS_DISTINCT_FROM @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_IS_NOT_DISTINCT_FROM b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_IS_NOT_DISTINCT_FROM index 17406e7b5285..3536a3f1061e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_IS_NOT_DISTINCT_FROM +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_IS_NOT_DISTINCT_FROM @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NOT_BETWEEN b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NOT_BETWEEN index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NOT_BETWEEN +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NOT_BETWEEN @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NOT_NULL b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NOT_NULL index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NOT_NULL +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NOT_NULL @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NULL b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NULL index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NULL +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_NULL @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_like_pattern b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_like_pattern index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_like_pattern +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_like_pattern @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_like_pattern_without_wildcards b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_like_pattern_without_wildcards index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_like_pattern_without_wildcards +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_like_pattern_without_wildcards @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_long_literal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_long_literal index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_long_literal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_long_literal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", 
"ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_not_like_pattern b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_not_like_pattern index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_not_like_pattern +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_not_like_pattern @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_string_literal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_string_literal index 9e2ef25f46bc..cfbdcf0f6bb7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_string_literal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Filter_on_string_literal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Map_filter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Map_filter index 5c76ab6af762..7f9e12fd012b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Map_filter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Map_filter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array index 41a0e58842e4..45a1779c64cc 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array_2 b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array_2 index c040d2d13eeb..0b7eabd5fc85 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array_2 +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Json_Multi_Dimensional_Array_2 @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Null_row_filter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Null_row_filter index 882877e9e3b5..ac5115104c04 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Null_row_filter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Null_row_filter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Project_fields_with_reserved_name b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Project_fields_with_reserved_name index d6613eb78bf7..010467eeb2ee 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Project_fields_with_reserved_name +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Project_fields_with_reserved_name @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Project_struct_fields_with_reserved_name b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Project_struct_fields_with_reserved_name index e479aab6be7f..9006891667b2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Project_struct_fields_with_reserved_name +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_Project_struct_fields_with_reserved_name @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_WHERE_with_many_comparisons._This_tests_the_fix_for_#1784 b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_WHERE_with_many_comparisons._This_tests_the_fix_for_#1784 index 3302264edda4..6870dd4bc7bb 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_WHERE_with_many_comparisons._This_tests_the_fix_for_#1784 +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_WHERE_with_many_comparisons._This_tests_the_fix_for_#1784 @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_and_filter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_and_filter index f911e53c2e95..97e982a2258b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_and_filter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_and_filter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_and_negative_filter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_and_negative_filter index 
2ab8b5d3c3a0..878183826e63 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_and_negative_filter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_and_negative_filter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_string_with_embedded_code b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_string_with_embedded_code index 7c3484759c8b..ec9143be9893 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_string_with_embedded_code +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/project-filter_-_project_string_with_embedded_code @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_create_table_with_key_that_is_quoted b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_create_table_with_key_that_is_quoted index 78336adac710..21443b9e9d41 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_create_table_with_key_that_is_quoted +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_create_table_with_key_that_is_quoted @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_joins_using_fields_that_require_quotes b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_joins_using_fields_that_require_quotes index c0efdcf97417..31338cc92940 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_joins_using_fields_that_require_quotes +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_joins_using_fields_that_require_quotes @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : 
"true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_math_using_fields_that_require_quotes b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_math_using_fields_that_require_quotes index 8e215dc5cc4e..6014f20f59ab 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_math_using_fields_that_require_quotes +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_math_using_fields_that_require_quotes @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_sink_fields_that_require_quotes b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_sink_fields_that_require_quotes index c46f8ff0fc74..935f9504f5d2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_sink_fields_that_require_quotes +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_sink_fields_that_require_quotes @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_source_fields_that_require_quotes b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_source_fields_that_require_quotes index 9550ef65138c..e999b891427a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_source_fields_that_require_quotes +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_source_fields_that_require_quotes @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_source_names_requiring_quotes 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_source_names_requiring_quotes index 14ac262e336a..55db88d85d9f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_source_names_requiring_quotes +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_source_names_requiring_quotes @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_udf_using_fields_that_require_quotes b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_udf_using_fields_that_require_quotes index 4f7c55956b2f..5a2ce002ab03 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_udf_using_fields_that_require_quotes +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/quoted-identifiers_-_udf_using_fields_that_require_quotes @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/replace_-_replace b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/replace_-_replace index b5b55d061015..8ec1bf05beb0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/replace_-_replace +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/replace_-_replace @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME index f3b3745570fa..81865328ec64 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_AND b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_AND index f3b3745570fa..81865328ec64 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_AND +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_AND @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_BETWEEN b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_BETWEEN index f3b3745570fa..81865328ec64 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_BETWEEN +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_BETWEEN @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_inexact_timestring b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_inexact_timestring index f3b3745570fa..81865328ec64 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_inexact_timestring +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_inexact_timestring @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_timezone b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_timezone index f3b3745570fa..81865328ec64 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_timezone +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/rowtime_-_test_ROWTIME_with_timezone @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialization_should_default_to_wrapped_values b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialization_should_default_to_wrapped_values index 7f179e65264d..0b1e5de8ee19 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialization_should_default_to_wrapped_values +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialization_should_default_to_wrapped_values @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialization_should_pick_up_value_wrapping_from_config b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialization_should_pick_up_value_wrapping_from_config index 4a0f418b15b2..b9ad710097b9 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialization_should_pick_up_value_wrapping_from_config +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialization_should_pick_up_value_wrapping_from_config @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value index fcdf4f9884d0..90d12d42ab4f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value_-_non-nullable b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value_-_non-nullable index fcdf4f9884d0..90d12d42ab4f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value_-_non-nullable +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value_-_non-nullable @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value_-_with_coercion b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value_-_with_coercion index fcdf4f9884d0..90d12d42ab4f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value_-_with_coercion +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_array_-_value_-_with_coercion @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value index 1b1902246fb1..1f3d8054890a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value_-_non-nullable b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value_-_non-nullable index 
1b1902246fb1..1f3d8054890a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value_-_non-nullable +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value_-_non-nullable @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value_-_with_coercion b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value_-_with_coercion index 4c1e388af7ec..3a2f74d17017 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value_-_with_coercion +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_map_-_value_-_with_coercion @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_-_value index 52b1a08b3ad1..972e0e250d6d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_-_value_-_with_coercion b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_-_value_-_with_coercion index ca62b24b0be7..1abaeebb2124 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_-_value_-_with_coercion +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_-_value_-_with_coercion @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", 
"ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_by_default_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_by_default_-_value index a15772b61753..48d0311b3d08 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_by_default_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_anonymous_primitive_by_default_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_array_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_array_-_value index 9d96c276536d..948bd9259b29 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_array_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_array_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_map_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_map_-_value index 5c676be2d57f..cd3723fa279a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_map_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_map_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_primitive_-_value 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_primitive_-_value index a15772b61753..48d0311b3d08 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_primitive_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_deserialize_nested_primitive_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialization_should_default_to_wrapped_values b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialization_should_default_to_wrapped_values index 7f179e65264d..0b1e5de8ee19 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialization_should_default_to_wrapped_values +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialization_should_default_to_wrapped_values @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialization_should_pick_up_value_wrapping_from_config b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialization_should_pick_up_value_wrapping_from_config index 33a44e468809..83ca600a3d6f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialization_should_pick_up_value_wrapping_from_config +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialization_should_pick_up_value_wrapping_from_config @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_array_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_array_-_value index 8c4bbb7f9f85..b81d389bd9f8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_array_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_array_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", 
"ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_map_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_map_-_value index fbe31b6213e6..793df80d0f10 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_map_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_map_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_primitive_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_primitive_-_value index 9abb7a1a7aa7..bd5838f2311b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_primitive_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_primitive_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_struct_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_struct_-_value index 645aaf527d6d..cd3c9527cead 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_struct_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_anonymous_struct_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_array_-_value 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_array_-_value index 393ba8871b56..f2c0bcd383e5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_array_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_array_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_map_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_map_-_value index f661c53a02c8..22bf2277c90f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_map_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_map_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_primitive_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_primitive_-_value index f2179f712e4e..9218af8fdd62 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_primitive_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_primitive_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_struct_-_value b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_struct_-_value index 6806740426f9..aaebb2ef1cc5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_struct_-_value +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/serdes_-_serialize_nested_struct_-_value @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", 
"ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_import_session_stream b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_import_session_stream index cbefd240ded2..d8a9f02aa3f5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_import_session_stream +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_import_session_stream @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_import_session_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_import_session_table index 79afb0999c39..b1f90fa87f69 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_import_session_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_import_session_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_inherit_windowed_keys b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_inherit_windowed_keys index 8d6eeaf3f525..d462b5be6a6a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_inherit_windowed_keys +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_inherit_windowed_keys @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_max_session b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_max_session index f5b0aa0adf1c..8230782d3ef5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_max_session +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/session-windows_-_max_session @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_read_struct_as_json_string b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_read_struct_as_json_string index fef58f06e1f5..a59c2b7a55f6 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_read_struct_as_json_string +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_read_struct_as_json_string @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter index fe3555fe412e..eb03239c9c18 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter_2 b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter_2 index 9d1d5774ddce..a80621051b62 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter_2 +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter_2 @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", 
"ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter_4 b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter_4 index 7b65f196f38c..6d8ac1a16edf 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter_4 +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_filter_4 @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_for_ambiguity b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_for_ambiguity index a0689c82bea7..b7fbfd12578f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_for_ambiguity +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_for_ambiguity @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_star b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_star index c7b63a8d8cf8..7a3d75d1204d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_star +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_star @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_with_nulls b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_with_nulls index 081e352ab05b..03d9a3086303 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_with_nulls +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simple_struct_select_with_nulls @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simples_struct_select_filter_3 b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simples_struct_select_filter_3 index f5b1c030d011..4bd72fad19ce 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simples_struct_select_filter_3 +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/simple-struct_-_simples_struct_select_filter_3 @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set. b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set. index a65d25af2a63..3634c4b229a8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set. +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set. 
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_should_copy_partition_and_replica_count_from_source_topic b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_should_copy_partition_and_replica_count_from_source_topic
index a65d25af2a63..3634c4b229a8 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_should_copy_partition_and_replica_count_from_source_topic
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_should_copy_partition_and_replica_count_from_source_topic
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__default_topic_name_is_stream_name,_in_upper-case b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__default_topic_name_is_stream_name,_in_upper-case
index 2f20af6b763f..441710842171 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__default_topic_name_is_stream_name,_in_upper-case
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__default_topic_name_is_stream_name,_in_upper-case
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
   "ksql.internal.topic.replicas" : "1",
   "ksql.insert.into.values.enabled" : "true",
-  "ksql.query.pull.enable" : "true",
+  "ksql.pull.queries.enable" : "true",
   "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler",
   "ksql.access.validator.enable" : "auto",
   "ksql.streams.bootstrap.servers" : "localhost:0",
diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__use_prefixed_default_topic_name_when_property_set b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__use_prefixed_default_topic_name_when_property_set
index f514a836e47a..e3455584fb95 100644
--- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__use_prefixed_default_topic_name_when_property_set
+++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__use_prefixed_default_topic_name_when_property_set
@@ -13,7 +13,7 @@
   "ksql.internal.topic.min.insync.replicas" : "1",
"ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__use_supplied_topic_name,_when_supplied b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__use_supplied_topic_name,_when_supplied index cbfc3b1e841e..a7d9cadeb70a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__use_supplied_topic_name,_when_supplied +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-topic-naming_-_sink-topic-naming__use_supplied_topic_name,_when_supplied @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_list_of_lists b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_list_of_lists index 0752568e6a64..18423783ad1f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_list_of_lists +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_list_of_lists @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_list_of_maps b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_list_of_maps index 267d130ad93c..347531c7028e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_list_of_maps +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_list_of_maps @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_string_list 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_string_list index e433c6bc7c7a..ca36984b446c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_string_list +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/slice_-_sublist_for_string_list @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_commas_and_display_pos_0_and_2_of_the_returned_array b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_commas_and_display_pos_0_and_2_of_the_returned_array index 8fdc7593fbe8..69b4e4c30659 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_commas_and_display_pos_0_and_2_of_the_returned_array +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_commas_and_display_pos_0_and_2_of_the_returned_array @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_using_the_'$$'_delimiter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_using_the_'$$'_delimiter index af25e82c8509..e56cea441def 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_using_the_'$$'_delimiter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_using_the_'$$'_delimiter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_using_the_'.'_delimiter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_using_the_'.'_delimiter index af25e82c8509..e56cea441def 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_using_the_'.'_delimiter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_a_message_by_using_the_'.'_delimiter @@ -13,7 +13,7 @@ 
"ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_all_characters_by_using_the_''_delimiter b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_all_characters_by_using_the_''_delimiter index af25e82c8509..e56cea441def 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_all_characters_by_using_the_''_delimiter +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/split_-_split_all_characters_by_using_the_''_delimiter @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/stringdate_-_string_to_date b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/stringdate_-_string_to_date index 362d6ab375e3..d935ad9d9233 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/stringdate_-_string_to_date +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/stringdate_-_string_to_date @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/stringtimestamp_-_string_to_timestamp b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/stringtimestamp_-_string_to_timestamp index d1fefc485ee7..682f96678b8c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/stringtimestamp_-_string_to_timestamp +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/stringtimestamp_-_string_to_timestamp @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/struct-udfs_-_Create_a_struct_from_a_string 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/struct-udfs_-_Create_a_struct_from_a_string index 26c1889d5cf5..0bc5ccf6e01d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/struct-udfs_-_Create_a_struct_from_a_string +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/struct-udfs_-_Create_a_struct_from_a_string @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/struct-udfs_-_Extract_value_from_struct b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/struct-udfs_-_Extract_value_from_struct index c8039f005766..279bcf52a92e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/struct-udfs_-_Extract_value_from_struct +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/struct-udfs_-_Extract_value_from_struct @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_do_substring_with_just_pos b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_do_substring_with_just_pos index f4f72930723e..d9ff5a09e288 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_do_substring_with_just_pos +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_do_substring_with_just_pos @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_do_substring_with_pos_and_length b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_do_substring_with_pos_and_length index 83ade2024442..4ba442aea9d1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_do_substring_with_pos_and_length +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_do_substring_with_pos_and_length @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", 
"ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_in_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_in_group_by index f159343b4155..f33092b26654 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_in_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_in_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_should_default_to_current_mode_for_new_queries b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_should_default_to_current_mode_for_new_queries index c5d4e467ec5d..24839ccc03a0 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_should_default_to_current_mode_for_new_queries +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/substring_-_should_default_to_current_mode_for_new_queries @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_decimal index 460ed6e89663..40b73d37b44e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_double b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_double index 2b8e1716b5d2..725d94981385 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_double +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_double @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", 
"ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_double_map b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_double_map index 29d6a74a4776..f8e05123bfb6 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_double_map +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_double_map @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_int b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_int index 38a4e9ddbb3e..de611fdefcaa 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_int +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_int @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_int_left_join_of_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_int_left_join_of_table index 6829a048ad8b..e2e2d3e6423b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_int_left_join_of_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_int_left_join_of_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_doubles_into_a_single_double b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_doubles_into_a_single_double index 045aa2de47c2..812c06f267d2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_doubles_into_a_single_double +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_doubles_into_a_single_double @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_ints_into_a_single_int b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_ints_into_a_single_int index 9438e281b437..35dede299f0d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_ints_into_a_single_int +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_ints_into_a_single_int @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_longs_into_a_single_long b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_longs_into_a_single_long index 3c67eab5e9fd..5441948c07c3 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_longs_into_a_single_long +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_list_of_longs_into_a_single_long @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_long b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_long index 8e84fc6eea0d..53e741e2159a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_long +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_long @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_double_arg 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_double_arg index a962e854d979..770b9437a7d1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_double_arg +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_double_arg @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_int_arg b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_int_arg index d0a6965edc3d..2b443e04d0bc 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_int_arg +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_int_arg @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_long_arg b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_long_arg index 25d5863c433e..f2e6f3ee0ff9 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_long_arg +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sum_-_sum_with_constant_long_arg @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_multiple_table_functions b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_multiple_table_functions index 272732007b35..1d8241971e5a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_multiple_table_functions +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_multiple_table_functions @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_as_first_select b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_as_first_select index 5ed5022049a5..80f1f0fbdac1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_as_first_select +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_as_first_select @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_as_last_select b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_as_last_select index 003a4fe89360..c3295fbdb28f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_as_last_select +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_as_last_select @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_no_alias b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_no_alias index 02ad472cde80..9e3d1f912a6e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_no_alias +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_no_alias @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_no_other_selected_columns b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_no_other_selected_columns index 390866c8dec3..81b6cc3a2e9a 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_no_other_selected_columns +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_no_other_selected_columns @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_non_selected_columns b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_non_selected_columns index f6b94493626f..1f9e5d42ce54 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_non_selected_columns +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_non_selected_columns @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_where_clause b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_where_clause index 782939a35d87..c32fc25798e9 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_where_clause +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_function_with_where_clause @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_functions_with_complex_expressions b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_functions_with_complex_expressions index 67c7d56d9157..d1882cb9d3d6 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_functions_with_complex_expressions +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_table_functions_with_complex_expressions @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : 
"true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_array_params b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_array_params index cbff471691f6..0529204ed7cd 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_array_params +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_array_params @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_map_params b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_map_params index 68b3358aaab4..e36e8dc0f257 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_map_params +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_map_params @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_return_vals b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_return_vals index 152a6268069c..92f12ed1015a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_return_vals +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_return_vals @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_simple_params b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_simple_params index b66b70c42ab2..a43f779a9dab 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_simple_params +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/table-functions_-_test_udtf_-_simple_params @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_group_by b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_group_by index 69ed61fc59b3..61b72907397e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_group_by +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_group_by @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_on_a_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_on_a_table index 656dc00c22a3..6ce29c2e63e9 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_on_a_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_on_a_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_with_struct b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_with_struct index 1e5f028f9eb5..f0b1891f7bc1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_with_struct +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/test-custom-udaf_-_test_udaf_with_struct @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", 
"ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-extractor_-_KSQL_default_timestamp_extractor b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-extractor_-_KSQL_default_timestamp_extractor index 4cfeb1a62991..0d497dbfada2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-extractor_-_KSQL_default_timestamp_extractor +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-extractor_-_KSQL_default_timestamp_extractor @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-extractor_-_KSQL_override_timestamp_extractor b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-extractor_-_KSQL_override_timestamp_extractor index 799e742a4a54..c583c9f7bf0d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-extractor_-_KSQL_override_timestamp_extractor +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-extractor_-_KSQL_override_timestamp_extractor @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-to-string_-_with_valid_zone b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-to-string_-_with_valid_zone index 8231f12a9231..4705d62da3ef 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-to-string_-_with_valid_zone +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestamp-to-string_-_with_valid_zone @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestampformat_-_timestamp_format b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestampformat_-_timestamp_format index b7fccd7bd519..023c6d9d864a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestampformat_-_timestamp_format +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestampformat_-_timestamp_format @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestampformat_-_with_single_digit_ms_and_numeric_tz b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestampformat_-_with_single_digit_ms_and_numeric_tz index 72ec03ff4093..144bcc6592ed 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestampformat_-_with_single_digit_ms_and_numeric_tz +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/timestampformat_-_with_single_digit_ms_and_numeric_tz @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_decimal b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_decimal index 637ee528ba89..ecafb2e4f61b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_decimal +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_decimal @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_integer b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_integer index 20ec523c0d91..8c8051b3616f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_integer +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_integer @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_long b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_long index e2a71b1909ca..011d3d6ba992 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_long +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_long @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_string b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_string index b2e11975bffd..abbf58c22678 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_string +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-distinct_-_topk_distinct_string @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_double b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_double index 3f1cd78c2350..c1ea88269246 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_double +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_double @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_integer b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_integer index 20ec523c0d91..8c8051b3616f 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_integer +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_integer @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : 
"io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_long b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_long index e2a71b1909ca..011d3d6ba992 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_long +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_long @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_string b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_string index b2e11975bffd..abbf58c22678 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_string +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/topk-group-by_-_topk_string @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_import_tumbling_table b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_import_tumbling_table index 79afb0999c39..b1f90fa87f69 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_import_tumbling_table +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_import_tumbling_table @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_inherit_windowed_keys b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_inherit_windowed_keys index 8d6eeaf3f525..d462b5be6a6a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_inherit_windowed_keys +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_inherit_windowed_keys @@ -13,7 +13,7 @@ 
"ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_max_tumbling b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_max_tumbling index f5b0aa0adf1c..8230782d3ef5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_max_tumbling +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_max_tumbling @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_min_tumbling b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_min_tumbling index f5b0aa0adf1c..8230782d3ef5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_min_tumbling +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_min_tumbling @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_topk_tumbling b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_topk_tumbling index 3f1cd78c2350..c1ea88269246 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_topk_tumbling +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_topk_tumbling @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_topkdistinct_tumbling b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_topkdistinct_tumbling index 3f1cd78c2350..c1ea88269246 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_topkdistinct_tumbling +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/tumbling-windows_-_topkdistinct_tumbling @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_chain_a_call_to_URL_EXTRACT_PARAMETER_with_URL_DECODE_PARAM b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_chain_a_call_to_URL_EXTRACT_PARAMETER_with_URL_DECODE_PARAM index cd99e1506f31..a4ecf1a087d2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_chain_a_call_to_URL_EXTRACT_PARAMETER_with_URL_DECODE_PARAM +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_chain_a_call_to_URL_EXTRACT_PARAMETER_with_URL_DECODE_PARAM @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_decode_a_url_parameter_using_DECODE_URL_PARAM b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_decode_a_url_parameter_using_DECODE_URL_PARAM index eb602cf52066..713ac12338ae 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_decode_a_url_parameter_using_DECODE_URL_PARAM +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_decode_a_url_parameter_using_DECODE_URL_PARAM @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_encode_a_url_parameter_using_ENCODE_URL_PARAM b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_encode_a_url_parameter_using_ENCODE_URL_PARAM index b51c267fce69..df5d3dcd2eb6 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_encode_a_url_parameter_using_ENCODE_URL_PARAM +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_encode_a_url_parameter_using_ENCODE_URL_PARAM @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : 
"true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_fragment_from_a_URL_using_URL_EXTRACT_FRAGMENT b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_fragment_from_a_URL_using_URL_EXTRACT_FRAGMENT index aea36319b8e7..4466f74ca026 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_fragment_from_a_URL_using_URL_EXTRACT_FRAGMENT +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_fragment_from_a_URL_using_URL_EXTRACT_FRAGMENT @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_host_from_a_URL_using_URL_EXTRACT_HOST b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_host_from_a_URL_using_URL_EXTRACT_HOST index 18eb20a08ea8..7dcdd3cdfbb5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_host_from_a_URL_using_URL_EXTRACT_HOST +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_host_from_a_URL_using_URL_EXTRACT_HOST @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_parameter_from_a_URL_using_URL_EXTRACT_PARAMETER b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_parameter_from_a_URL_using_URL_EXTRACT_PARAMETER index 232a382fd358..43552ee55d15 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_parameter_from_a_URL_using_URL_EXTRACT_PARAMETER +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_parameter_from_a_URL_using_URL_EXTRACT_PARAMETER @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_path_from_a_URL_using_URL_EXTRACT_PATH 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_path_from_a_URL_using_URL_EXTRACT_PATH index c0ca78ff9bb6..9c15a8b1c4a6 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_path_from_a_URL_using_URL_EXTRACT_PATH +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_path_from_a_URL_using_URL_EXTRACT_PATH @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_port_from_a_URL_using_URL_EXTRACT_PORT b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_port_from_a_URL_using_URL_EXTRACT_PORT index 626fca6a68c9..75abcae5aca7 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_port_from_a_URL_using_URL_EXTRACT_PORT +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_port_from_a_URL_using_URL_EXTRACT_PORT @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_protocol_from_a_URL_using_URL_EXTRACT_PROTOCOL b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_protocol_from_a_URL_using_URL_EXTRACT_PROTOCOL index 2b75e4ab88f7..4500de60c831 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_protocol_from_a_URL_using_URL_EXTRACT_PROTOCOL +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_protocol_from_a_URL_using_URL_EXTRACT_PROTOCOL @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_query_from_a_URL_using_URL_EXTRACT_QUERY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_query_from_a_URL_using_URL_EXTRACT_QUERY index 8d5e71d44ec6..72918cbdb180 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_query_from_a_URL_using_URL_EXTRACT_QUERY +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/url_-_extract_a_query_from_a_URL_using_URL_EXTRACT_QUERY 
@@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_in_expressions b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_in_expressions index 10eb0636617f..c0ceac5a31b8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_in_expressions +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_in_expressions @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_none b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_none index 293fd76c5c2f..a047ffda54b5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_none +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_none @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_hopping b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_hopping index 10eb0636617f..c0ceac5a31b8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_hopping +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_hopping @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_session b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_session index 10eb0636617f..c0ceac5a31b8 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_session +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_session @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_tumbling b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_tumbling index 10eb0636617f..c0ceac5a31b8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_tumbling +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/window-bounds_-_table_tumbling @@ -13,7 +13,7 @@ "ksql.internal.topic.min.insync.replicas" : "1", "ksql.internal.topic.replicas" : "1", "ksql.insert.into.values.enabled" : "true", - "ksql.query.pull.enable" : "true", + "ksql.pull.queries.enable" : "true", "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", "ksql.access.validator.enable" : "auto", "ksql.streams.bootstrap.servers" : "localhost:0", diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/PullQueryExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/PullQueryExecutor.java index 7186f467f61d..37c186dc9f59 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/PullQueryExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/PullQueryExecutor.java @@ -140,12 +140,12 @@ public static TableRowsEntity execute( throw new IllegalArgumentException("Executor can only handle pull queries"); } - if (!statement.getConfig().getBoolean(KsqlConfig.KSQL_QUERY_PULL_ENABLE_CONFIG)) { + if (!statement.getConfig().getBoolean(KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG)) { throw new KsqlException( "Pull queries are disabled. 
" + PullQueryValidator.NEW_QUERY_SYNTAX_SHORT_HELP + System.lineSeparator() - + "Please set " + KsqlConfig.KSQL_QUERY_PULL_ENABLE_CONFIG + "=true to enable " + + "Please set " + KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG + "=true to enable " + "this feature."); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/PullQueryExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/PullQueryExecutorTest.java index d1b2f7d595bc..c1356758e561 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/PullQueryExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/PullQueryExecutorTest.java @@ -47,7 +47,7 @@ public class PullQueryExecutorTest { public static class Disabled { @Rule public final TemporaryEngine engine = new TemporaryEngine() - .withConfigs(ImmutableMap.of(KsqlConfig.KSQL_QUERY_PULL_ENABLE_CONFIG, false)); + .withConfigs(ImmutableMap.of(KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG, false)); @Rule public final ExpectedException expectedException = ExpectedException.none(); From 60e20ef0e18ed459d18a53f0549209c8927e503c Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Tue, 10 Dec 2019 09:04:12 +0000 Subject: [PATCH 007/123] fix: Explicitly disallow table functions with table sources, fixes #4033 (#4085) --- .../io/confluent/ksql/analyzer/QueryAnalyzer.java | 9 +++++++++ .../query-validation-tests/table-functions.json | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/QueryAnalyzer.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/QueryAnalyzer.java index f1f21b69aa2c..1b1e87c5fea1 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/QueryAnalyzer.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/QueryAnalyzer.java @@ -21,12 +21,14 @@ import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; import com.google.common.collect.Sets.SetView; +import io.confluent.ksql.analyzer.Analysis.AliasedDataSource; import io.confluent.ksql.engine.rewrite.ExpressionTreeRewriter; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.Expression; import io.confluent.ksql.execution.expression.tree.FunctionCall; import io.confluent.ksql.execution.plan.SelectExpression; import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.metastore.model.DataSource.DataSourceType; import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.Sink; @@ -83,6 +85,13 @@ public Analysis analyze( pullQueryValidator.validate(analysis); } + if (!analysis.getTableFunctions().isEmpty()) { + AliasedDataSource ds = analysis.getFromDataSources().get(0); + if (ds.getDataSource().getDataSourceType() == DataSourceType.KTABLE) { + throw new KsqlException("Table source is not supported with table functions"); + } + } + return analysis; } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json b/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json index d602f14ccae6..495cbe9cef6a 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json @@ -237,6 +237,17 @@ {"topic": "OUTPUT", "key": "0", "value": {"F0": 2, "VAL": 8}}, {"topic": "OUTPUT", "key": "0", 
"value": {"F0": 2, "VAL": 9}} ] + }, + { + "name": "table functions don't support table sources", + "statements": [ + "CREATE TABLE TEST (ID BIGINT, MY_ARR ARRAY) WITH (kafka_topic='test_topic', KEY='ID', value_format='JSON');", + "CREATE TABLE OUTPUT AS SELECT ID, EXPLODE(MY_ARR) VAL FROM TEST;" + ], + "expectedException": { + "type": "io.confluent.ksql.util.KsqlException", + "message": "Table source is not supported with table functions" + } } ] } \ No newline at end of file From 494b638b13fda4f2a91b57699fe96dffb370e31f Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Tue, 10 Dec 2019 10:49:26 +0000 Subject: [PATCH 008/123] test: have QTT support checking full schema of sources (#4083) With primitive keys coming the QTT test needs to be able to test the full schema is correct, not just the value schema. --- .../confluent/ksql/test/model/SourceNode.java | 47 ++++++------------- .../model/matchers/MetaStoreMatchers.java | 16 +++---- .../query-validation-tests/key-field.json | 2 +- .../query-validation-tests/math.json | 2 +- .../query-validation-tests/serdes.json | 8 ++-- .../table-functions.json | 2 +- 6 files changed, 30 insertions(+), 47 deletions(-) diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/SourceNode.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/SourceNode.java index edb480a4c6b4..b8c058b77024 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/SourceNode.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/SourceNode.java @@ -22,13 +22,12 @@ import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; -import io.confluent.ksql.execution.expression.tree.Type; import io.confluent.ksql.metastore.TypeRegistry; import io.confluent.ksql.metastore.model.DataSource; import io.confluent.ksql.metastore.model.KsqlStream; import io.confluent.ksql.metastore.model.KsqlTable; -import io.confluent.ksql.schema.ksql.SchemaConverters; -import io.confluent.ksql.schema.ksql.SqlTypeParser; +import io.confluent.ksql.parser.SchemaParser; +import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.test.model.matchers.MetaStoreMatchers; import io.confluent.ksql.test.tools.exceptions.InvalidFieldException; import io.confluent.ksql.test.utils.JsonParsingUtil; @@ -36,9 +35,6 @@ import java.util.Objects; import java.util.Optional; import java.util.stream.Stream; -import org.apache.kafka.connect.data.ConnectSchema; -import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.data.SchemaBuilder; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.hamcrest.core.IsInstanceOf; @@ -50,20 +46,20 @@ final class SourceNode { private final String name; private final Class type; private final Optional keyField; - private final Optional valueSchema; + private final Optional schema; private final Optional keyFormat; private SourceNode( final String name, final Class type, final Optional keyField, - final Optional valueSchema, + final Optional schema, final Optional keyFormat ) { this.name = Objects.requireNonNull(name, "name"); this.type = Objects.requireNonNull(type, "type"); this.keyField = Objects.requireNonNull(keyField, "keyField"); - this.valueSchema = Objects.requireNonNull(valueSchema, "valueSchema"); + this.schema = Objects.requireNonNull(schema, "schema"); this.keyFormat = 
--- .../confluent/ksql/test/model/SourceNode.java | 47 ++++++------------- .../model/matchers/MetaStoreMatchers.java | 16 +++---- .../query-validation-tests/key-field.json | 2 +- .../query-validation-tests/math.json | 2 +- .../query-validation-tests/serdes.json | 8 ++-- .../table-functions.json | 2 +- 6 files changed, 30 insertions(+), 47 deletions(-) diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/SourceNode.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/SourceNode.java index edb480a4c6b4..b8c058b77024 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/SourceNode.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/SourceNode.java @@ -22,13 +22,12 @@ import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; -import io.confluent.ksql.execution.expression.tree.Type; import io.confluent.ksql.metastore.TypeRegistry; import io.confluent.ksql.metastore.model.DataSource; import io.confluent.ksql.metastore.model.KsqlStream; import io.confluent.ksql.metastore.model.KsqlTable; -import io.confluent.ksql.schema.ksql.SchemaConverters; -import io.confluent.ksql.schema.ksql.SqlTypeParser; +import io.confluent.ksql.parser.SchemaParser; +import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.test.model.matchers.MetaStoreMatchers; import io.confluent.ksql.test.tools.exceptions.InvalidFieldException; import io.confluent.ksql.test.utils.JsonParsingUtil; @@ -36,9 +35,6 @@ import java.util.Objects; import java.util.Optional; import java.util.stream.Stream; -import org.apache.kafka.connect.data.ConnectSchema; -import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.data.SchemaBuilder; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.hamcrest.core.IsInstanceOf; @@ -50,20 +46,20 @@ final class SourceNode { private final String name; private final Class type; private final Optional<KeyFieldNode> keyField; - private final Optional<Schema> valueSchema; + private final Optional<LogicalSchema> schema; private final Optional<KeyFormatNode> keyFormat; private SourceNode( final String name, final Class type, final Optional<KeyFieldNode> keyField, - final Optional<Schema> valueSchema, + final Optional<LogicalSchema> schema, final Optional<KeyFormatNode> keyFormat ) { this.name = Objects.requireNonNull(name, "name"); this.type = Objects.requireNonNull(type, "type"); this.keyField = Objects.requireNonNull(keyField, "keyField"); - this.valueSchema = Objects.requireNonNull(valueSchema, "valueSchema"); + this.schema = Objects.requireNonNull(schema, "schema"); this.keyFormat = Objects.requireNonNull(keyFormat, "keyFormat");  if (this.name.isEmpty()) { @@ -88,9 +84,9 @@ Matcher<DataSource<?>> build() { .map(MetaStoreMatchers::hasKeyField) .orElse(null); - final Matcher<DataSource<?>> valueSchemaMatcher = valueSchema + final Matcher<DataSource<?>> schemaMatcher = schema .map(Matchers::is) - .map(MetaStoreMatchers::hasValueSchema) + .map(MetaStoreMatchers::hasSchema) .orElse(null); final Matcher<DataSource<?>> keyFormatMatcher = keyFormat @@ -99,7 +95,7 @@ Matcher<DataSource<?>> build() { final Matcher<DataSource<?>>[] matchers = Stream - .of(nameMatcher, typeMatcher, keyFieldMatcher, valueSchemaMatcher, keyFormatMatcher) + .of(nameMatcher, typeMatcher, keyFieldMatcher, schemaMatcher, keyFormatMatcher) .filter(Objects::nonNull) .toArray(Matcher[]::new); @@ -119,22 +115,9 @@ private static Class toType(final String type) { } } - private static Optional<Schema> parseSchema(final String schema) { - return Optional.ofNullable(schema) - .map(schemaString -> SqlTypeParser.create(TypeRegistry.EMPTY).parse(schemaString)) - .map(Type::getSqlType) - .map(SchemaConverters.sqlToConnectConverter()::toConnectSchema) - .map(SourceNode::makeTopLevelStructNoneOptional); - } - - private static ConnectSchema makeTopLevelStructNoneOptional(final Schema schema) { - if (schema.type() != Schema.Type.STRUCT) { - return (ConnectSchema) schema.schema(); - } - - final SchemaBuilder builder = SchemaBuilder.struct(); - schema.fields().forEach(field -> builder.field(field.name(), field.schema())); - return (ConnectSchema) builder.build(); + private static LogicalSchema parseSchema(final String text) { + return SchemaParser.parse(text, TypeRegistry.EMPTY) + .toLogicalSchema(true); } public static class Deserializer extends JsonDeserializer<SourceNode> { @@ -154,14 +137,14 @@ public SourceNode deserialize( final Optional<KeyFieldNode> keyField = JsonParsingUtil .getOptionalOrElse("keyField", node, jp, KeyFieldNode.class, KeyFieldNode.none()); - final Optional<Schema> valueSchema = JsonParsingUtil - .getOptional("valueSchema", node, jp, String.class) - .flatMap(SourceNode::parseSchema); + final Optional<LogicalSchema> schema = JsonParsingUtil + .getOptional("schema", node, jp, String.class) + .map(SourceNode::parseSchema); final Optional<KeyFormatNode> keyFormat = JsonParsingUtil .getOptional("keyFormat", node, jp, KeyFormatNode.class); - return new SourceNode(name, type, keyField, valueSchema, keyFormat); + return new SourceNode(name, type, keyField, schema, keyFormat); } } } \ No newline at end of file diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/matchers/MetaStoreMatchers.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/matchers/MetaStoreMatchers.java index 89c4a80ee952..6bca8b5450f4 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/matchers/MetaStoreMatchers.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/matchers/MetaStoreMatchers.java @@ -23,9 +23,9 @@ import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.ColumnReferenceParser; import io.confluent.ksql.schema.ksql.ColumnRef; +import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.serde.KeyFormat; import java.util.Optional; -import org.apache.kafka.connect.data.Schema; import org.hamcrest.FeatureMatcher; import org.hamcrest.Matcher; @@ -62,17 +62,17 @@ protected KeyField featureValueOf(final DataSource<?> actual) { }; } - public static Matcher<DataSource<?>> hasValueSchema( - final Matcher<Schema> schemaMatcher + public static Matcher<DataSource<?>> hasSchema( + final Matcher<LogicalSchema> schemaMatcher ) { - return new FeatureMatcher<DataSource<?>, Schema>( + return new FeatureMatcher<DataSource<?>, 
LogicalSchema>( schemaMatcher, - "source with value schema", - "value schema" + "source with schema", + "schema" ) { @Override - protected Schema featureValueOf(final DataSource<?> actual) { - return actual.getSchema().valueConnectSchema(); + protected LogicalSchema featureValueOf(final DataSource<?> actual) { + return actual.getSchema(); } }; } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json b/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json index a28442df5e57..81649da87bc8 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json @@ -133,7 +133,7 @@ "name": "OUTPUT", "type": "table", "keyField": "FOO", - "valueSchema": "STRUCT<FOO INT, KSQL_COL_1 BIGINT>" + "schema": "ROWKEY STRING KEY, FOO INT, KSQL_COL_1 BIGINT" } ] } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/math.json b/ksql-functional-tests/src/test/resources/query-validation-tests/math.json index 0c148d4bc697..2418ad8aad77 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/math.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/math.json @@ -229,7 +229,7 @@ { "name": "OUTPUT", "type": "stream", - "valueSchema": "STRUCT<R0 DECIMAL(17,0), R00 DECIMAL(33,16), R1 DECIMAL(33,16), R2 DECIMAL(33,16), R10 DECIMAL(33,16), 1R DECIMAL(33,16), 2R DECIMAL(33,16)>" + "schema": "ROWKEY STRING KEY, R0 DECIMAL(17,0), R00 DECIMAL(33,16), R1 DECIMAL(33,16), R2 DECIMAL(33,16), R10 DECIMAL(33,16), 1R DECIMAL(33,16), 2R DECIMAL(33,16)" } ] } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json b/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json index 7de6572d718f..b78aa46b724c 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json @@ -226,7 +226,7 @@ { "name": "INPUT", "type": "stream", - "valueSchema": "STRUCT>" + "schema": "ROWKEY STRING KEY, FOO MAP" } ] } @@ -260,7 +260,7 @@ { "name": "INPUT", "type": "stream", - "valueSchema": "STRUCT>" + "schema": "ROWKEY STRING KEY, FOO MAP" } ] } @@ -301,7 +301,7 @@ { "name": "INPUT", "type": "stream", - "valueSchema": "STRUCT>" + "schema": "ROWKEY STRING KEY, FOO MAP" } ] } @@ -330,7 +330,7 @@ { "name": "INPUT", "type": "stream", - "valueSchema": "STRUCT>" + "schema": "ROWKEY STRING KEY, FOO MAP" } ] } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json b/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json index 495cbe9cef6a..7672e5b23905 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json @@ -213,7 +213,7 @@ { "name": "OUTPUT", "type": "stream", - "valueSchema": "STRUCT>" + "schema": "ROWKEY STRING KEY, KSQL_COL_0 INT, KSQL_COL_1 BIGINT, KSQL_COL_2 DOUBLE, KSQL_COL_3 BOOLEAN, KSQL_COL_4 STRING, KSQL_COL_5 DECIMAL(30, 10), KSQL_COL_6 STRUCT" } ] } From dfd44cd11f2dd2b3a053ff1e1687210c6dae30ae Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Tue, 10 Dec 2019 09:25:13 -0800 Subject: [PATCH 009/123] docs: fix munged code blocks in Clickstream tutorial (DOCS-3084) (#4102) --- docs-md/tutorials/clickstream-docker.md | 481 +++++++++++++----------- 1 file changed, 252 insertions(+), 229 deletions(-) diff --git a/docs-md/tutorials/clickstream-docker.md b/docs-md/tutorials/clickstream-docker.md index 2d998863e071..8afa3e4e8fad 
100644 --- a/docs-md/tutorials/clickstream-docker.md +++ b/docs-md/tutorials/clickstream-docker.md @@ -42,50 +42,58 @@ Docker images with the required networking and dependencies. The images are quite large and depending on your network connection may take 10-15 minutes to download. -1. Clone the Confluent examples repository. +### 1. Clone the repo - ```bash - git clone https://github.com/confluentinc/examples.git - cd examples - ``` +Clone the Confluent examples repository. -2. Switch to the correct {{ site.cp }} release branch: +```bash +git clone https://github.com/confluentinc/examples.git +cd examples +``` - ```bash - git checkout {{ site.releasepostbranch }} - ``` +### 2. Check out the release branch -3. Navigate to the correct directory and launch the tutorial in Docker. - Depending on your network speed, this may take up to 5-10 minutes. +Switch to the correct {{ site.cp }} release branch: - ```bash - cd clickstream - docker-compose up -d - ``` +```bash +git checkout {{ site.releasepostbranch }} +``` -4. After a minute or so, run the `docker-compose ps` status command to - ensure that everything has started correctly: +### 3. Launch the tutorial in Docker - ```bash - docker-compose ps - ``` +Navigate to the correct directory and start the tutorial in Docker. +Depending on your network speed, this may take up to 5-10 minutes. - Your output should resemble: +```bash +cd clickstream +docker-compose up -d +``` - ``` - Name Command State Ports - ------------------------------------------------------------------------------------------- - datagen bash -c echo Waiting for K ... Up - elasticsearch /usr/local/bin/docker-entr ... Up 0.0.0.0:9200->9200/tcp, 9300/tcp - grafana /run.sh Up 0.0.0.0:3000->3000/tcp - kafka /etc/confluent/docker/run Up 9092/tcp - kafka-connect /etc/confluent/docker/run Up 0.0.0.0:8083->8083/tcp, 9092/tcp - kafkacat /bin/sh Up - ksql-cli /bin/sh Up - ksql-server /etc/confluent/docker/run Up 0.0.0.0:8088->8088/tcp - schema-registry /etc/confluent/docker/run Up 8081/tcp - zookeeper /etc/confluent/docker/run Up 2181/tcp, 2888/tcp, 3888/tcp - ``` +### 4. Check the status + +After a minute or so, run the `docker-compose ps` status command to ensure +that everything has started correctly: + +```bash +docker-compose ps +``` + +Your output should resemble: + +``` + Name Command State Ports +------------------------------------------------------------------------------------------- +datagen bash -c echo Waiting for K ... Up +elasticsearch /usr/local/bin/docker-entr ... Up 0.0.0.0:9200->9200/tcp, 9300/tcp +grafana /run.sh Up 0.0.0.0:3000->3000/tcp +kafka /etc/confluent/docker/run Up 9092/tcp +kafka-connect /etc/confluent/docker/run Up 0.0.0.0:8083->8083/tcp, 9092/tcp +kafkacat /bin/sh Up +ksql-cli /bin/sh Up +ksql-server /etc/confluent/docker/run Up 0.0.0.0:8088->8088/tcp +schema-registry /etc/confluent/docker/run Up 8081/tcp +zookeeper /etc/confluent/docker/run Up 2181/tcp, 2888/tcp, 3888/tcp +``` Create the Clickstream Data --------------------------- @@ -101,8 +109,9 @@ docker-compose exec kafkacat \ -t clickstream ``` ->If you get the message "Broker: Leader not available", try again after a -moment, as the demo is still starting up.* +!!! note + If you get the message "Broker: Leader not available", try again after a + moment, as the demo is still starting up.* The `kafkacat` command stops after ten messages, and your output should resemble: @@ -124,270 +133,283 @@ There are two other sets of data in Kafka topics that have been automatically populated. 
They hold information about the HTTP status codes, and users. -1. View the status codes data +### 1. View the status codes data - ```bash - docker-compose exec kafkacat \ - kafkacat -b kafka:29092 -C -c 3 -K: \ - -f '\nKey : %k\tValue: %s' \ - -t clickstream_codes - ``` +```bash +docker-compose exec kafkacat \ + kafkacat -b kafka:29092 -C -c 3 -K: \ + -f '\nKey : %k\tValue: %s' \ + -t clickstream_codes +``` - Your output should resemble: +Your output should resemble: - ``` - Key : 405 Value: {"code":405,"definition":"Method not allowed"} - Key : 407 Value: {"code":407,"definition":"Proxy authentication required"} - Key : 302 Value: {"code":302,"definition":"Redirect"} - ... - ``` +``` +Key : 405 Value: {"code":405,"definition":"Method not allowed"} +Key : 407 Value: {"code":407,"definition":"Proxy authentication required"} +Key : 302 Value: {"code":302,"definition":"Redirect"} +... +``` -2. View the user data +### 2. View the user data - ```bash - docker-compose exec kafkacat \ - kafkacat -b kafka:29092 -C -c 3 -K: \ - -f '\nKey : %k\tValue: %s' \ - -t clickstream_users - ``` +```bash +docker-compose exec kafkacat \ + kafkacat -b kafka:29092 -C -c 3 -K: \ + -f '\nKey : %k\tValue: %s' \ + -t clickstream_users +``` - Your output should resemble: +Your output should resemble: - ``` - Key : 1 Value: {"user_id":1,"username":"DimitriSchenz88","registered_at":1432700187062,"first_name":"Arlyne","last_name":"Garrity","city":"Frankfurt","level":"Gold"} - Key : 2 Value: {"user_id":2,"username":"AbdelKable_86","registered_at":1454795231290,"first_name":"Reeva","last_name":"Pask","city":"San Francisco","level":"Silver"} - Key : 3 Value: {"user_id":3,"username":"Antonio_0966","registered_at":1464740725409,"first_name":"Woodrow","last_name":"Vanyard","city":"Frankfurt","level":"Platinum"} - ... - ``` +``` +Key : 1 Value: {"user_id":1,"username":"DimitriSchenz88","registered_at":1432700187062,"first_name":"Arlyne","last_name":"Garrity","city":"Frankfurt","level":"Gold"} +Key : 2 Value: {"user_id":2,"username":"AbdelKable_86","registered_at":1454795231290,"first_name":"Reeva","last_name":"Pask","city":"San Francisco","level":"Silver"} +Key : 3 Value: {"user_id":3,"username":"Antonio_0966","registered_at":1464740725409,"first_name":"Woodrow","last_name":"Vanyard","city":"Frankfurt","level":"Platinum"} +... +``` Load the Streaming Data to KSQL ------------------------------- -1. Launch the KSQL CLI: +### 1. Launch the KSQL CLI: - ```bash - docker-compose exec ksql-cli ksql http://ksql-server:8088 - ``` +```bash +docker-compose exec ksql-cli ksql http://ksql-server:8088 +``` - You should now be in the KSQL CLI. +You should now be in the KSQL CLI. - ``` - =========================================== - = _ _ ____ ____ = - = | | _____ __ _| | _ \| __ ) = - = | |/ / __|/ _` | | | | | _ \ = - = | <\__ \ (_| | | |_| | |_) | = - = |_|\_\___/\__, |_|____/|____/ = - = |_| = - = Event Streaming Database purpose-built = - = for stream processing apps = - =========================================== +``` + =========================================== + = _ _ ____ ____ = + = | | _____ __ _| | _ \| __ ) = + = | |/ / __|/ _` | | | | | _ \ = + = | <\__ \ (_| | | |_| | |_) | = + = |_|\_\___/\__, |_|____/|____/ = + = |_| = + = Event Streaming Database purpose-built = + = for stream processing apps = + =========================================== - Copyright 2017-2019 Confluent Inc. +Copyright 2017-2019 Confluent Inc. 
- CLI v{{ site.release }}, Server v{{ site.release }} located at http://ksql-server:8088 +CLI v{{ site.release }}, Server v{{ site.release }} located at http://ksql-server:8088 - Having trouble? Type 'help' (case-insensitive) for a rundown of how things work! +Having trouble? Type 'help' (case-insensitive) for a rundown of how things work! - ksql> - ``` +ksql> +``` -2. Load the - [clickstream-schema.sql](https://github.com/confluentinc/examples/blob/master/clickstream/ksql/ksql-clickstream-demo/demo/clickstream-schema.sql) - file that runs the tutorial app. +### 2. Load the SQL file that defines the app - !!! important - Before running this step, you must have already run - ksql-datagen utility to create the clickstream data, status codes, - and set of users. +Load the +[clickstream-schema.sql](https://github.com/confluentinc/examples/blob/master/clickstream/ksql/ksql-clickstream-demo/demo/clickstream-schema.sql) +file that runs the tutorial app. - ```sql - RUN SCRIPT '/usr/share/doc/clickstream/clickstream-schema.sql'; - ``` +!!! important + Before running this step, you must have already run `ksql-datagen` + utility to create the clickstream data, status codes, and set of users. - The output will show either a blank message, or - `Executing statement`, similar to this: +```sql +RUN SCRIPT '/usr/share/doc/clickstream/clickstream-schema.sql'; +``` - ``` - Message - --------- - Executing statement - --------- - ``` +The output will show either a blank message, or +`Executing statement`, similar to this: + +``` + Message +--------- + Executing statement +--------- +``` + +Verify the data +--------------- -### Verify the data +Verify that the data was loaded properly. !!! note The following steps are optional and can be used to verify that the data was loaded properly. Otherwise, you can skip to [Load and View the Clickstream Data in Grafana](#load-and-view-the-clickstream-data-in-grafana). -1. Verify that the tables are created. +### 1. Verify that the tables are created. - ```sql - LIST TABLES; - ``` +```sql +LIST TABLES; +``` - Your output should resemble: +Your output should resemble: - ``` - Table Name | Kafka Topic | Format | Windowed - ----------------------------------------------------------------------------- - WEB_USERS | clickstream_users | JSON | false - ERRORS_PER_MIN_ALERT | ERRORS_PER_MIN_ALERT | JSON | true - USER_IP_ACTIVITY | USER_IP_ACTIVITY | JSON | true - CLICKSTREAM_CODES | clickstream_codes | JSON | false - PAGES_PER_MIN | PAGES_PER_MIN | JSON | true - CLICK_USER_SESSIONS | CLICK_USER_SESSIONS | JSON | true - ENRICHED_ERROR_CODES_COUNT | ENRICHED_ERROR_CODES_COUNT | JSON | true - ERRORS_PER_MIN | ERRORS_PER_MIN | JSON | true - EVENTS_PER_MIN | EVENTS_PER_MIN | JSON | true - ``` +``` + Table Name | Kafka Topic | Format | Windowed +----------------------------------------------------------------------------- + WEB_USERS | clickstream_users | JSON | false + ERRORS_PER_MIN_ALERT | ERRORS_PER_MIN_ALERT | JSON | true + USER_IP_ACTIVITY | USER_IP_ACTIVITY | JSON | true + CLICKSTREAM_CODES | clickstream_codes | JSON | false + PAGES_PER_MIN | PAGES_PER_MIN | JSON | true + CLICK_USER_SESSIONS | CLICK_USER_SESSIONS | JSON | true + ENRICHED_ERROR_CODES_COUNT | ENRICHED_ERROR_CODES_COUNT | JSON | true + ERRORS_PER_MIN | ERRORS_PER_MIN | JSON | true + EVENTS_PER_MIN | EVENTS_PER_MIN | JSON | true +``` -2. Verify that the streams are created. +### 2. Verify that the streams are created. 
- ```sql - LIST STREAMS; - ``` +```sql +LIST STREAMS; +``` - Your output should resemble: +Your output should resemble: - ``` - Stream Name | Kafka Topic | Format - ---------------------------------------------------------------- - USER_CLICKSTREAM | USER_CLICKSTREAM | JSON - ENRICHED_ERROR_CODES | ENRICHED_ERROR_CODES | JSON - CLICKSTREAM | clickstream | JSON - ``` +``` + Stream Name | Kafka Topic | Format +---------------------------------------------------------------- + USER_CLICKSTREAM | USER_CLICKSTREAM | JSON + ENRICHED_ERROR_CODES | ENRICHED_ERROR_CODES | JSON + CLICKSTREAM | clickstream | JSON +``` -3. Verify that data is being streamed through various tables and - streams. +### 3. Verify the event streams - **View clickstream data** +Verify that events are streaming through various tables and streams. - ```sql - SELECT * FROM CLICKSTREAM EMIT CHANGES LIMIT 5; - ``` +#### View clickstream data - Your output should resemble: +```sql +SELECT * FROM CLICKSTREAM EMIT CHANGES LIMIT 5; +``` - ``` - 1536662784214 | 111.168.57.122 | 1536662783614 | 11/Sep/2018:10:46:23 +0000 | 111.168.57.122 | GET /images/logo-small.png HTTP/1.1 | 200 | 35 | 1289 | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 - 1536662784261 | 222.245.174.248 | 1536662784260 | 11/Sep/2018:10:46:24 +0000 | 222.245.174.248 | GET /index.html HTTP/1.1 | 404 | 7 | 14096 | Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html) - 1536662784335 | 111.90.225.227 | 1536662784335 | 11/Sep/2018:10:46:24 +0000 | 111.90.225.227 | GET /site/login.html HTTP/1.1 | 302 | 36 | 4096 | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 - 1536662784351 | 233.245.174.248 | 1536662784351 | 11/Sep/2018:10:46:24 +0000 | 233.245.174.248 | GET /site/user_status.html HTTP/1.1 | 405 | 15 | 2048 | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 - 1536662784421 | 222.168.57.122 | 1536662784421 | 11/Sep/2018:10:46:24 +0000 | 222.168.57.122 | GET /images/logo-small.png HTTP/1.1 | 302 | 28 | 14096 | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 - Limit Reached - Query terminated - ksql> - ``` +Your output should resemble: - **View the events per minute** +``` +1536662784214 | 111.168.57.122 | 1536662783614 | 11/Sep/2018:10:46:23 +0000 | 111.168.57.122 | GET /images/logo-small.png HTTP/1.1 | 200 | 35 | 1289 | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 +1536662784261 | 222.245.174.248 | 1536662784260 | 11/Sep/2018:10:46:24 +0000 | 222.245.174.248 | GET /index.html HTTP/1.1 | 404 | 7 | 14096 | Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html) +1536662784335 | 111.90.225.227 | 1536662784335 | 11/Sep/2018:10:46:24 +0000 | 111.90.225.227 | GET /site/login.html HTTP/1.1 | 302 | 36 | 4096 | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 +1536662784351 | 233.245.174.248 | 1536662784351 | 11/Sep/2018:10:46:24 +0000 | 233.245.174.248 | GET /site/user_status.html HTTP/1.1 | 405 | 15 | 2048 | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 +1536662784421 | 222.168.57.122 | 1536662784421 | 11/Sep/2018:10:46:24 +0000 | 222.168.57.122 | GET 
/images/logo-small.png HTTP/1.1 | 302 | 28 | 14096 | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 +Limit Reached +Query terminated +ksql> +``` - ```sql - SELECT * FROM EVENTS_PER_MIN EMIT CHANGES LIMIT 5; - ``` +#### View the events per minute - Your output should resemble: +```sql +SELECT * FROM EVENTS_PER_MIN EMIT CHANGES LIMIT 5; +``` - ``` - 1536662819576 | 24 : Window{start=1536662760000 end=-} | 24 | 12 - 1536662819685 | 4 : Window{start=1536662760000 end=-} | 4 | 19 - 1536662847582 | 4 : Window{start=1536662820000 end=-} | 4 | 75 - 1536662847586 | 24 : Window{start=1536662820000 end=-} | 24 | 101 - 1536662879959 | 29 : Window{start=1536662820000 end=-} | 29 | 2 - Limit Reached - Query terminated - ``` +Your output should resemble: - **View pages per minute** +``` +1536662819576 | 24 : Window{start=1536662760000 end=-} | 24 | 12 +1536662819685 | 4 : Window{start=1536662760000 end=-} | 4 | 19 +1536662847582 | 4 : Window{start=1536662820000 end=-} | 4 | 75 +1536662847586 | 24 : Window{start=1536662820000 end=-} | 24 | 101 +1536662879959 | 29 : Window{start=1536662820000 end=-} | 29 | 2 +Limit Reached +Query terminated +``` - ```sql - SELECT * FROM PAGES_PER_MIN EMIT CHANGES LIMIT 5; - ``` +#### View pages per minute - Your output should resemble: +```sql +SELECT * FROM PAGES_PER_MIN EMIT CHANGES LIMIT 5; +``` - ``` - 1536662784977 | 21 : Window{start=1536662725000 end=-} | 21 | 2 - 1536662789353 | 21 : Window{start=1536662730000 end=-} | 21 | 7 - 1536662793715 | 21 : Window{start=1536662735000 end=-} | 21 | 20 - 1536662799627 | 21 : Window{start=1536662740000 end=-} | 21 | 35 - 1536662804534 | 21 : Window{start=1536662745000 end=-} | 21 | 40 - Limit Reached - Query terminated - ``` +Your output should resemble: + +``` +1536662784977 | 21 : Window{start=1536662725000 end=-} | 21 | 2 +1536662789353 | 21 : Window{start=1536662730000 end=-} | 21 | 7 +1536662793715 | 21 : Window{start=1536662735000 end=-} | 21 | 20 +1536662799627 | 21 : Window{start=1536662740000 end=-} | 21 | 35 +1536662804534 | 21 : Window{start=1536662745000 end=-} | 21 | 40 +Limit Reached +Query terminated +``` Load and View the Clickstream Data in Grafana --------------------------------------------- Send the KSQL tables to Elasticsearch and Grafana. -1. Exit the KSQL CLI with Ctrl+D. +### 1. Exit the ksqlDB CLI - ``` - ksql> - Exiting ksqlDB. - ``` +Exit the ksqlDB CLI by using Ctrl+D. -2. Set up the required Elasticsearch document mapping template +``` +ksql> +Exiting ksqlDB. +``` - ```bash - docker-compose exec elasticsearch bash -c '/scripts/elastic-dynamic-template.sh' - ``` +### 2. Start Elasticsearch -3. Run this command to send the KSQL tables to Elasticsearch and - Grafana: +Set up the required Elasticsearch document mapping template - ```bash - docker-compose exec kafka-connect bash -c '/scripts/ksql-tables-to-grafana.sh' - ``` +```bash +docker-compose exec elasticsearch bash -c '/scripts/elastic-dynamic-template.sh' +``` - Your output should resemble: +### 3. 
Connect to Elasticsearch and Grafana
 
-   ```
-   Loading Clickstream-Demo TABLES to Confluent-Connect => Elastic => Grafana datasource
+Run the following command to send the ksqlDB tables to Elasticsearch and
+Grafana:
+```bash
+docker-compose exec kafka-connect bash -c '/scripts/ksql-tables-to-grafana.sh'
+```
 
-   ==================================================================
-   Charting CLICK_USER_SESSIONS
-     -> Remove any existing Elastic search config
-     -> Remove any existing Connect config
-     -> Remove any existing Grafana config
-     -> Connecting KSQL->Elastic->Grafana click_user_sessions
-     -> Connecting: click_user_sessions
-     -> Adding Kafka Connect Elastic Source es_sink_CLICK_USER_SESSIONS
-     ->Adding Grafana Source
+Your output should resemble:
 
-   [...]
-   ```
+```
+Loading Clickstream-Demo TABLES to Confluent-Connect => Elastic => Grafana datasource
 
-4. Load the dashboard into Grafana.
-   ```bash
-   docker-compose exec grafana bash -c '/scripts/clickstream-analysis-dashboard.sh'
-   ```
+==================================================================
+Charting CLICK_USER_SESSIONS
+  -> Remove any existing Elastic search config
+  -> Remove any existing Connect config
+  -> Remove any existing Grafana config
+  -> Connecting KSQL->Elastic->Grafana click_user_sessions
+  -> Connecting: click_user_sessions
+  -> Adding Kafka Connect Elastic Source es_sink_CLICK_USER_SESSIONS
+  ->Adding Grafana Source
 
-   Your output should resemble:
+[...]
+```
 
-   ```
-   Loading Grafana ClickStream Dashboard
-   {"id":1,"slug":"click-stream-analysis","status":"success","uid":"lUHTGDTmz","url":"/d/lUHTGDTmz/click-stream-analysis","version":4}
+### 4. Load the dashboard into Grafana
 
-   Navigate to:
-   http://localhost:3000/d/lUHTGDTmz/click-stream-analysis
-   (Default user: admin / password: admin)
-   ```
+```bash
+docker-compose exec grafana bash -c '/scripts/clickstream-analysis-dashboard.sh'
+```
+
+Your output should resemble:
+
+```
+Loading Grafana ClickStream Dashboard
+{"id":1,"slug":"click-stream-analysis","status":"success","uid":"lUHTGDTmz","url":"/d/lUHTGDTmz/click-stream-analysis","version":4}
+
+Navigate to:
+  http://localhost:3000/d/lUHTGDTmz/click-stream-analysis
+(Default user: admin / password: admin)
+```
+
+### 5. Open the Grafana UI
 
-5. Open your your browser using the URL output from the previous
-   step's command. You can login with user ID `admin` and password
-   `admin`.
+Open your browser using the URL output from the previous
+step's command. You can log in with user ID `admin` and password
+`admin`.
 
-   !!! important
-      If you already have Grafana UI open, you may need to
-      enter the specific clickstream URL output by the previous step.
+!!! important
+    If you already have the Grafana UI open, you may need to
+    enter the specific clickstream URL output by the previous step.
 
-   ![Grafana UI success](../img/grafana-success.png)
+![Grafana UI success](../img/grafana-success.png)
 
 This dashboard demonstrates a series of streaming functionality where
 the title of each panel describes the type of stream processing required
@@ -398,7 +420,7 @@ Editing the panel allows you to view the datasource - which is named
 after the streams and tables captured in the
 `clickstream-schema.sql` file.
 
-Things to try
+Things to try:
 
 - Understand how the `clickstream-schema.sql` file is structured.
We use a **DataGen.KafkaTopic.clickstream -> Stream -> Table** @@ -408,7 +430,8 @@ Things to try persisted - Run the KSQL CLI `history` command -### Troubleshooting +Troubleshooting +--------------- - Check the Data Sources page in Grafana. - If your data source is shown, select it and scroll to the bottom From fe2ca2df6d5cf3c3d5444b0dc5774e84b4d4dfc0 Mon Sep 17 00:00:00 2001 From: Derek Nelson Date: Tue, 10 Dec 2019 09:54:18 -0800 Subject: [PATCH 010/123] chore: update the status of approved KLIPs --- design-proposals/README.md | 2 +- design-proposals/klip-4-custom-types.md | 2 +- design-proposals/klip-7-connect-integration.md | 2 +- design-proposals/klip-8-queryable-state-stores.md | 2 +- design-proposals/klip-9-table-functions.md | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/design-proposals/README.md b/design-proposals/README.md index e4c5aac05407..aaaff7eacf8e 100644 --- a/design-proposals/README.md +++ b/design-proposals/README.md @@ -48,7 +48,7 @@ Next KLIP number: **14** | [KLIP-4: Custom Type Registry](klip-4-custom-types.md) | Approved | 5.4 | | [KLIP-6: Execution Plans](klip-6-execution-plans.md) | Approved | N/A | | [KLIP-7: Kafka Connect Integration](klip-7-connect-integration.md) | Approved | 5.4 | -| [KLIP-8: Interactive Queries](klip-8-interactive-queries.md) | Approved | 5.4 | +| [KLIP-8: Queryable State Stores](klip-8-queryable-state-stores.md) | Approved | 5.4 | | [KLIP-9: Table Functions](klip-9-table-functions.md) | Approved | 5.4 | | [KLIP-10: Suppress](klip-10-suppress.md) | Proposal | N/A | | [KLIP-11: Redesign KSQL query language](klip-11-DQL.md) | Proposal | N/A | diff --git a/design-proposals/klip-4-custom-types.md b/design-proposals/klip-4-custom-types.md index 75bda2a64739..f2081656b65e 100644 --- a/design-proposals/klip-4-custom-types.md +++ b/design-proposals/klip-4-custom-types.md @@ -2,7 +2,7 @@ **Author**: agavra | **Release Target**: 5.4 | -**Status**: In Discussion | +**Status**: Approved | **Discussion**: https://github.com/confluentinc/ksql/pull/2894 **tl;dr:** *Introduce a feature that makes custom types easier to work with in KSQL by aliasing diff --git a/design-proposals/klip-7-connect-integration.md b/design-proposals/klip-7-connect-integration.md index 4aafd5fc36c6..137a5847380c 100644 --- a/design-proposals/klip-7-connect-integration.md +++ b/design-proposals/klip-7-connect-integration.md @@ -2,7 +2,7 @@ **Author**: agavra | **Release Target**: 5.4 | -**Status**: _In Discussion_ | +**Status**: Approved | **Discussion**: link **tl;dr:** _provide first-class integration with Kafka connect for KSQL data ingress and egress_ diff --git a/design-proposals/klip-8-queryable-state-stores.md b/design-proposals/klip-8-queryable-state-stores.md index 03967e24d395..f94418c7cd59 100644 --- a/design-proposals/klip-8-queryable-state-stores.md +++ b/design-proposals/klip-8-queryable-state-stores.md @@ -2,7 +2,7 @@ **Author**: derekjn | **Release Target**: 5.4+ | -**Status**: In Discussion | +**Status**: Approved | **Discussion**: [#3117](https://github.com/confluentinc/ksql/pull/3117), [#530](https://github.com/confluentinc/ksql/issues/530) ## Motivation and background diff --git a/design-proposals/klip-9-table-functions.md b/design-proposals/klip-9-table-functions.md index b310bf48443d..356d75a19ff0 100644 --- a/design-proposals/klip-9-table-functions.md +++ b/design-proposals/klip-9-table-functions.md @@ -2,7 +2,7 @@ **Author**: Tim Fox (GitHub: purplefox) **Release Target**: 5.4 -**Status**: In Discussion +**Status**: Approved 
**Discussion**: https://github.com/confluentinc/ksql/issues/527
 
 ## Motivation and background
@@ -383,4 +383,4 @@ Allowing any kind of user defined extensions can introduce possible security iss
 * Malicious code extracting information and sending it to a third party
 * Malicious code creating a denial of service by deliberately consuming memory, stalling execution, or creating large amounts of results.
-* Malicious code damaging the installation, e.g. by deleting files.
\ No newline at end of file
+* Malicious code damaging the installation, e.g. by deleting files.

From 331e3041dcd9872110244fb448b2c0b29459a1b0 Mon Sep 17 00:00:00 2001
From: Jim Galasyn
Date: Tue, 10 Dec 2019 10:11:36 -0800
Subject: [PATCH 011/123] docs: note CREATE CONNECTOR works only in interactive
 mode (DOCS-3036) (#4103)

---
 docs/developer-guide/ksql-connect.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/developer-guide/ksql-connect.rst b/docs/developer-guide/ksql-connect.rst
index f5e02aed5557..c2e6bed54a37 100644
--- a/docs/developer-guide/ksql-connect.rst
+++ b/docs/developer-guide/ksql-connect.rst
@@ -68,6 +68,8 @@ Create a new Connector in the |kconnect-long| cluster with the configuration pas
 clause. Note that some connectors have KSQL templates that simplify the configuration - for more
 information see :ref:`native-connectors`.
 
+.. note:: CREATE CONNECTOR works only in interactive mode.
+
 Example:
 
 .. code:: sql

From d68c63633c7899173c9e4484ab4304a5b536506c Mon Sep 17 00:00:00 2001
From: Jim Galasyn
Date: Tue, 10 Dec 2019 10:24:14 -0800
Subject: [PATCH 012/123] docs: note CREATE CONNECTOR works only in interactive
 mode (md docs) (#4104)

---
 docs-md/developer-guide/ksqldb-reference/create-connector.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs-md/developer-guide/ksqldb-reference/create-connector.md b/docs-md/developer-guide/ksqldb-reference/create-connector.md
index 78ebbcdee7e9..c316e969ae46 100644
--- a/docs-md/developer-guide/ksqldb-reference/create-connector.md
+++ b/docs-md/developer-guide/ksqldb-reference/create-connector.md
@@ -24,6 +24,9 @@ configuration passed in the WITH clause. Some connectors have ksqlDB templates
 that simplify configuring them. For more information, see
 [Natively Supported Connectors](../../concepts/connectors.md#natively-supported-connectors).
 
+!!! note
+    CREATE CONNECTOR works only in interactive mode.
+
 Example
 -------
 

From 8dbfbb71c2eec62e09974ccda2a23e8943ffb2b8 Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Tue, 10 Dec 2019 18:32:10 +0000
Subject: [PATCH 013/123] chore: primitive keys for simple queries (#4096)

First of a few commits to start introducing support for primitive keys in
different query types.

This commit opens the door for CT/CS statements with primitive keys
(`STRING`, `INT`, `BIGINT`, `BOOLEAN` and `DOUBLE`), and for using those
sources in non-join, non-aggregate and non-partition-by queries.
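
As a minimal illustration (mirroring the new `key-schemas.json` qtt cases
added below, which is where these statements come from):

```sql
-- A source can now declare a primitive, non-STRING key:
CREATE STREAM INPUT (ROWKEY INT KEY, ID BIGINT)
  WITH (kafka_topic='input', value_format='JSON');

-- ...and simple (non-join, non-aggregate, non-partition-by) queries
-- over that source now work:
CREATE STREAM OUTPUT AS SELECT ID, ROWKEY AS KEY FROM INPUT;
```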
--- .../ddl/commands/CreateSourceFactory.java | 7 --- .../ksql/planner/plan/AggregateNode.java | 5 ++ .../confluent/ksql/planner/plan/JoinNode.java | 5 ++ .../ksql/planner/plan/RepartitionNode.java | 6 +++ .../ddl/commands/CreateSourceFactoryTest.java | 16 +++--- .../query-validation-tests/key-schemas.json | 54 +++++++++++++++++-- .../ksql/execution/streams/SourceBuilder.java | 17 +++--- 7 files changed, 81 insertions(+), 29 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/ddl/commands/CreateSourceFactory.java b/ksql-engine/src/main/java/io/confluent/ksql/ddl/commands/CreateSourceFactory.java index 6acc1e4ab266..b272b21c2e33 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/ddl/commands/CreateSourceFactory.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/ddl/commands/CreateSourceFactory.java @@ -36,7 +36,6 @@ import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; -import io.confluent.ksql.schema.ksql.SqlBaseType; import io.confluent.ksql.serde.Format; import io.confluent.ksql.serde.GenericRowSerDe; import io.confluent.ksql.serde.SerdeOption; @@ -179,12 +178,6 @@ private static LogicalSchema buildSchema(final TableElements tableElements) { throw new KsqlException("'" + e.getName().name() + "' is an invalid KEY column name. " + "KSQL currently only supports KEY columns named ROWKEY."); } - - if (e.getType().getSqlType().baseType() != SqlBaseType.STRING) { - throw new KsqlException("'" + e.getName().name() - + "' is a KEY column with an unsupported type. " - + "KSQL currently only supports KEY columns of type " + SqlBaseType.STRING + "."); - } } else if (isRowKey) { throw new KsqlException("'" + e.getName().name() + "' is a reserved column name. 
" + "It can only be used for KEY columns."); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java index 6bcb6e98a85e..8600272e28ca 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java @@ -34,6 +34,7 @@ import io.confluent.ksql.parser.tree.WindowExpression; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.SqlBaseType; import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.structured.SchemaKGroupedStream; @@ -108,6 +109,10 @@ public AggregateNode( this.havingExpressions = havingExpressions; this.keyField = KeyField.of(requireNonNull(keyFieldName, "keyFieldName")) .validateKeyExistsIn(schema); + + if (schema.key().get(0).type().baseType() != SqlBaseType.STRING) { + throw new KsqlException("GROUP BY is not supported with non-STRING keys"); + } } @Override diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java index 897479977e7b..40863a1c4958 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java @@ -31,6 +31,7 @@ import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.SqlBaseType; import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.structured.SchemaKStream; @@ -93,6 +94,10 @@ public JoinNode( : KeyField.of(leftKeyCol.ref()); this.schema = JoinParamsFactory.createSchema(left.getSchema(), right.getSchema()); + + if (schema.key().get(0).type().baseType() != SqlBaseType.STRING) { + throw new KsqlException("GROUP BY is not supported with non-STRING keys"); + } } @Override diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java index d80bba1dd8e3..a10c0c07234d 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java @@ -23,8 +23,10 @@ import io.confluent.ksql.execution.plan.SelectExpression; import io.confluent.ksql.metastore.model.KeyField; import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.SqlBaseType; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.structured.SchemaKStream; +import io.confluent.ksql.util.KsqlException; import java.util.List; import java.util.Objects; @@ -45,6 +47,10 @@ public RepartitionNode( this.source = Objects.requireNonNull(source, "source"); this.partitionBy = Objects.requireNonNull(partitionBy, "partitionBy"); this.keyField = Objects.requireNonNull(keyField, "keyField"); + + if (source.getSchema().key().get(0).type().baseType() != SqlBaseType.STRING) { + throw new KsqlException("GROUP BY is not supported with non-STRING keys"); + } } @Override diff --git a/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/CreateSourceFactoryTest.java 
b/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/CreateSourceFactoryTest.java index 28b46d5ac19a..abbb3cd9541b 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/CreateSourceFactoryTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/CreateSourceFactoryTest.java @@ -20,6 +20,7 @@ import static io.confluent.ksql.model.WindowType.TUMBLING; import static io.confluent.ksql.parser.tree.TableElement.Namespace.KEY; import static io.confluent.ksql.parser.tree.TableElement.Namespace.VALUE; +import static io.confluent.ksql.schema.ksql.ColumnMatchers.keyColumn; import static io.confluent.ksql.serde.Format.AVRO; import static io.confluent.ksql.serde.Format.JSON; import static io.confluent.ksql.serde.Format.KAFKA; @@ -765,7 +766,7 @@ public void shouldNotThrowOnRowKeyKeyColumn() { } @Test - public void shouldThrowOnRowKeyIfNotString() { + public void shouldAllowNonStringKeyColumn() { // Given: final CreateStream statement = new CreateStream( SOME_NAME, @@ -774,13 +775,14 @@ public void shouldThrowOnRowKeyIfNotString() { withProperties ); - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage("'ROWKEY' is a KEY column with an unsupported type. " - + "KSQL currently only supports KEY columns of type STRING."); - // When: - createSourceFactory.createStreamCommand(statement, ksqlConfig); + final CreateStreamCommand cmd = createSourceFactory + .createStreamCommand(statement, ksqlConfig); + + // Then: + assertThat(cmd.getSchema().key(), contains( + keyColumn(ROWKEY_NAME, SqlTypes.INTEGER) + )); } @Test diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/key-schemas.json b/ksql-functional-tests/src/test/resources/query-validation-tests/key-schemas.json index cc83364dabb7..c80206653832 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/key-schemas.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/key-schemas.json @@ -73,13 +73,57 @@ ] }, { - "name": "explicit non-STRING ROWKEY", + "name": "stream explicit non-STRING ROWKEY", "statements": [ - "CREATE STREAM INPUT (ROWKEY INT KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');" + "CREATE STREAM INPUT (ROWKEY INT KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", + "CREATE STREAM OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "'ROWKEY' is a KEY column with an unsupported type. KSQL currently only supports KEY columns of type STRING." 
+ "inputs": [ + {"topic": "input", "key": 3, "value": {"id": 1}}, + {"topic": "input", "key": 2, "value": {"id": 2}}, + {"topic": "input", "key": null, "value": {"id": 3}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3, "value": {"ID": 1, "KEY": 3}}, + {"topic": "OUTPUT", "key": 2, "value": {"ID": 2, "KEY": 2}}, + {"topic": "OUTPUT", "key": null, "value": {"ID": 3, "KEY": null}} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY INT KEY, ID BIGINT, KEY INT" + } + ] + } + }, + { + "name": "table explicit non-STRING ROWKEY", + "statements": [ + "CREATE TABLE INPUT (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", + "CREATE TABLE OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" + ], + "inputs": [ + {"topic": "input", "key": 3, "value": {"id": 1}}, + {"topic": "input", "key": 2, "value": {"id": 2}}, + {"topic": "input", "key": 1, "value": {"id": 3}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3, "value": {"ID": 1, "KEY": 3}}, + {"topic": "OUTPUT", "key": 2, "value": {"ID": 2, "KEY": 2}}, + {"topic": "OUTPUT", "key": 1, "value": {"ID": 3, "KEY": 1}} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "table", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY BIGINT KEY, ID BIGINT, KEY BIGINT" + } + ] } }, { diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/SourceBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/SourceBuilder.java index cb78a5011ed1..f3c7d585b7c3 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/SourceBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/SourceBuilder.java @@ -267,7 +267,7 @@ private static KStream buildKStream( final AbstractStreamSource streamSource, final KsqlQueryBuilder queryBuilder, final Consumed consumed, - final Function rowKeyGenerator + final Function rowKeyGenerator ) { KStream stream = queryBuilder.getStreamsBuilder() .stream(streamSource.getTopicName(), consumed); @@ -280,7 +280,7 @@ private static KTable buildKTable( final AbstractStreamSource streamSource, final KsqlQueryBuilder queryBuilder, final Consumed consumed, - final Function rowKeyGenerator, + final Function rowKeyGenerator, final Materialized> materialized ) { final KTable table = queryBuilder.getStreamsBuilder() @@ -344,7 +344,7 @@ private static String tableChangeLogOpName(final ExecutionStepPropertiesV1 props return StreamsUtil.buildOpName(stacker.push("Reduce").getQueryContext()); } - private static Function, String> windowedRowKeyGenerator( + private static Function, Object> windowedRowKeyGenerator( final LogicalSchema schema ) { final org.apache.kafka.connect.data.Field keyField = getKeySchemaSingleField(schema); @@ -362,7 +362,7 @@ private static Function, String> windowedRowKeyGenerator( }; } - private static Function nonWindowedRowKeyGenerator( + private static Function nonWindowedRowKeyGenerator( final LogicalSchema schema ) { final org.apache.kafka.connect.data.Field keyField = getKeySchemaSingleField(schema); @@ -371,19 +371,16 @@ private static Function nonWindowedRowKeyGenerator( return null; } - final Object k = key.get(keyField); - return k == null - ? 
null - : k.toString(); + return key.get(keyField); }; } private static class AddKeyAndTimestampColumns implements ValueTransformerWithKeySupplier { - private final Function rowKeyGenerator; + private final Function rowKeyGenerator; - AddKeyAndTimestampColumns(final Function rowKeyGenerator) { + AddKeyAndTimestampColumns(final Function rowKeyGenerator) { this.rowKeyGenerator = requireNonNull(rowKeyGenerator, "rowKeyGenerator"); } From 4dd76ac8bdb556abdbf347d230f849e7764c69b1 Mon Sep 17 00:00:00 2001 From: Rohan Date: Tue, 10 Dec 2019 13:48:18 -0800 Subject: [PATCH 014/123] test: serialize/deserialize plans from qtt (#4080) Extends qtt to build the query plan and deserialize/serialize it before executing. Should be enough to make sure plans are serializable and executable until we improve qtt. --- .../ksql/execution/json/PlanJsonMapper.java | 8 ++++- .../execution/json/PlanJsonMapperTest.java | 0 .../ksql/structured/QueryContextTest.java | 27 ++++++++++++++ .../ksql/execution/context/QueryContext.java | 5 +-- .../ksql/test/tools/TestExecutorUtil.java | 36 +++++++++++++++++-- .../json/KsqlTypesDeserializationModule.java | 6 ++-- .../json/LogicalSchemaDeserializer.java | 9 +++-- .../json/LogicalSchemaDeserializerTest.java | 27 +++++++++++--- .../ksql/rest/client/KsqlClient.java | 4 +-- .../ksql/rest/entity/TableRowsEntityTest.java | 4 +-- 10 files changed, 108 insertions(+), 18 deletions(-) rename {ksql-execution => ksql-engine}/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java (80%) rename {ksql-execution => ksql-engine}/src/test/java/io/confluent/ksql/execution/json/PlanJsonMapperTest.java (100%) rename {ksql-rest-client/src/main/java/io/confluent/ksql/rest/client => ksql-parser/src/main/java/io/confluent/ksql/parser}/json/KsqlTypesDeserializationModule.java (85%) rename {ksql-rest-client/src/main/java/io/confluent/ksql/rest/client => ksql-parser/src/main/java/io/confluent/ksql/parser}/json/LogicalSchemaDeserializer.java (83%) rename {ksql-rest-client/src/test/java/io/confluent/ksql/rest/client => ksql-parser/src/test/java/io/confluent/ksql/parser}/json/LogicalSchemaDeserializerTest.java (78%) diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java b/ksql-engine/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java similarity index 80% rename from ksql-execution/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java rename to ksql-engine/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java index b913ac928823..b84d6e8d8f02 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java @@ -19,6 +19,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.datatype.jdk8.Jdk8Module; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import io.confluent.ksql.json.KsqlTypesSerializationModule; +import io.confluent.ksql.parser.json.KsqlParserSerializationModule; +import io.confluent.ksql.parser.json.KsqlTypesDeserializationModule; public final class PlanJsonMapper { private PlanJsonMapper() { @@ -33,7 +36,10 @@ public static ObjectMapper create() { ObjectMapper mapper = new ObjectMapper(); mapper.registerModules( new Jdk8Module(), - new JavaTimeModule() + new JavaTimeModule(), + new KsqlParserSerializationModule(), + new KsqlTypesSerializationModule(), + new KsqlTypesDeserializationModule(true) ); 
mapper.enable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); mapper.enable(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES); diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/json/PlanJsonMapperTest.java b/ksql-engine/src/test/java/io/confluent/ksql/execution/json/PlanJsonMapperTest.java similarity index 100% rename from ksql-execution/src/test/java/io/confluent/ksql/execution/json/PlanJsonMapperTest.java rename to ksql-engine/src/test/java/io/confluent/ksql/execution/json/PlanJsonMapperTest.java diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/QueryContextTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/QueryContextTest.java index 49f70376f01d..ce6f9d5cbbc9 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/QueryContextTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/structured/QueryContextTest.java @@ -18,13 +18,18 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableSet; import io.confluent.ksql.execution.context.QueryContext; +import java.io.IOException; import org.junit.Test; public class QueryContextTest { + private static final ObjectMapper MAPPER = new ObjectMapper(); + private final QueryContext.Stacker contextStacker = new QueryContext.Stacker().push("node"); private final QueryContext queryContext = contextStacker.getQueryContext(); @@ -51,4 +56,26 @@ public void shouldGenerateNewContextOnPush() { assertQueryContext(childContext, "node", "child"); assertQueryContext(grandchildContext, "node", "child", "grandchild"); } + + @Test + public void shouldSerializeCorrectly() throws IOException { + // Given: + final QueryContext context = contextStacker.push("child").getQueryContext(); + + // When: + final String serialized = MAPPER.writeValueAsString(context); + + // Then: + assertThat(serialized, is("\"node/child\"")); + } + + @Test + public void shouldDeserializeCorrectly() throws IOException { + // When: + final QueryContext deserialized = MAPPER.readValue("\"node/child\"", QueryContext.class); + + // Then: + final QueryContext expected = contextStacker.push("child").getQueryContext(); + assertThat(deserialized, is(expected)); + } } \ No newline at end of file diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryContext.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryContext.java index 50cdd7793670..b2f5cf886c7d 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryContext.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryContext.java @@ -16,6 +16,7 @@ package io.confluent.ksql.execution.context; import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonValue; import com.google.common.collect.ImmutableList; import com.google.errorprone.annotations.Immutable; @@ -43,12 +44,12 @@ private QueryContext(List context) { } } - @SuppressWarnings("unused")// Invoked via reflection by Jackson @JsonCreator - private QueryContext(final String context) { + private QueryContext(String context) { this(ImmutableList.copyOf(context.split(DELIMITER))); } + @JsonIgnore public List getContext() { return context; } diff --git 
a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java index f37bccbe9c60..e94b4edbf5fa 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java @@ -20,14 +20,17 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import io.confluent.kafka.schemaregistry.client.SchemaMetadata; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.KsqlExecutionContext.ExecuteResult; import io.confluent.ksql.engine.KsqlEngine; +import io.confluent.ksql.engine.KsqlPlan; import io.confluent.ksql.engine.SqlFormatInjector; import io.confluent.ksql.engine.StubInsertValuesExecutor; +import io.confluent.ksql.execution.json.PlanJsonMapper; import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.metastore.model.DataSource; import io.confluent.ksql.name.SourceName; @@ -41,12 +44,14 @@ import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.Relation; import io.confluent.ksql.parser.tree.Table; +import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan; import io.confluent.ksql.schema.ksql.inference.DefaultSchemaInjector; import io.confluent.ksql.schema.ksql.inference.SchemaRegistryTopicSchemaSupplier; import io.confluent.ksql.serde.Format; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; +import io.confluent.ksql.test.TestFrameworkException; import io.confluent.ksql.test.serde.SerdeSupplier; import io.confluent.ksql.test.tools.stubs.StubKafkaService; import io.confluent.ksql.test.utils.SerdeUtil; @@ -55,6 +60,7 @@ import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.KsqlStatementException; import io.confluent.ksql.util.PersistentQueryMetadata; +import java.io.IOException; import java.time.Duration; import java.util.ArrayList; import java.util.List; @@ -71,6 +77,8 @@ public final class TestExecutorUtil { // CHECKSTYLE_RULES.ON: ClassDataAbstractionCoupling + private static final ObjectMapper PLAN_MAPPER = PlanJsonMapper.create(); + private TestExecutorUtil() { } @@ -275,7 +283,6 @@ private static List execute( .collect(Collectors.toList()); } - @SuppressWarnings({"rawtypes", "unchecked"}) private static ExecuteResultAndSortedSources execute( final KsqlExecutionContext executionContext, @@ -308,7 +315,7 @@ private static ExecuteResultAndSortedSources execute( final ExecuteResult executeResult; try { - executeResult = executionContext.execute(executionContext.getServiceContext(), reformatted); + executeResult = executeConfiguredStatement(executionContext, reformatted); } catch (final KsqlStatementException statementException) { // use the original statement text in the exception so that tests // can easily check that the failed statement is the input statement @@ -339,6 +346,31 @@ private static ExecuteResultAndSortedSources execute( Optional.empty()); } + @SuppressWarnings("unchecked") + private static ExecuteResult executeConfiguredStatement( + final KsqlExecutionContext executionContext, + final ConfiguredStatement stmt) { + final ConfiguredKsqlPlan configuredPlan; + try { + 
configuredPlan = buildConfiguredPlan(executionContext, stmt); + } catch (final IOException e) { + throw new TestFrameworkException("Error (de)serializing plan: " + e.getMessage(), e); + } + return executionContext.execute(executionContext.getServiceContext(), configuredPlan); + } + + private static ConfiguredKsqlPlan buildConfiguredPlan( + final KsqlExecutionContext executionContext, + final ConfiguredStatement stmt + ) throws IOException { + final KsqlPlan plan = executionContext.plan(executionContext.getServiceContext(), stmt); + final String serialized = PLAN_MAPPER.writeValueAsString(plan); + return ConfiguredKsqlPlan.of( + PLAN_MAPPER.readValue(serialized, KsqlPlan.class), + stmt.getOverrides(), + stmt.getConfig()); + } + private static Optional getWindowSize(final Query query) { return query.getWindow().flatMap(window -> window .getKsqlWindowExpression() diff --git a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/json/KsqlTypesDeserializationModule.java b/ksql-parser/src/main/java/io/confluent/ksql/parser/json/KsqlTypesDeserializationModule.java similarity index 85% rename from ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/json/KsqlTypesDeserializationModule.java rename to ksql-parser/src/main/java/io/confluent/ksql/parser/json/KsqlTypesDeserializationModule.java index 992010afd770..b1fc161fe2ab 100644 --- a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/json/KsqlTypesDeserializationModule.java +++ b/ksql-parser/src/main/java/io/confluent/ksql/parser/json/KsqlTypesDeserializationModule.java @@ -13,14 +13,14 @@ * specific language governing permissions and limitations under the License. */ -package io.confluent.ksql.rest.client.json; +package io.confluent.ksql.parser.json; import com.fasterxml.jackson.databind.module.SimpleModule; import io.confluent.ksql.schema.ksql.LogicalSchema; public class KsqlTypesDeserializationModule extends SimpleModule { - public KsqlTypesDeserializationModule() { - addDeserializer(LogicalSchema.class, new LogicalSchemaDeserializer()); + public KsqlTypesDeserializationModule(boolean withImplicitColumns) { + addDeserializer(LogicalSchema.class, new LogicalSchemaDeserializer(withImplicitColumns)); } } diff --git a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/json/LogicalSchemaDeserializer.java b/ksql-parser/src/main/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializer.java similarity index 83% rename from ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/json/LogicalSchemaDeserializer.java rename to ksql-parser/src/main/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializer.java index 304c73ae3ec1..21ea5a9a8a2d 100644 --- a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/json/LogicalSchemaDeserializer.java +++ b/ksql-parser/src/main/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializer.java @@ -13,7 +13,7 @@ * specific language governing permissions and limitations under the License. 
*/ -package io.confluent.ksql.rest.client.json; +package io.confluent.ksql.parser.json; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.databind.DeserializationContext; @@ -25,6 +25,11 @@ import java.io.IOException; final class LogicalSchemaDeserializer extends JsonDeserializer { + final boolean withImplicitColumns; + + LogicalSchemaDeserializer(final boolean withImplicitColumns) { + this.withImplicitColumns = withImplicitColumns; + } @Override public LogicalSchema deserialize( @@ -36,6 +41,6 @@ public LogicalSchema deserialize( final TableElements tableElements = SchemaParser.parse(text, TypeRegistry.EMPTY); - return tableElements.toLogicalSchema(false); + return tableElements.toLogicalSchema(withImplicitColumns); } } diff --git a/ksql-rest-client/src/test/java/io/confluent/ksql/rest/client/json/LogicalSchemaDeserializerTest.java b/ksql-parser/src/test/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializerTest.java similarity index 78% rename from ksql-rest-client/src/test/java/io/confluent/ksql/rest/client/json/LogicalSchemaDeserializerTest.java rename to ksql-parser/src/test/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializerTest.java index 11f6e13625f5..65f4e904bcf0 100644 --- a/ksql-rest-client/src/test/java/io/confluent/ksql/rest/client/json/LogicalSchemaDeserializerTest.java +++ b/ksql-parser/src/test/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializerTest.java @@ -13,7 +13,7 @@ * specific language governing permissions and limitations under the License. */ -package io.confluent.ksql.rest.client.json; +package io.confluent.ksql.parser.json; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; @@ -32,7 +32,7 @@ public class LogicalSchemaDeserializerTest { @BeforeClass public static void classSetUp() { - MAPPER.registerModule(new TestModule()); + MAPPER.registerModule(new TestModule(false)); } @Test @@ -83,10 +83,29 @@ public void shouldDeserializeSchemaWithKeyAfterValue() throws Exception { .build())); } + @Test + public void shouldAddImplicitColumns() throws Exception { + // Given: + final ObjectMapper mapper = new ObjectMapper(); + mapper.registerModule(new TestModule(true)); + final String json = "\"`v0` INTEGER\""; + + // When: + final LogicalSchema schema = mapper.readValue(json, LogicalSchema.class); + + // Then: + assertThat(schema, is(LogicalSchema.builder() + .valueColumn(ColumnName.of("v0"), SqlTypes.INTEGER) + .build())); + } + private static class TestModule extends SimpleModule { - private TestModule() { - addDeserializer(LogicalSchema.class, new LogicalSchemaDeserializer()); + private TestModule(boolean withImplicitColumns) { + addDeserializer( + LogicalSchema.class, + new LogicalSchemaDeserializer(withImplicitColumns) + ); } } } diff --git a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlClient.java b/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlClient.java index da01dc0dfec7..2f26d8542d38 100644 --- a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlClient.java +++ b/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlClient.java @@ -20,8 +20,8 @@ import com.fasterxml.jackson.databind.DeserializationFeature; import com.google.common.annotations.VisibleForTesting; import io.confluent.ksql.json.JsonMapper; +import io.confluent.ksql.parser.json.KsqlTypesDeserializationModule; import io.confluent.ksql.properties.LocalProperties; -import io.confluent.ksql.rest.client.json.KsqlTypesDeserializationModule; import 
java.net.URI; import java.util.Map; import java.util.Optional; @@ -35,7 +35,7 @@ public final class KsqlClient implements AutoCloseable { static { JsonMapper.INSTANCE.mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); JsonMapper.INSTANCE.mapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); - JsonMapper.INSTANCE.mapper.registerModule(new KsqlTypesDeserializationModule()); + JsonMapper.INSTANCE.mapper.registerModule(new KsqlTypesDeserializationModule(false)); } private final Client httpClient; diff --git a/ksql-rest-client/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityTest.java b/ksql-rest-client/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityTest.java index cd158bf1cf81..59bcd372cb4b 100644 --- a/ksql-rest-client/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityTest.java +++ b/ksql-rest-client/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityTest.java @@ -25,7 +25,7 @@ import io.confluent.ksql.json.KsqlTypesSerializationModule; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.query.QueryId; -import io.confluent.ksql.rest.client.json.KsqlTypesDeserializationModule; +import io.confluent.ksql.parser.json.KsqlTypesDeserializationModule; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; import java.util.Arrays; @@ -53,7 +53,7 @@ public class TableRowsEntityTest { MAPPER = new ObjectMapper(); MAPPER.registerModule(new Jdk8Module()); MAPPER.registerModule(new KsqlTypesSerializationModule()); - MAPPER.registerModule(new KsqlTypesDeserializationModule()); + MAPPER.registerModule(new KsqlTypesDeserializationModule(false)); } @Test(expected = IllegalArgumentException.class) From 6c8094116197e77ca426a62ed70b6a0fcbc661be Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Tue, 10 Dec 2019 13:54:42 -0800 Subject: [PATCH 015/123] fix: properly set key when partition by ROWKEY and join on non-ROWKEY (#4090) --- .../ksql/structured/SchemaKStream.java | 7 +- .../query-validation-tests/partition-by.json | 95 +++++++++++++++++-- 2 files changed, 93 insertions(+), 9 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java index fd8ccae90f6a..3597d04ce806 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java @@ -382,8 +382,11 @@ private boolean needsRepartition(final Expression expression) { return !namesMatch && !isRowKey(columnRef); } - private static boolean isRowKey(final ColumnRef fieldName) { - return fieldName.name().equals(SchemaUtil.ROWKEY_NAME); + private boolean isRowKey(final ColumnRef fieldName) { + // until we support structured keys, there will never be any key column other + // than "ROWKEY" - furthermore, that key column is always prefixed at this point + // unless it is a join, in which case every other source field is prefixed + return fieldName.equals(schema.key().get(0).ref()); } private static ColumnName fieldNameFromExpression(final Expression expression) { diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json index 7ff4e9699672..8f9fbfd3c2f9 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json +++ 
b/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json @@ -132,28 +132,109 @@ "inputs": [{"topic": "input", "value": {"ID": 22}, "timestamp": 10}], "outputs": [{"topic": "OUTPUT", "key": "10", "value": {"ID": 22}, "timestamp": 10}] }, + { + "name": "partition by ROWKEY in join on ROWKEY", + "statements": [ + "CREATE STREAM L (A STRING, B STRING) WITH (kafka_topic='LEFT', value_format='JSON', KEY='A');", + "CREATE STREAM R (C STRING, D STRING) WITH (kafka_topic='RIGHT', value_format='JSON', KEY='C');", + "CREATE STREAM OUTPUT AS SELECT L.ROWKEY, R.ROWKEY FROM L JOIN R WITHIN 10 SECONDS ON L.A = R.C PARTITION BY L.ROWKEY;" + ], + "inputs": [ + {"topic": "LEFT", "key": "join", "value": {"A": "join", "B": "b"}}, + {"topic": "RIGHT", "key": "join", "value": {"C": "join", "D": "d"}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "join", "value": {"L_ROWKEY": "join", "R_ROWKEY": "join"}} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "stream", "keyField": "L_ROWKEY"} + ], + "topics": { + "blacklist": ".*-repartition" + } + } + }, { "name": "partition by ROWKEY in join on non-ROWKEY", "statements": [ "CREATE STREAM L (A STRING, B STRING) WITH (kafka_topic='LEFT', value_format='JSON', KEY='A');", "CREATE STREAM R (C STRING, D STRING) WITH (kafka_topic='RIGHT', value_format='JSON', KEY='C');", - "CREATE STREAM OUTPUT AS SELECT L.A, L.B, R.C, R.D, L.ROWKEY, R.ROWKEY FROM L JOIN R WITHIN 10 SECONDS ON L.B = R.D PARTITION BY L.ROWKEY;" + "CREATE STREAM OUTPUT AS SELECT L.ROWKEY, R.ROWKEY FROM L JOIN R WITHIN 10 SECONDS ON L.B = R.D PARTITION BY L.ROWKEY;" ], - "comments": [ - "This test demonstrates a problem when we JOIN on a non-ROWKEY field and then PARTITION BY ", - "a ROWKEY field. Note that the key is 'join' when it should be 'a' and the key-field is 'B' ", - "when it should be 'L_ROWKEY'" + "inputs": [ + {"topic": "LEFT", "key": "a", "value": {"A": "a", "B": "join"}}, + {"topic": "RIGHT", "key": "c", "value": {"C": "c", "D": "join"}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "a", "value": {"L_ROWKEY": "a", "R_ROWKEY": "c"}} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "stream", "keyField": "L_ROWKEY"} + ] + } + }, + { + "name": "partition by ROWKEY in join on ROWKEY ALIASED", + "statements": [ + "CREATE STREAM L (A STRING, B STRING) WITH (kafka_topic='LEFT', value_format='JSON', KEY='A');", + "CREATE STREAM R (C STRING, D STRING) WITH (kafka_topic='RIGHT', value_format='JSON', KEY='C');", + "CREATE STREAM OUTPUT AS SELECT L.ROWKEY, R.ROWKEY FROM L JOIN R WITHIN 10 SECONDS ON L.A = R.C PARTITION BY L.A;" + ], + "inputs": [ + {"topic": "LEFT", "key": "join", "value": {"A": "join", "B": "b"}}, + {"topic": "RIGHT", "key": "join", "value": {"C": "join", "D": "d"}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "join", "value": {"L_ROWKEY": "join", "R_ROWKEY": "join"}} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "stream", "keyField": null} + ], + "topics": { + "blacklist": ".*-repartition" + } + } + }, + { + "name": "partition by non-ROWKEY in join on ROWKEY", + "statements": [ + "CREATE STREAM L (A STRING, B STRING) WITH (kafka_topic='LEFT', value_format='JSON', KEY='A');", + "CREATE STREAM R (C STRING, D STRING) WITH (kafka_topic='RIGHT', value_format='JSON', KEY='C');", + "CREATE STREAM OUTPUT AS SELECT L.ROWKEY, R.ROWKEY FROM L JOIN R WITHIN 10 SECONDS ON L.A = R.C PARTITION BY L.B;" + ], + "inputs": [ + {"topic": "LEFT", "key": "join", "value": {"A": "join", "B": "b"}}, + {"topic": "RIGHT", "key": "join", 
"value": {"C": "join", "D": "d"}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "b", "value": {"L_ROWKEY": "join", "R_ROWKEY": "join"}} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "stream", "keyField": null} + ] + } + }, + { + "name": "partition by non-ROWKEY in join on non-ROWKEY", + "statements": [ + "CREATE STREAM L (A STRING, B STRING) WITH (kafka_topic='LEFT', value_format='JSON', KEY='A');", + "CREATE STREAM R (C STRING, D STRING) WITH (kafka_topic='RIGHT', value_format='JSON', KEY='C');", + "CREATE STREAM OUTPUT AS SELECT L.ROWKEY, R.ROWKEY FROM L JOIN R WITHIN 10 SECONDS ON L.B = R.D PARTITION BY L.B;" ], "inputs": [ {"topic": "LEFT", "key": "a", "value": {"A": "a", "B": "join"}}, {"topic": "RIGHT", "key": "c", "value": {"C": "c", "D": "join"}} ], "outputs": [ - {"topic": "OUTPUT", "key": "join", "value": {"A": "a", "B": "join", "C": "c", "D": "join", "L_ROWKEY": "a", "R_ROWKEY": "c"}} + {"topic": "OUTPUT", "key": "join", "value": {"L_ROWKEY": "a", "R_ROWKEY": "c"}} ], "post": { "sources": [ - {"name": "OUTPUT", "type": "stream", "keyField": "B"} + {"name": "OUTPUT", "type": "stream", "keyField": null} ] } } From 7addf8856a6d62a6890a5f2520eead26538233a6 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Tue, 10 Dec 2019 22:48:27 +0000 Subject: [PATCH 016/123] chore: partition-by primitive key support (#4098) * chore: partition-by primitive key support Fixes: https://github.com/confluentinc/ksql/issues/4092 WIP: This commit gets `PARTITION BY` clauses working with primitive key types. However, it does disable a couple of join until https://github.com/confluentinc/ksql/issues/4094 has been completed. BREAKING CHANGE: A `PARTITION BY` now changes the SQL type of `ROWKEY` in the output schema of a query. For example, consider: ```sql CREATE STREAM INPUT (ROWKEY STRING KEY, ID INT) WITH (...); CREATE STREAM OUTPUT AS SELECT ROWKEY AS NAME FROM INPUT PARTITION BY ID; ``` Previously, the above would have resulted in an output schema of `ROWKEY STRING KEY, NAME STRING`, where `ROWKEY` would have stored the string representation of the integer from the `ID` column. With this commit the output schema will be `ROWKEY INT KEY, NAME STRING`. 
--- .../ksql/planner/LogicalPlanner.java | 65 +++++++++++---- .../confluent/ksql/planner/plan/JoinNode.java | 2 +- .../ksql/planner/plan/RepartitionNode.java | 28 +++---- .../physical/PhysicalPlanBuilderTest.java | 10 +-- .../ksql/execution/util/StructKeyUtil.java | 46 +++++++++-- .../execution/util/StructKeyUtilTest.java | 82 +++++++++++++++++++ .../query-validation-tests/joins.json | 10 +++ .../query-validation-tests/key-field.json | 28 +++---- .../query-validation-tests/partition-by.json | 67 +++++++++++++-- .../execution/streams/StepSchemaResolver.java | 25 +++++- .../streams/StreamSelectKeyBuilder.java | 35 ++++++-- .../streams/StepSchemaResolverTest.java | 15 +++- .../streams/StreamSelectKeyBuilderTest.java | 72 ++++++++++------ 13 files changed, 381 insertions(+), 104 deletions(-) create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/util/StructKeyUtilTest.java diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java index 7dfdf4ba9f47..66837ecfdbe1 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java @@ -43,10 +43,12 @@ import io.confluent.ksql.planner.plan.RepartitionNode; import io.confluent.ksql.schema.ksql.Column; import io.confluent.ksql.schema.ksql.ColumnRef; +import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.LogicalSchema.Builder; import io.confluent.ksql.schema.ksql.types.SqlType; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.SchemaUtil; import java.util.List; import java.util.Optional; @@ -201,36 +203,46 @@ private static FilterNode buildFilterNode( return new FilterNode(new PlanNodeId("WhereFilter"), sourcePlanNode, filterExpression); } - private static RepartitionNode buildRepartitionNode( + private RepartitionNode buildRepartitionNode( final PlanNode sourceNode, final Expression partitionBy ) { - if (!(partitionBy instanceof ColumnReferenceExp)) { - return new RepartitionNode( - new PlanNodeId("PartitionBy"), - sourceNode, - partitionBy, - KeyField.none()); - } - - final ColumnRef partitionColumn = ((ColumnReferenceExp) partitionBy).getReference(); - final LogicalSchema schema = sourceNode.getSchema(); - final KeyField keyField; - if (schema.isMetaColumn(partitionColumn.name())) { + + if (!(partitionBy instanceof ColumnReferenceExp)) { keyField = KeyField.none(); - } else if (schema.isKeyColumn(partitionColumn.name())) { - keyField = sourceNode.getKeyField(); } else { - keyField = KeyField.of(partitionColumn); + final ColumnRef columnRef = ((ColumnReferenceExp) partitionBy).getReference(); + final LogicalSchema sourceSchema = sourceNode.getSchema(); + + final Column proposedKey = sourceSchema + .findValueColumn(columnRef) + .orElseThrow(() -> new KsqlException("Invalid identifier for PARTITION BY clause: '" + + columnRef.name().toString(FormatOptions.noEscape()) + "' Only columns from the " + + "source schema can be referenced in the PARTITION BY clause.")); + + switch (proposedKey.namespace()) { + case KEY: + keyField = sourceNode.getKeyField(); + break; + case VALUE: + keyField = KeyField.of(columnRef); + break; + default: + keyField = KeyField.none(); + break; + } } + final LogicalSchema schema = buildRepartitionedSchema(sourceNode, partitionBy); + return new RepartitionNode( 
new PlanNodeId("PartitionBy"), sourceNode, + schema, partitionBy, - keyField); - + keyField + ); } private FlatMapNode buildFlatMapNode(final PlanNode sourcePlanNode) { @@ -331,4 +343,21 @@ private LogicalSchema buildProjectionSchema(final PlanNode sourcePlanNode) { return builder.build(); } + + private LogicalSchema buildRepartitionedSchema( + final PlanNode sourceNode, + final Expression partitionBy + ) { + final LogicalSchema sourceSchema = sourceNode.getSchema(); + + final ExpressionTypeManager typeManager = + new ExpressionTypeManager(sourceSchema, functionRegistry); + + final SqlType keyType = typeManager.getExpressionSqlType(partitionBy); + + return LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, keyType) + .valueColumns(sourceSchema.value()) + .build(); + } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java index 40863a1c4958..358a0ee390bb 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java @@ -96,7 +96,7 @@ public JoinNode( this.schema = JoinParamsFactory.createSchema(left.getSchema(), right.getSchema()); if (schema.key().get(0).type().baseType() != SqlBaseType.STRING) { - throw new KsqlException("GROUP BY is not supported with non-STRING keys"); + throw new KsqlException("JOIN is not supported with non-STRING keys"); } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java index a10c0c07234d..b29b32f10636 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java @@ -15,6 +15,8 @@ package io.confluent.ksql.planner.plan; +import static java.util.Objects.requireNonNull; + import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.errorprone.annotations.Immutable; @@ -23,12 +25,9 @@ import io.confluent.ksql.execution.plan.SelectExpression; import io.confluent.ksql.metastore.model.KeyField; import io.confluent.ksql.schema.ksql.LogicalSchema; -import io.confluent.ksql.schema.ksql.SqlBaseType; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.structured.SchemaKStream; -import io.confluent.ksql.util.KsqlException; import java.util.List; -import java.util.Objects; @Immutable public class RepartitionNode extends PlanNode { @@ -36,26 +35,25 @@ public class RepartitionNode extends PlanNode { private final PlanNode source; private final Expression partitionBy; private final KeyField keyField; + private final LogicalSchema schema; public RepartitionNode( - PlanNodeId id, - PlanNode source, - Expression partitionBy, - KeyField keyField + final PlanNodeId id, + final PlanNode source, + final LogicalSchema schema, + final Expression partitionBy, + final KeyField keyField ) { super(id, source.getNodeOutputType()); - this.source = Objects.requireNonNull(source, "source"); - this.partitionBy = Objects.requireNonNull(partitionBy, "partitionBy"); - this.keyField = Objects.requireNonNull(keyField, "keyField"); - - if (source.getSchema().key().get(0).type().baseType() != SqlBaseType.STRING) { - throw new KsqlException("GROUP BY is not supported with non-STRING keys"); - } + this.source = requireNonNull(source, "source"); + this.partitionBy = 
requireNonNull(partitionBy, "partitionBy"); + this.keyField = requireNonNull(keyField, "keyField"); + this.schema = requireNonNull(schema, "schema"); } @Override public LogicalSchema getSchema() { - return source.getSchema(); + return schema; } @Override diff --git a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java index c165a709b96d..7272ed4af575 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java @@ -294,12 +294,12 @@ public void shouldRekeyIfPartitionByDoesNotMatchResultKey() { final String planText = queryMetadataList.get(1).getExecutionPlan(); final String[] lines = planText.split("\n"); assertThat(lines.length, equalTo(4)); - assertThat(lines[0], equalTo(" > [ SINK ] | Schema: [ROWKEY STRING KEY, COL0 BIGINT, COL1 STRING, COL2 " + assertThat(lines[0], equalTo(" > [ SINK ] | Schema: [ROWKEY BIGINT KEY, COL0 BIGINT, COL1 STRING, COL2 " + "DOUBLE] | Logger: InsertQuery_1.S1")); assertThat(lines[2], - containsString("[ REKEY ] | Schema: [TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, TEST1.COL0 BIGINT, TEST1.COL1 STRING, TEST1.COL2 DOUBLE] " + containsString("[ REKEY ] | Schema: [ROWKEY BIGINT KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, TEST1.COL0 BIGINT, TEST1.COL1 STRING, TEST1.COL2 DOUBLE] " + "| Logger: InsertQuery_1.PartitionBy")); - assertThat(lines[1], containsString("[ PROJECT ] | Schema: [ROWKEY STRING KEY, COL0 BIGINT, COL1 STRING" + assertThat(lines[1], containsString("[ PROJECT ] | Schema: [ROWKEY BIGINT KEY, COL0 BIGINT, COL1 STRING" + ", COL2 DOUBLE] | Logger: InsertQuery_1.Project")); } @@ -316,7 +316,7 @@ public void shouldRepartitionLeftStreamIfNotCorrectKey() { .get(0); // Then: - assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [TEST2.")); + assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [ROWKEY DOUBLE KEY, TEST2.")); } @Test @@ -332,7 +332,7 @@ public void shouldRepartitionRightStreamIfNotCorrectKey() { .get(0); // Then: - assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [TEST3.")); + assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [ROWKEY STRING KEY, TEST3.")); } @Test diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/StructKeyUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/StructKeyUtil.java index 696c444744d9..c1ca8bd27e2a 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/StructKeyUtil.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/StructKeyUtil.java @@ -15,9 +15,13 @@ package io.confluent.ksql.execution.util; -import io.confluent.ksql.schema.ksql.PersistenceSchema; +import io.confluent.ksql.schema.ksql.Column; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.SchemaConverters; +import io.confluent.ksql.schema.ksql.types.SqlType; import io.confluent.ksql.util.SchemaUtil; -import org.apache.kafka.connect.data.ConnectSchema; +import java.util.List; +import java.util.Objects; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; @@ -35,11 +39,6 @@ public final class StructKeyUtil { private static final org.apache.kafka.connect.data.Field ROWKEY_FIELD = 
ROWKEY_STRUCT_SCHEMA.fields().get(0); - public static final PersistenceSchema ROWKEY_SERIALIZED_SCHEMA = PersistenceSchema.from( - (ConnectSchema) ROWKEY_STRUCT_SCHEMA, - false - ); - private StructKeyUtil() { } @@ -48,4 +47,37 @@ public static Struct asStructKey(String rowKey) { keyStruct.put(ROWKEY_FIELD, rowKey); return keyStruct; } + + public static KeyBuilder keySchema(final LogicalSchema schema) { + final List keyCols = schema.key(); + if (keyCols.size() != 1) { + throw new UnsupportedOperationException("Only single keys supported"); + } + + final SqlType sqlType = keyCols.get(0).type(); + final Schema connectSchema = SchemaConverters.sqlToConnectConverter().toConnectSchema(sqlType); + + return new KeyBuilder(SchemaBuilder + .struct() + .field(SchemaUtil.ROWKEY_NAME.name(), connectSchema) + .build() + ); + } + + public static final class KeyBuilder { + + private final Schema keySchema; + private final org.apache.kafka.connect.data.Field keyField; + + private KeyBuilder(final Schema keySchema) { + this.keySchema = Objects.requireNonNull(keySchema, "keySchema"); + this.keyField = keySchema.field(SchemaUtil.ROWKEY_NAME.name()); + } + + public Struct build(final Object rowKey) { + final Struct keyStruct = new Struct(keySchema); + keyStruct.put(keyField, rowKey); + return keyStruct; + } + } } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/StructKeyUtilTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/StructKeyUtilTest.java new file mode 100644 index 000000000000..fe80c92f2a9d --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/StructKeyUtilTest.java @@ -0,0 +1,82 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.util; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +import io.confluent.ksql.execution.util.StructKeyUtil.KeyBuilder; +import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.types.SqlTypes; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.connect.data.Struct; +import org.junit.Before; +import org.junit.Test; + +public class StructKeyUtilTest { + + private static final LogicalSchema LOGICAL_SCHEMA = LogicalSchema.builder() + .keyColumn(ColumnName.of("BOB"), SqlTypes.INTEGER) + .valueColumn(ColumnName.of("DOES_NOT_MATTER"), SqlTypes.STRING) + .build(); + private KeyBuilder builder; + + @Before + public void setUp() { + builder = StructKeyUtil.keySchema(LOGICAL_SCHEMA); + } + + @Test(expected = UnsupportedOperationException.class) + public void shouldThrowOnMultipleKeyColumns() { + // Only single key columns initially supported + StructKeyUtil.keySchema(LogicalSchema.builder() + .keyColumn(ColumnName.of("BOB"), SqlTypes.STRING) + .keyColumn(ColumnName.of("JOHN"), SqlTypes.STRING) + .build()); + } + + @Test + public void shouldBuildCorrectSchema() { + // When: + final Struct result = builder.build(1); + + // Then: + assertThat(result.schema(), is(SchemaBuilder.struct() + .field("ROWKEY", Schema.OPTIONAL_INT32_SCHEMA) + .build())); + } + + @Test + public void shouldHandleValue() { + // When: + final Struct result = builder.build(1); + + // Then: + assertThat(result.getInt32("ROWKEY"), is(1)); + } + + @Test + public void shouldHandleNulls() { + // When: + final Struct result = builder.build(null); + + // Then: + assertThat(result.getInt32("ROWKEY"), is(nullValue())); + } +} \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json index 654da3830a83..3179a9cc501a 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json @@ -56,6 +56,8 @@ { "name": "stream stream left join with rowkey - rekey", "format": ["AVRO", "JSON"], + "enabled": false, + "comment": "disabled until https://github.com/confluentinc/ksql/issues/4094 is done", "statements": [ "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}');", "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}');", @@ -89,6 +91,8 @@ { "name": "stream stream left join - rekey", "format": ["AVRO", "JSON"], + "enabled": false, + "comment": "disabled until https://github.com/confluentinc/ksql/issues/4094 is done", "statements": [ "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}');", "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}');", @@ -169,6 +173,8 @@ }, { "name": "stream stream left join - right join key in projection", + "enabled": false, + "comment": "disabled until https://github.com/confluentinc/ksql/issues/4094 is done", "statements": [ "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON');", 
"CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON');", @@ -204,6 +210,8 @@ }, { "name": "stream stream left join - both join keys in projection", + "enabled": false, + "comment": "disabled until https://github.com/confluentinc/ksql/issues/4094 is done", "statements": [ "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON');", "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON');", @@ -1283,6 +1291,8 @@ }, { "name": "stream to table when neither have key field and joining by table ROWKEY", + "enabled": false, + "comment": "disabled until https://github.com/confluentinc/ksql/issues/4094 is done", "statements": [ "CREATE STREAM S (ID bigint) WITH (kafka_topic='S', value_format='JSON');", "CREATE TABLE NO_KEY (ID bigint, NAME string) WITH (kafka_topic='NO_KEY', value_format='JSON');", diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json b/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json index 81649da87bc8..26c15d185630 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json @@ -75,7 +75,7 @@ {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"FOO":1, "BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"FOO":1, "BAR": 2}} ], "post": { "sources": [ @@ -105,7 +105,7 @@ {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"BAR": 2}} ], "post": { "sources": [ @@ -255,19 +255,19 @@ { "name": "stream | initially set | partition by (same) | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT * FROM INPUT PARTITION BY foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"1", "value": {"FOO":1, "BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"FOO":1, "BAR": 2}} ], "post": { "sources": [ {"name": "INPUT", "type": "stream", "keyField": "FOO"}, - {"name": "OUTPUT", "type": "stream", "keyField": "FOO"} + {"name": "OUTPUT", "type": "stream", "keyField": "FOO", "schema": "ROWKEY INT KEY, FOO INT, BAR INT"} ], "topics": { "blacklist": ".*-repartition" @@ -277,7 +277,7 @@ { "name": "stream | initially set | partition by (same) | key in value | aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT foo AS aliased, bar FROM INPUT PARTITION BY aliased;" ], "expectedException": { @@ -288,19 +288,19 @@ { "name": "stream | initially set | partition by (same) | key not in value | -", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM 
INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT bar FROM INPUT PARTITION BY foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"1", "value": {"BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"BAR": 2}} ], "post": { "sources": [ {"name": "INPUT", "type": "stream", "keyField": "FOO"}, - {"name": "OUTPUT", "type": "stream", "keyField": null} + {"name": "OUTPUT", "type": "stream", "keyField": null, "schema": "ROWKEY INT KEY, BAR INT"} ], "topics": { "blacklist": ".*-repartition" @@ -317,7 +317,7 @@ {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"2", "value": {"FOO": 1, "BAR": 2}} + {"topic": "OUTPUT", "key": 2, "value": {"FOO": 1, "BAR": 2}} ], "post": { "sources": [ @@ -347,7 +347,7 @@ {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"2", "value": {"FOO":1}} + {"topic": "OUTPUT", "key": 2, "value": {"FOO":1}} ], "post": { "sources": [ @@ -709,7 +709,7 @@ {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "3", "value": {"KSQL_COL_0": 3}} + {"topic": "OUTPUT", "key": 3, "value": {"KSQL_COL_0": 3}} ], "post": { "sources": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json index 8f9fbfd3c2f9..6d0b16be480b 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json @@ -12,7 +12,54 @@ "outputs": [ {"topic": "REPARTITIONED", "key": "zero", "value": "zero,0"} ] - },{ + }, + { + "name": "int column", + "statements": [ + "CREATE STREAM TEST (ROWKEY STRING KEY, ID bigint) with (kafka_topic='test_topic', value_format = 'delimited');", + "CREATE STREAM OUTPUT AS SELECT * from TEST partition by id;" + ], + "inputs": [ + {"topic": "test_topic", "key": "a", "value": "10"} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 10, "value": "10"} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY BIGINT KEY, ID BIGINT" + } + ] + } + }, + { + "name": "bigint key field", + "statements": [ + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID BIGINT) with (kafka_topic='test_topic', value_format = 'delimited', key='ID');", + "CREATE STREAM OUTPUT AS select * from TEST partition by ID;" + ], + "inputs": [ + {"topic": "test_topic", "key": 0, "value": "0"} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 0, "value": "0"} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY BIGINT KEY, ID BIGINT" + } + ] + } + }, + { "name": "partition by - KAFKA", "statements": [ "CREATE STREAM INPUT (ID int) with (kafka_topic='input', value_format = 'KAFKA');", @@ -22,8 +69,18 @@ {"topic": "input", "key": "0", "value": 10} ], "outputs": [ - {"topic": "OUTPUT", "key": "10", "value": 10} - ] + {"topic": "OUTPUT", "key": 10, "value": 10} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY INT KEY, ID INT" + } + ] + } }, { "name": 
"partition by with projection select all", @@ -107,7 +164,7 @@ "CREATE STREAM OUTPUT AS select ROWKEY AS OLDKEY from INPUT partition by ID;" ], "inputs": [{"topic": "input", "key": "foo", "value": {"ID": 10}}], - "outputs": [{"topic": "OUTPUT", "key": "10", "value": {"OLDKEY": "foo"}}] + "outputs": [{"topic": "OUTPUT", "key": 10, "value": {"OLDKEY": "foo"}}] }, { "name": "partition by ROWKEY", @@ -130,7 +187,7 @@ "CREATE STREAM OUTPUT AS select * from INPUT partition by ROWTIME;" ], "inputs": [{"topic": "input", "value": {"ID": 22}, "timestamp": 10}], - "outputs": [{"topic": "OUTPUT", "key": "10", "value": {"ID": 22}, "timestamp": 10}] + "outputs": [{"topic": "OUTPUT", "key": 10, "value": {"ID": 22}, "timestamp": 10}] }, { "name": "partition by ROWKEY in join on ROWKEY", diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java index 545f53f1718e..f5180d00b54a 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java @@ -39,10 +39,13 @@ import io.confluent.ksql.execution.plan.WindowedStreamSource; import io.confluent.ksql.execution.plan.WindowedTableSource; import io.confluent.ksql.execution.transform.select.Selection; +import io.confluent.ksql.execution.util.ExpressionTypeManager; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.types.SqlType; import io.confluent.ksql.util.HandlerMaps; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.SchemaUtil; import java.util.Objects; import java.util.Optional; @@ -62,7 +65,7 @@ public final class StepSchemaResolver { .put(StreamGroupBy.class, StepSchemaResolver::sameSchema) .put(StreamGroupByKey.class, StepSchemaResolver::sameSchema) .put(StreamSelect.class, StepSchemaResolver::handleStreamSelect) - .put(StreamSelectKey.class, StepSchemaResolver::sameSchema) + .put(StreamSelectKey.class, StepSchemaResolver::handleSelectKey) .put(StreamSink.class, StepSchemaResolver::sameSchema) .put(StreamSource.class, StepSchemaResolver::handleSource) .put(WindowedStreamSource.class, StepSchemaResolver::handleSource) @@ -169,13 +172,29 @@ private LogicalSchema handleStreamSelect( ).getSchema(); } + private LogicalSchema handleSelectKey( + final LogicalSchema sourceSchema, + final StreamSelectKey step + ) { + final ExpressionTypeManager expressionTypeManager = + new ExpressionTypeManager(sourceSchema, functionRegistry); + + final SqlType keyType = expressionTypeManager + .getExpressionSqlType(step.getKeyExpression()); + + return LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, keyType) + .valueColumns(sourceSchema.value()) + .build(); + } + private LogicalSchema handleSource( final LogicalSchema schema, final AbstractStreamSource step) { return schema.withAlias(step.getAlias()).withMetaAndKeyColsInValue(); } - private LogicalSchema handleJoin(final JoinSchemas schemas, final ExecutionStep step) { + private LogicalSchema handleJoin(final JoinSchemas schemas, final ExecutionStep step) { return JoinParamsFactory.createSchema(schemas.left, schemas.right); } @@ -203,7 +222,7 @@ private LogicalSchema handleTableSelect( ).getSchema(); } - private LogicalSchema sameSchema(final LogicalSchema schema, final ExecutionStep step) { + private LogicalSchema sameSchema(final LogicalSchema schema, 
final ExecutionStep step) { return schema; } diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilder.java index e3d8b2ca1a38..5f158db9fcd3 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilder.java @@ -23,6 +23,7 @@ import io.confluent.ksql.execution.plan.KeySerdeFactory; import io.confluent.ksql.execution.plan.StreamSelectKey; import io.confluent.ksql.execution.util.StructKeyUtil; +import io.confluent.ksql.execution.util.StructKeyUtil.KeyBuilder; import io.confluent.ksql.schema.ksql.LogicalSchema; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.streams.kstream.KStream; @@ -40,23 +41,41 @@ public static KStreamHolder build( final KsqlQueryBuilder queryBuilder ) { final LogicalSchema sourceSchema = stream.getSchema(); - final CodeGenRunner codeGen = new CodeGenRunner( - sourceSchema, - queryBuilder.getKsqlConfig(), - queryBuilder.getFunctionRegistry()); - final ExpressionMetadata expression = - codeGen.buildCodeGenFromParseTree(selectKey.getKeyExpression(), EXP_TYPE); + final ExpressionMetadata expression = buildExpressionEvaluator( + selectKey, + queryBuilder, + sourceSchema + ); + + final LogicalSchema resultSchema = new StepSchemaResolver(queryBuilder.getKsqlConfig(), + queryBuilder.getFunctionRegistry()).resolve(selectKey, sourceSchema); + + final KeyBuilder keyBuilder = StructKeyUtil.keySchema(resultSchema); final KStream kstream = stream.getStream(); final KStream rekeyed = kstream .filter((key, val) -> val != null && expression.evaluate(val) != null) - .selectKey((key, val) -> StructKeyUtil.asStructKey(expression.evaluate(val).toString())); + .selectKey((key, val) -> keyBuilder.build(expression.evaluate(val))); return new KStreamHolder<>( rekeyed, - stream.getSchema(), + resultSchema, KeySerdeFactory.unwindowed(queryBuilder) ); } + + private static ExpressionMetadata buildExpressionEvaluator( + final StreamSelectKey selectKey, + final KsqlQueryBuilder queryBuilder, + final LogicalSchema sourceSchema + ) { + final CodeGenRunner codeGen = new CodeGenRunner( + sourceSchema, + queryBuilder.getKsqlConfig(), + queryBuilder.getFunctionRegistry() + ); + + return codeGen.buildCodeGenFromParseTree(selectKey.getKeyExpression(), EXP_TYPE); + } } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java index 99c135bb7cd4..6f788b84bb41 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java @@ -67,6 +67,7 @@ import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.WindowInfo; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.SchemaUtil; import java.time.Duration; import java.util.Collections; import java.util.Optional; @@ -261,17 +262,24 @@ public void shouldResolveSchemaForStreamGroupByKey() { @Test public void shouldResolveSchemaForStreamSelectKey() { // Given: + final Expression keyExpression = + new ColumnReferenceExp(ColumnRef.withoutSource(ColumnName.of("ORANGE"))); + final StreamSelectKey step = new StreamSelectKey( PROPERTIES, streamSource, - 
mock(ColumnReferenceExp.class) + keyExpression ); // When: final LogicalSchema result = resolver.resolve(step, SCHEMA); // Then: - assertThat(result, is(SCHEMA)); + assertThat(result, is(LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.INTEGER) + .valueColumns(SCHEMA.value()) + .build() + )); } @Test @@ -395,6 +403,7 @@ public void shouldResolveSchemaForTableFilter() { @Test public void shouldResolveSchemaForTableSource() { + // Given: final TableSource step = new TableSource( PROPERTIES, "foo", @@ -413,6 +422,7 @@ public void shouldResolveSchemaForTableSource() { @Test public void shouldResolveSchemaForWindowedTableSource() { + // Given: final WindowedTableSource step = new WindowedTableSource( PROPERTIES, "foo", @@ -430,7 +440,6 @@ public void shouldResolveSchemaForWindowedTableSource() { assertThat(result, is(SCHEMA.withAlias(SourceName.of("alias")).withMetaAndKeyColsInValue())); } - @SuppressWarnings("unchecked") private void givenTableFunction(final String name, final SqlType returnType) { final KsqlTableFunction tableFunction = mock(KsqlTableFunction.class); when(functionRegistry.isTableFunction(name)).thenReturn(true); diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilderTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilderTest.java index dc98fa3a354a..8907c820d8d7 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilderTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilderTest.java @@ -34,10 +34,11 @@ import io.confluent.ksql.execution.plan.KeySerdeFactory; import io.confluent.ksql.execution.plan.PlanBuilder; import io.confluent.ksql.execution.plan.StreamSelectKey; +import io.confluent.ksql.execution.util.StructKeyUtil; +import io.confluent.ksql.execution.util.StructKeyUtil.KeyBuilder; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; -import io.confluent.ksql.query.QueryId; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; @@ -46,32 +47,52 @@ import io.confluent.ksql.serde.FormatInfo; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.SchemaUtil; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.KeyValueMapper; import org.apache.kafka.streams.kstream.Predicate; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; +import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnit; -import org.mockito.junit.MockitoRule; +import org.mockito.junit.MockitoJUnitRunner; +@RunWith(MockitoJUnitRunner.class) public class StreamSelectKeyBuilderTest { + private static final SourceName ALIAS = SourceName.of("ATL"); - private static final LogicalSchema SCHEMA = LogicalSchema.builder() + + private static final LogicalSchema SOURCE_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) .valueColumn(ColumnName.of("BIG"), SqlTypes.BIGINT) - .valueColumn(ColumnName.of("BOI"), SqlTypes.STRING) + .valueColumn(ColumnName.of("BOI"), SqlTypes.BIGINT) .build() .withAlias(ALIAS) 
.withMetaAndKeyColsInValue(); + private static final ColumnReferenceExp KEY = new ColumnReferenceExp(ColumnRef.of(SourceName.of("ATL"), ColumnName.of("BOI"))); + private static final LogicalSchema RESULT_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) + .valueColumn(ALIAS, SchemaUtil.ROWTIME_NAME, SqlTypes.BIGINT) + .valueColumn(ALIAS, SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) + .valueColumn(ALIAS, ColumnName.of("BIG"), SqlTypes.BIGINT) + .valueColumn(ALIAS, ColumnName.of("BOI"), SqlTypes.BIGINT) + .build(); + + private static final KeyBuilder RESULT_KEY_BUILDER = StructKeyUtil.keySchema(RESULT_SCHEMA); + + + private static final long A_BOI = 5000; + private static final long A_BIG = 3000; + private static final Struct SOURCE_KEY = asStructKey("dre"); + @Mock private KStream kstream; @Mock @@ -91,25 +112,19 @@ public class StreamSelectKeyBuilderTest { private final QueryContext queryContext = new QueryContext.Stacker().push("ya").getQueryContext(); - private final ExecutionStepPropertiesV1 properties = new ExecutionStepPropertiesV1(queryContext); private PlanBuilder planBuilder; private StreamSelectKey selectKey; - @Rule - public final MockitoRule mockitoRule = MockitoJUnit.rule(); - @Before @SuppressWarnings("unchecked") public void init() { - when(queryBuilder.getQueryId()).thenReturn(new QueryId("hey")); when(queryBuilder.getFunctionRegistry()).thenReturn(functionRegistry); when(queryBuilder.getKsqlConfig()).thenReturn(new KsqlConfig(ImmutableMap.of())); - when(sourceStep.getProperties()).thenReturn(properties); when(kstream.filter(any())).thenReturn(filteredKStream); when(filteredKStream.selectKey(any(KeyValueMapper.class))).thenReturn(rekeyedKstream); when(sourceStep.build(any())).thenReturn( - new KStreamHolder<>(kstream, SCHEMA, mock(KeySerdeFactory.class))); + new KStreamHolder<>(kstream, SOURCE_SCHEMA, mock(KeySerdeFactory.class))); planBuilder = new KSPlanBuilder( queryBuilder, mock(SqlPredicateFactory.class), @@ -117,14 +132,13 @@ public void init() { mock(StreamsFactories.class) ); selectKey = new StreamSelectKey( - properties, + new ExecutionStepPropertiesV1(queryContext), sourceStep, KEY ); } @Test - @SuppressWarnings("unchecked") public void shouldRekeyCorrectly() { // When: final KStreamHolder result = selectKey.build(planBuilder); @@ -138,7 +152,6 @@ public void shouldRekeyCorrectly() { } @Test - @SuppressWarnings("unchecked") public void shouldReturnCorrectSerdeFactory() { // When: final KStreamHolder result = selectKey.build(planBuilder); @@ -146,12 +159,12 @@ public void shouldReturnCorrectSerdeFactory() { // Then: result.getKeySerdeFactory().buildKeySerde( FormatInfo.of(Format.JSON), - PhysicalSchema.from(SCHEMA, SerdeOption.none()), + PhysicalSchema.from(SOURCE_SCHEMA, SerdeOption.none()), queryContext ); verify(queryBuilder).buildKeySerde( FormatInfo.of(Format.JSON), - PhysicalSchema.from(SCHEMA, SerdeOption.none()), + PhysicalSchema.from(SOURCE_SCHEMA, SerdeOption.none()), queryContext); } @@ -163,7 +176,7 @@ public void shouldFilterOutNullValues() { // Then: verify(kstream).filter(predicateCaptor.capture()); final Predicate predicate = getPredicate(); - assertThat(predicate.test(asStructKey("dre"), null), is(false)); + assertThat(predicate.test(SOURCE_KEY, null), is(false)); } @Test @@ -175,7 +188,7 @@ public void shouldFilterOutNullKeyColumns() { verify(kstream).filter(predicateCaptor.capture()); final Predicate predicate = getPredicate(); assertThat( - predicate.test(asStructKey("dre"), new GenericRow(0, "dre", 3000, null)), 
+ predicate.test(SOURCE_KEY, value(0, "dre", A_BIG, null)), is(false) ); } @@ -189,7 +202,7 @@ public void shouldNotFilterOutNonNullKeyColumns() { verify(kstream).filter(predicateCaptor.capture()); final Predicate predicate = getPredicate(); assertThat( - predicate.test(asStructKey("dre"), new GenericRow(0, "dre", 3000, "bob")), + predicate.test(SOURCE_KEY, value(0, "dre", A_BIG, A_BOI)), is(true) ); } @@ -202,7 +215,7 @@ public void shouldIgnoreNullNonKeyColumns() { // Then: verify(kstream).filter(predicateCaptor.capture()); final Predicate predicate = getPredicate(); - assertThat(predicate.test(asStructKey("dre"), new GenericRow(0, "dre", null, "bob")), is(true)); + assertThat(predicate.test(SOURCE_KEY, value(0, "dre", null, A_BOI)), is(true)); } @Test @@ -213,8 +226,8 @@ public void shouldComputeCorrectKey() { // Then: final KeyValueMapper keyValueMapper = getKeyMapper(); assertThat( - keyValueMapper.apply(asStructKey("dre"), new GenericRow(0, "dre", 3000, "bob")), - is(asStructKey("bob")) + keyValueMapper.apply(SOURCE_KEY, value(0, "dre", A_BIG, A_BOI)), + is(RESULT_KEY_BUILDER.build(A_BOI)) ); } @@ -224,7 +237,7 @@ public void shouldReturnCorrectSchema() { final KStreamHolder result = selectKey.build(planBuilder); // Then: - assertThat(result.getSchema(), is(SCHEMA)); + assertThat(result.getSchema(), is(RESULT_SCHEMA)); } private KeyValueMapper getKeyMapper() { @@ -236,4 +249,13 @@ private Predicate getPredicate() { verify(kstream).filter(predicateCaptor.capture()); return predicateCaptor.getValue(); } + + private static GenericRow value( + final int rowTime, + final String rowKey, + final Long big, + final Long boi + ) { + return new GenericRow(rowTime, rowKey, big, boi); + } } From 707c5ba73a3d95cfeaea59beec462ecd2b44ddd6 Mon Sep 17 00:00:00 2001 From: Apurva Mehta Date: Tue, 10 Dec 2019 14:55:43 -0800 Subject: [PATCH 017/123] Add milestone 0.7.0 to all new bugs. Adding the next release milestone (0.7.0 at this time) to every bug report so that they are triaged weekly and can be prioritized accordingly. --- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 77055eaab4b5..1c3d379eb2d1 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -4,7 +4,7 @@ about: Create a report to help us improve title: '' labels: bug assignees: '' - +milestone: 0.7.0 --- **Describe the bug** From e6605e4eaddcfcebb7a86c27e1b9e6f51a94ebaa Mon Sep 17 00:00:00 2001 From: Apurva Mehta Date: Tue, 10 Dec 2019 14:57:34 -0800 Subject: [PATCH 018/123] Changing the default milestone to the milestone id --- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 1c3d379eb2d1..7c3dffe27890 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -4,7 +4,7 @@ about: Create a report to help us improve title: '' labels: bug assignees: '' -milestone: 0.7.0 +milestone: 5 --- **Describe the bug** From 61fc0bbc40300153d95afbcd08e4c377600c23b0 Mon Sep 17 00:00:00 2001 From: Apurva Mehta Date: Tue, 10 Dec 2019 15:02:04 -0800 Subject: [PATCH 019/123] reverting the milestone field the milestone field seems to have no effect, reverting. 
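(Side note on why the field was a no-op: GitHub's front matter for Markdown issue templates documents only the `name`, `about`, `title`, `labels`, and `assignees` keys, so an unrecognized `milestone` entry is silently ignored rather than applied to new issues. After this revert the template's front matter is back to the recognized keys, roughly:)

```yaml
---
name: Bug report   # assumed; this line sits above the hunk context shown below
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
```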
--- .github/ISSUE_TEMPLATE/bug_report.md | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 7c3dffe27890..8f72c8221d2a 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -4,7 +4,6 @@ about: Create a report to help us improve title: '' labels: bug assignees: '' -milestone: 5 --- **Describe the bug** From 0a29eace19227af3c8a6fed400211a3e72ceb23c Mon Sep 17 00:00:00 2001 From: Rohan Date: Tue, 10 Dec 2019 23:32:56 -0800 Subject: [PATCH 020/123] refactor: nicer spec for aggregation steps (#4063) * refactor: nicer spec for aggregation steps This patch cleans up the aggregation step to specify a list of non-aggregate column references instead of a count of non-aggregate columns. --- .../ksql/planner/plan/AggregateNode.java | 6 ++- .../ksql/structured/SchemaKGroupedStream.java | 7 +-- .../ksql/structured/SchemaKGroupedTable.java | 5 +- .../function/KudafUndoAggregatorTest.java | 2 +- .../structured/SchemaKGroupedStreamTest.java | 14 ++++-- .../structured/SchemaKGroupedTableTest.java | 14 ++++-- .../function/udaf/KudafAggregator.java | 31 +++++++----- .../function/udaf/KudafUndoAggregator.java | 16 ++++--- .../ksql/execution/plan/StreamAggregate.java | 21 ++++---- .../plan/StreamWindowedAggregate.java | 23 +++++---- .../ksql/execution/plan/TableAggregate.java | 19 ++++---- .../resources/ksql-plan-schema/schema.json | 27 +++++++---- .../streams/AggregateParamsFactory.java | 48 ++++++++++++------- .../streams/ExecutionStepFactory.java | 13 ++--- .../execution/streams/StepSchemaResolver.java | 6 +-- .../streams/StreamAggregateBuilder.java | 6 ++- .../streams/TableAggregateBuilder.java | 4 +- .../streams/AggregateParamsFactoryTest.java | 24 ++++++---- .../streams/StepSchemaResolverTest.java | 6 +-- .../streams/StreamAggregateBuilderTest.java | 26 ++++++---- .../streams/TableAggregateBuilderTest.java | 16 +++++-- 21 files changed, 210 insertions(+), 124 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java index 8600272e28ca..97f2d05f1115 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java @@ -228,8 +228,12 @@ public SchemaKStream buildStream(final KsqlQueryBuilder builder) { final QueryContext.Stacker aggregationContext = contextStacker.push(AGGREGATION_OP_NAME); + final List requiredColumnRefs = requiredColumns.stream() + .map(e -> (ColumnReferenceExp) internalSchema.resolveToInternal(e)) + .map(ColumnReferenceExp::getReference) + .collect(Collectors.toList()); SchemaKTable aggregated = schemaKGroupedStream.aggregate( - requiredColumns.size(), + requiredColumnRefs, functionsWithInternalIdentifiers, windowExpression, valueFormat, diff --git a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKGroupedStream.java b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKGroupedStream.java index 5aba9c773f31..5dbb2fa36c00 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKGroupedStream.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKGroupedStream.java @@ -26,6 +26,7 @@ import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.metastore.model.KeyField; import io.confluent.ksql.parser.tree.WindowExpression; +import io.confluent.ksql.schema.ksql.ColumnRef; 
import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.serde.Format; import io.confluent.ksql.serde.FormatInfo; @@ -72,7 +73,7 @@ public ExecutionStep getSourceStep() { @SuppressWarnings("unchecked") public SchemaKTable aggregate( - final int nonFuncColumnCount, + final List nonAggregateColumns, final List aggregations, final Optional windowExpression, final ValueFormat valueFormat, @@ -87,7 +88,7 @@ public SchemaKTable aggregate( contextStacker, sourceStep, Formats.of(keyFormat, valueFormat, SerdeOption.none()), - nonFuncColumnCount, + nonAggregateColumns, aggregations, windowExpression.get().getKsqlWindowExpression() ); @@ -97,7 +98,7 @@ public SchemaKTable aggregate( contextStacker, sourceStep, Formats.of(keyFormat, valueFormat, SerdeOption.none()), - nonFuncColumnCount, + nonAggregateColumns, aggregations ); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKGroupedTable.java b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKGroupedTable.java index 5cf1c07636ba..6e80882e6824 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKGroupedTable.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKGroupedTable.java @@ -29,6 +29,7 @@ import io.confluent.ksql.metastore.model.KeyField; import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.parser.tree.WindowExpression; +import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.serde.SerdeOption; @@ -71,7 +72,7 @@ public ExecutionStep getSourceTableStep() { @SuppressWarnings("unchecked") @Override public SchemaKTable aggregate( - final int nonFuncColumnCount, + final List nonAggregateColumns, final List aggregations, final Optional windowExpression, final ValueFormat valueFormat, @@ -98,7 +99,7 @@ public SchemaKTable aggregate( contextStacker, sourceTableStep, Formats.of(keyFormat, valueFormat, SerdeOption.none()), - nonFuncColumnCount, + nonAggregateColumns, aggregations ); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/KudafUndoAggregatorTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/KudafUndoAggregatorTest.java index 326ba3891983..a603f4f159cf 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/function/KudafUndoAggregatorTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/KudafUndoAggregatorTest.java @@ -42,7 +42,7 @@ public class KudafUndoAggregatorTest { public void init() { final List> functions = ImmutableList.of((TableAggregationFunction)SUM_INFO); - aggregator = new KudafUndoAggregator(2, functions); + aggregator = new KudafUndoAggregator(ImmutableList.of(0, 1), functions); } @Test diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedStreamTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedStreamTest.java index e275f3597285..61125aca9718 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedStreamTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedStreamTest.java @@ -46,6 +46,7 @@ import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.serde.WindowInfo; import io.confluent.ksql.util.KsqlConfig; +import java.util.List; import java.util.Optional; import java.util.concurrent.TimeUnit; import org.junit.Before; @@ -72,6 +73,9 @@ public class SchemaKGroupedStreamTest { private static final KsqlWindowExpression KSQL_WINDOW_EXP = 
new SessionWindowExpression( 100, TimeUnit.SECONDS ); + private static final List NON_AGGREGATE_COLUMNS = ImmutableList.of( + ColumnRef.withoutSource(ColumnName.of("IN0")) + ); @Mock private KeyField keyField; @@ -115,7 +119,7 @@ public void setUp() { public void shouldReturnKTableWithOutputSchema() { // When: final SchemaKTable result = schemaGroupedStream.aggregate( - 1, + NON_AGGREGATE_COLUMNS, ImmutableList.of(AGG), Optional.empty(), valueFormat, @@ -130,7 +134,7 @@ public void shouldReturnKTableWithOutputSchema() { public void shouldBuildStepForAggregate() { // When: final SchemaKTable result = schemaGroupedStream.aggregate( - 1, + NON_AGGREGATE_COLUMNS, ImmutableList.of(AGG), Optional.empty(), valueFormat, @@ -145,7 +149,7 @@ public void shouldBuildStepForAggregate() { queryContext, schemaGroupedStream.getSourceStep(), Formats.of(keyFormat, valueFormat, SerdeOption.none()), - 1, + NON_AGGREGATE_COLUMNS, ImmutableList.of(AGG) ) ) @@ -156,7 +160,7 @@ public void shouldBuildStepForAggregate() { public void shouldBuildStepForWindowedAggregate() { // When: final SchemaKTable result = schemaGroupedStream.aggregate( - 1, + NON_AGGREGATE_COLUMNS, ImmutableList.of(AGG), Optional.of(windowExp), valueFormat, @@ -175,7 +179,7 @@ public void shouldBuildStepForWindowedAggregate() { queryContext, schemaGroupedStream.getSourceStep(), Formats.of(expected, valueFormat, SerdeOption.none()), - 1, + NON_AGGREGATE_COLUMNS, ImmutableList.of(AGG), KSQL_WINDOW_EXP ) diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedTableTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedTableTest.java index 9895af9574dc..8c21a7f9060f 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedTableTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedTableTest.java @@ -44,6 +44,7 @@ import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlException; import java.util.Collections; +import java.util.List; import java.util.Optional; import org.junit.Rule; import org.junit.Test; @@ -63,6 +64,9 @@ public class SchemaKGroupedTableTest { .valueColumn(ColumnName.of("KSQL_AGG_VARIABLE_0"), SqlTypes.INTEGER) .valueColumn(ColumnName.of("KSQL_AGG_VARIABLE_1"), SqlTypes.BIGINT) .build(); + private static final List NON_AGG_COLUMNS = ImmutableList.of( + ColumnRef.withoutSource(ColumnName.of("IN0")) + ); private static final FunctionCall MIN = udaf("MIN"); private static final FunctionCall MAX = udaf("MAX"); private static final FunctionCall SUM = udaf("SUM"); @@ -91,7 +95,7 @@ public void shouldFailWindowedTableAggregation() { // When: groupedTable.aggregate( - 1, + NON_AGG_COLUMNS, ImmutableList.of(SUM, COUNT), Optional.of(windowExp), valueFormat, @@ -111,7 +115,7 @@ public void shouldFailUnsupportedAggregateFunction() { // When: kGroupedTable.aggregate( - 1, + NON_AGG_COLUMNS, ImmutableList.of(MIN, MAX), Optional.empty(), valueFormat, @@ -136,7 +140,7 @@ public void shouldBuildStepForAggregate() { final SchemaKGroupedTable kGroupedTable = buildSchemaKGroupedTable(); final SchemaKTable result = kGroupedTable.aggregate( - 1, + NON_AGG_COLUMNS, ImmutableList.of(SUM, COUNT), Optional.empty(), valueFormat, @@ -151,7 +155,7 @@ public void shouldBuildStepForAggregate() { queryContext, kGroupedTable.getSourceTableStep(), Formats.of(keyFormat, valueFormat, SerdeOption.none()), - 1, + NON_AGG_COLUMNS, ImmutableList.of(SUM, COUNT) ) ) @@ -165,7 +169,7 @@ public void shouldReturnKTableWithOutputSchema() { // When: 
final SchemaKTable result = groupedTable.aggregate( - 1, + NON_AGG_COLUMNS, ImmutableList.of(SUM, COUNT), Optional.empty(), valueFormat, diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafAggregator.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafAggregator.java index 0ba4056ad98a..421429e0e84e 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafAggregator.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafAggregator.java @@ -30,14 +30,17 @@ public class KudafAggregator implements UdafAggregator { - private final int initialUdafIndex; + private final List nonAggColumnIndexes; private final List> aggregateFunctions; private final int columnCount; - public KudafAggregator(int initialUdafIndex, List> functions) { - this.initialUdafIndex = initialUdafIndex; + public KudafAggregator( + List nonAggColumnIndexes, + List> functions) { + this.nonAggColumnIndexes = + ImmutableList.copyOf(requireNonNull(nonAggColumnIndexes, "nonAggColumnIndexes")); this.aggregateFunctions = ImmutableList.copyOf(requireNonNull(functions, "functions")); - this.columnCount = initialUdafIndex + aggregateFunctions.size(); + this.columnCount = nonAggColumnIndexes.size() + aggregateFunctions.size(); if (aggregateFunctions.isEmpty()) { throw new IllegalArgumentException("Aggregator needs aggregate functions"); @@ -47,8 +50,10 @@ public KudafAggregator(int initialUdafIndex, List @Override public GenericRow apply(K k, GenericRow rowValue, GenericRow aggRowValue) { // copy over group-by and aggregate parameter columns into the output row + int initialUdafIndex = nonAggColumnIndexes.size(); for (int idx = 0; idx < initialUdafIndex; idx++) { - aggRowValue.getColumns().set(idx, rowValue.getColumns().get(idx)); + int idxInRow = nonAggColumnIndexes.get(idx); + aggRowValue.getColumns().set(idx, rowValue.getColumns().get(idxInRow)); } // compute the aggregation and write it into the output row. 
Its assumed that @@ -75,11 +80,13 @@ public Merger getMerger() { return (key, aggRowOne, aggRowTwo) -> { List columns = new ArrayList<>(columnCount); + int initialUdafIndex = nonAggColumnIndexes.size(); for (int idx = 0; idx < initialUdafIndex; idx++) { - if (aggRowOne.getColumns().get(idx) == null) { - columns.add(idx, aggRowTwo.getColumns().get(idx)); + int idxInRow = nonAggColumnIndexes.get(idx); + if (aggRowOne.getColumns().get(idxInRow) == null) { + columns.add(idx, aggRowTwo.getColumns().get(idxInRow)); } else { - columns.add(idx, aggRowOne.getColumns().get(idx)); + columns.add(idx, aggRowOne.getColumns().get(idxInRow)); } } @@ -99,7 +106,7 @@ public Merger getMerger() { private KsqlAggregateFunction aggregateFunctionForColumn( final int columnIndex ) { - return (KsqlAggregateFunction) aggregateFunctions.get(columnIndex - initialUdafIndex); + return (KsqlAggregateFunction) aggregateFunctions.get(columnIndex - nonAggColumnIndexes.size()); } private final class ResultTransformer implements KsqlTransformer { @@ -116,11 +123,11 @@ public GenericRow transform( final List columns = new ArrayList<>(columnCount); - for (int idx = 0; idx < initialUdafIndex; idx++) { - columns.add(idx, value.getColumns().get(idx)); + for (int idx = 0; idx < nonAggColumnIndexes.size(); idx++) { + columns.add(idx, value.getColumns().get(nonAggColumnIndexes.get(idx))); } - for (int idx = initialUdafIndex; idx < columnCount; idx++) { + for (int idx = nonAggColumnIndexes.size(); idx < columnCount; idx++) { final KsqlAggregateFunction function = aggregateFunctionForColumn(idx); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafUndoAggregator.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafUndoAggregator.java index cf8b4387749c..96b9acbeb022 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafUndoAggregator.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafUndoAggregator.java @@ -25,23 +25,25 @@ public class KudafUndoAggregator implements Aggregator { - private final int initialUdafIndex; + private final List nonAggColumnIndexes; private final List> aggregateFunctions; public KudafUndoAggregator( - int initialUdafIndex, List> aggregateFunctions + List nonAggColumnIndexes, + List> aggregateFunctions ) { Objects.requireNonNull(aggregateFunctions, "aggregateFunctions"); this.aggregateFunctions = ImmutableList.copyOf(aggregateFunctions); - this.initialUdafIndex = initialUdafIndex; + this.nonAggColumnIndexes = ImmutableList.copyOf(nonAggColumnIndexes); } @SuppressWarnings("unchecked") @Override public GenericRow apply(Struct k, GenericRow rowValue, GenericRow aggRowValue) { int idx = 0; - for (; idx < initialUdafIndex; idx++) { - aggRowValue.getColumns().set(idx, rowValue.getColumns().get(idx)); + for (; idx < nonAggColumnIndexes.size(); idx++) { + final int idxInRow = nonAggColumnIndexes.get(idx); + aggRowValue.getColumns().set(idx, rowValue.getColumns().get(idxInRow)); } for (TableAggregationFunction function : aggregateFunctions) { @@ -54,8 +56,8 @@ public GenericRow apply(Struct k, GenericRow rowValue, GenericRow aggRowValue) { return aggRowValue; } - public int getInitialUdafIndex() { - return initialUdafIndex; + public List getNonAggColumnIndexes() { + return nonAggColumnIndexes; } public List> getAggregateFunctions() { diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamAggregate.java 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamAggregate.java index a4a662d62336..582beac72ee7 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamAggregate.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamAggregate.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableList; import com.google.errorprone.annotations.Immutable; import io.confluent.ksql.execution.expression.tree.FunctionCall; +import io.confluent.ksql.schema.ksql.ColumnRef; import java.util.Collections; import java.util.List; import java.util.Objects; @@ -31,7 +32,7 @@ public class StreamAggregate implements ExecutionStep> { private final ExecutionStepPropertiesV1 properties; private final ExecutionStep source; private final Formats internalFormats; - private final int nonFuncColumnCount; + private final ImmutableList nonAggregateColumns; private final ImmutableList aggregationFunctions; public StreamAggregate( @@ -40,13 +41,15 @@ public StreamAggregate( @JsonProperty(value = "source", required = true) ExecutionStep source, @JsonProperty(value = "internalFormats", required = true) Formats internalFormats, - @JsonProperty(value = "nonFuncColumnCount", required = true) int nonFuncColumnCount, + @JsonProperty(value = "nonAggregateColumns", required = true) + List nonAggregateColumns, @JsonProperty(value = "aggregationFunctions", required = true) List aggregationFunctions) { this.properties = requireNonNull(properties, "properties"); this.source = requireNonNull(source, "source"); this.internalFormats = requireNonNull(internalFormats, "internalFormats"); - this.nonFuncColumnCount = nonFuncColumnCount; + this.nonAggregateColumns = + ImmutableList.copyOf(requireNonNull(nonAggregateColumns, "nonAggregateColumns")); this.aggregationFunctions = ImmutableList.copyOf( requireNonNull(aggregationFunctions, "aggregationFunctions")); } @@ -62,10 +65,6 @@ public List> getSources() { return Collections.singletonList(source); } - public int getNonFuncColumnCount() { - return nonFuncColumnCount; - } - public List getAggregationFunctions() { return aggregationFunctions; } @@ -74,6 +73,10 @@ public Formats getInternalFormats() { return internalFormats; } + public List getNonAggregateColumns() { + return nonAggregateColumns; + } + public ExecutionStep getSource() { return source; } @@ -96,7 +99,7 @@ public boolean equals(Object o) { && Objects.equals(source, that.source) && Objects.equals(internalFormats, that.internalFormats) && Objects.equals(aggregationFunctions, that.aggregationFunctions) - && nonFuncColumnCount == that.nonFuncColumnCount; + && Objects.equals(nonAggregateColumns, that.nonAggregateColumns); } @Override @@ -107,7 +110,7 @@ public int hashCode() { source, internalFormats, aggregationFunctions, - nonFuncColumnCount + nonAggregateColumns ); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamWindowedAggregate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamWindowedAggregate.java index 01b294fdea95..d888a800a41d 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamWindowedAggregate.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamWindowedAggregate.java @@ -23,6 +23,7 @@ import com.google.errorprone.annotations.Immutable; import io.confluent.ksql.execution.expression.tree.FunctionCall; import io.confluent.ksql.execution.windows.KsqlWindowExpression; +import io.confluent.ksql.schema.ksql.ColumnRef; import java.util.Collections; 
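/*
 * Illustrative note, not part of the patch: ColumnRef is represented as a
 * plain string in the KSQL plan schema (see the schema.json hunks below), so
 * a serialized aggregate step now names its non-aggregate columns instead of
 * counting them. A hypothetical plan fragment changes from
 *
 *   "nonFuncColumnCount" : 2
 *
 * to something like
 *
 *   "nonAggregateColumns" : [ "ORANGE", "APPLE" ]
 *
 * Both properties are marked required, so the rename is visible to anything
 * that validates plans against this schema.
 */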
import java.util.List; import java.util.Objects; @@ -35,7 +36,7 @@ public class StreamWindowedAggregate private final ExecutionStepPropertiesV1 properties; private final ExecutionStep source; private final Formats internalFormats; - private final int nonFuncColumnCount; + private final ImmutableList nonAggregateColumns; private final ImmutableList aggregationFunctions; private final KsqlWindowExpression windowExpression; @@ -44,7 +45,8 @@ public StreamWindowedAggregate( @JsonProperty(value = "source", required = true) ExecutionStep source, @JsonProperty(value = "internalFormats", required = true) Formats internalFormats, - @JsonProperty(value = "nonFuncColumnCount", required = true) int nonFuncColumnCount, + @JsonProperty(value = "nonAggregateColumns", required = true) + List nonAggregateColumns, @JsonProperty(value = "aggregationFunctions", required = true) List aggregationFunctions, @JsonProperty(value = "windowExpression", required = true) @@ -52,7 +54,8 @@ public StreamWindowedAggregate( this.properties = requireNonNull(properties, "properties"); this.source = requireNonNull(source, "source"); this.internalFormats = requireNonNull(internalFormats, "internalFormats"); - this.nonFuncColumnCount = nonFuncColumnCount; + this.nonAggregateColumns + = ImmutableList.copyOf(requireNonNull(nonAggregateColumns, "nonAggregateColumns")); this.aggregationFunctions = ImmutableList.copyOf( requireNonNull(aggregationFunctions, "aggregationFunctions")); this.windowExpression = requireNonNull(windowExpression, "windowExpression"); @@ -69,10 +72,6 @@ public List> getSources() { return Collections.singletonList(source); } - public int getNonFuncColumnCount() { - return nonFuncColumnCount; - } - public List getAggregationFunctions() { return aggregationFunctions; } @@ -81,6 +80,10 @@ public Formats getInternalFormats() { return internalFormats; } + public List getNonAggregateColumns() { + return nonAggregateColumns; + } + public KsqlWindowExpression getWindowExpression() { return windowExpression; } @@ -107,8 +110,8 @@ public boolean equals(Object o) { && Objects.equals(source, that.source) && Objects.equals(internalFormats, that.internalFormats) && Objects.equals(aggregationFunctions, that.aggregationFunctions) - && nonFuncColumnCount == that.nonFuncColumnCount - && Objects.equals(windowExpression, that.windowExpression); + && Objects.equals(windowExpression, that.windowExpression) + && Objects.equals(nonAggregateColumns, that.nonAggregateColumns); } @Override @@ -119,7 +122,7 @@ public int hashCode() { source, internalFormats, aggregationFunctions, - nonFuncColumnCount, + nonAggregateColumns, windowExpression ); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableAggregate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableAggregate.java index 8db5734c380d..3e4841648e59 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableAggregate.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableAggregate.java @@ -21,6 +21,7 @@ import com.google.common.collect.ImmutableList; import com.google.errorprone.annotations.Immutable; import io.confluent.ksql.execution.expression.tree.FunctionCall; +import io.confluent.ksql.schema.ksql.ColumnRef; import java.util.Collections; import java.util.List; import java.util.Objects; @@ -31,21 +32,23 @@ public class TableAggregate implements ExecutionStep> { private final ExecutionStepPropertiesV1 properties; private final ExecutionStep source; private final Formats internalFormats; 
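/*
 * A minimal sketch, not part of the patch, of what the representation change
 * buys: with a bare count, the non-aggregate columns had to be the first N
 * value columns of the source row; with an explicit column list, resolved to
 * source indexes as in AggregateParamsFactory below, output position i can be
 * fed from any source position. The row names and the indexes 0 and 2 here
 * are hypothetical, standing in for the GenericRow column lists:
 *
 *   // old contract: copy the leading prefix verbatim
 *   for (int i = 0; i < nonFuncColumnCount; i++) {
 *     aggRow.set(i, sourceRow.get(i));
 *   }
 *
 *   // new contract: one level of index indirection
 *   List<Integer> nonAggColumnIndexes = ImmutableList.of(0, 2);
 *   for (int i = 0; i < nonAggColumnIndexes.size(); i++) {
 *     aggRow.set(i, sourceRow.get(nonAggColumnIndexes.get(i)));
 *   }
 */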
-  private final int nonFuncColumnCount;
   private final ImmutableList aggregationFunctions;
+  private final ImmutableList nonAggregateColumns;
 
   public TableAggregate(
       @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
       @JsonProperty(value = "source", required = true) ExecutionStep source,
       @JsonProperty(value = "internalFormats", required = true) Formats internalFormats,
-      @JsonProperty(value = "nonFuncColumnCount", required = true) int nonFuncColumnCount,
+      @JsonProperty(value = "nonAggregateColumns", required = true)
+      List nonAggregateColumns,
       @JsonProperty(value = "aggregationFunctions", required = true)
       List aggregationFunctions) {
     this.properties = requireNonNull(properties, "properties");
     this.source = requireNonNull(source, "source");
     this.internalFormats = requireNonNull(internalFormats, "internalFormats");
-    this.nonFuncColumnCount = nonFuncColumnCount;
+    this.nonAggregateColumns
+        = ImmutableList.copyOf(requireNonNull(nonAggregateColumns, "nonAggregateColumns"));
     this.aggregationFunctions = ImmutableList
         .copyOf(requireNonNull(aggregationFunctions, "aggValToFunctionMap"));
   }
@@ -69,8 +72,8 @@ public List getAggregationFunctions() {
     return aggregationFunctions;
   }
 
-  public int getNonFuncColumnCount() {
-    return nonFuncColumnCount;
+  public List getNonAggregateColumns() {
+    return nonAggregateColumns;
   }
 
   public ExecutionStep getSource() {
@@ -94,14 +97,14 @@ public boolean equals(Object o) {
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(internalFormats, that.internalFormats)
-        && nonFuncColumnCount == that.nonFuncColumnCount
-        && Objects.equals(aggregationFunctions, that.aggregationFunctions);
+        && Objects.equals(aggregationFunctions, that.aggregationFunctions)
+        && Objects.equals(nonAggregateColumns, that.nonAggregateColumns);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(properties, source, internalFormats, nonFuncColumnCount,
+    return Objects.hash(properties, source, internalFormats, nonAggregateColumns,
         aggregationFunctions);
   }
 }
diff --git a/ksql-rest-app/src/test/resources/ksql-plan-schema/schema.json b/ksql-rest-app/src/test/resources/ksql-plan-schema/schema.json
index cac7ce5b2d9a..ee20905190e9 100644
--- a/ksql-rest-app/src/test/resources/ksql-plan-schema/schema.json
+++ b/ksql-rest-app/src/test/resources/ksql-plan-schema/schema.json
@@ -260,8 +260,11 @@
       "internalFormats" : {
         "$ref" : "#/definitions/Formats"
       },
-      "nonFuncColumnCount" : {
-        "type" : "integer"
+      "nonAggregateColumns" : {
+        "type" : "array",
+        "items" : {
+          "type" : "string"
+        }
       },
       "aggregationFunctions" : {
         "type" : "array",
@@ -271,7 +274,7 @@
         }
       },
       "title" : "streamAggregateV1",
-      "required" : [ "@type", "properties", "source", "internalFormats", "nonFuncColumnCount", "aggregationFunctions" ]
+      "required" : [ "@type", "properties", "source", "internalFormats", "nonAggregateColumns", "aggregationFunctions" ]
     },
     "ExecutionStepPropertiesV1" : {
       "type" : "object",
@@ -602,8 +605,11 @@
       "internalFormats" : {
         "$ref" : "#/definitions/Formats"
       },
-      "nonFuncColumnCount" : {
-        "type" : "integer"
+      "nonAggregateColumns" : {
+        "type" : "array",
+        "items" : {
+          "type" : "string"
+        }
       },
       "aggregationFunctions" : {
         "type" : "array",
@@ -616,7 +622,7 @@
         }
       },
       "title" : "streamWindowedAggregateV1",
-      "required" : [ "@type", "properties", "source", "internalFormats", "nonFuncColumnCount", "aggregationFunctions", "windowExpression" ]
+      "required" : [ "@type", "properties", "source", "internalFormats",
"nonAggregateColumns", "aggregationFunctions", "windowExpression" ] }, "TableSource" : { "type" : "object", @@ -701,8 +707,11 @@ "internalFormats" : { "$ref" : "#/definitions/Formats" }, - "nonFuncColumnCount" : { - "type" : "integer" + "nonAggregateColumns" : { + "type" : "array", + "items" : { + "type" : "string" + } }, "aggregationFunctions" : { "type" : "array", @@ -712,7 +721,7 @@ } }, "title" : "tableAggregateV1", - "required" : [ "@type", "properties", "source", "internalFormats", "nonFuncColumnCount", "aggregationFunctions" ] + "required" : [ "@type", "properties", "source", "internalFormats", "nonAggregateColumns", "aggregationFunctions" ] }, "TableFilter" : { "type" : "object", diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateParamsFactory.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateParamsFactory.java index a9b36d34a077..117ccbdfe49f 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateParamsFactory.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateParamsFactory.java @@ -28,8 +28,11 @@ import io.confluent.ksql.function.KsqlAggregateFunction; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.schema.ksql.Column; +import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlType; +import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Objects; @@ -51,29 +54,30 @@ public AggregateParamsFactory() { public AggregateParams createUndoable( final LogicalSchema schema, - final int initialUdafIndex, + final List nonAggregateColumns, final FunctionRegistry functionRegistry, final List functionList ) { - return create(schema, initialUdafIndex, functionRegistry, functionList, true); + return create(schema, nonAggregateColumns, functionRegistry, functionList, true); } public AggregateParams create( final LogicalSchema schema, - final int initialUdafIndex, + final List nonAggregateColumns, final FunctionRegistry functionRegistry, final List functionList ) { - return create(schema, initialUdafIndex, functionRegistry, functionList, false); + return create(schema, nonAggregateColumns, functionRegistry, functionList, false); } private AggregateParams create( final LogicalSchema schema, - final int initialUdafIndex, + final List nonAggregateColumns, final FunctionRegistry functionRegistry, final List functionList, final boolean table ) { + final List nonAggColumnIndexes = nonAggColumnIndexes(schema, nonAggregateColumns); final List> functions = ImmutableList.copyOf( functionList.stream().map( funcCall -> UdafUtil.resolveAggregateFunction( @@ -90,32 +94,44 @@ private AggregateParams create( for (final KsqlAggregateFunction function : functions) { tableFunctions.add((TableAggregationFunction) function); } - undoAggregator = Optional.of(new KudafUndoAggregator(initialUdafIndex, tableFunctions)); + undoAggregator = Optional.of(new KudafUndoAggregator(nonAggColumnIndexes, tableFunctions)); } else { undoAggregator = Optional.empty(); } return new AggregateParams( - new KudafInitializer(initialUdafIndex, initialValueSuppliers), - aggregatorFactory.create(initialUdafIndex, functions), + new KudafInitializer(nonAggregateColumns.size(), initialValueSuppliers), + aggregatorFactory.create(nonAggColumnIndexes, functions), undoAggregator, - new WindowSelectMapper(initialUdafIndex, functions), - 
buildSchema(schema, initialUdafIndex, functions, true), - buildSchema(schema, initialUdafIndex, functions, false) + new WindowSelectMapper(nonAggregateColumns.size(), functions), + buildSchema(schema, nonAggregateColumns, functions, true), + buildSchema(schema, nonAggregateColumns, functions, false) ); } + private static List nonAggColumnIndexes( + final LogicalSchema schema, + final List nonAggregateColumns + ) { + final List indexes = new ArrayList<>(nonAggregateColumns.size()); + for (final ColumnRef columnRef : nonAggregateColumns) { + indexes.add(schema.findValueColumn(columnRef).map(Column::index).orElseThrow( + () -> new IllegalStateException("invalid column ref: " + columnRef) + )); + } + return Collections.unmodifiableList(indexes); + } + private static LogicalSchema buildSchema( final LogicalSchema schema, - final int initialUdafIndex, + final List nonAggregateColumns, final List> aggregateFunctions, final boolean useAggregate) { final LogicalSchema.Builder schemaBuilder = LogicalSchema.builder(); - final List cols = schema.value(); schemaBuilder.keyColumns(schema.key()); - for (int i = 0; i < initialUdafIndex; i++) { - schemaBuilder.valueColumn(cols.get(i)); + for (final ColumnRef columnRef : nonAggregateColumns) { + schemaBuilder.valueColumn(schema.findValueColumn(columnRef).get()); } for (int i = 0; i < aggregateFunctions.size(); i++) { @@ -132,7 +148,7 @@ private static LogicalSchema buildSchema( interface KudafAggregatorFactory { KudafAggregator create( - int initialUdafIndex, + List nonAggColumnIndexes, List> functions ); } diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/ExecutionStepFactory.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/ExecutionStepFactory.java index dd44dc28f97e..a7981d5415db 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/ExecutionStepFactory.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/ExecutionStepFactory.java @@ -51,6 +51,7 @@ import io.confluent.ksql.execution.timestamp.TimestampColumn; import io.confluent.ksql.execution.windows.KsqlWindowExpression; import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.serde.WindowInfo; import java.time.Duration; @@ -310,7 +311,7 @@ public static StreamAggregate streamAggregate( final QueryContext.Stacker stacker, final ExecutionStep sourceStep, final Formats formats, - final int nonFuncColumnCount, + final List nonAggregateColumns, final List aggregations ) { final QueryContext queryContext = stacker.getQueryContext(); @@ -318,7 +319,7 @@ public static StreamAggregate streamAggregate( new ExecutionStepPropertiesV1(queryContext), sourceStep, formats, - nonFuncColumnCount, + nonAggregateColumns, aggregations ); } @@ -327,7 +328,7 @@ public static StreamWindowedAggregate streamWindowedAggregate( final QueryContext.Stacker stacker, final ExecutionStep sourceStep, final Formats formats, - final int nonFuncColumnCount, + final List nonAggregateColumns, final List aggregations, final KsqlWindowExpression window ) { @@ -336,7 +337,7 @@ public static StreamWindowedAggregate streamWindowedAggregate( new ExecutionStepPropertiesV1(queryContext), sourceStep, formats, - nonFuncColumnCount, + nonAggregateColumns, aggregations, window ); @@ -370,7 +371,7 @@ public static TableAggregate tableAggregate( final QueryContext.Stacker stacker, final ExecutionStep sourceStep, final Formats formats, - final int 
nonFuncColumnCount, + final List nonAggregateColumns, final List aggregations ) { final QueryContext queryContext = stacker.getQueryContext(); @@ -378,7 +379,7 @@ public static TableAggregate tableAggregate( new ExecutionStepPropertiesV1(queryContext), sourceStep, formats, - nonFuncColumnCount, + nonAggregateColumns, aggregations ); } diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java index f5180d00b54a..46aef7f8ab1c 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java @@ -131,7 +131,7 @@ private LogicalSchema handleStreamAggregate( final StreamAggregate step) { return new AggregateParamsFactory().create( schema, - step.getNonFuncColumnCount(), + step.getNonAggregateColumns(), functionRegistry, step.getAggregationFunctions() ).getSchema(); @@ -143,7 +143,7 @@ private LogicalSchema handleStreamWindowedAggregate( ) { return new AggregateParamsFactory().create( schema, - step.getNonFuncColumnCount(), + step.getNonAggregateColumns(), functionRegistry, step.getAggregationFunctions() ).getSchema(); @@ -204,7 +204,7 @@ private LogicalSchema handleTableAggregate( ) { return new AggregateParamsFactory().create( schema, - step.getNonFuncColumnCount(), + step.getNonAggregateColumns(), functionRegistry, step.getAggregationFunctions() ).getSchema(); diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamAggregateBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamAggregateBuilder.java index a0c996ca9bfa..c401d2747b62 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamAggregateBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamAggregateBuilder.java @@ -33,9 +33,11 @@ import io.confluent.ksql.execution.windows.SessionWindowExpression; import io.confluent.ksql.execution.windows.TumblingWindowExpression; import io.confluent.ksql.execution.windows.WindowVisitor; +import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; import java.time.Duration; +import java.util.List; import java.util.Objects; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.utils.Bytes; @@ -74,7 +76,7 @@ static KTableHolder build( final MaterializedFactory materializedFactory, final AggregateParamsFactory aggregateParamsFactory) { final LogicalSchema sourceSchema = groupedStream.getSchema(); - final int nonFuncColumns = aggregate.getNonFuncColumnCount(); + final List nonFuncColumns = aggregate.getNonAggregateColumns(); final AggregateParams aggregateParams = aggregateParamsFactory.create( sourceSchema, nonFuncColumns, @@ -146,7 +148,7 @@ static KTableHolder> build( final AggregateParamsFactory aggregateParamsFactory ) { final LogicalSchema sourceSchema = groupedStream.getSchema(); - final int nonFuncColumns = aggregate.getNonFuncColumnCount(); + final List nonFuncColumns = aggregate.getNonAggregateColumns(); final AggregateParams aggregateParams = aggregateParamsFactory.create( sourceSchema, nonFuncColumns, diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/TableAggregateBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/TableAggregateBuilder.java index 
dfd02235d948..9f67cf98daa6 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/TableAggregateBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/TableAggregateBuilder.java @@ -23,7 +23,9 @@ import io.confluent.ksql.execution.plan.KeySerdeFactory; import io.confluent.ksql.execution.plan.TableAggregate; import io.confluent.ksql.execution.streams.transform.KsTransformer; +import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; +import java.util.List; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.streams.kstream.KTable; @@ -57,7 +59,7 @@ public static KTableHolder build( final AggregateParamsFactory aggregateParamsFactory ) { final LogicalSchema sourceSchema = groupedTable.getSchema(); - final int nonFuncColumns = aggregate.getNonFuncColumnCount(); + final List nonFuncColumns = aggregate.getNonAggregateColumns(); final AggregateParams aggregateParams = aggregateParamsFactory.createUndoable( sourceSchema, nonFuncColumns, diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java index 1c7896f491db..90e4d86bb780 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java @@ -42,10 +42,14 @@ public class AggregateParamsFactoryTest { private static final LogicalSchema INPUT_SCHEMA = LogicalSchema.builder() .valueColumn(ColumnName.of("REQUIRED0"), SqlTypes.BIGINT) - .valueColumn(ColumnName.of("REQUIRED1"), SqlTypes.STRING) .valueColumn(ColumnName.of("ARGUMENT0"), SqlTypes.INTEGER) + .valueColumn(ColumnName.of("REQUIRED1"), SqlTypes.STRING) .valueColumn(ColumnName.of("ARGUMENT1"), SqlTypes.DOUBLE) .build(); + private static final List NON_AGG_COLUMNS = ImmutableList.of( + INPUT_SCHEMA.value().get(0).ref(), + INPUT_SCHEMA.value().get(2).ref() + ); private static final FunctionCall AGG0 = new FunctionCall( FunctionName.of("AGG0"), ImmutableList.of(new ColumnReferenceExp(ColumnRef.withoutSource(ColumnName.of("ARGUMENT0")))) @@ -111,11 +115,11 @@ public void init() { when(windowStart.returnType()).thenReturn(SqlTypes.BIGINT); when(windowStart.getAggregateType()).thenReturn(SqlTypes.BIGINT); - when(udafFactory.create(anyInt(), any())).thenReturn(aggregator); + when(udafFactory.create(any(), any())).thenReturn(aggregator); aggregateParams = new AggregateParamsFactory(udafFactory).create( INPUT_SCHEMA, - 2, + NON_AGG_COLUMNS, functionRegistry, FUNCTIONS ); @@ -125,7 +129,7 @@ public void init() { @Test public void shouldCreateAggregatorWithCorrectParams() { verify(udafFactory).create( - 2, + ImmutableList.of(0, 2), ImmutableList.of(agg0, agg1) ); } @@ -163,14 +167,18 @@ public void shouldReturnEmptyUndoAggregator() { @Test public void shouldReturnUndoAggregator() { // Given: - aggregateParams = new AggregateParamsFactory(udafFactory) - .createUndoable(INPUT_SCHEMA, 2, functionRegistry, ImmutableList.of(TABLE_AGG)); + aggregateParams = new AggregateParamsFactory(udafFactory).createUndoable( + INPUT_SCHEMA, + NON_AGG_COLUMNS, + functionRegistry, + ImmutableList.of(TABLE_AGG) + ); // When: final KudafUndoAggregator undoAggregator = aggregateParams.getUndoAggregator().get(); // Then: - assertThat(undoAggregator.getInitialUdafIndex(), equalTo(2)); + 
assertThat(undoAggregator.getNonAggColumnIndexes(), equalTo(ImmutableList.of(0, 2))); assertThat( undoAggregator.getAggregateFunctions(), equalTo(ImmutableList.of(tableAgg)) @@ -191,7 +199,7 @@ public void shouldReturnCorrectWindowSelectMapperForWindowSelections() { // Given: aggregateParams = new AggregateParamsFactory(udafFactory).create( INPUT_SCHEMA, - 2, + NON_AGG_COLUMNS, functionRegistry, ImmutableList.of(WINDOW_START) ); diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java index 6f788b84bb41..d0e8e674c3a8 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java @@ -120,7 +120,7 @@ public void shouldResolveSchemaForStreamAggregate() { PROPERTIES, groupedStreamSource, formats, - 1, + ImmutableList.of(ColumnRef.withoutSource(ColumnName.of("ORANGE"))), ImmutableList.of(functionCall("SUM", "APPLE")) ); @@ -144,7 +144,7 @@ public void shouldResolveSchemaForStreamWindowedAggregate() { PROPERTIES, groupedStreamSource, formats, - 1, + ImmutableList.of(ColumnRef.withoutSource(ColumnName.of("ORANGE"))), ImmutableList.of(functionCall("SUM", "APPLE")), new TumblingWindowExpression(10, TimeUnit.SECONDS) ); @@ -327,7 +327,7 @@ public void shouldResolveSchemaForTableAggregate() { PROPERTIES, groupedTableSource, formats, - 1, + ImmutableList.of(ColumnRef.withoutSource(ColumnName.of("ORANGE"))), ImmutableList.of(functionCall("SUM", "APPLE")) ); diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamAggregateBuilderTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamAggregateBuilderTest.java index 40657311ecb7..aa46273467fb 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamAggregateBuilderTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamAggregateBuilderTest.java @@ -20,7 +20,6 @@ import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.clearInvocations; @@ -117,6 +116,10 @@ public class StreamAggregateBuilderTest { .valueColumn(ColumnName.of("OUTPUT0"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("OUTPUT1"), SqlTypes.STRING) .build(); + private static final List NON_AGG_COLUMNS = ImmutableList.of( + INPUT_SCHEMA.value().get(0).ref(), + INPUT_SCHEMA.value().get(1).ref() + ); private static final PhysicalSchema PHYSICAL_AGGREGATE_SCHEMA = PhysicalSchema.from( AGGREGATE_SCHEMA, SerdeOption.none() @@ -206,7 +209,7 @@ public void init() { when(queryBuilder.buildKeySerde(any(), any(), any())).thenReturn(keySerde); when(queryBuilder.buildValueSerde(any(), any(), any())).thenReturn(valueSerde); when(queryBuilder.getFunctionRegistry()).thenReturn(functionRegistry); - when(aggregateParamsFactory.create(any(), anyInt(), any(), any())) + when(aggregateParamsFactory.create(any(), any(), any(), any())) .thenReturn(aggregateParams); when(aggregateParams.getAggregator()).thenReturn((KudafAggregator) aggregator); when(aggregateParams.getAggregateSchema()).thenReturn(AGGREGATE_SCHEMA); @@ -242,7 +245,7 @@ private void givenUnwindowedAggregate() { new 
ExecutionStepPropertiesV1(CTX), sourceStep, Formats.of(KEY_FORMAT, VALUE_FORMAT, SerdeOption.none()), - 2, + NON_AGG_COLUMNS, FUNCTIONS ); } @@ -264,7 +267,7 @@ private void givenTumblingWindowedAggregate() { new ExecutionStepPropertiesV1(CTX), sourceStep, Formats.of(KEY_FORMAT, VALUE_FORMAT, SerdeOption.none()), - 2, + NON_AGG_COLUMNS, FUNCTIONS, new TumblingWindowExpression(WINDOW.getSeconds(), TimeUnit.SECONDS) ); @@ -276,7 +279,7 @@ private void givenHoppingWindowedAggregate() { new ExecutionStepPropertiesV1(CTX), sourceStep, Formats.of(KEY_FORMAT, VALUE_FORMAT, SerdeOption.none()), - 2, + NON_AGG_COLUMNS, FUNCTIONS, new HoppingWindowExpression( WINDOW.getSeconds(), @@ -300,7 +303,7 @@ private void givenSessionWindowedAggregate() { new ExecutionStepPropertiesV1(CTX), sourceStep, Formats.of(KEY_FORMAT, VALUE_FORMAT, SerdeOption.none()), - 2, + NON_AGG_COLUMNS, FUNCTIONS, new SessionWindowExpression(WINDOW.getSeconds(), TimeUnit.SECONDS) ); @@ -407,7 +410,12 @@ public void shouldBuildAggregatorParamsCorrectlyForUnwindowedAggregate() { aggregate.build(planBuilder); // Then: - verify(aggregateParamsFactory).create(INPUT_SCHEMA, 2, functionRegistry, FUNCTIONS); + verify(aggregateParamsFactory).create( + INPUT_SCHEMA, + NON_AGG_COLUMNS, + functionRegistry, + FUNCTIONS + ); } @Test @@ -611,7 +619,7 @@ public void shouldBuildAggregatorParamsCorrectlyForWindowedAggregate() { aggregated, aggregateParamsFactory ); - when(aggregateParamsFactory.create(any(), anyInt(), any(), any())) + when(aggregateParamsFactory.create(any(), any(), any(), any())) .thenReturn(aggregateParams); given.run(); @@ -620,7 +628,7 @@ public void shouldBuildAggregatorParamsCorrectlyForWindowedAggregate() { // Then: verify(aggregateParamsFactory) - .create(INPUT_SCHEMA, 2, functionRegistry, FUNCTIONS); + .create(INPUT_SCHEMA, NON_AGG_COLUMNS, functionRegistry, FUNCTIONS); } } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/TableAggregateBuilderTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/TableAggregateBuilderTest.java index b81edf93da29..0eaff3f1b568 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/TableAggregateBuilderTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/TableAggregateBuilderTest.java @@ -20,7 +20,6 @@ import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; @@ -89,6 +88,10 @@ public class TableAggregateBuilderTest { .valueColumn(ColumnName.of("RESULT0"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("RESULT1"), SqlTypes.STRING) .build(); + private static final List NON_AGG_COLUMNS = ImmutableList.of( + INPUT_SCHEMA.value().get(0).ref(), + INPUT_SCHEMA.value().get(1).ref() + ); private static final PhysicalSchema PHYSICAL_AGGREGATE_SCHEMA = PhysicalSchema.from( AGGREGATE_SCHEMA, SerdeOption.none() @@ -153,7 +156,7 @@ public void init() { when(queryBuilder.buildKeySerde(any(), any(), any())).thenReturn(keySerde); when(queryBuilder.buildValueSerde(any(), any(), any())).thenReturn(valueSerde); when(queryBuilder.getFunctionRegistry()).thenReturn(functionRegistry); - when(aggregateParamsFactory.createUndoable(any(), anyInt(), any(), any())) + when(aggregateParamsFactory.createUndoable(any(), any(), any(), any())) .thenReturn(aggregateParams); 
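/*
 * Sketch of why the stubbing above changes shape (Mockito, as already used in
 * these tests): anyInt() only matches a primitive int argument, so once the
 * factory parameter becomes a List<ColumnRef> the stub has to use the generic
 * any() matcher, or it would never match and the mock would fall back to
 * returning null:
 *
 *   // before: int nonFuncColumnCount parameter
 *   when(aggregateParamsFactory.create(any(), anyInt(), any(), any()))
 *       .thenReturn(aggregateParams);
 *
 *   // after: List<ColumnRef> nonAggregateColumns parameter
 *   when(aggregateParamsFactory.create(any(), any(), any(), any()))
 *       .thenReturn(aggregateParams);
 */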
when(aggregateParams.getAggregator()).thenReturn((KudafAggregator)aggregator); when(aggregateParams.getUndoAggregator()).thenReturn(Optional.of(undoAggregator)); @@ -171,7 +174,7 @@ public void init() { new ExecutionStepPropertiesV1(CTX), sourceStep, Formats.of(KEY_FORMAT, VALUE_FORMAT, SerdeOption.none()), - 2, + NON_AGG_COLUMNS, FUNCTIONS ); when(sourceStep.build(any())).thenReturn(KGroupedTableHolder.of(groupedTable, INPUT_SCHEMA)); @@ -257,7 +260,12 @@ public void shouldBuildAggregatorParamsCorrectlyForAggregate() { aggregate.build(planBuilder); // Then: - verify(aggregateParamsFactory).createUndoable(INPUT_SCHEMA, 2, functionRegistry, FUNCTIONS); + verify(aggregateParamsFactory).createUndoable( + INPUT_SCHEMA, + NON_AGG_COLUMNS, + functionRegistry, + FUNCTIONS + ); } @Test From 48a3b7ec416f7344333a1eca7e9a1c2c6bdafbfd Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Wed, 11 Dec 2019 08:51:40 -0800 Subject: [PATCH 021/123] docs: fix markdown docs build (DOCS-3091) (#4112) --- docs-md/requirements.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs-md/requirements.txt b/docs-md/requirements.txt index 33eba16e08a8..7208b92fb755 100644 --- a/docs-md/requirements.txt +++ b/docs-md/requirements.txt @@ -1,5 +1,6 @@ +mkdocs==1.0.4 mdx_gh_links>=0.2 -mkdocs-macros-plugin +mkdocs-macros-plugin==0.2.4 mkdocs-git-revision-date-plugin -pymdown-extensions -mkdocs-material +pymdown-extensions +mkdocs-material From 5a4e5510b3fae543207f6c9f143e27a6ed60919d Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Wed, 11 Dec 2019 11:15:42 -0800 Subject: [PATCH 022/123] chore: split ARRAYCONTAINS into JSON_ARRAY_CONTAINS and ARRAY_CONTAINS (#4105) * chore: split ARRAYCONTAINS into JSON_ARRAY_CONTAINS and ARRAY_CONTAINS BREAKING CHANGE: the ARRAYCONTAINS function now needs to be referenced as either JSON_ARRAY_CONTAINS or ARRAY_CONTAINS depending on the intended param types --- .../function/InternalFunctionRegistry.java | 50 ------ .../function/udf/json/ArrayContainsKudf.java | 130 --------------- .../function/udf/json/JsonArrayContains.java | 98 +++++++++++ .../ksql/function/udf/list/ArrayContains.java | 40 +++++ .../InternalFunctionRegistryTest.java | 2 +- .../udf/json/ArrayContainsKudfTest.java | 157 ------------------ .../udf/json/JsonArrayContainsTest.java | 104 ++++++++++++ .../function/udf/list/ArrayContainsTest.java | 85 ++++++++++ .../query-validation-tests/arraycontains.json | 12 +- .../json_array_contains.json | 27 +++ .../execution/ListFunctionsExecutorTest.java | 2 - .../server/resources/KsqlResourceTest.java | 2 - 12 files changed, 362 insertions(+), 347 deletions(-) delete mode 100644 ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/ArrayContainsKudf.java create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/JsonArrayContains.java create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/function/udf/list/ArrayContains.java delete mode 100644 ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/ArrayContainsKudfTest.java create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/JsonArrayContainsTest.java create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/function/udf/list/ArrayContainsTest.java create mode 100644 ksql-functional-tests/src/test/resources/query-validation-tests/json_array_contains.json diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/InternalFunctionRegistry.java b/ksql-engine/src/main/java/io/confluent/ksql/function/InternalFunctionRegistry.java 
index 9b85af7fa062..c10cf9fa99e6 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/InternalFunctionRegistry.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/InternalFunctionRegistry.java @@ -16,7 +16,6 @@ package io.confluent.ksql.function; import com.google.common.collect.ImmutableList; -import io.confluent.ksql.function.types.ArrayType; import io.confluent.ksql.function.types.ParamTypes; import io.confluent.ksql.function.udaf.count.CountAggFunctionFactory; import io.confluent.ksql.function.udaf.max.MaxAggFunctionFactory; @@ -25,7 +24,6 @@ import io.confluent.ksql.function.udaf.topk.TopKAggregateFunctionFactory; import io.confluent.ksql.function.udaf.topkdistinct.TopkDistinctAggFunctionFactory; import io.confluent.ksql.function.udf.UdfMetadata; -import io.confluent.ksql.function.udf.json.ArrayContainsKudf; import io.confluent.ksql.function.udf.json.JsonExtractStringKudf; import io.confluent.ksql.function.udf.math.RandomKudf; import io.confluent.ksql.function.udf.string.ConcatKudf; @@ -313,60 +311,12 @@ private void addMathFunctions() { } private void addJsonFunctions() { - addBuiltInFunction(KsqlScalarFunction.createLegacyBuiltIn( SqlTypes.STRING, ImmutableList.of(ParamTypes.STRING, ParamTypes.STRING), JsonExtractStringKudf.FUNCTION_NAME, JsonExtractStringKudf.class )); - - addBuiltInFunction(KsqlScalarFunction.createLegacyBuiltIn( - SqlTypes.BOOLEAN, - ImmutableList.of(ParamTypes.STRING, ParamTypes.STRING), - FunctionName.of("ARRAYCONTAINS"), - ArrayContainsKudf.class - )); - - addBuiltInFunction(KsqlScalarFunction.createLegacyBuiltIn( - SqlTypes.BOOLEAN, - ImmutableList.of( - ArrayType.of(ParamTypes.STRING), - ParamTypes.STRING - ), - FunctionName.of("ARRAYCONTAINS"), - ArrayContainsKudf.class - )); - - addBuiltInFunction(KsqlScalarFunction.createLegacyBuiltIn( - SqlTypes.BOOLEAN, - ImmutableList.of( - ArrayType.of(ParamTypes.INTEGER), - ParamTypes.INTEGER - ), - FunctionName.of("ARRAYCONTAINS"), - ArrayContainsKudf.class - )); - - addBuiltInFunction(KsqlScalarFunction.createLegacyBuiltIn( - SqlTypes.BOOLEAN, - ImmutableList.of( - ArrayType.of(ParamTypes.LONG), - ParamTypes.LONG - ), - FunctionName.of("ARRAYCONTAINS"), - ArrayContainsKudf.class - )); - - addBuiltInFunction(KsqlScalarFunction.createLegacyBuiltIn( - SqlTypes.BOOLEAN, - ImmutableList.of( - ArrayType.of(ParamTypes.DOUBLE), - ParamTypes.DOUBLE - ), - FunctionName.of("ARRAYCONTAINS"), - ArrayContainsKudf.class - )); } private void addUdafFunctions() { diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/ArrayContainsKudf.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/ArrayContainsKudf.java deleted file mode 100644 index c4679791cc06..000000000000 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/ArrayContainsKudf.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright 2018 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.function.udf.json; - -import static com.fasterxml.jackson.core.JsonFactory.Feature.CANONICALIZE_FIELD_NAMES; -import static com.fasterxml.jackson.core.JsonToken.END_ARRAY; -import static com.fasterxml.jackson.core.JsonToken.START_ARRAY; -import static com.fasterxml.jackson.core.JsonToken.VALUE_FALSE; -import static com.fasterxml.jackson.core.JsonToken.VALUE_NULL; -import static com.fasterxml.jackson.core.JsonToken.VALUE_NUMBER_FLOAT; -import static com.fasterxml.jackson.core.JsonToken.VALUE_NUMBER_INT; -import static com.fasterxml.jackson.core.JsonToken.VALUE_STRING; -import static com.fasterxml.jackson.core.JsonToken.VALUE_TRUE; - -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; -import io.confluent.ksql.function.KsqlFunctionException; -import io.confluent.ksql.function.udf.Kudf; -import io.confluent.ksql.util.KsqlException; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -public class ArrayContainsKudf implements Kudf { - private static final JsonFactory JSON_FACTORY = - new JsonFactory().disable(CANONICALIZE_FIELD_NAMES); - - interface Matcher { - boolean matches(JsonParser parser, Object searchValue) throws IOException; - } - - private final Map matchers = new HashMap<>(); - - - public ArrayContainsKudf() { - matchers.put(JsonToken.VALUE_NULL, (parser, value) -> value == null); - matchers.put(JsonToken.VALUE_STRING, - (parser, value) -> parser.getText().equals(value)); - final Matcher booleanMatcher = - (parser, value) -> parser.getBooleanValue() == (boolean) value; - matchers.put(JsonToken.VALUE_FALSE, booleanMatcher); - matchers.put(JsonToken.VALUE_TRUE, booleanMatcher); - matchers.put(JsonToken.VALUE_NUMBER_INT, - (parser, value) -> - value instanceof Integer && parser.getIntValue() == (int) value - || value instanceof Long && parser.getLongValue() == (long) value); - matchers.put(JsonToken.VALUE_NUMBER_FLOAT, - (parser, value) -> parser.getDoubleValue() == (double) value); - } - - @Override - public Object evaluate(final Object... args) { - if (args.length != 2) { - throw new KsqlFunctionException("ARRAY_CONTAINS udf should have two input argument. 
" - + "Given: " + Arrays.toString(args)); - } - final Object searchValue = args[1]; - if (args[0] instanceof String) { - return jsonStringArrayContains(searchValue, (String) args[0]); - } else if (args[0] instanceof Collection) { - return ((Collection) args[0]).contains(searchValue); - } - - throw new KsqlFunctionException("Invalid type parameters for " + Arrays.toString(args)); - } - - private boolean jsonStringArrayContains(final Object searchValue, final String jsonArray) { - final JsonToken valueType = getType(searchValue); - try (JsonParser parser = JSON_FACTORY.createParser(jsonArray)) { - if (parser.nextToken() != START_ARRAY) { - return false; - } - - while (parser.currentToken() != null) { - final JsonToken token = parser.nextToken(); - if (token == null) { - return searchValue == null; - } - if (token == END_ARRAY) { - return false; - } - parser.skipChildren(); - if (valueType == token) { - final Matcher matcher = matchers.get(valueType); - if (matcher != null && matcher.matches(parser, searchValue)) { - return true; - } - } - } - } catch (final IOException e) { - throw new KsqlException("Invalid JSON format: " + jsonArray, e); - } - return false; - } - - /** - * Returns JsonToken type of the targetValue - */ - private JsonToken getType(final Object searchValue) { - if (searchValue instanceof Long || searchValue instanceof Integer) { - return VALUE_NUMBER_INT; - } else if (searchValue instanceof Double) { - return VALUE_NUMBER_FLOAT; - } else if (searchValue instanceof String) { - return VALUE_STRING; - } else if (searchValue == null) { - return VALUE_NULL; - } else if (searchValue instanceof Boolean) { - final boolean value = (boolean) searchValue; - return value ? VALUE_TRUE : VALUE_FALSE; - } - throw new KsqlFunctionException("Invalid Type for search value " + searchValue); - } -} diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/JsonArrayContains.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/JsonArrayContains.java new file mode 100644 index 000000000000..c1d7424b61ae --- /dev/null +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/JsonArrayContains.java @@ -0,0 +1,98 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"; you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */
+
+package io.confluent.ksql.function.udf.json;
+
+import static com.fasterxml.jackson.core.JsonFactory.Feature.CANONICALIZE_FIELD_NAMES;
+import static com.fasterxml.jackson.core.JsonToken.END_ARRAY;
+import static com.fasterxml.jackson.core.JsonToken.START_ARRAY;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_FALSE;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_NULL;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_NUMBER_FLOAT;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_NUMBER_INT;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_STRING;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_TRUE;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import io.confluent.ksql.function.udf.Udf;
+import io.confluent.ksql.function.udf.UdfDescription;
+import io.confluent.ksql.function.udf.UdfParameter;
+import io.confluent.ksql.json.JsonMapper;
+import java.io.IOException;
+import java.util.EnumMap;
+import java.util.Objects;
+import java.util.function.Predicate;
+
+@UdfDescription(
+    name = "JSON_ARRAY_CONTAINS",
+    description = JsonArrayContains.DESCRIPTION
+)
+public class JsonArrayContains {
+
+  static final String DESCRIPTION = "Parses a JSON array and determines whether or not the "
+      + "supplied value is contained within the array.";
+
+  private static final JsonFactory PARSER_FACTORY = new JsonFactory()
+      .setCodec(JsonMapper.INSTANCE.mapper)
+      .disable(CANONICALIZE_FIELD_NAMES);
+
+  private static final EnumMap> TOKEN_COMPAT;
+
+  static {
+    TOKEN_COMPAT = new EnumMap<>(JsonToken.class);
+    TOKEN_COMPAT.put(VALUE_NUMBER_INT, obj -> obj instanceof Long || obj instanceof Integer);
+    TOKEN_COMPAT.put(VALUE_NUMBER_FLOAT, Double.class::isInstance);
+    TOKEN_COMPAT.put(VALUE_STRING, String.class::isInstance);
+    TOKEN_COMPAT.put(VALUE_TRUE, obj -> obj instanceof Boolean && (Boolean) obj);
+    TOKEN_COMPAT.put(VALUE_FALSE, obj -> obj instanceof Boolean && !(Boolean) obj);
+    TOKEN_COMPAT.put(VALUE_NULL, Objects::isNull);
+  }
+
+  @Udf
+  public Boolean contains(
+      @UdfParameter final String jsonArray,
+      @UdfParameter final T val
+  ) {
+    try (JsonParser parser = PARSER_FACTORY.createParser(jsonArray)) {
+      if (parser.nextToken() != START_ARRAY) {
+        return false;
+      }
+
+      while (parser.nextToken() != null) {
+        final JsonToken token = parser.currentToken();
+
+        if (token == null) {
+          return val == null;
+        } else if (token == END_ARRAY) {
+          return false;
+        }
+
+        parser.skipChildren();
+        if (TOKEN_COMPAT.getOrDefault(token, foo -> false).test(val)) {
+          if (token == VALUE_NULL || Objects.equals(parser.readValueAs(val.getClass()), val)) {
+            return true;
+          }
+        }
+      }
+
+      return false;
+    } catch (IOException e) {
+      return false;
+    }
+  }
+
+}
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/list/ArrayContains.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/list/ArrayContains.java
new file mode 100644
index 000000000000..9d2f6017d5dd
--- /dev/null
+++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/list/ArrayContains.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License.
You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.function.udf.list; + +import io.confluent.ksql.function.udf.Udf; +import io.confluent.ksql.function.udf.UdfDescription; +import io.confluent.ksql.function.udf.UdfParameter; +import java.util.List; + +@UdfDescription( + name = "ARRAY_CONTAINS", + description = ArrayContains.DESCRIPTION +) +public class ArrayContains { + + static final String DESCRIPTION = "Returns true if the array is non-null and contains the " + + "supplied value."; + + @Udf + public boolean contains( + @UdfParameter List array, + @UdfParameter T val + ) { + return array != null && array.contains(val); + } + +} diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java index ae875ae6242a..874b541d8ea4 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java @@ -321,7 +321,7 @@ public void shouldHaveBuiltInUDFRegistered() { // Math UDF "RANDOM", // JSON UDF - "EXTRACTJSONFIELD", "ARRAYCONTAINS" + "EXTRACTJSONFIELD" ); Collection names = Collections2.transform(functionRegistry.listFunctions(), diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/ArrayContainsKudfTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/ArrayContainsKudfTest.java deleted file mode 100644 index 1c62e1d0b625..000000000000 --- a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/ArrayContainsKudfTest.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright 2018 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.function.udf.json; - -import static org.junit.Assert.assertEquals; - -import java.util.Arrays; -import java.util.Collections; -import org.junit.Test; - -public class ArrayContainsKudfTest -{ - private ArrayContainsKudf jsonUdf = new ArrayContainsKudf(); - - @Test - public void shouldReturnFalseOnEmptyArray() { - assertEquals(false, jsonUdf.evaluate("[]", true)); - assertEquals(false, jsonUdf.evaluate("[]", false)); - assertEquals(false, jsonUdf.evaluate("[]", null)); - assertEquals(false, jsonUdf.evaluate("[]", 1.0)); - assertEquals(false, jsonUdf.evaluate("[]", 100)); - assertEquals(false, jsonUdf.evaluate("[]", "abc")); - assertEquals(false, jsonUdf.evaluate("[]", "")); - } - - @Test - public void shouldNotFindValuesInNullArray() { - assertEquals(true, jsonUdf.evaluate("[null]", null)); - assertEquals(false, jsonUdf.evaluate("[null]", "null")); - assertEquals(false, jsonUdf.evaluate("[null]", true)); - assertEquals(false, jsonUdf.evaluate("[null]", false)); - assertEquals(false, jsonUdf.evaluate("[null]", 1.0)); - assertEquals(false, jsonUdf.evaluate("[null]", 100)); - assertEquals(false, jsonUdf.evaluate("[null]", "abc")); - assertEquals(false, jsonUdf.evaluate("[null]", "")); - } - - @Test - public void shouldFindIntegersInJsonArray() { - final String json = "[2147483647, {\"ab\":null }, -2147483648, 1, 2, 3, null, [4], 4]"; - assertEquals(true, jsonUdf.evaluate(json, 2147483647)); - assertEquals(true, jsonUdf.evaluate(json, -2147483648)); - assertEquals(true, jsonUdf.evaluate(json, 1)); - assertEquals(true, jsonUdf.evaluate(json, 2)); - assertEquals(true, jsonUdf.evaluate(json, 3)); - assertEquals(false, jsonUdf.evaluate("5", 5)); - assertEquals(false, jsonUdf.evaluate(json, 5)); - } - - @Test - public void shouldFindLongsInJsonArray() { - assertEquals(true, jsonUdf.evaluate("[1]", 1L)); - assertEquals(true, jsonUdf.evaluate("[1111111111111111]", 1111111111111111L)); - assertEquals(true, jsonUdf.evaluate("[[222222222222222], 33333]", 33333L)); - assertEquals(true, jsonUdf.evaluate("[{}, \"abc\", null, 1]", 1L)); - assertEquals(false, jsonUdf.evaluate("[[222222222222222], 33333]", 222222222222222L)); - assertEquals(false, jsonUdf.evaluate("[{}, \"abc\", null, [1]]", 1L)); - assertEquals(false, jsonUdf.evaluate("[{}, \"abc\", null, {\"1\":1}]", 1L)); - } - - @Test - public void shouldFindDoublesInJsonArray() { - assertEquals(true, jsonUdf.evaluate("[-1.0, 2.0, 3.0]", 2.0)); - assertEquals(true, jsonUdf.evaluate("[1.0, -2.0, 3.0]", -2.0)); - assertEquals(true, jsonUdf.evaluate("[1.0, 2.0, 1.6E3]", 1600.0)); - assertEquals(true, jsonUdf.evaluate("[1.0, 2.0, -1.6E3]", -1600.0)); - assertEquals(true, jsonUdf.evaluate("[{}, \"abc\", null, -2.0]", -2.0)); - assertEquals(false, jsonUdf.evaluate("[[2.0], 3.0]", 2.0)); - } - - @Test - public void shouldFindStringsInJsonArray() { - assertEquals(true, jsonUdf.evaluate("[\"abc\"]", "abc")); - assertEquals(true, jsonUdf.evaluate("[\"cbda\", \"abc\"]", "abc")); - assertEquals(true, jsonUdf.evaluate("[{}, \"abc\", null, 1]", "abc")); - assertEquals(true, jsonUdf.evaluate("[\"\"]", "")); - assertEquals(false, jsonUdf.evaluate("[\"\"]", null)); - assertEquals(false, jsonUdf.evaluate("[1,2,3]", "1")); - assertEquals(false, jsonUdf.evaluate("[null]", "")); - assertEquals(false, jsonUdf.evaluate("[\"abc\", \"dba\"]", "abd")); - } - - @Test - public void shouldFindBooleansInJsonArray() { - assertEquals(true, jsonUdf.evaluate("[false, false, true, false]", true)); - assertEquals(true, jsonUdf.evaluate("[true, true, false]", 
false)); - assertEquals(false, jsonUdf.evaluate("[true, true]", false)); - assertEquals(false, jsonUdf.evaluate("[false, false]", true)); - } - - @Test - public void shouldReturnFalseOnEmptyList() { - assertEquals(false, jsonUdf.evaluate(Collections.emptyList(), true)); - assertEquals(false, jsonUdf.evaluate(Collections.emptyList(), false)); - assertEquals(false, jsonUdf.evaluate(Collections.emptyList(), null)); - assertEquals(false, jsonUdf.evaluate(Collections.emptyList(), 1.0)); - assertEquals(false, jsonUdf.evaluate(Collections.emptyList(), 100)); - assertEquals(false, jsonUdf.evaluate(Collections.emptyList(), "abc")); - assertEquals(false, jsonUdf.evaluate(Collections.emptyList(), "")); - } - - @Test - public void shouldNotFindValuesInNullListElements() { - assertEquals(true, jsonUdf.evaluate(Collections.singletonList(null), null)); - assertEquals(false, jsonUdf.evaluate(Collections.singletonList(null), "null")); - assertEquals(false, jsonUdf.evaluate(Collections.singletonList(null), true)); - assertEquals(false, jsonUdf.evaluate(Collections.singletonList(null), false)); - assertEquals(false, jsonUdf.evaluate(Collections.singletonList(null), 1.0)); - assertEquals(false, jsonUdf.evaluate(Collections.singletonList(null), 100)); - assertEquals(false, jsonUdf.evaluate(Collections.singletonList(null), "abc")); - assertEquals(false, jsonUdf.evaluate(Collections.singletonList(null), "")); - } - - @Test - public void shouldFindStringInList() { - assertEquals(true, jsonUdf.evaluate(Arrays.asList("abc", "bd", "DC"), "DC")); - assertEquals(false, jsonUdf.evaluate(Arrays.asList("abc", "bd", "DC"), "dc")); - assertEquals(false, jsonUdf.evaluate(Arrays.asList("abc", "bd", "1"), 1)); - } - - @Test - public void shouldFindIntegersInList() { - assertEquals(true, jsonUdf.evaluate(Arrays.asList(1, 2, 3), 2)); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1, 2, 3), 0)); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1, 2, 3), "1")); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1, 2, 3), "aa")); - } - - @Test - public void shouldFindLongInList() { - assertEquals(true, jsonUdf.evaluate(Arrays.asList(1L, 2L, 3L), 2L)); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1L, 2L, 3L), 0L)); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1L, 2L, 3L), "1")); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1L, 2L, 3L), "aaa")); - } - - @Test - public void shouldFindDoublesInList() { - assertEquals(true, jsonUdf.evaluate(Arrays.asList(1.0, 2.0, 3.0), 2.0)); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1.0, 2.0, 3.0), 4.0)); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1.0, 2.0, 3.0), "1")); - assertEquals(false, jsonUdf.evaluate(Arrays.asList(1.0, 2.0, 3.0), "aaa")); - } -} \ No newline at end of file diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/JsonArrayContainsTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/JsonArrayContainsTest.java new file mode 100644 index 000000000000..5c092c208f34 --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/JsonArrayContainsTest.java @@ -0,0 +1,104 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.function.udf.json; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +public class JsonArrayContainsTest +{ + private JsonArrayContains jsonUdf = new JsonArrayContains(); + + @Test + public void shouldReturnFalseOnEmptyArray() { + assertEquals(false, jsonUdf.contains("[]", true)); + assertEquals(false, jsonUdf.contains("[]", false)); + assertEquals(false, jsonUdf.contains("[]", null)); + assertEquals(false, jsonUdf.contains("[]", 1.0)); + assertEquals(false, jsonUdf.contains("[]", 100)); + assertEquals(false, jsonUdf.contains("[]", "abc")); + assertEquals(false, jsonUdf.contains("[]", "")); + } + + @Test + public void shouldNotFindValuesInNullArray() { + assertEquals(true, jsonUdf.contains("[null]", null)); + assertEquals(false, jsonUdf.contains("[null]", "null")); + assertEquals(false, jsonUdf.contains("[null]", true)); + assertEquals(false, jsonUdf.contains("[null]", false)); + assertEquals(false, jsonUdf.contains("[null]", 1.0)); + assertEquals(false, jsonUdf.contains("[null]", 100)); + assertEquals(false, jsonUdf.contains("[null]", "abc")); + assertEquals(false, jsonUdf.contains("[null]", "")); + } + + @Test + public void shouldFindIntegersInJsonArray() { + final String json = "[2147483647, {\"ab\":null }, -2147483648, 1, 2, 3, null, [4], 4]"; + assertEquals(true, jsonUdf.contains(json, 2147483647)); + assertEquals(true, jsonUdf.contains(json, -2147483648)); + assertEquals(true, jsonUdf.contains(json, 1)); + assertEquals(true, jsonUdf.contains(json, 2)); + assertEquals(true, jsonUdf.contains(json, 3)); + assertEquals(false, jsonUdf.contains("5", 5)); + assertEquals(false, jsonUdf.contains(json, 5)); + } + + @Test + public void shouldFindLongsInJsonArray() { + assertEquals(true, jsonUdf.contains("[1]", 1L)); + assertEquals(true, jsonUdf.contains("[1111111111111111]", 1111111111111111L)); + assertEquals(true, jsonUdf.contains("[[222222222222222], 33333]", 33333L)); + assertEquals(true, jsonUdf.contains("[{}, \"abc\", null, 1]", 1L)); + assertEquals(false, jsonUdf.contains("[[222222222222222], 33333]", 222222222222222L)); + assertEquals(false, jsonUdf.contains("[{}, \"abc\", null, [1]]", 1L)); + assertEquals(false, jsonUdf.contains("[{}, \"abc\", null, {\"1\":1}]", 1L)); + assertEquals(false, jsonUdf.contains("[1]", 1.0)); + } + + @Test + public void shouldFindDoublesInJsonArray() { + assertEquals(true, jsonUdf.contains("[-1.0, 2.0, 3.0]", 2.0)); + assertEquals(true, jsonUdf.contains("[1.0, -2.0, 3.0]", -2.0)); + assertEquals(true, jsonUdf.contains("[1.0, 2.0, 1.6E3]", 1600.0)); + assertEquals(true, jsonUdf.contains("[1.0, 2.0, -1.6E3]", -1600.0)); + assertEquals(true, jsonUdf.contains("[{}, \"abc\", null, -2.0]", -2.0)); + assertEquals(false, jsonUdf.contains("[[2.0], 3.0]", 2.0)); + } + + @Test + public void shouldFindStringsInJsonArray() { + assertEquals(true, jsonUdf.contains("[\"abc\"]", "abc")); + assertEquals(true, jsonUdf.contains("[\"cbda\", \"abc\"]", "abc")); + assertEquals(true, jsonUdf.contains("[{}, \"abc\", null, 1]", "abc")); + assertEquals(true, jsonUdf.contains("[\"\"]", "")); + assertEquals(false, 
jsonUdf.contains("[\"\"]", null));
+        assertEquals(false, jsonUdf.contains("[1,2,3]", "1"));
+        assertEquals(false, jsonUdf.contains("[null]", ""));
+        assertEquals(false, jsonUdf.contains("[\"abc\", \"dba\"]", "abd"));
+    }
+
+    @Test
+    public void shouldFindBooleansInJsonArray() {
+        assertEquals(true, jsonUdf.contains("[false, false, true, false]", true));
+        assertEquals(true, jsonUdf.contains("[true, true, false]", false));
+        assertEquals(false, jsonUdf.contains("[true, true]", false));
+        assertEquals(false, jsonUdf.contains("[false, false]", true));
+    }
+
+}
\ No newline at end of file
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/list/ArrayContainsTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/list/ArrayContainsTest.java
new file mode 100644
index 000000000000..14c4b682513d
--- /dev/null
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/list/ArrayContainsTest.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.function.udf.list;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.Collections;
+import org.junit.Test;
+
+public class ArrayContainsTest {
+
+  private ArrayContains udf = new ArrayContains();
+
+  @Test
+  public void shouldReturnFalseOnEmptyList() {
+    assertFalse(udf.contains(Collections.emptyList(), true));
+    assertFalse(udf.contains(Collections.emptyList(), false));
+    assertFalse(udf.contains(Collections.emptyList(), null));
+    assertFalse(udf.contains(Collections.emptyList(), 1.0));
+    assertFalse(udf.contains(Collections.emptyList(), 100));
+    assertFalse(udf.contains(Collections.emptyList(), "abc"));
+    assertFalse(udf.contains(Collections.emptyList(), ""));
+  }
+
+  @Test
+  public void shouldNotFindValuesInNullListElements() {
+    assertTrue(udf.contains(Collections.singletonList(null), null));
+    assertFalse(udf.contains(Collections.singletonList(null), "null"));
+    assertFalse(udf.contains(Collections.singletonList(null), true));
+    assertFalse(udf.contains(Collections.singletonList(null), false));
+    assertFalse(udf.contains(Collections.singletonList(null), 1.0));
+    assertFalse(udf.contains(Collections.singletonList(null), 100));
+    assertFalse(udf.contains(Collections.singletonList(null), "abc"));
+    assertFalse(udf.contains(Collections.singletonList(null), ""));
+    assertFalse(udf.contains(null, "null"));
+  }
+
+  @Test
+  public void shouldFindStringInList() {
+    assertTrue(udf.contains(Arrays.asList("abc", "bd", "DC"), "DC"));
+    assertFalse(udf.contains(Arrays.asList("abc", "bd", "DC"), "dc"));
+    assertFalse(udf.contains(Arrays.asList("abc", "bd", "1"), 1));
+  }
+
+  @Test
+  public void shouldFindIntegersInList() {
+    assertTrue(udf.contains(Arrays.asList(1, 2, 3), 2));
+    assertFalse(udf.contains(Arrays.asList(1, 2, 3), 0));
+    assertFalse(udf.contains(Arrays.asList(1, 2, 3), "1"));
+    assertFalse(udf.contains(Arrays.asList(1, 2, 3), "aa"));
+  }
+
+  @Test
+  public void shouldFindLongInList() {
+    assertTrue(udf.contains(Arrays.asList(1L, 2L, 3L), 2L));
+    assertFalse(udf.contains(Arrays.asList(1L, 2L, 3L), 0L));
+    assertFalse(udf.contains(Arrays.asList(1L, 2L, 3L), "1"));
+    assertFalse(udf.contains(Arrays.asList(1L, 2L, 3L), "aaa"));
+  }
+
+  @Test
+  public void shouldFindDoublesInList() {
+    assertTrue(udf.contains(Arrays.asList(1.0, 2.0, 3.0), 2.0));
+    assertFalse(udf.contains(Arrays.asList(1.0, 2.0, 3.0), 4.0));
+    assertFalse(udf.contains(Arrays.asList(1.0, 2.0, 3.0), "1"));
+    assertFalse(udf.contains(Arrays.asList(1.0, 2.0, 3.0), "aaa"));
+  }
+
+}
\ No newline at end of file
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/arraycontains.json b/ksql-functional-tests/src/test/resources/query-validation-tests/arraycontains.json
index b7bd92e6d879..ceecaafd216c 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/arraycontains.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/arraycontains.json
@@ -1,20 +1,21 @@
 {
   "comments": [
-    "Tests covering the use of the ARRAYCONTAINS function."
+    "Tests covering the use of the ARRAY_CONTAINS function."
  ],
  "tests": [
    {
      "name": "filter rows where the ARRAY column contains a specified STRING",
      "statements": [
        "CREATE STREAM test (colors ARRAY<VARCHAR>) WITH (kafka_topic='test_topic', value_format='JSON');",
-        "CREATE STREAM OUTPUT AS SELECT colors FROM test WHERE ARRAYCONTAINS(colors, 'Pink');"
+        "CREATE STREAM OUTPUT AS SELECT colors FROM test WHERE ARRAY_CONTAINS(colors, 'Pink');"
      ],
      "inputs": [
        {"topic": "test_topic", "key": "1", "value": {"colors": ["Red", "Green"]}, "timestamp": 0},
        {"topic": "test_topic", "key": "1", "value": {"colors": ["Black"]}, "timestamp": 0},
        {"topic": "test_topic", "key": "1", "value": {"colors": ["Pink", "Yellow", "Pink"]}, "timestamp": 0},
        {"topic": "test_topic", "key": "1", "value": {"colors": ["White", "Pink"]}, "timestamp": 0},
-        {"topic": "test_topic", "key": "1", "value": {"colors": ["Pink", null]}, "timestamp": 0}
+        {"topic": "test_topic", "key": "1", "value": {"colors": ["Pink", null]}, "timestamp": 0},
+        {"topic": "test_topic", "key": "1", "value": {"colors": null}, "timestamp": 0}
      ],
      "outputs": [
        {"topic": "OUTPUT", "key": "1", "value": {"COLORS":["Pink", "Yellow", "Pink"]}, "timestamp": 0},
@@ -26,14 +27,15 @@
      "name": "filter rows where the STRUCT->ARRAY column contains a specified STRING",
      "statements": [
        "CREATE STREAM test (c1 STRUCT<colors ARRAY<VARCHAR>>) WITH (kafka_topic='test_topic', value_format='JSON');",
-        "CREATE STREAM OUTPUT AS SELECT c1->colors AS colors FROM test WHERE ARRAYCONTAINS(c1->colors, 'Pink');"
+        "CREATE STREAM OUTPUT AS SELECT c1->colors AS colors FROM test WHERE ARRAY_CONTAINS(c1->colors, 'Pink');"
      ],
      "inputs": [
       {"topic": "test_topic", "key": "1", "value": {"c1":{"colors": ["Red", "Green"]}}, "timestamp": 0},
       {"topic": "test_topic", "key": "1", "value": {"c1":{"colors": ["Black"]}}, "timestamp": 0},
       {"topic": "test_topic", "key": "1", "value": {"c1":{"colors": ["Pink", "Yellow", "Pink"]}}, "timestamp": 0},
       {"topic": "test_topic", "key": "1", "value": {"c1":{"colors": ["White", "Pink"]}}, "timestamp": 0},
-       {"topic": "test_topic", "key": "1", "value": {"c1":{"colors": ["Pink", null]}}, "timestamp": 0}
+       {"topic": "test_topic", "key": "1", "value": {"c1":{"colors": ["Pink", null]}}, "timestamp": 0},
+       {"topic": "test_topic", "key": "1", "value": {"c1":{"colors": null}}, "timestamp": 0}
      ],
      "outputs": [
        {"topic": "OUTPUT", "key": "1", "value": {"COLORS":["Pink", "Yellow", "Pink"]}, "timestamp": 0},
"key": "1", "value": {"COLORS":["Pink", "Yellow", "Pink"]}, "timestamp": 0}, diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/json_array_contains.json b/ksql-functional-tests/src/test/resources/query-validation-tests/json_array_contains.json new file mode 100644 index 000000000000..96df265199e2 --- /dev/null +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/json_array_contains.json @@ -0,0 +1,27 @@ +{ + "comments": [ + "Tests covering the use of the JSON_ARRAY_CONTAINS function." + ], + "tests": [ + { + "name": "filter rows where the ARRAY column contains a specified STRING", + "statements": [ + "CREATE STREAM test (colors STRING) WITH (kafka_topic='test_topic', value_format='JSON');", + "CREATE STREAM OUTPUT AS SELECT colors FROM test WHERE JSON_ARRAY_CONTAINS(colors, 'Pink');" + ], + "inputs": [ + {"topic": "test_topic", "key": "1", "value": {"colors": "[\"Red\", \"Green\"]"}, "timestamp": 0}, + {"topic": "test_topic", "key": "1", "value": {"colors": "[\"Black\"]"}, "timestamp": 0}, + {"topic": "test_topic", "key": "1", "value": {"colors": "[\"Pink\", \"Yellow\", \"Pink\"]"}, "timestamp": 0}, + {"topic": "test_topic", "key": "1", "value": {"colors": "[\"White\", \"Pink\"]"}, "timestamp": 0}, + {"topic": "test_topic", "key": "1", "value": {"colors": "[\"Pink\", null]"}, "timestamp": 0}, + {"topic": "test_topic", "key": "1", "value": {"colors": null, "timestamp": 0}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "1", "value": {"COLORS":"[\"Pink\", \"Yellow\", \"Pink\"]"}, "timestamp": 0}, + {"topic": "OUTPUT", "key": "1", "value": {"COLORS":"[\"White\", \"Pink\"]"}, "timestamp": 0}, + {"topic": "OUTPUT", "key": "1", "value": {"COLORS":"[\"Pink\", null]"}, "timestamp": 0} + ] + } + ] +} \ No newline at end of file diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListFunctionsExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListFunctionsExecutorTest.java index 5b44c87d2b43..a377cf0c7bb1 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListFunctionsExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListFunctionsExecutorTest.java @@ -50,8 +50,6 @@ public void shouldListFunctions() { // Then: Collection functions = functionList.getFunctions(); assertThat(functions, hasItems( - new SimpleFunctionInfo("EXTRACTJSONFIELD", FunctionType.SCALAR), - new SimpleFunctionInfo("ARRAYCONTAINS", FunctionType.SCALAR), new SimpleFunctionInfo("CONCAT", FunctionType.SCALAR), new SimpleFunctionInfo("TOPK", FunctionType.AGGREGATE), new SimpleFunctionInfo("MAX", FunctionType.AGGREGATE), diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java index 67210bd9f966..66b54559ef54 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -422,8 +422,6 @@ public void shouldListFunctions() { // Then: assertThat(functionList.getFunctions(), hasItems( - new SimpleFunctionInfo("EXTRACTJSONFIELD", FunctionType.SCALAR), - new SimpleFunctionInfo("ARRAYCONTAINS", FunctionType.SCALAR), new SimpleFunctionInfo("CONCAT", FunctionType.SCALAR), new SimpleFunctionInfo("TOPK", FunctionType.AGGREGATE), new SimpleFunctionInfo("MAX", FunctionType.AGGREGATE) From 
---
 .../io/confluent/ksql/parser/SqlBase.g4       |  1 -
 .../io/confluent/ksql/parser/AstBuilder.java  |  6 --
 .../confluent/ksql/parser/tree/RunScript.java | 50 ------------
 .../ksql/parser/tree/RunScriptTest.java       | 38 ---------
 .../InteractiveStatementExecutor.java         |  3 -
 .../rest/server/execution/RequestHandler.java | 56 +++-----------
 .../server/validation/RequestValidator.java   | 31 +-------
 .../InteractiveStatementExecutorTest.java     | 77 -------------------
 .../server/execution/RequestHandlerTest.java  | 52 -------------
 .../server/resources/KsqlResourceTest.java    | 32 --------
 .../validation/RequestValidatorTest.java      | 27 -------
 11 files changed, 12 insertions(+), 361 deletions(-)
 delete mode 100644 ksql-parser/src/main/java/io/confluent/ksql/parser/tree/RunScript.java
 delete mode 100644 ksql-parser/src/test/java/io/confluent/ksql/parser/tree/RunScriptTest.java

diff --git a/ksql-parser/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 b/ksql-parser/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4
index aa85babceef3..a87a44af7831 100644
--- a/ksql-parser/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4
+++ b/ksql-parser/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4
@@ -66,7 +66,6 @@ statement
     | DROP TABLE (IF EXISTS)? sourceName (DELETE TOPIC)?    #dropTable
     | DROP CONNECTOR identifier                             #dropConnector
     | EXPLAIN (statement | identifier)                      #explain
-    | RUN SCRIPT STRING                                     #runScript
     | CREATE TYPE identifier AS type                        #registerType
     | DROP TYPE identifier                                  #dropType
     ;
diff --git a/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java b/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java
index 8500c005f021..68775ee6204b 100644
--- a/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java
+++ b/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java
@@ -109,7 +109,6 @@
 import io.confluent.ksql.parser.tree.RegisterType;
 import io.confluent.ksql.parser.tree.Relation;
 import io.confluent.ksql.parser.tree.ResultMaterialization;
-import io.confluent.ksql.parser.tree.RunScript;
 import io.confluent.ksql.parser.tree.Select;
 import io.confluent.ksql.parser.tree.SelectItem;
 import io.confluent.ksql.parser.tree.SetProperty;
@@ -571,11 +570,6 @@ public Node visitSelectSingle(final SqlBaseParser.SelectSingleContext context) {
     }
   }
 
-  @Override
-  public Node visitRunScript(final SqlBaseParser.RunScriptContext context) {
-    return new RunScript(getLocation(context));
-  }
-
   @Override
   public Node visitListTopics(final SqlBaseParser.ListTopicsContext context) {
     return new ListTopics(getLocation(context), context.EXTENDED() != null);
diff --git a/ksql-parser/src/main/java/io/confluent/ksql/parser/tree/RunScript.java b/ksql-parser/src/main/java/io/confluent/ksql/parser/tree/RunScript.java
deleted file mode 100644
index 1f80f7547900..000000000000
--- a/ksql-parser/src/main/java/io/confluent/ksql/parser/tree/RunScript.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2018 Confluent Inc.
- *
- * Licensed under the Confluent Community License (the "License"); you may not use
- * this file except in compliance with the License. You may obtain a copy of the
- * License at
- *
- * http://www.confluent.io/confluent-community-license
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-
-package io.confluent.ksql.parser.tree;
-
-import com.google.common.base.MoreObjects;
-import com.google.errorprone.annotations.Immutable;
-import io.confluent.ksql.parser.NodeLocation;
-import java.util.Objects;
-import java.util.Optional;
-
-@Immutable
-public class RunScript extends Statement {
-
-  public RunScript(final Optional<NodeLocation> location) {
-    super(location);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(getClass());
-  }
-
-  @Override
-  public boolean equals(final Object obj) {
-    if (this == obj) {
-      return true;
-    }
-
-    return obj != null && obj.getClass().equals(getClass());
-  }
-
-  @Override
-  public String toString() {
-    return MoreObjects.toStringHelper(this)
-        .toString();
-  }
-}
diff --git a/ksql-parser/src/test/java/io/confluent/ksql/parser/tree/RunScriptTest.java b/ksql-parser/src/test/java/io/confluent/ksql/parser/tree/RunScriptTest.java
deleted file mode 100644
index 1edc1f48beea..000000000000
--- a/ksql-parser/src/test/java/io/confluent/ksql/parser/tree/RunScriptTest.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2019 Confluent Inc.
- *
- * Licensed under the Confluent Community License (the "License"); you may not use
- * this file except in compliance with the License. You may obtain a copy of the
- * License at
- *
- * http://www.confluent.io/confluent-community-license
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */ - -package io.confluent.ksql.parser.tree; - -import com.google.common.testing.EqualsTester; -import io.confluent.ksql.parser.NodeLocation; -import java.util.Optional; -import org.junit.Test; - -public class RunScriptTest { - - public static final NodeLocation SOME_LOCATION = new NodeLocation(0, 0); - public static final NodeLocation OTHER_LOCATION = new NodeLocation(1, 0); - - @Test - public void shouldImplementHashCodeAndEqualsProperty() { - new EqualsTester() - .addEqualityGroup( - // Note: At the moment location does not take part in equality testing - new RunScript(Optional.of(SOME_LOCATION)), - new RunScript(Optional.of(OTHER_LOCATION)) - ) - .testEquals(); - } -} \ No newline at end of file diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java index ca41d8769ff9..8cf58aa3faaa 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java @@ -25,7 +25,6 @@ import io.confluent.ksql.parser.tree.CreateTableAsSelect; import io.confluent.ksql.parser.tree.ExecutableDdlStatement; import io.confluent.ksql.parser.tree.InsertInto; -import io.confluent.ksql.parser.tree.RunScript; import io.confluent.ksql.parser.tree.TerminateQuery; import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan; import io.confluent.ksql.query.QueryId; @@ -244,8 +243,6 @@ private void executeStatement( } else if (statement.getStatement() instanceof TerminateQuery) { terminateQuery((PreparedStatement) statement); successMessage = "Query terminated."; - } else if (statement.getStatement() instanceof RunScript) { - handleLegacyRunScript(command, mode); } else { throw new KsqlException(String.format( "Unexpected statement type: %s", diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java index 7b52c488a9b3..bf9f1c479c6e 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java @@ -15,11 +15,9 @@ package io.confluent.ksql.rest.server.execution; -import com.google.common.collect.Iterables; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.parser.KsqlParser.ParsedStatement; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.tree.RunScript; import io.confluent.ksql.parser.tree.Statement; import io.confluent.ksql.rest.entity.KsqlEntity; import io.confluent.ksql.rest.entity.KsqlEntityList; @@ -27,8 +25,6 @@ import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlConstants; -import io.confluent.ksql.util.KsqlStatementException; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -79,29 +75,16 @@ public KsqlEntityList execute( final KsqlEntityList entities = new KsqlEntityList(); for (ParsedStatement parsed : statements) { final PreparedStatement prepared = ksqlEngine.prepare(parsed); - if (prepared.getStatement() instanceof RunScript) { - final KsqlEntityList result = executeRunScript( - serviceContext, - prepared, - 
propertyOverrides
-        );
-        if (!result.isEmpty()) {
-          // This is to maintain backwards compatibility until we deprecate
-          // RunScript in the next major release - the expected behavior was
-          // to return only the last entity
-          entities.add(Iterables.getLast(result));
-        }
-      } else {
-        final ConfiguredStatement configured = ConfiguredStatement.of(
-            prepared, scopedPropertyOverrides, ksqlConfig);
-        executeStatement(
-            serviceContext,
-            configured,
-            parsed,
-            scopedPropertyOverrides,
-            entities
-        ).ifPresent(entities::add);
-      }
+      final ConfiguredStatement configured = ConfiguredStatement.of(
+          prepared, scopedPropertyOverrides, ksqlConfig);
+
+      executeStatement(
+          serviceContext,
+          configured,
+          parsed,
+          scopedPropertyOverrides,
+          entities
+      ).ifPresent(entities::add);
     }
     return entities;
   }
@@ -131,23 +114,4 @@ private Optional executeStatement(
     );
   }
 
-  private KsqlEntityList executeRunScript(
-      final ServiceContext serviceContext,
-      final PreparedStatement statement,
-      final Map propertyOverrides
-  ) {
-    final String sql = (String) propertyOverrides
-        .get(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT);
-
-    if (sql == null) {
-      throw new KsqlStatementException(
-          "Request is missing script content", statement.getStatementText());
-    }
-
-    return execute(
-        serviceContext,
-        ksqlEngine.parse(sql),
-        propertyOverrides
-    );
-  }
 }
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java
index c07602fa6b39..7716e7f49b5a 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java
@@ -24,14 +24,12 @@
 import io.confluent.ksql.parser.KsqlParser.PreparedStatement;
 import io.confluent.ksql.parser.tree.CreateAsSelect;
 import io.confluent.ksql.parser.tree.InsertInto;
-import io.confluent.ksql.parser.tree.RunScript;
 import io.confluent.ksql.parser.tree.Statement;
 import io.confluent.ksql.rest.util.QueryCapacityUtil;
 import io.confluent.ksql.services.ServiceContext;
 import io.confluent.ksql.statement.ConfiguredStatement;
 import io.confluent.ksql.statement.Injector;
 import io.confluent.ksql.util.KsqlConfig;
-import io.confluent.ksql.util.KsqlConstants;
 import io.confluent.ksql.util.KsqlException;
 import io.confluent.ksql.util.KsqlStatementException;
 import java.util.HashMap;
@@ -39,8 +37,6 @@
 import java.util.Map;
 import java.util.function.BiFunction;
 import java.util.function.Function;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Wraps an execution context and information about how to validate statements
 * .
 */
 public class RequestValidator {
 
-  private static final Logger LOG = LoggerFactory.getLogger(RequestValidator.class);
-
   private final Map<Class<? extends Statement>, StatementValidator<?>> customValidators;
   private final BiFunction injectorFactory;
   private final Function snapshotSupplier;
@@ -108,9 +102,8 @@ public int validate(
     final ConfiguredStatement configured = ConfiguredStatement.of(
         prepared, scopedPropertyOverrides, ksqlConfig);
 
-    numPersistentQueries += (prepared.getStatement() instanceof RunScript)
-        ?
validateRunScript(serviceContext, configured, scopedPropertyOverrides, ctx) - : validate(serviceContext, configured, scopedPropertyOverrides, ctx, injector); + numPersistentQueries += + validate(serviceContext, configured, scopedPropertyOverrides, ctx, injector); } if (QueryCapacityUtil.exceedsPersistentQueryCapacity(ctx, ksqlConfig, numPersistentQueries)) { @@ -154,24 +147,4 @@ private int validate( return (statement instanceof CreateAsSelect || statement instanceof InsertInto) ? 1 : 0; } - private int validateRunScript( - final ServiceContext serviceContext, - final ConfiguredStatement statement, - final Map mutableScopedProperties, - final KsqlExecutionContext executionContext - ) { - final String sql = (String) statement.getOverrides() - .get(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT); - - if (sql == null) { - throw new KsqlStatementException( - "Request is missing script content", statement.getStatementText()); - } - - LOG.warn("RUN SCRIPT statement detected. " - + "Note: RUN SCRIPT is deprecated and will be removed in the next major version. " - + "statement: " + statement.getStatementText()); - - return validate(serviceContext, executionContext.parse(sql), mutableScopedProperties, sql); - } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java index dc2837aaa222..6856ab54b2cc 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java @@ -44,12 +44,9 @@ import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.metastore.MetaStoreImpl; import io.confluent.ksql.name.SourceName; -import io.confluent.ksql.parser.KsqlParser.ParsedStatement; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.SqlBaseParser.SingleStatementContext; import io.confluent.ksql.parser.tree.CreateStreamAsSelect; import io.confluent.ksql.parser.tree.DropStream; -import io.confluent.ksql.parser.tree.RunScript; import io.confluent.ksql.parser.tree.Statement; import io.confluent.ksql.parser.tree.TerminateQuery; import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan; @@ -67,7 +64,6 @@ import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlServerException; import io.confluent.ksql.util.KsqlStatementException; import io.confluent.ksql.util.Pair; import io.confluent.ksql.util.PersistentQueryMetadata; @@ -461,30 +457,6 @@ public void shouldEnforceReferentialIntegrity() { CoreMatchers.equalTo(CommandStatus.Status.SUCCESS)); } - @SuppressWarnings("unchecked") - private PersistentQueryMetadata mockReplayRunScript( - final String runScriptStatement, - final String queryStatement - ) { - final PersistentQueryMetadata mockQuery = mock(PersistentQueryMetadata.class); - final Statement mockRunScript = mock(RunScript.class); - final PreparedStatement runScript = - PreparedStatement.of(queryStatement, mockRunScript); - when(mockParser.parseSingleStatement(runScriptStatement)) - .thenReturn(runScript); - final ImmutableList parsedStatements = ImmutableList - .of(ParsedStatement.of(queryStatement, mock(SingleStatementContext.class))); - final PreparedStatement preparedStatement 
= - PreparedStatement.of(queryStatement, mock(Statement.class)); - when(mockEngine.parse(eq(queryStatement))).thenReturn(parsedStatements); - when(mockEngine.prepare(parsedStatements.get(0))) - .thenReturn((PreparedStatement)preparedStatement); - when(mockEngine.execute(eq(serviceContext), eqConfiguredStatement(preparedStatement))) - .thenReturn(ExecuteResult.of(mockQuery)); - when(mockEngine.getPersistentQueries()).thenReturn(ImmutableList.of()); - return mockQuery; - } - @Test public void shouldSkipStartWhenReplayingLog() { // Given: @@ -567,55 +539,6 @@ public void shouldNotCascadeDropStreamCommand() { ); } - @Test - public void shouldHandleLegacyRunScriptCommand() { - // Given: - final String runScriptStatement = "run script"; - final String queryStatement = "a query"; - final PersistentQueryMetadata mockQuery = mockReplayRunScript(runScriptStatement, queryStatement); - - // When: - statementExecutorWithMocks.handleStatement( - new QueuedCommand( - new CommandId(CommandId.Type.STREAM, "RunScript", CommandId.Action.EXECUTE), - new Command( - runScriptStatement, - Collections.singletonMap("ksql.run.script.statements", queryStatement), - emptyMap()), - Optional.empty(), - 0L - ) - ); - - // Then: - verify(mockQuery, times(1)).start(); - } - - @Test - public void shouldRestoreLegacyRunScriptCommand() { - // Given: - final String runScriptStatement = "run script"; - final String queryStatement = "a persistent query"; - final PersistentQueryMetadata mockQuery = mockReplayRunScript(runScriptStatement, queryStatement); - - // When: - statementExecutorWithMocks.handleRestore( - new QueuedCommand( - new CommandId(CommandId.Type.STREAM, "RunScript", CommandId.Action.EXECUTE), - new Command( - runScriptStatement, - Collections.singletonMap("ksql.run.script.statements", queryStatement), - emptyMap() - ), - Optional.empty(), - 0L - ) - ); - - // Then: - verify(mockQuery, times(0)).start(); - } - @Test public void shouldTerminateAll() { // Given: diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java index 0bcb4e104a72..12026386224e 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java @@ -196,58 +196,6 @@ public void shouldWaitForDistributedStatements() { verify(sync, times(3)).waitFor(any(), any()); } - @Test - public void shouldInlineRunScriptStatements() { - // Given: - final Map props = ImmutableMap.of( - KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT, - SOME_STREAM_SQL); - - final StatementExecutor customExecutor = givenReturningExecutor( - CreateStream.class, - (KsqlEntity) null); - givenRequestHandler(ImmutableMap.of(CreateStream.class, customExecutor)); - - // When: - final List statements = new DefaultKsqlParser() - .parse("RUN SCRIPT '/some/script.sql';" ); - handler.execute(serviceContext, statements, props); - - // Then: - verify(customExecutor, times(1)) - .execute( - argThat(is(configured(preparedStatementText(SOME_STREAM_SQL)))), - any(), - eq(ksqlEngine), - eq(serviceContext) - ); - } - - @Test - public void shouldOnlyReturnLastInRunScript() { - // Given: - final KsqlEntity entity1 = mock(KsqlEntity.class); - final KsqlEntity entity2 = mock(KsqlEntity.class); - - final Map props = ImmutableMap.of( - KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT, - SOME_STREAM_SQL - + "CREATE STREAM Y 
WITH (value_format='json', kafka_topic='y');");
-
-    final StatementExecutor customExecutor = givenReturningExecutor(
-        CreateStream.class, entity1, entity2);
-    givenRequestHandler(ImmutableMap.of(CreateStream.class, customExecutor));
-
-    final List statements = new DefaultKsqlParser()
-        .parse("RUN SCRIPT '/some/script.sql';" );
-
-    // When:
-    final KsqlEntityList result = handler.execute(serviceContext, statements, props);
-
-    // Then:
-    assertThat(result, contains(entity2));
-  }
-
   private void givenRequestHandler(
       final Map<Class<? extends Statement>, StatementExecutor<?>> executors) {
     handler = new RequestHandler(
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java
index 66b54559ef54..b9c512376480 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java
@@ -1845,34 +1845,6 @@ public void shouldFailIfCreateAsSelectExistingSourceTable() {
     makeSingleRequest(createSql, CommandStatusEntity.class);
   }
 
-  @Test
-  public void shouldInlineRunScriptStatements() {
-    // Given:
-    final Map props = ImmutableMap.of(
-        KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT,
-        "CREATE STREAM " + streamName + " AS SELECT * FROM test_stream;");
-
-    // When:
-    makeRequest("RUN SCRIPT '/some/script.sql';", props);
-
-    // Then:
-    verify(commandStore).enqueueCommand(
-        argThat(is(configured(preparedStatement(instanceOf(CreateStreamAsSelect.class))))),
-        any(Producer.class));
-  }
-
-  @Test
-  public void shouldThrowOnRunScriptStatementMissingScriptContent() {
-    // Then:
-    expectedException.expect(KsqlRestException.class);
-    expectedException.expect(exceptionStatusCode(is(Code.BAD_REQUEST)));
-    expectedException.expect(exceptionErrorMessage(errorMessage(is(
-        "Request is missing script content"))));
-
-    // When:
-    makeRequest("RUN SCRIPT '/some/script.sql';");
-  }
-
   @Test
   public void shouldThrowServerErrorOnFailedToDistribute() {
     // Given:
@@ -1995,10 +1967,6 @@ private void makeRequest(final String sql) {
     makeMultipleRequest(sql, KsqlEntity.class);
   }
 
-  private void makeRequest(final String sql, final Map props) {
-    makeMultipleRequest(sql, props, KsqlEntity.class);
-  }
-
   private T makeSingleRequest(
       final String sql,
       final Class expectedEntityType) {
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java
index 08d9e451c34b..c796eaf33b42 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java
@@ -224,33 +224,6 @@ public void shouldNotThrowIfManyNonPersistentQueries() {
     validator.validate(serviceContext, statements, ImmutableMap.of(), "sql");
   }
 
-  @Test
-  public void shouldValidateRunScript() {
-    // Given:
-    final Map props = ImmutableMap.of(
-        KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT,
-        SOME_STREAM_SQL);
-
-    givenRequestValidator(
-        ImmutableMap.of(CreateStream.class, statementValidator)
-    );
-
-    final List statements = givenParsed("RUN SCRIPT '/some/script.sql';");
-
-    // When:
-    validator.validate(serviceContext, statements, props, "sql");
-
-    // Then:
-    verify(statementValidator, times(1)).validate(
-        argThat(is(configured(preparedStatement(instanceOf(CreateStream.class))))),
-        any(),
-        eq(executionContext),
-        any()
-    );
-  }
-
-
   @Test
   public void shouldThrowIfServiceContextIsNotSandbox() {
     // Given:

From 37f8d89f4088b37378baa227cb0974a4dc49514b Mon Sep 17 00:00:00 2001
From: Almog Gavra
Date: Wed, 11 Dec 2019 15:22:19 -0800
Subject: [PATCH 024/123] chore: the KsqlJsonDeserializer no longer guarantees ordering (#4115)
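For example (values taken from the updated expectations in simple-struct.json
below), a struct coerced to a JSON string was previously written with its keys
sorted alphabetically:

    {"CATEGORY":{"ID":2,"NAME":"Food"},"ITEMID":6,"NAME":"Item_6"}

and is now written in the order of the underlying value:

    {"ITEMID":6,"CATEGORY":{"ID":2,"NAME":"Food"},"NAME":"Item_6"}

Consumers should therefore not rely on key ordering within such strings.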
---
 .../query-validation-tests/simple-struct.json | 10 +++++-----
 .../ksql/serde/json/KsqlJsonDeserializer.java | 10 +---------
 2 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json b/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json
index 6396c3482717..bd38e6754f33 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json
@@ -590,7 +590,7 @@
         "topic": "S1",
         "timestamp": 0,
         "value": {
-          "ITEMID": "{\"CATEGORY\":{\"ID\":2,\"NAME\":\"Food\"},\"ITEMID\":6,\"NAME\":\"Item_6\"}",
+          "ITEMID": "{\"ITEMID\":6,\"CATEGORY\":{\"ID\":2,\"NAME\":\"Food\"},\"NAME\":\"Item_6\"}",
           "KSQL_COL_1": "6"
         },
         "key": "0"
@@ -599,7 +599,7 @@
         "topic": "S1",
         "timestamp": 0,
         "value": {
-          "ITEMID": "{\"CATEGORY\":{\"ID\":2,\"NAME\":\"Produce\"},\"ITEMID\":6,\"NAME\":\"Item_6\"}",
+          "ITEMID": "{\"ITEMID\":6,\"CATEGORY\":{\"ID\":2,\"NAME\":\"Produce\"},\"NAME\":\"Item_6\"}",
           "KSQL_COL_1": "6"
         },
         "key": "100"
@@ -608,7 +608,7 @@
         "topic": "S1",
         "timestamp": 0,
         "value": {
-          "ITEMID": "{\"CATEGORY\":{\"ID\":2,\"NAME\":\"Produce\"},\"ITEMID\":6,\"NAME\":\"Item_6\"}",
+          "ITEMID": "{\"ITEMID\":6,\"CATEGORY\":{\"ID\":2,\"NAME\":\"Produce\"},\"NAME\":\"Item_6\"}",
           "KSQL_COL_1": "6"
         },
         "key": "101"
@@ -617,7 +617,7 @@
         "topic": "S1",
         "timestamp": 0,
         "value": {
-          "ITEMID": "{\"CATEGORY\":{\"ID\":1,\"NAME\":\"Food\"},\"ITEMID\":2,\"NAME\":\"Item_2\"}",
+          "ITEMID": "{\"ITEMID\":2,\"CATEGORY\":{\"ID\":1,\"NAME\":\"Food\"},\"NAME\":\"Item_2\"}",
           "KSQL_COL_1": "2"
         },
         "key": "101"
@@ -626,7 +626,7 @@
         "topic": "S1",
         "timestamp": 0,
         "value": {
-          "ITEMID": "{\"CATEGORY\":{\"ID\":1,\"NAME\":\"Produce\"},\"ITEMID\":5,\"NAME\":\"Item_5\"}",
+          "ITEMID": "{\"ITEMID\":5,\"CATEGORY\":{\"ID\":1,\"NAME\":\"Produce\"},\"NAME\":\"Item_5\"}",
          "KSQL_COL_1": "5"
         },
         "key": "101"
diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java
index bc9773a6d10b..5d027db5970e 100644
--- a/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java
+++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java
@@ -19,7 +19,6 @@
 import com.fasterxml.jackson.databind.DeserializationFeature;
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
 import com.fasterxml.jackson.databind.node.ArrayNode;
 import com.fasterxml.jackson.databind.node.NullNode;
 import com.fasterxml.jackson.databind.node.NumericNode;
@@ -60,8 +59,6 @@ public class KsqlJsonDeserializer implements Deserializer {
   private static final SqlSchemaFormatter FORMATTER = new SqlSchemaFormatter(word -> false);
   private static final ObjectMapper MAPPER = new ObjectMapper()
       .enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS);
-  private static final ObjectMapper SORTED_MAPPER = new ObjectMapper()
-      .enable(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);
 
   private static final Map<Schema.Type, Function<JsonValueContext, Object>> HANDLERS = ImmutableMap
       .<Schema.Type, Function<JsonValueContext, Object>>builder()
@@ -139,12 +136,7 @@ private static Object enforceFieldType(final JsonValueContext context) {
   private static String processString(final JsonValueContext context) {
     if (context.val instanceof ObjectNode) {
       try {
-        // this ensure sorted order, there's an issue with Jackson where just enabling
-        // SORT_PROPERTIES_ALPHABETICALLY does not work if it is not a POJO-backed
-        // JSON object
-        return SORTED_MAPPER.writeValueAsString(
-            SORTED_MAPPER.treeToValue(context.val, Object.class)
-        );
+        return MAPPER.writeValueAsString(MAPPER.treeToValue(context.val, Object.class));
       } catch (JsonProcessingException e) {
         throw new KsqlException("Unexpected inability to write value as string: " + context.val);
       }

From c6c00b1bfd15b84bae091606a48dce52ba973a7f Mon Sep 17 00:00:00 2001
From: Steven Zhang <35498506+stevenpyzhang@users.noreply.github.com>
Date: Wed, 11 Dec 2019 16:02:52 -0800
Subject: [PATCH 025/123] fix: NPE when starting StandaloneExecutor (#4119)
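Root cause, for context (a minimal sketch, not part of the patch itself):
KsqlConfig.originals() may contain null-valued entries, and
java.util.Properties extends Hashtable, which rejects null keys and values,
so copying such an entry into a Properties instance throws:

```java
import java.util.Properties;

public class NullValueDemo {
  public static void main(String[] args) {
    final Properties props = new Properties();
    // Properties inherits Hashtable.put, which throws a
    // NullPointerException when given a null value:
    props.put("some.config", null);
  }
}
```

The fix below simply skips null-valued entries when copying.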
---
 .../ksql/rest/server/StandaloneExecutor.java  |  7 ++++++-
 .../rest/server/StandaloneExecutorTest.java   | 20 +++++++++++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
index ce9f102f245d..56f6a29adf3d 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
@@ -15,6 +15,7 @@
 package io.confluent.ksql.rest.server;
 
+import static java.util.Objects.nonNull;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.collect.ImmutableMap;
@@ -118,7 +119,11 @@ public void startAsync() {
       processesQueryFile(readQueriesFile(queriesFile));
       showWelcomeMessage();
       final Properties properties = new Properties();
-      ksqlConfig.originals().forEach((key, value) -> properties.put(key, value.toString()));
+      ksqlConfig.originals().forEach((key, value) -> {
+        if (nonNull(value)) {
+          properties.put(key, value.toString());
+        }
+      });
       versionChecker.start(KsqlModuleType.SERVER, properties);
     } catch (final Exception e) {
       log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorTest.java
index 2c994b3c6eae..ca2a68c0aa83 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorTest.java
@@ -83,6 +83,7 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
 import java.util.OptionalInt;
@@ -340,6 +341,25 @@ public void shouldLoadQueryFile() {
     verify(ksqlEngine).parse("This statement");
   }
 
+
+  @Test
+  public void shouldNotThrowIfNullValueInKsqlConfig() {
+    standaloneExecutor = new StandaloneExecutor(
+        serviceContext,
+        processingLogConfig,
+        new KsqlConfig(Collections.singletonMap("test", null)),
+        ksqlEngine,
+        queriesFile.toString(),
+        udfLoader,
+        false,
+        versionChecker,
+        injectorFactory
+    );
+
+    // When:
+    standaloneExecutor.startAsync();
+  }
+
   @Test
   public void shouldThrowIfCanNotLoadQueryFile() {
     // Given:

From 87b03e3dbf0c81c8fe5184a83ad90e7d54ad3579 Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Thu, 12 Dec 2019 08:59:15 +0000
Subject: [PATCH 026/123] chore: group-by primitive key support (#4108)

* chore: group-by primitive key support

Fixes: https://github.com/confluentinc/ksql/issues/4092

This commit gets `GROUP BY` clauses working with primitive key types.

BREAKING CHANGE: A `GROUP BY` on single expressions now changes the SQL type
of `ROWKEY` in the output schema of the query to match the SQL type of the
expression. For example, consider:

```sql
CREATE STREAM INPUT (ROWKEY STRING KEY, ID INT) WITH (...);
CREATE TABLE OUTPUT AS SELECT COUNT(*) AS COUNT FROM INPUT GROUP BY ID;
```

Previously, the above would have resulted in an output schema of
`ROWKEY STRING KEY, COUNT BIGINT`, where `ROWKEY` would have stored the
string representation of the integer from the `ID` column. With this commit
the output schema will be `ROWKEY INT KEY, COUNT BIGINT`.

BREAKING CHANGE: Any `GROUP BY` expression that resolves to `NULL`, including
because a UDF throws an exception, now results in the row being excluded from
the result. Previously, as the key was a `STRING`, a value of `"null"` could
be used. With other primitive types this is not possible. As key columns must
be non-null, any exception is logged and the row is excluded.
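To make the second change concrete, this patch adds a test UDF, `bad_udf`,
that throws on every invocation; a query such as the following (illustrative
only) now logs the error and excludes each affected row, rather than grouping
such rows under a "null" string key:

```sql
CREATE TABLE OUTPUT AS SELECT COUNT(*) AS COUNT FROM INPUT GROUP BY bad_udf(ID);
```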
---
 .../ksql/planner/LogicalPlanner.java          |  41 +-
 .../ksql/planner/plan/AggregateNode.java      |   5 -
 .../confluent/ksql/function/udf/BadUdf.java   |  26 ++
 .../SelectValueMapperIntegrationTest.java     |   4 +-
 .../physical/PhysicalPlanBuilderTest.java     |   6 +-
 .../structured/SchemaKGroupedTableTest.java   |   1 -
 .../ksql/structured/SchemaKTableTest.java     |   5 +-
 .../ksql/execution/util/StructKeyUtil.java    |  12 +-
 .../execution/util/StructKeyUtilTest.java     |   4 +-
 .../test/serde/kafka/KafkaSerdeSupplier.java  |  10 +-
 .../ksql/test/tools/TestExecutor.java         | 219 ++++++----
 .../io/confluent/ksql/test/tools/Topic.java   |   4 -
 .../ksql/test/tools/TestExecutorTest.java     |  79 +++-
 .../query-validation-tests/average.json       |  22 +-
 .../query-validation-tests/collect-list.json  | 202 ++++-----
 .../query-validation-tests/collect-set.json   |  90 ++--
 .../query-validation-tests/count.json         |  14 +-
 .../query-validation-tests/group-by.json      | 397 ++++++++++++++----
 .../query-validation-tests/having.json        |  36 +-
 .../query-validation-tests/histogram.json     |  22 +-
 .../hopping-windows.json                      | 130 +++---
 .../query-validation-tests/key-field.json     | 118 +++---
 .../query-validation-tests/max-group-by.json  | 176 ++++----
 .../query-validation-tests/min-group-by.json  | 152 +++----
 .../session-windows.json                      |  32 +-
 .../resources/query-validation-tests/sum.json | 180 ++++----
 .../test-custom-udaf.json                     |  52 +--
 .../query-validation-tests/topk-distinct.json |  93 ++--
 .../query-validation-tests/topk-group-by.json |  88 ++--
 .../tumbling-windows.json                     | 100 ++---
 .../query-validation-tests/window-bounds.json | 108 ++---
 .../test-runner/correct/aggregate/input.json  |  14 +-
 .../test-runner/correct/aggregate/output.json |  28 +-
 .../correct/aggregate/statements.sql          |   2 +-
 .../entity/TableRowsEntityFactoryTest.java    |  11 +-
 .../streams/AggregateParamsFactory.java       |  75 +++-
 .../ksql/execution/streams/GroupByMapper.java |  71 ----
 .../ksql/execution/streams/GroupByParams.java |  42 ++
 .../streams/GroupByParamsFactory.java         | 152 +++++++
 .../execution/streams/StepSchemaResolver.java |  20 +-
 .../streams/StreamGroupByBuilder.java         |  28 +-
 .../streams/StreamSelectKeyBuilder.java       |   2 +-
 .../streams/TableGroupByBuilder.java          |  46 +-
.../streams/TableGroupByBuilder.java | 46 +- .../streams/AggregateParamsFactoryTest.java | 4 +- .../execution/streams/GroupByMapperTest.java | 112 ----- .../streams/GroupByParamsFactoryTest.java | 175 ++++++++ .../streams/StreamGroupByBuilderTest.java | 45 +- .../streams/StreamSelectKeyBuilderTest.java | 7 +- .../streams/TableGroupByBuilderTest.java | 40 +- .../KsqlMaterializationTest.java | 2 +- .../ks/KsMaterializedSessionTableTest.java | 2 +- .../ks/KsMaterializedTableTest.java | 2 +- .../ks/KsMaterializedWindowTableTest.java | 2 +- 53 files changed, 1991 insertions(+), 1319 deletions(-) create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/function/udf/BadUdf.java delete mode 100644 ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByMapper.java create mode 100644 ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParams.java create mode 100644 ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParamsFactory.java delete mode 100644 ksql-streams/src/test/java/io/confluent/ksql/execution/streams/GroupByMapperTest.java create mode 100644 ksql-streams/src/test/java/io/confluent/ksql/execution/streams/GroupByParamsFactoryTest.java diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java index 66837ecfdbe1..b6488ff65f79 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java @@ -47,6 +47,7 @@ import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.LogicalSchema.Builder; import io.confluent.ksql.schema.ksql.types.SqlType; +import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.SchemaUtil; @@ -147,16 +148,18 @@ private Optional getTimestampColumn( } private AggregateNode buildAggregateNode(final PlanNode sourcePlanNode) { - final Expression groupBy = analysis.getGroupByExpressions().size() == 1 - ? analysis.getGroupByExpressions().get(0) - : null; + final List groupByExps = analysis.getGroupByExpressions(); - final LogicalSchema schema = buildProjectionSchema(sourcePlanNode); + final LogicalSchema schema = buildAggregateSchema(sourcePlanNode, groupByExps); + + final Expression groupBy = groupByExps.size() == 1 + ? 
groupByExps.get(0) + : null; final Optional keyFieldName = getSelectAliasMatching((expression, alias) -> - expression.equals(groupBy) - && !SchemaUtil.isFieldName(alias.name(), SchemaUtil.ROWTIME_NAME.name()) - && !SchemaUtil.isFieldName(alias.name(), SchemaUtil.ROWKEY_NAME.name()), + expression.equals(groupBy) + && !SchemaUtil.isFieldName(alias.name(), SchemaUtil.ROWTIME_NAME.name()) + && !SchemaUtil.isFieldName(alias.name(), SchemaUtil.ROWKEY_NAME.name()), sourcePlanNode); return new AggregateNode( @@ -164,7 +167,7 @@ private AggregateNode buildAggregateNode(final PlanNode sourcePlanNode) { sourcePlanNode, schema, keyFieldName.map(ColumnRef::withoutSource), - analysis.getGroupByExpressions(), + groupByExps, analysis.getWindowExpression(), aggregateAnalysis.getAggregateFunctionArguments(), aggregateAnalysis.getAggregateFunctions(), @@ -344,6 +347,28 @@ private LogicalSchema buildProjectionSchema(final PlanNode sourcePlanNode) { return builder.build(); } + private LogicalSchema buildAggregateSchema( + final PlanNode sourcePlanNode, + final List groupByExps + ) { + final SqlType keyType; + if (groupByExps.size() != 1) { + keyType = SqlTypes.STRING; + } else { + final ExpressionTypeManager typeManager = + new ExpressionTypeManager(sourcePlanNode.getSchema(), functionRegistry); + + keyType = typeManager.getExpressionSqlType(groupByExps.get(0)); + } + + final LogicalSchema sourceSchema = buildProjectionSchema(sourcePlanNode); + + return LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, keyType) + .valueColumns(sourceSchema.value()) + .build(); + } + private LogicalSchema buildRepartitionedSchema( final PlanNode sourceNode, final Expression partitionBy diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java index 97f2d05f1115..38bfe0cebdcb 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/AggregateNode.java @@ -34,7 +34,6 @@ import io.confluent.ksql.parser.tree.WindowExpression; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; -import io.confluent.ksql.schema.ksql.SqlBaseType; import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.structured.SchemaKGroupedStream; @@ -109,10 +108,6 @@ public AggregateNode( this.havingExpressions = havingExpressions; this.keyField = KeyField.of(requireNonNull(keyFieldName, "keyFieldName")) .validateKeyExistsIn(schema); - - if (schema.key().get(0).type().baseType() != SqlBaseType.STRING) { - throw new KsqlException("GROUP BY is not supported with non-STRING keys"); - } } @Override diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/BadUdf.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/BadUdf.java new file mode 100644 index 000000000000..7c14a33887fb --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/BadUdf.java @@ -0,0 +1,26 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.function.udf; + +@UdfDescription(name="bad_udf", description = "throws exceptions when called") +@SuppressWarnings("unused") +public class BadUdf { + + @Udf(description = "throws") + public String blowUp(final int arg1) { + throw new RuntimeException("boom!"); + } +} diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/SelectValueMapperIntegrationTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/SelectValueMapperIntegrationTest.java index c8658cca9bb4..ee4254c1707a 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/SelectValueMapperIntegrationTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/SelectValueMapperIntegrationTest.java @@ -30,6 +30,7 @@ import io.confluent.ksql.planner.plan.PlanNode; import io.confluent.ksql.planner.plan.ProjectNode; import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.testutils.AnalysisTestUtil; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.MetaStoreFixture; @@ -45,7 +46,8 @@ public class SelectValueMapperIntegrationTest { - private static final Struct NON_WINDOWED_KEY = StructKeyUtil.asStructKey("someKey"); + private static final Struct NON_WINDOWED_KEY = StructKeyUtil.keyBuilder(SqlTypes.STRING) + .build("someKey"); private final MetaStore metaStore = MetaStoreFixture .getNewMetaStore(TestFunctionRegistry.INSTANCE.get()); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java index 7272ed4af575..6890b89091a3 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java @@ -203,14 +203,14 @@ public void shouldCreateExecutionPlan() { final String[] lines = planText.split("\n"); assertThat(lines[0], startsWith( - " > [ PROJECT ] | Schema: [ROWKEY STRING KEY, COL0 BIGINT, KSQL_COL_1 DOUBLE, " + " > [ PROJECT ] | Schema: [ROWKEY BIGINT KEY, COL0 BIGINT, KSQL_COL_1 DOUBLE, " + "KSQL_COL_2 BIGINT] |")); assertThat(lines[1], startsWith( - "\t\t > [ AGGREGATE ] | Schema: [ROWKEY STRING KEY, KSQL_INTERNAL_COL_0 BIGINT, " + "\t\t > [ AGGREGATE ] | Schema: [ROWKEY BIGINT KEY, KSQL_INTERNAL_COL_0 BIGINT, " + "KSQL_INTERNAL_COL_1 DOUBLE, KSQL_AGG_VARIABLE_0 DOUBLE, " + "KSQL_AGG_VARIABLE_1 BIGINT] |")); assertThat(lines[2], startsWith( - "\t\t\t\t > [ GROUP_BY ] | Schema: [ROWKEY STRING KEY, KSQL_INTERNAL_COL_0 BIGINT, " + "\t\t\t\t > [ GROUP_BY ] | Schema: [ROWKEY BIGINT KEY, KSQL_INTERNAL_COL_0 BIGINT, " + "KSQL_INTERNAL_COL_1 DOUBLE] |" )); assertThat(lines[3], startsWith( diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedTableTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedTableTest.java index 8c21a7f9060f..989112d91f3c 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedTableTest.java +++ 
b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKGroupedTableTest.java @@ -19,7 +19,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import com.google.common.collect.ImmutableList; import io.confluent.ksql.execution.context.QueryContext; diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java index 94321a73dd10..1c0b08ba863f 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java @@ -65,6 +65,7 @@ import io.confluent.ksql.execution.streams.StreamsFactories; import io.confluent.ksql.execution.streams.StreamsUtil; import io.confluent.ksql.execution.util.StructKeyUtil; +import io.confluent.ksql.execution.util.StructKeyUtil.KeyBuilder; import io.confluent.ksql.function.InternalFunctionRegistry; import io.confluent.ksql.logging.processing.ProcessingLogContext; import io.confluent.ksql.metastore.MetaStore; @@ -121,6 +122,8 @@ @RunWith(MockitoJUnitRunner.class) public class SchemaKTableTest { + private static final KeyBuilder STRING_KEY_BUILDER = StructKeyUtil.keyBuilder(SqlTypes.STRING); + private final KsqlConfig ksqlConfig = new KsqlConfig(Collections.emptyMap()); private final MetaStore metaStore = MetaStoreFixture.getNewMetaStore(new InternalFunctionRegistry()); private final GroupedFactory groupedFactory = mock(GroupedFactory.class); @@ -584,7 +587,7 @@ public void shouldGroupKeysCorrectly() { (KeyValue) keySelector.apply("key", value); // Validate that the captured mapper produces the correct key - assertThat(keyValue.key, equalTo(StructKeyUtil.asStructKey("bar|+|foo"))); + assertThat(keyValue.key, equalTo(STRING_KEY_BUILDER.build("bar|+|foo"))); assertThat(keyValue.value, equalTo(value)); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/StructKeyUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/StructKeyUtil.java index c1ca8bd27e2a..b41d9dd13d26 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/StructKeyUtil.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/StructKeyUtil.java @@ -42,19 +42,17 @@ public final class StructKeyUtil { private StructKeyUtil() { } - public static Struct asStructKey(String rowKey) { - Struct keyStruct = new Struct(ROWKEY_STRUCT_SCHEMA); - keyStruct.put(ROWKEY_FIELD, rowKey); - return keyStruct; - } - - public static KeyBuilder keySchema(final LogicalSchema schema) { + public static KeyBuilder keyBuilder(final LogicalSchema schema) { final List keyCols = schema.key(); if (keyCols.size() != 1) { throw new UnsupportedOperationException("Only single keys supported"); } final SqlType sqlType = keyCols.get(0).type(); + return keyBuilder(sqlType); + } + + public static KeyBuilder keyBuilder(final SqlType sqlType) { final Schema connectSchema = SchemaConverters.sqlToConnectConverter().toConnectSchema(sqlType); return new KeyBuilder(SchemaBuilder diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/StructKeyUtilTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/StructKeyUtilTest.java index fe80c92f2a9d..db73f046b610 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/StructKeyUtilTest.java +++ 
b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/StructKeyUtilTest.java
@@ -39,13 +39,13 @@ public class StructKeyUtilTest {
 @Before
 public void setUp() {
- builder = StructKeyUtil.keySchema(LOGICAL_SCHEMA);
+ builder = StructKeyUtil.keyBuilder(LOGICAL_SCHEMA);
 }
 @Test(expected = UnsupportedOperationException.class)
 public void shouldThrowOnMultipleKeyColumns() {
 // Only single key columns initially supported
- StructKeyUtil.keySchema(LogicalSchema.builder()
+ StructKeyUtil.keyBuilder(LogicalSchema.builder()
 .keyColumn(ColumnName.of("BOB"), SqlTypes.STRING)
 .keyColumn(ColumnName.of("JOHN"), SqlTypes.STRING)
 .build());
diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/kafka/KafkaSerdeSupplier.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/kafka/KafkaSerdeSupplier.java
index 818fb92a875a..6a23853317fd 100644
--- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/kafka/KafkaSerdeSupplier.java
+++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/kafka/KafkaSerdeSupplier.java
@@ -20,6 +20,7 @@ import io.confluent.ksql.schema.ksql.LogicalSchema;
 import io.confluent.ksql.schema.ksql.SchemaConverters;
 import io.confluent.ksql.schema.ksql.types.SqlType;
+import io.confluent.ksql.test.TestFrameworkException;
 import io.confluent.ksql.test.serde.SerdeSupplier;
 import java.util.List;
 import java.util.Map;
@@ -105,9 +106,11 @@ public byte[] serialize(final String topic, final Object value) {
 private final class RowDeserializer implements Deserializer<Object> {
 private Deserializer<Object> delegate;
+ private String type;
 @Override
 public void configure(final Map<String, ?> configs, final boolean isKey) {
+ this.type = isKey ? "key" : "value";
 final SqlType sqlType = getColumnType(isKey);
 delegate = getSerde(sqlType).deserializer();
 delegate.configure(configs, isKey);
@@ -115,7 +118,12 @@ public void configure(final Map<String, ?> configs, final boolean isKey) {
 @Override
 public Object deserialize(final String topic, final byte[] bytes) {
- return delegate.deserialize(topic, bytes);
+ try {
+ return delegate.deserialize(topic, bytes);
+ } catch (final Exception e) {
+ throw new TestFrameworkException("Failed to deserialize " + type + ". 
" + + e.getMessage(), e); + } } } } \ No newline at end of file diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java index d184c7cf037a..bdc8f0c866ca 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java @@ -27,9 +27,11 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; +import com.google.errorprone.annotations.Immutable; import io.confluent.common.utils.TestUtils; import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; +import io.confluent.kafka.serializers.KafkaAvroSerializerConfig; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.internal.KsqlEngineMetrics; import io.confluent.ksql.logging.processing.ProcessingLogContext; @@ -39,20 +41,24 @@ import io.confluent.ksql.name.SourceName; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.query.id.SequentialQueryIdGenerator; -import io.confluent.ksql.schema.ksql.Column; import io.confluent.ksql.schema.ksql.DefaultSqlValueCoercer; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.SchemaConverters; import io.confluent.ksql.schema.ksql.types.SqlType; +import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.services.DefaultConnectClient; import io.confluent.ksql.services.DefaultServiceContext; import io.confluent.ksql.services.DisabledKsqlClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.test.TestFrameworkException; +import io.confluent.ksql.test.serde.SerdeSupplier; import io.confluent.ksql.test.serde.avro.AvroSerdeSupplier; import io.confluent.ksql.test.serde.avro.ValueSpecAvroSerdeSupplier; import io.confluent.ksql.test.tools.stubs.StubKafkaClientSupplier; import io.confluent.ksql.test.tools.stubs.StubKafkaRecord; import io.confluent.ksql.test.tools.stubs.StubKafkaService; import io.confluent.ksql.test.tools.stubs.StubKafkaTopicClient; +import io.confluent.ksql.test.utils.SerdeUtil; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.KsqlServerException; @@ -72,6 +78,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.TopologyTestDriver; @@ -177,19 +184,13 @@ public void buildAndExecuteQuery(final TestCase testCase) { .filter(topic -> !inputTopics.contains(topic.getName())) .collect(Collectors.toSet()); if (!topicsFromInput.isEmpty()) { - pipeRecordsFromProvidedInput( - testCase, - stubKafkaService, - topologyTestDriverContainer, - serviceContext); + pipeRecordsFromProvidedInput(testCase, topologyTestDriverContainer); } for (final Topic kafkaTopic : topicsFromKafka) { pipeRecordsFromKafka( - testCase, kafkaTopic.getName(), - stubKafkaService, - topologyTestDriverContainer, - serviceContext); + topologyTestDriverContainer + ); } allTopicNames.addAll( @@ -249,7 +250,14 @@ private void validateTopicData( final Function 
<Object, Object> keyCoercer = keyCoercerForTopic(topicName);
 for (int i = 0; i < expected.size(); i++) {
- final Record expectedRecord = expected.get(i).coerceKey(keyCoercer);
+ final Record expectedRecord;
+ try {
+ expectedRecord = expected.get(i).coerceKey(keyCoercer);
+ } catch (final Exception e) {
+ throw new AssertionError(
+ "Topic '" + topicName + "', message " + i
+ + ": Could not coerce key in test case to required type. " + e.getMessage(), e);
+ }
 final ProducerRecord actualProducerRecord = actual.get(i).getProducerRecord();
 validateCreatedMessage(
@@ -263,7 +271,11 @@
 }
 private Function<Object, Object> keyCoercerForTopic(final String topicName) {
- final SqlType keyType = getKeyType(topicName);
+ final SqlType keyType = getTopicInfo(topicName)
+ .getSchema()
+ .key()
+ .get(0)
+ .type();
 return key -> {
 if (key == null) {
@@ -271,13 +283,18 @@
 }
 return DefaultSqlValueCoercer.INSTANCE
 .coerce(key, keyType)
- .orElseThrow(() -> new TestFrameworkException("Failed to coerce key to required type. "
- + "type: " + keyType
- + ", key: " + key));
+ .orElseThrow(() -> new AssertionError("Invalid key value for topic " + topicName + "."
+ + System.lineSeparator()
+ + "Expected KeyType: " + keyType
+ + System.lineSeparator()
+ + "Actual KeyType: " + SchemaConverters.javaToSqlConverter().toSqlType(key.getClass())
+ + ", key: " + key + "."
+ + System.lineSeparator()
+ + "This is likely caused by the key type in the test-case not matching the schema."));
 };
 }
- private SqlType getKeyType(final String topicName) {
+ private TopicInfo getTopicInfo(final String topicName) {
 try {
 final java.util.regex.Matcher matcher = INTERNAL_TOPIC_PATTERN.matcher(topicName);
 if (matcher.matches()) {
@@ -290,14 +307,14 @@
 final SourceName sinkName = query.getSinkName();
 final DataSource<?> source = ksqlEngine.getMetaStore().getSource(sinkName);
- return getKeyType(source);
+ return new TopicInfo(source.getSchema(), source.getKsqlTopic().getKeyFormat());
 }
 // Source / sink topic:
- final Set<SqlType> keyTypes = ksqlEngine.getMetaStore().getAllDataSources().values()
+ final Set<TopicInfo> keyTypes = ksqlEngine.getMetaStore().getAllDataSources().values()
 .stream()
 .filter(source -> source.getKafkaTopicName().equals(topicName))
- .map(TestExecutor::getKeyType)
+ .map(source -> new TopicInfo(source.getSchema(), source.getKsqlTopic().getKeyFormat()))
 .collect(Collectors.toSet());
 if (keyTypes.isEmpty()) {
@@ -312,14 +329,37 @@
 }
 }
- private static SqlType getKeyType(final DataSource<?> source) {
- final List<Column> keyColumns = source.getSchema().key();
- if (keyColumns.size() != 1) {
- throw new UnsupportedOperationException("only single key columns currently supported. 
" - + "got: " + keyColumns); - } + @SuppressWarnings({"unchecked", "rawtypes"}) + private Serializer getKeySerializer(final String topicName) { + final TopicInfo topicInfo = getTopicInfo(topicName); + + final SerdeSupplier keySerdeSupplier = SerdeUtil + .getKeySerdeSupplier(topicInfo.getKeyFormat(), topicInfo::getSchema); + + final Serializer serializer = keySerdeSupplier.getSerializer( + serviceContext.getSchemaRegistryClient() + ); + + serializer.configure(ImmutableMap.of( + KafkaAvroSerializerConfig.SCHEMA_REGISTRY_URL_CONFIG, "something" + ), true); + + return (Serializer) serializer; + } + + private Deserializer getKeyDeserializer(final String topicName) { + final TopicInfo topicInfo = getTopicInfo(topicName); + + final SerdeSupplier keySerdeSupplier = SerdeUtil + .getKeySerdeSupplier(topicInfo.getKeyFormat(), topicInfo::getSchema); + + final Deserializer deserializer = keySerdeSupplier.getDeserializer( + serviceContext.getSchemaRegistryClient() + ); + + deserializer.configure(ImmutableMap.of(), true); - return keyColumns.get(0).type(); + return deserializer; } private static String getActualsForErrorMessage(final List actual) { @@ -365,9 +405,7 @@ private static void verifyTopology(final TestCase testCase) { private void pipeRecordsFromProvidedInput( final TestCase testCase, - final StubKafkaService stubKafkaService, - final TopologyTestDriverContainer topologyTestDriverContainer, - final ServiceContext serviceContext + final TopologyTestDriverContainer topologyTestDriverContainer ) { for (final Record record : testCase.getInputRecords()) { @@ -376,48 +414,39 @@ private void pipeRecordsFromProvidedInput( final Record coerced = record.coerceKey(keyCoercerForTopic(record.topic.getName())); processSingleRecord( - testCase, StubKafkaRecord.of(coerced, null), - stubKafkaService, topologyTestDriverContainer, - serviceContext.getSchemaRegistryClient(), ImmutableSet.copyOf(stubKafkaService.getAllTopics()) ); } } } - private static void pipeRecordsFromKafka( - final TestCase testCase, + private void pipeRecordsFromKafka( final String kafkaTopicName, - final StubKafkaService stubKafkaService, - final TopologyTestDriverContainer topologyTestDriverContainer, - final ServiceContext serviceContext + final TopologyTestDriverContainer topologyTestDriverContainer ) { for (final StubKafkaRecord stubKafkaRecord : stubKafkaService.readRecords(kafkaTopicName)) { processSingleRecord( - testCase, stubKafkaRecord, - stubKafkaService, topologyTestDriverContainer, - serviceContext.getSchemaRegistryClient(), Collections.emptySet() ); } } @SuppressWarnings("unchecked") - private static void processSingleRecord( - final TestCase testCase, + private void processSingleRecord( final StubKafkaRecord inputRecord, - final StubKafkaService stubKafkaService, final TopologyTestDriverContainer testDriver, - final SchemaRegistryClient schemaRegistryClient, final Set possibleSinkTopics ) { final Topic recordTopic = stubKafkaService .getTopic(inputRecord.getTestRecord().topic.getName()); - final Serializer keySerializer = recordTopic.getKeySerializer(schemaRegistryClient); + + final SchemaRegistryClient schemaRegistryClient = serviceContext.getSchemaRegistryClient(); + + final Serializer keySerializer = getKeySerializer(recordTopic.getName()); final Serializer valueSerializer = recordTopic.getValueSerdeSupplier() instanceof AvroSerdeSupplier @@ -427,24 +456,21 @@ private static void processSingleRecord( final Object key = getKey(inputRecord); final ConsumerRecord consumerRecord = new 
org.apache.kafka.streams.test.ConsumerRecordFactory<>( - keySerializer, - valueSerializer - ).create( - recordTopic.getName(), - key, - inputRecord.getTestRecord().value(), - inputRecord.getTestRecord().timestamp().orElse(0L) - ); + keySerializer, + valueSerializer + ).create( + recordTopic.getName(), + key, + inputRecord.getTestRecord().value(), + inputRecord.getTestRecord().timestamp().orElse(0L) + ); testDriver.getTopologyTestDriver().pipeInput(consumerRecord); final Topic sinkTopic = testDriver.getSinkTopic(); processRecordsForTopic( - testCase, testDriver.getTopologyTestDriver(), - sinkTopic, - stubKafkaService, - schemaRegistryClient + sinkTopic ); for (final Topic possibleSinkTopic : possibleSinkTopics) { @@ -452,29 +478,19 @@ private static void processSingleRecord( continue; } processRecordsForTopic( - testCase, testDriver.getTopologyTestDriver(), - possibleSinkTopic, - stubKafkaService, - schemaRegistryClient + possibleSinkTopic ); } } - @SuppressWarnings("unchecked") - private static void processRecordsForTopic( - final TestCase testCase, + private void processRecordsForTopic( final TopologyTestDriver topologyTestDriver, - final Topic sinkTopic, - final StubKafkaService stubKafkaService, - final SchemaRegistryClient schemaRegistryClient + final Topic sinkTopic ) { + int idx = 0; while (true) { - final ProducerRecord producerRecord = topologyTestDriver.readOutput( - sinkTopic.getName(), - sinkTopic.getKeyDeserializer(schemaRegistryClient), - sinkTopic.getValueDeserializer(schemaRegistryClient) - ); + final ProducerRecord producerRecord = readOutput(topologyTestDriver, sinkTopic, idx); if (producerRecord == null) { break; } @@ -488,6 +504,27 @@ private static void processRecordsForTopic( } } + @SuppressWarnings("unchecked") + private ProducerRecord readOutput( + final TopologyTestDriver topologyTestDriver, + final Topic sinkTopic, + final int idx + ) { + try { + return topologyTestDriver.readOutput( + sinkTopic.getName(), + getKeyDeserializer(sinkTopic.getName()), + sinkTopic.getValueDeserializer(serviceContext.getSchemaRegistryClient()) + ); + } catch (final Exception e) { + throw new AssertionError("Topic " + sinkTopic.getName() + + ": Failed to read record " + idx + + ". " + e.getMessage(), + e + ); + } + } + private static Object getKey(final StubKafkaRecord stubKafkaRecord) { return stubKafkaRecord.getProducerRecord() == null ? 
stubKafkaRecord.getTestRecord().key() @@ -588,4 +625,42 @@ List buildStreamsTopologyTestDrivers( StubKafkaService stubKafkaService ); } + + @Immutable + private static final class TopicInfo { + + private final LogicalSchema schema; + private final KeyFormat keyFormat; + + TopicInfo(final LogicalSchema schema, final KeyFormat keyFormat) { + this.schema = requireNonNull(schema, "schema"); + this.keyFormat = requireNonNull(keyFormat, "keyFormat"); + } + + public KeyFormat getKeyFormat() { + return keyFormat; + } + + public LogicalSchema getSchema() { + return schema; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TopicInfo topicInfo = (TopicInfo) o; + return Objects.equals(schema, topicInfo.schema) + && Objects.equals(keyFormat, topicInfo.keyFormat); + } + + @Override + public int hashCode() { + return Objects.hash(schema, keyFormat); + } + } } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Topic.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Topic.java index ad1e0b842772..16da97c458b0 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Topic.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Topic.java @@ -68,10 +68,6 @@ public short getReplicas() { return replicas; } - public SerdeSupplier getKeySerdeSupplier() { - return keySerdeFactory; - } - public SerdeSupplier getValueSerdeSupplier() { return valueSerdeSupplier; } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java index 4e19584cfee1..c2eb6c164c9b 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java @@ -24,20 +24,27 @@ import com.fasterxml.jackson.databind.node.TextNode; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.confluent.ksql.engine.KsqlEngine; +import io.confluent.ksql.execution.ddl.commands.KsqlTopic; import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.metastore.model.DataSource; +import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.types.SqlTypes; +import io.confluent.ksql.serde.Format; +import io.confluent.ksql.serde.FormatInfo; +import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.test.tools.TestExecutor.TopologyBuilder; import io.confluent.ksql.test.tools.conditions.PostConditions; import io.confluent.ksql.test.tools.stubs.StubKafkaRecord; import io.confluent.ksql.test.tools.stubs.StubKafkaService; import io.confluent.ksql.util.KsqlException; +import java.util.HashMap; +import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.function.Function; @@ -57,6 +64,11 @@ public class TestExecutorTest { private static final String INTERNAL_TOPIC_0 = "internal"; + private static final String SINK_TOPIC_NAME = "sink_topic"; + + private static final LogicalSchema SCHEMA = LogicalSchema.builder() + .valueColumn(ColumnName.of("v0"), 
SqlTypes.INTEGER) + .build(); @Rule public final ExpectedException expectedException = ExpectedException.none(); @@ -87,9 +99,12 @@ public class TestExecutorTest { private Function> internalTopicsAccessor; private TestExecutor executor; + private final Map> allSources = new HashMap<>(); @Before public void setUp() { + allSources.clear(); + executor = new TestExecutor( kafkaService, serviceContext, @@ -99,7 +114,7 @@ public void setUp() { ); when(sourceTopic.getName()).thenReturn("source_topic"); - when(sinkTopic.getName()).thenReturn("sink_topic"); + when(sinkTopic.getName()).thenReturn(SINK_TOPIC_NAME); final TopologyTestDriverContainer container = TopologyTestDriverContainer.of( topologyTestDriver, @@ -114,15 +129,12 @@ public void setUp() { when(ksqlEngine.getMetaStore()).thenReturn(metaStore); + when(metaStore.getAllDataSources()).thenReturn(allSources); + when(internalTopicsAccessor.apply(topologyTestDriver)) .thenReturn(ImmutableSet.of(INTERNAL_TOPIC_0)); - final DataSource sink = mock(DataSource.class); - when(sink.getKafkaTopicName()).thenReturn("sink_topic"); - when(sink.getSchema()).thenReturn(LogicalSchema.builder().build()); - when(metaStore.getAllDataSources()).thenReturn( - ImmutableMap.of(SourceName.of("sink"), sink) - ); + givenDataSourceTopic(SCHEMA); } @Test @@ -204,7 +216,7 @@ public void shouldFailOnSchemasMismatch() { public void shouldFailOnTwoLittleOutput() { // Given: final StubKafkaRecord actual_0 = kafkaRecord(sinkTopic, 123456719L, "k1", "v1"); - when(kafkaService.readRecords("sink_topic")).thenReturn(ImmutableList.of(actual_0)); + when(kafkaService.readRecords(SINK_TOPIC_NAME)).thenReturn(ImmutableList.of(actual_0)); final Record expected_0 = new Record(sinkTopic, "k1", "v1", null, Optional.of(1L), null); final Record expected_1 = new Record(sinkTopic, "k1", "v1", null, Optional.of(1L), null); @@ -225,7 +237,8 @@ public void shouldFailOnTwoMuchOutput() { // Given: final StubKafkaRecord actual_0 = kafkaRecord(sinkTopic, 123456719L, "k1", "v1"); final StubKafkaRecord actual_1 = kafkaRecord(sinkTopic, 123456789L, "k2", "v2"); - when(kafkaService.readRecords("sink_topic")).thenReturn(ImmutableList.of(actual_0, actual_1)); + when(kafkaService.readRecords(SINK_TOPIC_NAME)) + .thenReturn(ImmutableList.of(actual_0, actual_1)); final Record expected_0 = new Record(sinkTopic, "k1", "v1", null, Optional.of(1L), null); when(testCase.getOutputRecords()).thenReturn(ImmutableList.of(expected_0)); @@ -246,7 +259,8 @@ public void shouldFailOnUnexpectedOutput() { // Given: final StubKafkaRecord actual_0 = kafkaRecord(sinkTopic, 123456719L, "k1", "v1"); final StubKafkaRecord actual_1 = kafkaRecord(sinkTopic, 123456789L, "k2", "v2"); - when(kafkaService.readRecords("sink_topic")).thenReturn(ImmutableList.of(actual_0, actual_1)); + when(kafkaService.readRecords(SINK_TOPIC_NAME)) + .thenReturn(ImmutableList.of(actual_0, actual_1)); final Record expected_0 = new Record(sinkTopic, "k1", "v1", TextNode.valueOf("v1"), Optional.of(123456719L), null); final Record expected_1 = new Record(sinkTopic, "k2", "different", TextNode.valueOf("different"), Optional.of(123456789L), null); @@ -266,7 +280,8 @@ public void shouldPassOnExpectedOutput() { // Given: final StubKafkaRecord actual_0 = kafkaRecord(sinkTopic, 123456719L, "k1", "v1"); final StubKafkaRecord actual_1 = kafkaRecord(sinkTopic, 123456789L, "k2", "v2"); - when(kafkaService.readRecords("sink_topic")).thenReturn(ImmutableList.of(actual_0, actual_1)); + when(kafkaService.readRecords(SINK_TOPIC_NAME)) + 
.thenReturn(ImmutableList.of(actual_0, actual_1)); final Record expected_0 = new Record(sinkTopic, "k1", "v1", TextNode.valueOf("v1"), Optional.of(123456719L), null); final Record expected_1 = new Record(sinkTopic, "k2", "v2", TextNode.valueOf("v2"), Optional.of(123456789L), null); @@ -278,6 +293,31 @@ public void shouldPassOnExpectedOutput() { // Then: no exception. } + @Test + public void shouldHandleNonStringKeys() { + // Given: + final StubKafkaRecord actual_0 = kafkaRecord(sinkTopic, 123456719L, 1, "v1"); + final StubKafkaRecord actual_1 = kafkaRecord(sinkTopic, 123456789L, 1, "v2"); + when(kafkaService.readRecords(SINK_TOPIC_NAME)) + .thenReturn(ImmutableList.of(actual_0, actual_1)); + + final Record expected_0 = new Record(sinkTopic, 1, "v1", TextNode.valueOf("v1"), Optional.of(123456719L), null); + final Record expected_1 = new Record(sinkTopic, 1, "v2", TextNode.valueOf("v2"), Optional.of(123456789L), null); + when(testCase.getOutputRecords()).thenReturn(ImmutableList.of(expected_0, expected_1)); + + final LogicalSchema schema = LogicalSchema.builder() + .keyColumn(ColumnName.of("key"), SqlTypes.INTEGER) + .valueColumn(ColumnName.of("v0"), SqlTypes.STRING) + .build(); + + givenDataSourceTopic(schema); + + // When: + executor.buildAndExecuteQuery(testCase); + + // Then: no exception. + } + @Test public void shouldCheckPostConditions() { // When: @@ -306,13 +346,24 @@ private void givenActualTopology(final String topology, final String schemas) { when(testCase.getGeneratedSchemas()).thenReturn(ImmutableList.of(schemas)); } + private void givenDataSourceTopic(final LogicalSchema schema) { + final KsqlTopic topic = mock(KsqlTopic.class); + when(topic.getKeyFormat()) + .thenReturn(KeyFormat.of(FormatInfo.of(Format.KAFKA), Optional.empty())); + final DataSource dataSource = mock(DataSource.class); + when(dataSource.getKsqlTopic()).thenReturn(topic); + when(dataSource.getSchema()).thenReturn(schema); + when(dataSource.getKafkaTopicName()).thenReturn(TestExecutorTest.SINK_TOPIC_NAME); + allSources.put(SourceName.of(TestExecutorTest.SINK_TOPIC_NAME + "_source"), dataSource); + } + private static StubKafkaRecord kafkaRecord( final Topic topic, final long rowTime, - final String key, + final Object key, final String value ) { - final ProducerRecord record = new ProducerRecord<>( + final ProducerRecord record = new ProducerRecord<>( topic.getName(), 1, rowTime, diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/average.json b/ksql-functional-tests/src/test/resources/query-validation-tests/average.json index 5a7c672985c5..cfcc5f4bbeef 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/average.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/average.json @@ -11,22 +11,22 @@ { "name": "calculate average in select", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE AVG AS select id, abs(sum(value)/count(id)) * 10 as avg from test GROUP BY id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,-50"}, - {"topic": "test_topic", "key": "0", "value": "0,zero,-10"}, - {"topic": "test_topic", "key": "0", "value": "0,zero,-15"}, - {"topic": "test_topic", "key": "1", "value": "1,one,100"}, - {"topic": "test_topic", "key": "1", "value": "1,one,10"} + 
{"topic": "test_topic", "key": 0, "value": "0,zero,-50"}, + {"topic": "test_topic", "key": 0, "value": "0,zero,-10"}, + {"topic": "test_topic", "key": 0, "value": "0,zero,-15"}, + {"topic": "test_topic", "key": 1, "value": "1,one,100"}, + {"topic": "test_topic", "key": 1, "value": "1,one,10"} ], "outputs": [ - {"topic": "AVG", "key": "0", "value": "0,500"}, - {"topic": "AVG", "key": "0", "value": "0,300"}, - {"topic": "AVG", "key": "0", "value": "0,250"}, - {"topic": "AVG", "key": "1", "value": "1,1000"}, - {"topic": "AVG", "key": "1", "value": "1,550"} + {"topic": "AVG", "key": 0, "value": "0,500"}, + {"topic": "AVG", "key": 0, "value": "0,300"}, + {"topic": "AVG", "key": 0, "value": "0,250"}, + {"topic": "AVG", "key": 1, "value": "1,1000"}, + {"topic": "AVG", "key": 1, "value": "1,550"} ] } ] diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/collect-list.json b/ksql-functional-tests/src/test/resources/query-validation-tests/collect-list.json index 0fd54d1777d3..5bc2a6c0c787 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/collect-list.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/collect-list.json @@ -12,211 +12,211 @@ "name": "collect_list int", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 0}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 500}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 100}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 0}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 100}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 500}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 100}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [0]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [0,100]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500,100]}} + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [0]}}, + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [0,100]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [500]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [500,100]}} ] }, { "name": "collect_list long", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 2147483648}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 500}}, - {"topic": "test_topic", "key": "100", "value": 
{"ID": 100, "VALUE": 100}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 2147483648}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 100}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 500}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 100}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [2147483648]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [2147483648,100]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500,100]}} + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [2147483648]}}, + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [2147483648,100]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [500]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [500,100]}} ] }, { "name": "collect_list double", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 5.4}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100.1}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 500.9}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 300.8}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 5.4}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 100.1}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 500.9}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 300.8}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [5.4]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [5.4,100.1]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500.9]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500.9,300.8]}} + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [5.4]}}, + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [5.4,100.1]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [500.9]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [500.9,300.8]}} ] }, { "name": "collect_list string", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE varchar) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE varchar) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": "foo"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "baz"}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": "bar"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "baz"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "foo"}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": "foo"}}, + {"topic": "test_topic", 
"key": 100, "value": {"ID": 100, "VALUE": "baz"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": "bar"}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": "baz"}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": "foo"}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": ["foo"]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["baz"]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": ["foo","bar"]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["baz","baz"]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["baz","baz","foo"]}} + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": ["foo"]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": ["baz"]}}, + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": ["foo","bar"]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": ["baz","baz"]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": ["baz","baz","foo"]}} ] }, { "name": "collect_list bool map", "format": ["JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value['key1']) AS collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":false}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":false, "key2":true}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":true}}} + {"topic": "test_topic", "key": 0, "value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":false}}}, + {"topic": "test_topic", "key": 0, "value": {"id": 0, "name": "zero", "value": {"key1":false, "key2":true}}}, + {"topic": "test_topic", "key": 0, "value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":true}}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[true]}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[true,false]}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[true,false,true]}} + {"topic": "S2", "key": 0, "value": {"ID":0,"COLLECTED":[true]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"COLLECTED":[true,false]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"COLLECTED":[true,false,true]}} ] }, { "name": "collect_list int table", "format": ["AVRO", "JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 0}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 500}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 100}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 0}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 100}}, + 
{"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 500}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 100}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [0]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": []}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [100]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": []}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [100]}} + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [0]}}, + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": []}}, + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [100]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [500]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": []}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [100]}} ] }, { "name": "collect_list long table", "format": ["AVRO", "JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 2147483648}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 500}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 100}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 2147483648}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 100}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 500}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 100}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [2147483648]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": []}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [100]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": []}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [100]}} + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [2147483648]}}, + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": []}}, + {"topic": "S2", "key": 0, "value": {"ID": 0, "COLLECTED": [100]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [500]}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": []}}, + {"topic": "S2", "key": 100, "value": {"ID": 100, "COLLECTED": [100]}} ] }, { "name": "collect_list double table", "format": ["AVRO", "JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 5.4}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100.1}}, - {"topic": "test_topic", 
"key": "100", "value": {"ID": 100, "VALUE": 500.9}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 300.8}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 5.4}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "VALUE": 100.1}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 500.9}}, + {"topic": "test_topic", "key": 100, "value": {"ID": 100, "VALUE": 300.8}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [5.4]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": []}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [100.1]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500.9]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": []}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [300.8]}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": [5.4]}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": []}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": [100.1]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": [500.9]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": []}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": [300.8]}} ] }, { "name": "collect_list string table", "format": ["AVRO", "JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE varchar) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, VALUE varchar) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": "foo"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "baz"}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": "bar"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "baz"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "foo"}} + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": "foo"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "baz"}}, + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": "bar"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "baz"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "foo"}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": ["foo"]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["baz"]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": []}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": ["bar"]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": []}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["baz"]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": []}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["foo"]}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": ["foo"]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": ["baz"]}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": []}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": ["bar"]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": []}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": ["baz"]}}, + {"topic": "S2", "key": 
100,"value": {"ID": 100, "COLLECTED": []}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": ["foo"]}} ] }, { "name": "collect_list bool map table", "format": ["JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_list(value['key1']) AS collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":false}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":false, "key2":true}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":true}}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":false}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":false, "key2":true}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":true}}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[true]}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[]}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[false]}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[]}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[true]}} + {"topic": "S2", "key": 0,"value": {"ID":0,"COLLECTED":[true]}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"COLLECTED":[]}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"COLLECTED":[false]}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"COLLECTED":[]}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"COLLECTED":[true]}} ] } ] diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/collect-set.json b/ksql-functional-tests/src/test/resources/query-validation-tests/collect-set.json index b720a62f0403..b409311ef655 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/collect-set.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/collect-set.json @@ -12,100 +12,100 @@ "name": "collect_set int", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_set(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 0}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 500}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 100}} + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": 0}}, + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": 100}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": 500}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": 100}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [0]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [0,100]}}, - {"topic": "S2", "key": "100", "value": 
{"ID": 100, "COLLECTED": [500]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500,100]}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": [0]}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": [0,100]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": [500]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": [500,100]}} ] }, { "name": "collect_set long", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_set(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 2147483648}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 500}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 100}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100}} + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": 2147483648}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": 500}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": 100}}, + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": 100}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [2147483648]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500,100]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [2147483648,100]}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": [2147483648]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": [500]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": [500,100]}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": [2147483648,100]}} ] }, { "name": "collect_set double", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_set(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 5.4}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": 100.1}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 500.9}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": 300.8}} + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": 5.4}}, + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": 100.1}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": 500.9}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": 300.8}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [5.4]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": [5.4,100.1]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500.9]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": [500.9,300.8]}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": [5.4]}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": 
[5.4,100.1]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": [500.9]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": [500.9,300.8]}} ] }, { "name": "collect_set string", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE varchar) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE varchar) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_set(value) as collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": "foo"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "baz"}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": "bar"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "baz"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "foo"}} + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": "foo"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "baz"}}, + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": "bar"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "baz"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "foo"}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": ["foo"]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["baz"]}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COLLECTED": ["foo","bar"]}, "timestamp": 0}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["baz"]}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COLLECTED": ["baz","foo"]}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": ["foo"]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": ["baz"]}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COLLECTED": ["foo","bar"]}, "timestamp": 0}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": ["baz"]}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COLLECTED": ["baz","foo"]}} ] }, { "name": "collect_set bool map", "format": ["JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, collect_set(value['key1']) AS collected FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":false}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":false, "key2":true}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":true}}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":false}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":false, "key2":true}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":true, "key2":true}}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[true]}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[true,false]}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"COLLECTED":[true,false]}} + {"topic": "S2", 
"key": 0,"value": {"ID":0,"COLLECTED":[true]}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"COLLECTED":[true,false]}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"COLLECTED":[true,false]}} ] } ] diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/count.json b/ksql-functional-tests/src/test/resources/query-validation-tests/count.json index 1cc848c0426c..dfc5383a832d 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/count.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/count.json @@ -6,18 +6,18 @@ { "name": "count", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, count() FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0.0"}, - {"topic": "test_topic", "key": "0", "value": "0,100,0.0"}, - {"topic": "test_topic", "key": "100", "value": "100,100,0.0"} + {"topic": "test_topic", "key": 0,"value": "0,zero,0.0"}, + {"topic": "test_topic", "key": 0,"value": "0,100,0.0"}, + {"topic": "test_topic", "key": 100,"value": "100,100,0.0"} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,1"}, - {"topic": "S2", "key": "0", "value": "0,2"}, - {"topic": "S2", "key": "100", "value": "100,1"} + {"topic": "S2", "key": 0,"value": "0,1"}, + {"topic": "S2", "key": 0,"value": "0,2"}, + {"topic": "S2", "key": 100,"value": "100,1"} ] },{ "name": "count star", diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json index f7347f915864..e83d899c6931 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json @@ -27,7 +27,12 @@ {"topic": "OUTPUT", "key": "d1", "value": "d1,2"}, {"topic": "OUTPUT", "key": "d2", "value": "d2,2"}, {"topic": "OUTPUT", "key": "d1", "value": "d1,3"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, DATA STRING, KSQL_COL_1 BIGINT"} + ] + } }, { "name": "field (stream->table) - KAFKA", @@ -65,7 +70,43 @@ {"topic": "OUTPUT", "key": "d1", "value": {"DATA": "d1", "KSQL_COL_1":2}, "timestamp": 3}, {"topic": "OUTPUT", "key": "d2", "value": {"DATA": "d2", "KSQL_COL_1":2}, "timestamp": 4}, {"topic": "OUTPUT", "key": "d1", "value": {"DATA": "d1", "KSQL_COL_1":3}, "timestamp": 5} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, DATA STRING, KSQL_COL_1 BIGINT"} + ] + } + }, + { + "name": "int field (stream->table)", + "statements": [ + "CREATE STREAM TEST (ID INT) WITH (kafka_topic='test_topic', value_format='JSON');", + "CREATE TABLE OUTPUT AS SELECT ID, COUNT(*) FROM TEST GROUP BY ID;" + ], + "inputs": [ + {"topic": "test_topic", "key": "a", "value": {"ID": 1}, "timestamp": 1}, + {"topic": "test_topic", "key": "b", "value": {"ID": 2}, "timestamp": 2}, + {"topic": "test_topic", "key": "c", "value": {"ID": 1}, "timestamp": 3}, + {"topic": "test_topic", "key": "d", "value": {"ID": 2}, "timestamp": 4}, + {"topic": "test_topic", "key": "e", "value": {"ID": 1}, "timestamp": 5} + ], + "outputs": [ + {"topic": 
"_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 1, "value": {"KSQL_INTERNAL_COL_0": 1, "KSQL_INTERNAL_COL_1": 1, "KSQL_AGG_VARIABLE_0": 1}, "timestamp": 1}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 2, "value": {"KSQL_INTERNAL_COL_0": 2, "KSQL_INTERNAL_COL_1": 2, "KSQL_AGG_VARIABLE_0": 1}, "timestamp": 2}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 1, "value": {"KSQL_INTERNAL_COL_0": 1, "KSQL_INTERNAL_COL_1": 3, "KSQL_AGG_VARIABLE_0": 2}, "timestamp": 3}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 2, "value": {"KSQL_INTERNAL_COL_0": 2, "KSQL_INTERNAL_COL_1": 4, "KSQL_AGG_VARIABLE_0": 2}, "timestamp": 4}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 1, "value": {"KSQL_INTERNAL_COL_0": 1, "KSQL_INTERNAL_COL_1": 5, "KSQL_AGG_VARIABLE_0": 3}, "timestamp": 5}, + {"topic": "OUTPUT", "key": 1, "value": {"ID": 1, "KSQL_COL_1":1}, "timestamp": 1}, + {"topic": "OUTPUT", "key": 2, "value": {"ID": 2, "KSQL_COL_1":1}, "timestamp": 2}, + {"topic": "OUTPUT", "key": 1, "value": {"ID": 1, "KSQL_COL_1":2}, "timestamp": 3}, + {"topic": "OUTPUT", "key": 2, "value": {"ID": 2, "KSQL_COL_1":2}, "timestamp": 4}, + {"topic": "OUTPUT", "key": 1, "value": {"ID": 1, "KSQL_COL_1":3}, "timestamp": 5} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY INT KEY, ID INT, KSQL_COL_1 BIGINT"} + ] + } }, { "name": "fields (stream->table)", @@ -91,7 +132,12 @@ {"topic": "OUTPUT", "key": "a|+|1", "value": "1,a,2"}, {"topic": "OUTPUT", "key": "b|+|2", "value": "2,b,2"}, {"topic": "OUTPUT", "key": "a|+|3", "value": "3,a,1"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, F1 INT, F2 STRING, KSQL_COL_2 BIGINT"} + ] + } }, { "name": "fields (stream->table) - format", @@ -118,7 +164,12 @@ {"topic": "OUTPUT", "key": "a|+|1", "value": {"F1": 1, "F2": "a", "KSQL_COL_2": 2}}, {"topic": "OUTPUT", "key": "b|+|2", "value": {"F1": 2, "F2": "b", "KSQL_COL_2": 2}}, {"topic": "OUTPUT", "key": "a|+|3", "value": {"F1": 3, "F2": "a", "KSQL_COL_2": 1}} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, F1 INT, F2 STRING, KSQL_COL_2 BIGINT"} + ] + } }, { "name": "with groupings (stream->table)", @@ -139,7 +190,12 @@ {"topic": "OUTPUT", "key": "-1|+|a|+|1", "value": "1,a,2"}, {"topic": "OUTPUT", "key": "-2|+|b|+|2", "value": "2,b,2"}, {"topic": "OUTPUT", "key": "-3|+|a|+|3", "value": "3,a,1"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, F1 INT, F2 STRING, KSQL_COL_2 BIGINT"} + ] + } }, { "name": "fields (table->table)", @@ -169,7 +225,12 @@ {"topic": "OUTPUT", "key": "b|+|2", "value": "2,b,0"}, {"topic": "OUTPUT", "key": "b|+|1", "value": "1,b,0"}, {"topic": "OUTPUT", "key": "a|+|1", "value": "1,a,1"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, F1 INT, F2 STRING, KSQL_COL_2 BIGINT"} + ] + } }, { "name": "fields (table->table) - format", @@ -200,7 +261,12 @@ {"topic": "OUTPUT", "key": "b|+|2", "value": {"F1": 2, "F2": "b", "KSQL_COL_2": 0}}, {"topic": "OUTPUT", "key": "b|+|1", "value": {"F1": 1, "F2": "b", "KSQL_COL_2": 0}}, 
{"topic": "OUTPUT", "key": "a|+|1", "value": {"F1": 1, "F2": "a", "KSQL_COL_2": 1}} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, F1 INT, F2 STRING, KSQL_COL_2 BIGINT"} + ] + } }, { "name": "field with re-key (stream->table)", @@ -231,7 +297,48 @@ {"topic": "OUTPUT", "key": "d1", "value": "d1,2"}, {"topic": "OUTPUT", "key": "d2", "value": "d2,2"}, {"topic": "OUTPUT", "key": "d1", "value": "d1,3"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, DATA STRING, KSQL_COL_1 BIGINT"} + ] + } + }, + { + "name": "double field with re-key (stream->table)", + "statements": [ + "CREATE STREAM TEST (data double) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT AS SELECT data, COUNT(*) FROM TEST GROUP BY DATA;" + ], + "inputs": [ + {"topic": "test_topic", "value": "0.1"}, + {"topic": "test_topic", "value": "0.2"}, + {"topic": "test_topic", "value": "0.1"}, + {"topic": "test_topic", "value": "0.2"}, + {"topic": "test_topic", "value": "0.1"} + ], + "outputs": [ + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-GroupBy-repartition", "key": 0.1, "value": "0.1,0"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-GroupBy-repartition", "key": 0.2, "value": "0.2,0"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-GroupBy-repartition", "key": 0.1, "value": "0.1,0"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-GroupBy-repartition", "key": 0.2, "value": "0.2,0"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-GroupBy-repartition", "key": 0.1, "value": "0.1,0"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 0.1, "value": "0.1,0,1"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 0.2, "value": "0.2,0,1"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 0.1, "value": "0.1,0,2"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 0.2, "value": "0.2,0,2"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 0.1, "value": "0.1,0,3"}, + {"topic": "OUTPUT", "key": 0.1, "value": "0.1,1"}, + {"topic": "OUTPUT", "key": 0.2, "value": "0.2,1"}, + {"topic": "OUTPUT", "key": 0.1, "value": "0.1,2"}, + {"topic": "OUTPUT", "key": 0.2, "value": "0.2,2"}, + {"topic": "OUTPUT", "key": 0.1, "value": "0.1,3"} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY DOUBLE KEY, DATA DOUBLE, KSQL_COL_1 BIGINT"} + ] + } }, { "name": "field with re-key (stream->table) - format", @@ -263,7 +370,12 @@ {"topic": "OUTPUT", "key": "d1", "value": {"DATA": "d1", "KSQL_COL_1":2}}, {"topic": "OUTPUT", "key": "d2", "value": {"DATA": "d2", "KSQL_COL_1":2}}, {"topic": "OUTPUT", "key": "d1", "value": {"DATA": "d1", "KSQL_COL_1":3}} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, DATA STRING, KSQL_COL_1 BIGINT"} + ] + } }, { "name": "field with re-key (table->table)", @@ -285,7 +397,12 @@ {"topic": "OUTPUT", "key": "r0", "value": "1"}, {"topic": "OUTPUT", "key": "r1", "value": "0"}, {"topic": "OUTPUT", "key": "r0", 
"value": "2"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, KSQL_COL_0 BIGINT"} + ] + } }, { "name": "with aggregate arithmetic (stream->table)", @@ -302,7 +419,12 @@ {"topic": "OUTPUT", "key": "d1", "value": "d1,2"}, {"topic": "OUTPUT", "key": "d2", "value": "d2,2"}, {"topic": "OUTPUT", "key": "d1", "value": "d1,4"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, DATA STRING, KSQL_COL_1 BIGINT"} + ] + } }, { "name": "with aggregate arithmetic (table->table)", @@ -324,7 +446,12 @@ {"topic": "OUTPUT", "key": "r0", "value": "2"}, {"topic": "OUTPUT", "key": "r1", "value": "0"}, {"topic": "OUTPUT", "key": "r0", "value": "4"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, KSQL_COL_0 BIGINT"} + ] + } }, { "name": "with aggregate arithmetic involving source field (stream->table)", @@ -358,12 +485,12 @@ {"topic": "test_topic", "key": "2", "value": null} ], "outputs": [ - {"topic": "OUTPUT", "key": "2", "value": "20"}, - {"topic": "OUTPUT", "key": "2", "value": "0"}, - {"topic": "OUTPUT", "key": "2", "value": "40"}, - {"topic": "OUTPUT", "key": "2", "value": "0"}, - {"topic": "OUTPUT", "key": "2", "value": "60"}, - {"topic": "OUTPUT", "key": "2", "value": "0"} + {"topic": "OUTPUT", "key": 2, "value": "20"}, + {"topic": "OUTPUT", "key": 2, "value": "0"}, + {"topic": "OUTPUT", "key": 2, "value": "40"}, + {"topic": "OUTPUT", "key": 2, "value": "0"}, + {"topic": "OUTPUT", "key": 2, "value": "60"}, + {"topic": "OUTPUT", "key": 2, "value": "0"} ] }, { @@ -398,7 +525,12 @@ {"topic": "OUTPUT", "key": "an", "value": "an,2"}, {"topic": "OUTPUT", "key": "so", "value": "so,3"}, {"topic": "OUTPUT", "key": "th", "value": "th,1"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, KSQL_COL_0 STRING, KSQL_COL_1 BIGINT"} + ] + } }, { "name": "function (table->table)", @@ -420,7 +552,39 @@ {"topic": "OUTPUT", "key": "r0", "value": "r0,1"}, {"topic": "OUTPUT", "key": "r1", "value": "r1,0"}, {"topic": "OUTPUT", "key": "r0", "value": "r0,2"} - ] + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY STRING KEY, KSQL_COL_0 STRING, KSQL_COL_1 BIGINT"} + ] + } + }, + { + "name": "int function (table->table)", + "statements": [ + "CREATE TABLE TEST (region VARCHAR) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM TEST GROUP BY LEN(region);" + ], + "inputs": [ + {"topic": "test_topic", "key": "1", "value": "usa"}, + {"topic": "test_topic", "key": "2", "value": "eu"}, + {"topic": "test_topic", "key": "3", "value": "usa"}, + {"topic": "test_topic", "key": "1", "value": null}, + {"topic": "test_topic", "key": "2", "value": "usa"} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3, "value": "1"}, + {"topic": "OUTPUT", "key": 2, "value": "1"}, + {"topic": "OUTPUT", "key": 3, "value": "2"}, + {"topic": "OUTPUT", "key": 3, "value": "1"}, + {"topic": "OUTPUT", "key": 2, "value": "0"}, + {"topic": "OUTPUT", "key": 3, "value": "2"} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "table", "schema": "ROWKEY INT KEY, KSQL_COL_0 BIGINT"} + ] + } }, { "name": "function with select field that is a subset of group by (stream->table)", @@ -482,9 +646,26 @@ "outputs": [ {"topic": "OUTPUT", "key": "Something", "value": {"FIELD": "Something", "COUNT": 1}}, {"topic": "OUTPUT", "key": "Something Else", "value": {"FIELD": 
"Something Else", "COUNT": 1}}, - {"topic": "OUTPUT", "key": "null", "value": {"FIELD": null, "COUNT": 1}}, - {"topic": "OUTPUT", "key": "Something", "value": {"FIELD": "Something", "COUNT": 2}}, - {"topic": "OUTPUT", "key": "null", "value": {"FIELD": null, "COUNT": 2}} + {"topic": "OUTPUT", "key": "Something", "value": {"FIELD": "Something", "COUNT": 2}} + ] + }, + { + "name": "int json field (stream->table)", + "statements": [ + "CREATE STREAM TEST (data STRUCT) WITH (kafka_topic='test_topic', value_format='JSON');", + "CREATE TABLE OUTPUT AS SELECT COUNT(*) AS COUNT FROM TEST GROUP BY data->field;" + ], + "inputs": [ + {"topic": "test_topic", "value": {"data": {"field": 1}}}, + {"topic": "test_topic", "value": {"data": {"field": 2}}}, + {"topic": "test_topic", "value": {"data": {}}}, + {"topic": "test_topic", "value": {"data": {"field": 1}}}, + {"topic": "test_topic", "value": {"data": {}}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 1, "value": {"COUNT": 1}}, + {"topic": "OUTPUT", "key": 2, "value": {"COUNT": 1}}, + {"topic": "OUTPUT", "key": 1, "value": {"COUNT": 2}} ] }, { @@ -544,7 +725,7 @@ "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM TEST GROUP BY ROWTIME;" ], "inputs": [{"topic": "test_topic", "value": "-", "timestamp": 10}], - "outputs": [{"topic": "OUTPUT", "key": "10", "value": "1", "timestamp": 10}] + "outputs": [{"topic": "OUTPUT", "key": 10, "value": "1", "timestamp": 10}] }, { "name": "constant (stream->table)", @@ -560,11 +741,11 @@ {"topic": "test_topic", "value": "-"} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": "1"}, - {"topic": "OUTPUT", "key": "1", "value": "2"}, - {"topic": "OUTPUT", "key": "1", "value": "3"}, - {"topic": "OUTPUT", "key": "1", "value": "4"}, - {"topic": "OUTPUT", "key": "1", "value": "5"} + {"topic": "OUTPUT", "key": 1, "value": "1"}, + {"topic": "OUTPUT", "key": 1, "value": "2"}, + {"topic": "OUTPUT", "key": 1, "value": "3"}, + {"topic": "OUTPUT", "key": 1, "value": "4"}, + {"topic": "OUTPUT", "key": 1, "value": "5"} ] }, { @@ -581,12 +762,12 @@ {"topic": "test_topic", "key": "2", "value": "2,r0"} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": "1"}, - {"topic": "OUTPUT", "key": "1", "value": "2"}, - {"topic": "OUTPUT", "key": "1", "value": "3"}, - {"topic": "OUTPUT", "key": "1", "value": "2"}, - {"topic": "OUTPUT", "key": "1", "value": "1"}, - {"topic": "OUTPUT", "key": "1", "value": "2"} + {"topic": "OUTPUT", "key": 1, "value": "1"}, + {"topic": "OUTPUT", "key": 1, "value": "2"}, + {"topic": "OUTPUT", "key": 1, "value": "3"}, + {"topic": "OUTPUT", "key": 1, "value": "2"}, + {"topic": "OUTPUT", "key": 1, "value": "1"}, + {"topic": "OUTPUT", "key": 1, "value": "2"} ] }, { @@ -737,10 +918,10 @@ {"topic": "test_topic", "value": "6,8"} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": "1,1"}, - {"topic": "OUTPUT", "key": "1", "value": "1,2"}, - {"topic": "OUTPUT", "key": "2", "value": "2,1"}, - {"topic": "OUTPUT", "key": "2", "value": "2,2"} + {"topic": "OUTPUT", "key": 1, "value": "1,1"}, + {"topic": "OUTPUT", "key": 1, "value": "1,2"}, + {"topic": "OUTPUT", "key": 2, "value": "2,1"}, + {"topic": "OUTPUT", "key": 2, "value": "2,2"} ] }, { @@ -757,12 +938,12 @@ {"topic": "test_topic", "key": "2", "value": "4,2"} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": "1,1"}, - {"topic": "OUTPUT", "key": "1", "value": "1,2"}, - {"topic": "OUTPUT", "key": "2", "value": "2,1"}, - {"topic": "OUTPUT", "key": "1", "value": "1,1"}, - {"topic": "OUTPUT", "key": "1", "value": "1,0"}, - {"topic": "OUTPUT", "key": "2", 
"value": "2,2"} + {"topic": "OUTPUT", "key": 1, "value": "1,1"}, + {"topic": "OUTPUT", "key": 1, "value": "1,2"}, + {"topic": "OUTPUT", "key": 2, "value": "2,1"}, + {"topic": "OUTPUT", "key": 1, "value": "1,1"}, + {"topic": "OUTPUT", "key": 1, "value": "1,0"}, + {"topic": "OUTPUT", "key": 2, "value": "2,2"} ] }, { @@ -790,22 +971,22 @@ { "name": "with having expression (stream->table)", "statements": [ - "CREATE STREAM TEST (f1 INT) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", + "CREATE STREAM TEST (ROWKEY INT KEY, f1 INT) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1, COUNT(*) FROM TEST GROUP BY f1 HAVING SUM(f1) > 1;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1"}, - {"topic": "test_topic", "key": "2", "value": "2"}, - {"topic": "test_topic", "key": "1", "value": "1"}, - {"topic": "test_topic", "key": "2", "value": "2"}, - {"topic": "test_topic", "key": "3", "value": "3"} + {"topic": "test_topic", "key": 1, "value": "1"}, + {"topic": "test_topic", "key": 2, "value": "2"}, + {"topic": "test_topic", "key": 1, "value": "1"}, + {"topic": "test_topic", "key": 2, "value": "2"}, + {"topic": "test_topic", "key": 3, "value": "3"} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": null}, - {"topic": "OUTPUT", "key": "2", "value": "2,1"}, - {"topic": "OUTPUT", "key": "1", "value": "1,2"}, - {"topic": "OUTPUT", "key": "2", "value": "2,2"}, - {"topic": "OUTPUT", "key": "3", "value": "3,1"} + {"topic": "OUTPUT", "key": 1, "value": null}, + {"topic": "OUTPUT", "key": 2, "value": "2,1"}, + {"topic": "OUTPUT", "key": 1, "value": "1,2"}, + {"topic": "OUTPUT", "key": 2, "value": "2,2"}, + {"topic": "OUTPUT", "key": 3, "value": "3,1"} ] }, { @@ -822,12 +1003,12 @@ {"topic": "test_topic", "key": "2", "value": "2,0"} ], "outputs": [ - {"topic": "OUTPUT", "key": "0", "value": "0,1"}, - {"topic": "OUTPUT", "key": "1", "value": "1,2"}, - {"topic": "OUTPUT", "key": "0", "value": "0,4"}, - {"topic": "OUTPUT", "key": "0", "value": "0,3"}, - {"topic": "OUTPUT", "key": "1", "value": null}, - {"topic": "OUTPUT", "key": "0", "value": "0,5"} + {"topic": "OUTPUT", "key": 0, "value": "0,1"}, + {"topic": "OUTPUT", "key": 1, "value": "1,2"}, + {"topic": "OUTPUT", "key": 0, "value": "0,4"}, + {"topic": "OUTPUT", "key": 0, "value": "0,3"}, + {"topic": "OUTPUT", "key": 1, "value": null}, + {"topic": "OUTPUT", "key": 0, "value": "0,5"} ] }, { @@ -845,12 +1026,12 @@ {"topic": "test_topic", "key": "0", "value": "1,test"} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": null}, - {"topic": "OUTPUT", "key": "2", "value": null}, - {"topic": "OUTPUT", "key": "1", "value": "1,2"}, - {"topic": "OUTPUT", "key": "2", "value": null}, - {"topic": "OUTPUT", "key": "2", "value": null}, - {"topic": "OUTPUT", "key": "1", "value": "1,3"} + {"topic": "OUTPUT", "key": 1, "value": null}, + {"topic": "OUTPUT", "key": 2, "value": null}, + {"topic": "OUTPUT", "key": 1, "value": "1,2"}, + {"topic": "OUTPUT", "key": 2, "value": null}, + {"topic": "OUTPUT", "key": 2, "value": null}, + {"topic": "OUTPUT", "key": 1, "value": "1,3"} ] }, { @@ -900,22 +1081,22 @@ { "name": "with constants in the projection (stream->table)", "statements": [ - "CREATE STREAM TEST (f1 INT) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", + "CREATE STREAM TEST (ROWKEY INT KEY, f1 INT) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1, 'some constant' as f3, COUNT(f1) FROM TEST GROUP 
BY f1;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1"}, - {"topic": "test_topic", "key": "2", "value": "2"}, - {"topic": "test_topic", "key": "1", "value": "1"}, - {"topic": "test_topic", "key": "2", "value": "2"}, - {"topic": "test_topic", "key": "3", "value": "3"} + {"topic": "test_topic", "key": 1, "value": "1"}, + {"topic": "test_topic", "key": 2, "value": "2"}, + {"topic": "test_topic", "key": 1, "value": "1"}, + {"topic": "test_topic", "key": 2, "value": "2"}, + {"topic": "test_topic", "key": 3, "value": "3"} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": "1,some constant,1"}, - {"topic": "OUTPUT", "key": "2", "value": "2,some constant,1"}, - {"topic": "OUTPUT", "key": "1", "value": "1,some constant,2"}, - {"topic": "OUTPUT", "key": "2", "value": "2,some constant,2"}, - {"topic": "OUTPUT", "key": "3", "value": "3,some constant,1"} + {"topic": "OUTPUT", "key": 1, "value": "1,some constant,1"}, + {"topic": "OUTPUT", "key": 2, "value": "2,some constant,1"}, + {"topic": "OUTPUT", "key": 1, "value": "1,some constant,2"}, + {"topic": "OUTPUT", "key": 2, "value": "2,some constant,2"}, + {"topic": "OUTPUT", "key": 3, "value": "3,some constant,1"} ] }, { @@ -1134,6 +1315,64 @@ "type": "io.confluent.ksql.util.KsqlStatementException", "message": "Aggregate functions can not be nested: SUM(COUNT())" } + }, + { + "name": "should exclude any stream row whose single GROUP BY expression resolves to NULL", + "comment": "Passing NULL as the POS to SUBSTRING should resolve to NULL without an exception", + "statements": [ + "CREATE STREAM TEST (str STRING, pos INT) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT AS SELECT COUNT() FROM TEST GROUP BY SUBSTRING(str, pos);" + ], + "inputs": [ + {"topic": "test_topic", "value": "xx,1"}, + {"topic": "test_topic", "value": "x,"}, + {"topic": "test_topic", "value": "xx,1"} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "xx", "value": "1"}, + {"topic": "OUTPUT", "key": "xx", "value": "2"} + ] + }, + { + "name": "should exclude any table row whose single GROUP BY expression resolves to NULL", + "comment": "Passing NULL as the POS to SUBSTRING should resolve to NULL without an exception", + "statements": [ + "CREATE TABLE TEST (str STRING, pos INT) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT AS SELECT COUNT() FROM TEST GROUP BY SUBSTRING(str, pos);" + ], + "inputs": [ + {"topic": "test_topic", "key": "1", "value": "xx,1"}, + {"topic": "test_topic", "key": "2", "value": "x,"}, + {"topic": "test_topic", "key": "3", "value": "xx,1"} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "xx", "value": "1"}, + {"topic": "OUTPUT", "key": "xx", "value": "2"} + ] + }, + { + "name": "should exclude any stream row whose single GROUP BY expression throws", + "statements": [ + "CREATE STREAM TEST (id INT) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT AS SELECT COUNT() FROM TEST GROUP BY BAD_UDF(id);" + ], + "inputs": [ + {"topic": "test_topic", "value": "1"} + ], + "outputs": [ + ] + }, + { + "name": "should exclude any table row whose single GROUP BY expression throws", + "statements": [ + "CREATE TABLE TEST (id INT) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT AS SELECT COUNT() FROM TEST GROUP BY BAD_UDF(id);" + ], + "inputs": [ + {"topic": "test_topic", "key": "2", "value": "1"} + ], + "outputs": [ + ] } ] } \ No newline at end of file diff --git 
a/ksql-functional-tests/src/test/resources/query-validation-tests/having.json b/ksql-functional-tests/src/test/resources/query-validation-tests/having.json index 5ac8c911c093..35c7736014e3 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/having.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/having.json @@ -11,39 +11,39 @@ { "name": "table having", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE T1 as select id, sum(value) as sum from test WINDOW TUMBLING (SIZE 30 SECONDS) group by id HAVING sum(value) > 100;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0"}, - {"topic": "test_topic", "key": "1", "value": "1,one,100"}, - {"topic": "test_topic", "key": "1", "value": "1,one,5"} + {"topic": "test_topic", "key": 0,"value": "0,zero,0"}, + {"topic": "test_topic", "key": 1,"value": "1,one,100"}, + {"topic": "test_topic", "key": 1,"value": "1,one,5"} ], "outputs": [ - {"topic": "T1", "key": "0", "value": null, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "T1", "key": "1", "value": null, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "T1", "key": "1", "value": "1,105", "window": {"start": 0, "end": 30000, "type": "time"}} + {"topic": "T1", "key": 0,"value": null, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "T1", "key": 1,"value": null, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "T1", "key": 1,"value": "1,105", "window": {"start": 0, "end": 30000, "type": "time"}} ] }, { "name": "calculate average in having", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE AVG AS select id, sum(value)/count(id) as avg from test GROUP BY id HAVING sum(value)/count(id)> 25;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,50"}, - {"topic": "test_topic", "key": "0", "value": "0,zero,10"}, - {"topic": "test_topic", "key": "0", "value": "0,zero,15"}, - {"topic": "test_topic", "key": "1", "value": "1,one,100"}, - {"topic": "test_topic", "key": "1", "value": "1,one,10"} + {"topic": "test_topic", "key": 0,"value": "0,zero,50"}, + {"topic": "test_topic", "key": 0,"value": "0,zero,10"}, + {"topic": "test_topic", "key": 0,"value": "0,zero,15"}, + {"topic": "test_topic", "key": 1,"value": "1,one,100"}, + {"topic": "test_topic", "key": 1,"value": "1,one,10"} ], "outputs": [ - {"topic": "AVG", "key": "0", "value": "0,50"}, - {"topic": "AVG", "key": "0", "value": "0,30"}, - {"topic": "AVG", "key": "0", "value": null}, - {"topic": "AVG", "key": "1", "value": "1,100"}, - {"topic": "AVG", "key": "1", "value": "1,55"} + {"topic": "AVG", "key": 0,"value": "0,50"}, + {"topic": "AVG", "key": 0,"value": "0,30"}, + {"topic": "AVG", "key": 0,"value": null}, + {"topic": "AVG", "key": 1,"value": "1,100"}, + {"topic": "AVG", "key": 1,"value": "1,55"} ] } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/histogram.json b/ksql-functional-tests/src/test/resources/query-validation-tests/histogram.json index 
8982694b6be4..88cc78561416 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/histogram.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/histogram.json @@ -12,22 +12,22 @@ "name": "histogram string", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE varchar) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE varchar) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, histogram(value) as counts FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": "foo"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "baz"}}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "VALUE": "bar"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "baz"}}, - {"topic": "test_topic", "key": "100", "value": {"ID": 100, "VALUE": "foo"}} + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": "foo"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "baz"}}, + {"topic": "test_topic", "key": 0,"value": {"ID": 0, "VALUE": "bar"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "baz"}}, + {"topic": "test_topic", "key": 100,"value": {"ID": 100, "VALUE": "foo"}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "COUNTS": {"foo": 1}}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COUNTS": {"baz": 1}}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "COUNTS": {"foo": 1,"bar": 1}}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COUNTS": {"baz": 2}}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "COUNTS": {"baz": 2,"foo": 1}}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "COUNTS": {"foo": 1}}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COUNTS": {"baz": 1}}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "COUNTS": {"foo": 1,"bar": 1}}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COUNTS": {"baz": 2}}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "COUNTS": {"baz": 2,"foo": 1}}} ] }, { diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/hopping-windows.json b/ksql-functional-tests/src/test/resources/query-validation-tests/hopping-windows.json index b77eacdf0ad8..d3797e2dc521 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/hopping-windows.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/hopping-windows.json @@ -11,37 +11,37 @@ { "name": "max hopping", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) FROM test WINDOW HOPPING (SIZE 30 SECONDS, ADVANCE BY 10 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0", "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": "0,100,5", "timestamp": 10000}, - {"topic": "test_topic", "key": "100", "value": "100,100,100", "timestamp": 30000}, - {"topic": "test_topic", "key": "100", "value": "100,100,6", "timestamp": 45000}, - {"topic": "test_topic", "key": "100", "value": "100,100,300", "timestamp": 50000}, - {"topic": "test_topic", "key": "0", "value": 
"0,zero,100", "timestamp": 35000}, - {"topic": "test_topic", "key": "0", "value": "0,100,2000", "timestamp": 40000} + {"topic": "test_topic", "key": 0,"value": "0,zero,0", "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": "0,100,5", "timestamp": 10000}, + {"topic": "test_topic", "key": 100,"value": "100,100,100", "timestamp": 30000}, + {"topic": "test_topic", "key": 100,"value": "100,100,6", "timestamp": 45000}, + {"topic": "test_topic", "key": 100,"value": "100,100,300", "timestamp": 50000}, + {"topic": "test_topic", "key": 0,"value": "0,zero,100", "timestamp": 35000}, + {"topic": "test_topic", "key": 0,"value": "0,100,2000", "timestamp": 40000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,5", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,5", "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 10000, "end": 40000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 20000, "end": 50000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 45000, "window": {"start": 20000, "end": 50000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,6", "timestamp": 45000, "window": {"start": 40000, "end": 70000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,300", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,300", "timestamp": 50000, "window": {"start": 40000, "end": 70000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,300", "timestamp": 50000, "window": {"start": 50000, "end": 80000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,100", "timestamp": 35000, "window": {"start": 10000, "end": 40000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,100", "timestamp": 35000, "window": {"start": 20000, "end": 50000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,100", "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,2000", "timestamp": 40000, "window": {"start": 20000, "end": 50000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,2000", "timestamp": 40000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,2000", "timestamp": 40000, "window": {"start": 40000, "end": 70000, "type": "time"}} + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,5", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,5", "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": {"start": 10000, "end": 40000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": 
{"start": 20000, "end": 50000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 45000, "window": {"start": 20000, "end": 50000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,6", "timestamp": 45000, "window": {"start": 40000, "end": 70000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,300", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,300", "timestamp": 50000, "window": {"start": 40000, "end": 70000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,300", "timestamp": 50000, "window": {"start": 50000, "end": 80000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,100", "timestamp": 35000, "window": {"start": 10000, "end": 40000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,100", "timestamp": 35000, "window": {"start": 20000, "end": 50000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,100", "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,2000", "timestamp": 40000, "window": {"start": 20000, "end": 50000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,2000", "timestamp": 40000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,2000", "timestamp": 40000, "window": {"start": 40000, "end": 70000, "type": "time"}} ], "post": { "sources": [ @@ -56,85 +56,85 @@ { "name": "min hopping", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, min(value) FROM test WINDOW HOPPING (SIZE 30 SECONDS, ADVANCE BY 10 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0", "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": "0,100,5", "timestamp": 10000}, - {"topic": "test_topic", "key": "100", "value": "100,100,100", "timestamp": 30000} + {"topic": "test_topic", "key": 0,"value": "0,zero,0", "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": "0,100,5", "timestamp": 10000}, + {"topic": "test_topic", "key": 100,"value": "100,100,100", "timestamp": 30000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,5", "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 10000, "end": 40000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 20000, "end": 50000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}} + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + 
{"topic": "S2", "key": 0,"value": "0,0", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,5", "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": {"start": 10000, "end": 40000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": {"start": 20000, "end": 50000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}} ] }, { "name": "topk hopping", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, topk(value, 2) as topk FROM test WINDOW HOPPING (SIZE 30 SECONDS, ADVANCE BY 10 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 0}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 10000} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 0}, "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 10000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0]}, "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}} + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0]}, "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}} ] }, { "name": "topkdistinct hopping", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, topkdistinct(value, 2) as topk FROM test WINDOW HOPPING (SIZE 30 SECONDS, ADVANCE BY 10 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 0}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 10000} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 0}, "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 10000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[0.0]}, 
"timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0]}, "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0]}, "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}} + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0]}, "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0]}, "timestamp": 10000, "window": {"start": 10000, "end": 40000, "type": "time"}} ] }, { "name": "count", "statements": [ - "CREATE STREAM TEST (ID INT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY INT KEY, ID INT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT count(1) as count FROM test WINDOW HOPPING (SIZE 5 SECOND, ADVANCE BY 1 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0}, "timestamp": 10345}, - {"topic": "test_topic", "key": "0", "value": {"id": 0}, "timestamp": 13251} + {"topic": "test_topic", "key": 0, "value": {"id": 0}, "timestamp": 10345}, + {"topic": "test_topic", "key": 0, "value": {"id": 0}, "timestamp": 13251} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 6000, "end": 11000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 7000, "end": 12000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 8000, "end": 13000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 9000, "end": 14000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 10000, "end": 15000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":2}, "timestamp": 13251, "window": {"start": 9000, "end": 14000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":2}, "timestamp": 13251, "window": {"start": 10000, "end": 15000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":1}, "timestamp": 13251, "window": {"start": 11000, "end": 16000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":1}, "timestamp": 13251, "window": {"start": 12000, "end": 17000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"COUNT":1}, "timestamp": 13251, "window": {"start": 13000, "end": 18000, "type": "time"}} + {"topic": "S2", "key": 0,"value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 6000, "end": 11000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":1}, "timestamp": 10345, "window": 
{"start": 7000, "end": 12000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 8000, "end": 13000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 9000, "end": 14000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":1}, "timestamp": 10345, "window": {"start": 10000, "end": 15000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":2}, "timestamp": 13251, "window": {"start": 9000, "end": 14000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":2}, "timestamp": 13251, "window": {"start": 10000, "end": 15000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":1}, "timestamp": 13251, "window": {"start": 11000, "end": 16000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":1}, "timestamp": 13251, "window": {"start": 12000, "end": 17000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"COUNT":1}, "timestamp": 13251, "window": {"start": 13000, "end": 18000, "type": "time"}} ] }, { "name": "import hopping table", "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID', WINDOW_TYPE='HoPping', WINDOW_SIZE='30 seconds');", + "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', WINDOW_TYPE='HoPping', WINDOW_SIZE='30 seconds');", "CREATE TABLE S2 as SELECT *, ROWKEY as KEY FROM test;" ], "inputs": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json b/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json index 26c15d185630..ba4bdb68b4b1 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json @@ -124,7 +124,7 @@ {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"FOO":1, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"FOO":1, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -133,7 +133,7 @@ "name": "OUTPUT", "type": "table", "keyField": "FOO", - "schema": "ROWKEY STRING KEY, FOO INT, KSQL_COL_1 BIGINT" + "schema": "ROWKEY INT KEY, FOO INT, KSQL_COL_1 BIGINT" } ] } @@ -148,7 +148,7 @@ {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"ALIASED":1, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"ALIASED":1, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -167,7 +167,7 @@ {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"KSQL_COL_0": 1}} + {"topic": "OUTPUT", "key": 1,"value": {"KSQL_COL_0": 1}} ], "post": { "sources": [ @@ -359,14 +359,14 @@ { "name": "stream | initially set | group by (same) | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT foo, COUNT(*) FROM INPUT GROUP BY foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"1", "value": {"FOO":1, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", 
"key": 1, "value": {"FOO":1, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -381,14 +381,14 @@ { "name": "stream | initially set | group by (same) | key in value | aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT foo AS aliased, COUNT(*) FROM INPUT GROUP BY foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"1", "value": {"ALIASED":1, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"ALIASED":1, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -403,14 +403,14 @@ { "name": "stream | initially set | group by (same) | key not in value | -", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM INPUT GROUP BY foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"1", "value": {"KSQL_COL_0": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"KSQL_COL_0": 1}} ], "post": { "sources": [ @@ -424,14 +424,14 @@ { "name": "stream | initially set | group by (different) | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT bar, COUNT(*) FROM INPUT GROUP BY bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"2", "value": {"BAR":2, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 2, "value": {"BAR":2, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -442,14 +442,14 @@ { "name": "stream | initially set | group by (different) | key in value | aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT bar AS aliased, COUNT(*) FROM INPUT GROUP BY bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"2", "value": {"ALIASED":2, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 2, "value": {"ALIASED":2, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -460,14 +460,14 @@ { "name": "stream | initially set | group by (different) | key not in value | -", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM INPUT GROUP BY 
bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"2", "value": {"KSQL_COL_0": 1}} + {"topic": "OUTPUT", "key": 2, "value": {"KSQL_COL_0": 1}} ], "post": { "sources": [ @@ -504,7 +504,7 @@ {"topic": "input_topic", "key": "x", "value": {"foo": 1}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"FOO": 1, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"FOO": 1, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -526,7 +526,7 @@ {"topic": "input_topic", "key": "x", "value": {"foo": 1}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"ALIASED": 1, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"ALIASED": 1, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -538,14 +538,14 @@ { "name": "table | initially set | no key change | key in value | no aliasing", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT * FROM INPUT;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"FOO": 1, "BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"FOO": 1, "BAR": 2}} ], "post": { "sources": [ @@ -557,14 +557,14 @@ { "name": "table | initially set | no key change | key in value | aliasing", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT foo AS aliased, bar FROM INPUT;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"ALIASED": 1, "BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"ALIASED": 1, "BAR": 2}} ], "post": { "sources": [ @@ -576,14 +576,14 @@ { "name": "table | initially set | no key change | key not in value | -", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT bar FROM INPUT;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"BAR": 2}} ], "post": { "sources": [ @@ -594,14 +594,14 @@ { "name": "table | initially set | group by (same) | key in value | no aliasing", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT foo, COUNT(*) FROM INPUT GROUP BY foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", 
"key": 1,"value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"FOO": 1, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 1,"value": {"FOO": 1, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -612,14 +612,14 @@ { "name": "table | initially set | group by (same) | key in value | aliasing", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT foo AS aliased, COUNT(*) FROM INPUT GROUP BY foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1,"value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"ALIASED": 1, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 1,"value": {"ALIASED": 1, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -630,14 +630,14 @@ { "name": "table | initially set | group by (same) | key not in value | -", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM INPUT GROUP BY foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1,"value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "1", "value": {"KSQL_COL_0": 1}} + {"topic": "OUTPUT", "key": 1,"value": {"KSQL_COL_0": 1}} ], "post": { "sources": [ @@ -648,14 +648,14 @@ { "name": "table | initially set | group by (different) | key in value | no aliasing", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT bar, COUNT(*) FROM INPUT GROUP BY bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "2", "value": {"BAR": 2, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 2, "value": {"BAR": 2, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -666,14 +666,14 @@ { "name": "table | initially set | group by (different) | key in value | aliasing", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT bar AS aliased, COUNT(*) FROM INPUT GROUP BY bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "2", "value": {"ALIASED": 2, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key": 2,"value": {"ALIASED": 2, "KSQL_COL_1": 1}} ], "post": { "sources": [ @@ -684,14 +684,14 @@ { "name": "table | initially set | group by (different) | key not in value | -", "statements": [ - "CREATE TABLE INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE TABLE INPUT (ROWKEY INT KEY, foo INT, 
bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM INPUT GROUP BY bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": "2", "value": {"KSQL_COL_0": 1}} + {"topic": "OUTPUT", "key": 2,"value": {"KSQL_COL_0": 1}} ], "post": { "sources": [ @@ -702,11 +702,11 @@ { "name": "stream | initially set | partition by expression | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT foo + bar FROM INPUT PARTITION BY foo + bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ {"topic": "OUTPUT", "key": 3, "value": {"KSQL_COL_0": 3}} @@ -720,7 +720,7 @@ { "name": "stream | initially set | partition by multiple | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT * FROM INPUT PARTITION BY foo, bar;" ], "comment": [ @@ -734,11 +734,11 @@ { "name": "stream | initially set | group by multiple | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT foo, bar, COUNT(*) FROM INPUT GROUP BY bar, foo;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ {"topic": "OUTPUT", "key":"2|+|1", "value": {"FOO": 1, "BAR": 2, "KSQL_COL_2": 1}} @@ -752,14 +752,14 @@ { "name": "stream | initially set | group by expression | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE TABLE OUTPUT AS SELECT foo + bar, COUNT(*) FROM INPUT GROUP BY foo + bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key":"3", "value": {"KSQL_COL_0": 3, "KSQL_COL_1": 1}} + {"topic": "OUTPUT", "key":3, "value": {"KSQL_COL_0": 3, "KSQL_COL_1": 1}} ], "post": { "issues": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/max-group-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/max-group-by.json index 02d3adcc23ac..0e7fc21b0ba1 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/max-group-by.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/max-group-by.json @@ -11,129 +11,129 @@ { "name": "max integer group by", "statements": [ - "CREATE STREAM TEST (ID 
bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) as value FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"id": 1, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": -2147483647}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 5}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 100}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 6}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 300}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 2000}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 100}} + {"topic": "test_topic", "key": 1,"value": {"id": 1, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": -2147483647}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 5}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 100}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 6}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 300}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 2000}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100}} ], "outputs": [ - {"topic": "S2", "key": "1", "value": {"ID": 1, "VALUE": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": -2147483647}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 5}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 5}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 100}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 100}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 300}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 2000}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 2000}} + {"topic": "S2", "key": 1,"value": {"ID": 1, "VALUE": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": -2147483647}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 5}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 5}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 100}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 100}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 300}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 2000}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 2000}} ] }, { "name": "max long group by", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) as value FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"id": 1, 
"value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": -1000000}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 5}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 100}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 6}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 300}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 9223372036854775807}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 100}} + {"topic": "test_topic", "key": 1,"value": {"id": 1, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": -1000000}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 5}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 100}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 6}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 300}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 9223372036854775807}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100}} ], "outputs": [ - {"topic": "S2", "key": "1", "value": {"ID": 1, "VALUE": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": -1000000}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 5}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 5}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 100}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 100}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 300}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 9223372036854775807}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 9223372036854775807}} + {"topic": "S2", "key": 1,"value": {"ID": 1, "VALUE": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": -1000000}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 5}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 5}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 100}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 100}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 300}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 9223372036854775807}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 9223372036854775807}} ] }, { "name": "max double group by", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) as value FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"id": 1, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": -1000000.123}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 0.0}}, - {"topic": "test_topic", "key": "0", "value": 
{"id": 0, "value": 5.1}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 100.1}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 6.4}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 300.8}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 2000.99}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 100.11}} + {"topic": "test_topic", "key": 1,"value": {"id": 1, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": -1000000.123}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 0.0}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 5.1}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 100.1}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 6.4}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 300.8}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 2000.99}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100.11}} ], "outputs": [ - {"topic": "S2", "key": "1", "value": {"ID": 1, "VALUE": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": -1000000.123}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 0.0}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 5.1}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 5.1}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 100.1}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 100.1}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": 300.8}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 2000.99}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": 2000.99}} + {"topic": "S2", "key": 1,"value": {"ID": 1, "VALUE": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": -1000000.123}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 0.0}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 5.1}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 5.1}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 100.1}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 100.1}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": 300.8}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 2000.99}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": 2000.99}} ] }, { "name": "max decimal group by", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE decimal(4, 2)) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE decimal(4, 2)) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) as value FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"id": 1, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "-10.12"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "00.00"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 
"05.10"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": "10.10"}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": "06.40"}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": "30.80"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "20.99"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "10.11"}} + {"topic": "test_topic", "key": 1,"value": {"id": 1, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "-10.12"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "00.00"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "05.10"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": "10.10"}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": "06.40"}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": "30.80"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "20.99"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "10.11"}} ], "outputs": [ - {"topic": "S2", "key": "1", "value": {"ID": 1, "VALUE": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": "-10.12"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": "00.00"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": "05.10"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": "05.10"}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": "10.10"}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": "10.10"}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "VALUE": "30.80"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": "20.99"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "VALUE": "20.99"}} + {"topic": "S2", "key": 1,"value": {"ID": 1, "VALUE": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": "-10.12"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": "00.00"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": "05.10"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": "05.10"}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": "10.10"}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": "10.10"}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "VALUE": "30.80"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": "20.99"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "VALUE": "20.99"}} ] } ] diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/min-group-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/min-group-by.json index 64f824c5a8dc..5e8c49d3f813 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/min-group-by.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/min-group-by.json @@ -11,119 +11,119 @@ { "name": "min integer group by", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='AVRO', key='ID');", "CREATE TABLE S2 as 
SELECT id, min(value) as MIN FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"id": 1, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": -2147483647}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 5}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 100}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 6}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 300}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 2000}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 100}} + {"topic": "test_topic", "key": 1,"value": {"id": 1, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": -2147483647}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 5}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 100}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 6}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 300}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 2000}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100}} ], "outputs": [ - {"topic": "S2", "key": "1", "value": {"ID": 1, "MIN": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -2147483647}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -2147483647}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -2147483647}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 100}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 6}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 6}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -2147483647}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -2147483647}} + {"topic": "S2", "key": 1,"value": {"ID": 1, "MIN": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -2147483647}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -2147483647}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -2147483647}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 100}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 6}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 6}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -2147483647}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -2147483647}} ] }, { "name": "min long group by", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, min(value) as MIN FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"id": 1, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": -1000000}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 5}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 100}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 6}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 300}}, - {"topic": "test_topic", 
"key": "0", "value": {"id": 0, "value": -9223372036854775807}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 100}} + {"topic": "test_topic", "key": 1,"value": {"id": 1, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": -1000000}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 5}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 100}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 6}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 300}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": -9223372036854775807}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100}} ], "outputs": [ - {"topic": "S2", "key": "1", "value": {"ID": 1, "MIN": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -1000000}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -1000000}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -1000000}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 100}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 6}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 6}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -9223372036854775807}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -9223372036854775807}} + {"topic": "S2", "key": 1,"value": {"ID": 1, "MIN": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -1000000}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -1000000}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -1000000}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 100}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 6}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 6}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -9223372036854775807}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -9223372036854775807}} ] }, { "name": "min double group by", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, min(value) as MIN FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"id": 1, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 1.7976931348623157E308}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 5.1}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 100.1}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 6.4}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 300.8}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": -1.7976931348623157E308}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 100.11}} + {"topic": "test_topic", "key": 1,"value": {"id": 1, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 1.7976931348623157E308}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 5.1}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 100.1}}, + {"topic": "test_topic", "key": 
100,"value": {"id": 100, "value": 6.4}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 300.8}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": -1.7976931348623157E308}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100.11}} ], "outputs": [ - {"topic": "S2", "key": "1", "value": {"ID": 1, "MIN": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": 1.7976931348623157E308}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": 5.1}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": 5.1}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 100.1}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 6.4}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": 6.4}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -1.7976931348623157E308}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": -1.7976931348623157E308}} + {"topic": "S2", "key": 1,"value": {"ID": 1, "MIN": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": 1.7976931348623157E308}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": 5.1}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": 5.1}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 100.1}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 6.4}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": 6.4}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -1.7976931348623157E308}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": -1.7976931348623157E308}} ] }, { "name": "min decimal group by", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE decimal(4, 2)) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE decimal(4, 2)) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, min(value) as MIN FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"id": 1, "value": null}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "21.79"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "1.10"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": null}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": "10.10"}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": "06.40"}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": "30.80"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "-01.79"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "10.11"}} + {"topic": "test_topic", "key": 1,"value": {"id": 1, "value": null}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "21.79"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "1.10"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": null}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": "10.10"}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": "06.40"}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": "30.80"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "-01.79"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "10.11"}} ], "outputs": [ - {"topic": "S2", "key": "1", "value": {"ID": 1, "MIN": null}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": "21.79"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": "01.10"}}, - {"topic": "S2", 
"key": "0", "value": {"ID": 0, "MIN": "01.10"}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": "10.10"}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": "06.40"}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "MIN": "06.40"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": "-01.79"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "MIN": "-01.79"}} + {"topic": "S2", "key": 1,"value": {"ID": 1, "MIN": null}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": "21.79"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": "01.10"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": "01.10"}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": "10.10"}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": "06.40"}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "MIN": "06.40"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": "-01.79"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "MIN": "-01.79"}} ] } ] diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/session-windows.json b/ksql-functional-tests/src/test/resources/query-validation-tests/session-windows.json index 0c82143877d9..056a944334da 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/session-windows.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/session-windows.json @@ -6,22 +6,22 @@ { "name": "max session", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) FROM test WINDOW SESSION (30 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0", "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": "0,100,5", "timestamp": 10000}, - {"topic": "test_topic", "key": "1", "value": "1,100,100", "timestamp": 10000}, - {"topic": "test_topic", "key": "1", "value": "1,100,200", "timestamp": 40000} + {"topic": "test_topic", "key": 0,"value": "0,zero,0", "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": "0,100,5", "timestamp": 10000}, + {"topic": "test_topic", "key": 1,"value": "1,100,100", "timestamp": 10000}, + {"topic": "test_topic", "key": 1,"value": "1,100,200", "timestamp": 40000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, - {"topic": "S2", "key": "0", "value": null, "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, - {"topic": "S2", "key": "0", "value": "0,5", "timestamp": 10000, "window": {"start": 0, "end": 10000, "type": "session"}}, - {"topic": "S2", "key": "1", "value": "1,100", "timestamp": 10000, "window": {"start": 10000, "end": 10000, "type": "session"}}, - {"topic": "S2", "key": "1", "value": null, "timestamp": 10000, "window": {"start": 10000, "end": 10000, "type": "session"}}, - {"topic": "S2", "key": "1", "value": "1,200", "timestamp": 40000, "window": {"start": 10000, "end": 40000, "type": "session"}} + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, + {"topic": "S2", "key": 0,"value": null, "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, + {"topic": "S2", "key": 0,"value": "0,5", "timestamp": 10000, "window": {"start": 0, "end": 10000, 
"type": "session"}}, + {"topic": "S2", "key": 1,"value": "1,100", "timestamp": 10000, "window": {"start": 10000, "end": 10000, "type": "session"}}, + {"topic": "S2", "key": 1,"value": null, "timestamp": 10000, "window": {"start": 10000, "end": 10000, "type": "session"}}, + {"topic": "S2", "key": 1,"value": "1,200", "timestamp": 40000, "window": {"start": 10000, "end": 40000, "type": "session"}} ], "post": { "sources": [ @@ -78,17 +78,17 @@ { "name": "inherit windowed keys", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) FROM test WINDOW SESSION (30 SECONDS) group by id;", "CREATE TABLE S3 as SELECT * FROM S2;" ], "inputs": [ - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, - {"topic": "S2", "key": "0", "value": null, "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}} + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, + {"topic": "S2", "key": 0,"value": null, "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}} ], "outputs": [ - {"topic": "S3", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, - {"topic": "S3", "key": "0", "value": null, "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}} + {"topic": "S3", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, + {"topic": "S3", "key": 0,"value": null, "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}} ], "post": { "sources": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/sum.json b/ksql-functional-tests/src/test/resources/query-validation-tests/sum.json index 1a9251e4de17..dc40f2cfa97c 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/sum.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/sum.json @@ -6,20 +6,20 @@ { "name": "sum int", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE integer) WITH (kafka_topic='test_topic',value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, sum(value) AS SUM FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 0}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 100}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 500}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 100}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 0}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 500}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 100}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "SUM": 0}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "SUM": 100}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "SUM": 500}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "SUM": 600}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "SUM": 0}}, + {"topic": 
"S2", "key": 0,"value": {"ID": 0, "SUM": 100}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "SUM": 500}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "SUM": 600}} ] }, { @@ -39,157 +39,157 @@ {"topic": "T2", "key": "0", "value": null} ], "outputs": [ - {"topic": "OUTPUT", "key": "0", "value": {"ID": 0, "SUM": 0}}, - {"topic": "OUTPUT", "key": "1", "value": {"ID": 1, "SUM": 0}}, - {"topic": "OUTPUT", "key": "0", "value": {"ID": 0, "SUM": 0}}, - {"topic": "OUTPUT", "key": "0", "value": {"ID": 0, "SUM": 5}}, - {"topic": "OUTPUT", "key": "1", "value": {"ID": 1, "SUM": 0}}, - {"topic": "OUTPUT", "key": "1", "value": {"ID": 1, "SUM": 10}}, - {"topic": "OUTPUT", "key": "0", "value": {"ID": 0, "SUM": 0}}, - {"topic": "OUTPUT", "key": "0", "value": {"ID": 0, "SUM": 20}}, - {"topic": "OUTPUT", "key": "0", "value": {"ID": 0, "SUM": 0}}, - {"topic": "OUTPUT", "key": "0", "value": {"ID": 0, "SUM": 0}} + {"topic": "OUTPUT", "key": 0,"value": {"ID": 0, "SUM": 0}}, + {"topic": "OUTPUT", "key": 1,"value": {"ID": 1, "SUM": 0}}, + {"topic": "OUTPUT", "key": 0,"value": {"ID": 0, "SUM": 0}}, + {"topic": "OUTPUT", "key": 0,"value": {"ID": 0, "SUM": 5}}, + {"topic": "OUTPUT", "key": 1,"value": {"ID": 1, "SUM": 0}}, + {"topic": "OUTPUT", "key": 1,"value": {"ID": 1, "SUM": 10}}, + {"topic": "OUTPUT", "key": 0,"value": {"ID": 0, "SUM": 0}}, + {"topic": "OUTPUT", "key": 0,"value": {"ID": 0, "SUM": 20}}, + {"topic": "OUTPUT", "key": 0,"value": {"ID": 0, "SUM": 0}}, + {"topic": "OUTPUT", "key": 0,"value": {"ID": 0, "SUM": 0}} ] }, { "name": "sum long", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, sum(value) as SUM FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 2147483648}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 100}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 500}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 100}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 2147483648}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 500}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 100}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "SUM": 2147483648}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "SUM": 2147483748}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "SUM": 500}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "SUM": 600}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "SUM": 2147483648}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "SUM": 2147483748}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "SUM": 500}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "SUM": 600}} ] }, { "name": "sum double", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE double) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, sum(value) AS SUM FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": 5.4}}, - {"topic": "test_topic", "key": "0", 
"value": {"id": 0, "value": 100.1}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 500.9}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": 300.8}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 5.4}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": 100.1}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 500.9}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": 300.8}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "SUM": 5.4}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "SUM": 105.5}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "SUM": 500.9}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "SUM": 801.7}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "SUM": 5.4}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "SUM": 105.5}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "SUM": 500.9}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "SUM": 801.7}} ] }, { "name": "sum decimal", "statements": [ - "CREATE STREAM TEST (ID bigint, VALUE decimal(4,1)) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, VALUE decimal(4,1)) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, sum(value) AS SUM FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "5.4"}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "value": "100.1"}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": "500.9"}}, - {"topic": "test_topic", "key": "100", "value": {"id": 100, "value": "300.8"}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "5.4"}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "value": "100.1"}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": "500.9"}}, + {"topic": "test_topic", "key": 100,"value": {"id": 100, "value": "300.8"}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID": 0, "SUM": "005.4"}}, - {"topic": "S2", "key": "0", "value": {"ID": 0, "SUM": "105.5"}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "SUM": "500.9"}}, - {"topic": "S2", "key": "100", "value": {"ID": 100, "SUM": "801.7"}} + {"topic": "S2", "key": 0,"value": {"ID": 0, "SUM": "005.4"}}, + {"topic": "S2", "key": 0,"value": {"ID": 0, "SUM": "105.5"}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "SUM": "500.9"}}, + {"topic": "S2", "key": 100,"value": {"ID": 100, "SUM": "801.7"}} ] }, { "name": "sum double map", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, sum(value['key1']) AS sum_val FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero","value":{"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}} + 
{"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero","value":{"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":10.0}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":20.0}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":30.0}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":40.0}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":50.0}} + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":10.0}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":20.0}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":30.0}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":40.0}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":50.0}} ] }, { "name": "sum with constant int arg", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, sum(2) AS sum_val FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero","value":{"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero","value":{"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":2}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":4}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":6}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":8}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":10}} + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":2}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":4}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":6}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":8}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":10}} ] }, { "name": "sum with constant long arg", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE map) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME 
varchar, VALUE map<varchar, double>) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, sum(cast (2 as BIGINT)) AS sum_val FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero","value":{"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero","value":{"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":2}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":4}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":6}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":8}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":10}} + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":2}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":4}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":6}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":8}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":10}} ] }, { "name": "sum with constant double arg", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE map<varchar, double>) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE map<varchar, double>) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, sum(1.0) AS sum_val FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero","value":{"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero","value":{"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": {"key1":10.0, "key2":1.0}}} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":1.0}}, - {"topic": "S2", "key": "0", "value":
{"ID":0,"SUM_VAL":2.0}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":3.0}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":4.0}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"SUM_VAL":5.0}} + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":1.0}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":2.0}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":3.0}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":4.0}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"SUM_VAL":5.0}} ] }, { diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/test-custom-udaf.json b/ksql-functional-tests/src/test/resources/query-validation-tests/test-custom-udaf.json index 2dc26e418dd2..ca5c63361c8f 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/test-custom-udaf.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/test-custom-udaf.json @@ -6,41 +6,41 @@ { "name": "test_udaf group by", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE integer) WITH (kafka_topic='test_topic',value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE integer) WITH (kafka_topic='test_topic',value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, test_udaf(value) FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,-2147483647"}, - {"topic": "test_topic", "key": "0", "value": "0,100,5"}, - {"topic": "test_topic", "key": "100", "value": "100,100,100"}, - {"topic": "test_topic", "key": "100", "value": "100,100,6"}, - {"topic": "test_topic", "key": "100", "value": "100,100,300"}, - {"topic": "test_topic", "key": "0", "value": "0,zero,2000"}, - {"topic": "test_topic", "key": "0", "value": "0,100,100"} + {"topic": "test_topic", "key": 0,"value": "0,zero,-2147483647"}, + {"topic": "test_topic", "key": 0,"value": "0,100,5"}, + {"topic": "test_topic", "key": 100,"value": "100,100,100"}, + {"topic": "test_topic", "key": 100,"value": "100,100,6"}, + {"topic": "test_topic", "key": 100,"value": "100,100,300"}, + {"topic": "test_topic", "key": 0,"value": "0,zero,2000"}, + {"topic": "test_topic", "key": 0,"value": "0,100,100"} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,-2147483647"}, - {"topic": "S2", "key": "0", "value": "0,-2147483642"}, - {"topic": "S2", "key": "100", "value": "100,100"}, - {"topic": "S2", "key": "100", "value": "100,106"}, - {"topic": "S2", "key": "100", "value": "100,406"}, - {"topic": "S2", "key": "0", "value": "0,-2147481642"}, - {"topic": "S2", "key": "0", "value": "0,-2147481542"} + {"topic": "S2", "key": 0,"value": "0,-2147483647"}, + {"topic": "S2", "key": 0,"value": "0,-2147483642"}, + {"topic": "S2", "key": 100,"value": "100,100"}, + {"topic": "S2", "key": 100,"value": "100,106"}, + {"topic": "S2", "key": 100,"value": "100,406"}, + {"topic": "S2", "key": 0,"value": "0,-2147481642"}, + {"topic": "S2", "key": 0,"value": "0,-2147481542"} ] }, { "name": "test_udaf on a table", "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, REGION string) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, REGION string) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE SUM_ID_BY_REGION AS SELECT REGION, test_udaf(id) FROM TEST GROUP BY REGION;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,alice,east"}, - {"topic": "test_topic", "key": 
"1", "value": "1,bob,east"}, - {"topic": "test_topic", "key": "2", "value": "2,carol,west"}, - {"topic": "test_topic", "key": "3", "value": "3,dave,west"}, - {"topic": "test_topic", "key": "1", "value": "1,bob,west"}, - {"topic": "test_topic", "key": "1", "value": null} + {"topic": "test_topic", "key": 0,"value": "0,alice,east"}, + {"topic": "test_topic", "key": 1,"value": "1,bob,east"}, + {"topic": "test_topic", "key": 2,"value": "2,carol,west"}, + {"topic": "test_topic", "key": 3,"value": "3,dave,west"}, + {"topic": "test_topic", "key": 1,"value": "1,bob,west"}, + {"topic": "test_topic", "key": 1,"value": null} ], "outputs": [ {"topic": "SUM_ID_BY_REGION", "key": "east", "value": "east,0"}, @@ -55,13 +55,13 @@ { "name": "test_udaf with struct", "statements": [ - "CREATE STREAM TEST (id VARCHAR, val STRUCT) WITH (kafka_topic='test_topic', value_format='JSON');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, id VARCHAR, val STRUCT) WITH (kafka_topic='test_topic', value_format='JSON');", "CREATE TABLE RESULT AS SELECT id, test_udaf(val) as result FROM TEST GROUP BY ID;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": "0", "val": {"A": 1, "B": 2}}}, - {"topic": "test_topic", "key": "0", "value": {"id": "0", "val": {"A": 2, "B": 3}}}, - {"topic": "test_topic", "key": "1", "value": {"id": "1", "val": {"A": 1, "B": 0}}} + {"topic": "test_topic", "key": 0,"value": {"id": "0", "val": {"A": 1, "B": 2}}}, + {"topic": "test_topic", "key": 0,"value": {"id": "0", "val": {"A": 2, "B": 3}}}, + {"topic": "test_topic", "key": 1,"value": {"id": "1", "val": {"A": 1, "B": 0}}} ], "outputs": [ {"topic": "RESULT", "key": "0", "value": {"ID": "0", "RESULT": {"A": 1, "B": 2}}}, diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/topk-distinct.json b/ksql-functional-tests/src/test/resources/query-validation-tests/topk-distinct.json index e810a814be95..20c4c1afde7e 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/topk-distinct.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/topk-distinct.json @@ -11,89 +11,88 @@ { "name": "topk distinct integer", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE integer) WITH (kafka_topic='test_topic',value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE integer) WITH (kafka_topic='test_topic',value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, topkdistinct(value, 3) as topk FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE":0}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 99}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 7}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE":0}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 100}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 99}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 7}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 100}} ], "outputs": [ - {"topic": "S2", "value": {"ID":0,"TOPK":[0]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[100,0]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[100,99,0]}}, - {"topic": "S2", "value": 
{"ID":0,"TOPK":[100,99,7]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[100,99,7]}} + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[0]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[100,0]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[100,99,0]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[100,99,7]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[100,99,7]}} ] }, { "name": "topk distinct long", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, topkdistinct(value, 3) as topk FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 2147483648}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 99}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 7}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 2147483648}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 100}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 99}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 7}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 100}} ], "outputs": [ - {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648,100]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648,100,99]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648,100,99]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648,100,99]}} + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648,100]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648,100,99]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648,100,99]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648,100,99]}} ] }, { "name": "topk distinct string", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE string) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE string) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, topkdistinct(value, 3) as topk FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "a"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "b"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "c"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "b"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "d"}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "a"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "b"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "c"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "b"}}, + {"topic": "test_topic", "key": 0, 
"value": {"ID": 0, "NAME": "zero", "VALUE": "d"}} ], "outputs": [ - {"topic": "S2", "value": {"ID":0,"TOPK":["a"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["b","a"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["c","b","a"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["c","b","a"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["d","c","b"]}} + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["a"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["b","a"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["c","b","a"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["c","b","a"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["d","c","b"]}} ] }, { "name": "topk distinct decimal", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE decimal(2,1)) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE decimal(2,1)) WITH (kafka_topic='test_topic', value_format='AVRO', key='ID');", "CREATE TABLE S2 as SELECT id, topkdistinct(value, 3) as topk FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "9.8"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "8.9"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": null}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "7.8"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "6.5"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "9.9"}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "9.8"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "8.9"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": null}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "7.8"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "6.5"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": "9.9"}} ], "outputs": [ - {"topic": "S2", "value": {"ID":0,"TOPK":["9.8"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["9.8","8.9"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["9.8","8.9"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["9.8","8.9","7.8"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["9.8","8.9","7.8"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["9.9","9.8","8.9"]}} + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["9.8"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["9.8","8.9"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["9.8","8.9"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["9.8","8.9","7.8"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["9.8","8.9","7.8"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["9.9","9.8","8.9"]}} ] } - ] } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/topk-group-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/topk-group-by.json index b9d53de87428..1d8de0381978 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/topk-group-by.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/topk-group-by.json @@ -12,88 +12,88 @@ "name": "topk integer", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE integer) WITH 
(kafka_topic='test_topic',value_format='{FORMAT}', key='ID');",
+        "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE integer) WITH (kafka_topic='test_topic',value_format='{FORMAT}', key='ID');",
         "CREATE TABLE S2 as SELECT id, topk(value, 3) as topk FROM test group by id;"
       ],
       "inputs": [
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}},
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100}},
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 99}},
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 7}},
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100}}
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}},
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 100}},
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 99}},
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 7}},
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 100}}
       ],
       "outputs": [
-        {"topic": "S2", "value": {"ID":0,"TOPK":[0]}},
-        {"topic": "S2", "value": {"ID":0,"TOPK":[100,0]}},
-        {"topic": "S2", "value": {"ID":0,"TOPK":[100,99,0]}},
-        {"topic": "S2", "value": {"ID":0,"TOPK":[100,99,7]}},
-        {"topic": "S2", "value": {"ID":0,"TOPK":[100,100,99]}}
+        {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[0]}},
+        {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[100,0]}},
+        {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[100,99,0]}},
+        {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[100,99,7]}},
+        {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[100,100,99]}}
       ]
     },
     {
       "name": "topk long",
       "format": ["AVRO", "JSON"],
       "statements": [
-        "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');",
+        "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');",
         "CREATE TABLE S2 as SELECT id, topk(value, 3) as topk FROM test group by id;"
       ],
       "inputs": [
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 2147483648}},
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100}},
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 99}},
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 7}},
-        {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100}}
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 2147483648}},
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 100}},
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 99}},
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 7}},
+        {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 100}}
       ],
       "outputs": [
-        {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648]}},
-        {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648,100]}},
-        {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648,100,99]}},
-        {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648,100,99]}},
-        {"topic": "S2", "value": {"ID":0,"TOPK":[2147483648,100,99]}}
+        {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648]}},
{"ID":0,"TOPK":[2147483648,100]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648,100,99]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648,100,99]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2147483648,100,100]}} ] }, { "name": "topk double", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, topk(value, 3) as topk FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 2147483648.9}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100.5}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 99.9}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 7.3}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": 100.5}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": 2147483648.9}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": 100.5}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": 99.9}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": 7.3}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": 100.5}} ], "outputs": [ - {"topic": "S2", "value": {"ID":0,"TOPK":[2.1474836489E9]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[2.1474836489E9,100.5]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[2.1474836489E9,100.5,99.9]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[2.1474836489E9,100.5,99.9]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":[2.1474836489E9,100.5,100.5]}} + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2.1474836489E9]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2.1474836489E9,100.5]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2.1474836489E9,100.5,99.9]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2.1474836489E9,100.5,99.9]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":[2.1474836489E9,100.5,100.5]}} ] }, { "name": "topk string", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE string) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE string) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE S2 as SELECT id, topk(value, 3) as topk FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "a"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "b"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "c"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "b"}}, - {"topic": "test_topic", "value": {"ID": 0, "NAME": "zero", "VALUE": "d"}} + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": "a"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": "b"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": "c"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, 
"NAME": "zero", "key": 0, "value": "b"}}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "key": 0, "value": "d"}} ], "outputs": [ - {"topic": "S2", "value": {"ID":0,"TOPK":["a"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["b","a"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["c","b","a"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["c","b","b"]}}, - {"topic": "S2", "value": {"ID":0,"TOPK":["d","c","b"]}} + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["a"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["b","a"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["c","b","a"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["c","b","b"]}}, + {"topic": "S2", "key": 0, "value": {"ID":0,"TOPK":["d","c","b"]}} ] } ] diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/tumbling-windows.json b/ksql-functional-tests/src/test/resources/query-validation-tests/tumbling-windows.json index aa399c47e7cd..faf38789a63f 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/tumbling-windows.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/tumbling-windows.json @@ -11,26 +11,26 @@ { "name": "max tumbling", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0", "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": "0,100,5", "timestamp": 10000}, - {"topic": "test_topic", "key": "100", "value": "100,100,100", "timestamp": 30000}, - {"topic": "test_topic", "key": "100", "value": "100,100,6", "timestamp": 45000}, - {"topic": "test_topic", "key": "100", "value": "100,100,300", "timestamp": 50000}, - {"topic": "test_topic", "key": "0", "value": "0,zero,100", "timestamp": 35000}, - {"topic": "test_topic", "key": "0", "value": "0,100,2000", "timestamp": 40000} + {"topic": "test_topic", "key": 0,"value": "0,zero,0", "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": "0,100,5", "timestamp": 10000}, + {"topic": "test_topic", "key": 100,"value": "100,100,100", "timestamp": 30000}, + {"topic": "test_topic", "key": 100,"value": "100,100,6", "timestamp": 45000}, + {"topic": "test_topic", "key": 100,"value": "100,100,300", "timestamp": 50000}, + {"topic": "test_topic", "key": 0,"value": "0,zero,100", "timestamp": 35000}, + {"topic": "test_topic", "key": 0,"value": "0,100,2000", "timestamp": 40000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,5", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,300", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,100", "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": 
"S2", "key": "0", "value": "0,2000", "timestamp": 40000, "window": {"start": 30000, "end": 60000, "type": "time"}} + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,5", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,300", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,100", "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,2000", "timestamp": 40000, "window": {"start": 30000, "end": 60000, "type": "time"}} ], "post": { "sources": [ @@ -45,68 +45,68 @@ { "name": "min tumbling", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, min(value) FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0", "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": "0,100,5", "timestamp": 10000}, - {"topic": "test_topic", "key": "100", "value": "100,100,100", "timestamp": 30000}, - {"topic": "test_topic", "key": "100", "value": "100,100,6", "timestamp": 45000}, - {"topic": "test_topic", "key": "100", "value": "100,100,300", "timestamp": 50000} + {"topic": "test_topic", "key": 0,"value": "0,zero,0", "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": "0,100,5", "timestamp": 10000}, + {"topic": "test_topic", "key": 100,"value": "100,100,100", "timestamp": 30000}, + {"topic": "test_topic", "key": 100,"value": "100,100,6", "timestamp": 45000}, + {"topic": "test_topic", "key": 100,"value": "100,100,300", "timestamp": 50000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,6", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,6", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}} + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,6", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,6", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}} ] }, { "name": "topk tumbling", 
"statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, topk(value, 2) as topk FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 0}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 10}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 50}, "timestamp": 30000} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 0}, "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 10}, "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 50}, "timestamp": 30000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0,10.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[50.0]}, "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}} + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0,10.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[50.0]}, "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}} ] }, { "name": "topkdistinct tumbling", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", "CREATE TABLE S2 as SELECT id, topkdistinct(value, 2) as topk FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 0}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 10}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 30000} + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 0}, "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 0}, + 
{"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 10}, "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": {"id": 0, "name": "zero", "value": 100}, "timestamp": 30000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0,10.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0,10.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": {"ID":0,"TOPK":[100.0]}, "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}} + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0,0.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0,10.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0,10.0]}, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": {"ID":0,"TOPK":[100.0]}, "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}} ] }, { "name": "import tumbling table", "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID', WINDOW_TYPE='Tumbling', WINDOW_SIZE='30 seconds');", + "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', WINDOW_TYPE='Tumbling', WINDOW_SIZE='30 seconds');", "CREATE TABLE S2 as SELECT *, ROWKEY as KEY FROM test;" ], "inputs": [ @@ -125,15 +125,15 @@ { "name": "inherit windowed keys", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;", "CREATE TABLE S3 as SELECT * FROM S2;" ], "inputs": [ - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}} + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}} ], "outputs": [ - {"topic": "S3", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}} + {"topic": "S3", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}} ], "post": { "sources": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/window-bounds.json b/ksql-functional-tests/src/test/resources/query-validation-tests/window-bounds.json index e8889964e274..10ea91f04fef 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/window-bounds.json +++ 
b/ksql-functional-tests/src/test/resources/query-validation-tests/window-bounds.json @@ -6,22 +6,22 @@ { "name": "table session", "statements": [ - "CREATE STREAM TEST (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, WindowStart(), WindowEnd() FROM test WINDOW SESSION (30 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 10000}, - {"topic": "test_topic", "key": "1", "value": "1", "timestamp": 10000}, - {"topic": "test_topic", "key": "1", "value": "1", "timestamp": 40000} + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 10000}, + {"topic": "test_topic", "key": 1,"value": "1", "timestamp": 10000}, + {"topic": "test_topic", "key": 1,"value": "1", "timestamp": 40000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0,0", "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, - {"topic": "S2", "key": "0", "value": null, "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, - {"topic": "S2", "key": "0", "value": "0,0,10000", "timestamp": 10000, "window": {"start": 0, "end": 10000, "type": "session"}}, - {"topic": "S2", "key": "1", "value": "1,10000,10000", "timestamp": 10000, "window": {"start": 10000, "end": 10000, "type": "session"}}, - {"topic": "S2", "key": "1", "value": null, "timestamp": 10000, "window": {"start": 10000, "end": 10000, "type": "session"}}, - {"topic": "S2", "key": "1", "value": "1,10000,40000", "timestamp": 40000, "window": {"start": 10000, "end": 40000, "type": "session"}} + {"topic": "S2", "key": 0,"value": "0,0,0", "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, + {"topic": "S2", "key": 0,"value": null, "timestamp": 0, "window": {"start": 0, "end": 0, "type": "session"}}, + {"topic": "S2", "key": 0,"value": "0,0,10000", "timestamp": 10000, "window": {"start": 0, "end": 10000, "type": "session"}}, + {"topic": "S2", "key": 1,"value": "1,10000,10000", "timestamp": 10000, "window": {"start": 10000, "end": 10000, "type": "session"}}, + {"topic": "S2", "key": 1,"value": null, "timestamp": 10000, "window": {"start": 10000, "end": 10000, "type": "session"}}, + {"topic": "S2", "key": 1,"value": "1,10000,40000", "timestamp": 40000, "window": {"start": 10000, "end": 40000, "type": "session"}} ], "properties": { "ksql.windowed.session.key.legacy": "false" @@ -30,85 +30,85 @@ { "name": "table tumbling", "statements": [ - "CREATE STREAM TEST (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, WindowStart(), WindowEnd() FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 10000}, - {"topic": "test_topic", "key": "100", "value": "100", "timestamp": 30000}, - {"topic": "test_topic", "key": "100", "value": "100", "timestamp": 45000}, - {"topic": "test_topic", "key": "100", "value": "100", "timestamp": 50000}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 35000}, - {"topic": "test_topic", 
"key": "0", "value": "0", "timestamp": 70000} + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 10000}, + {"topic": "test_topic", "key": 100,"value": "100", "timestamp": 30000}, + {"topic": "test_topic", "key": 100,"value": "100", "timestamp": 45000}, + {"topic": "test_topic", "key": 100,"value": "100", "timestamp": 50000}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 35000}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 70000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0,30000", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,0,30000", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,30000,60000", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,30000,60000", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,30000,60000", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,30000,60000", "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,60000,90000", "timestamp": 70000, "window": {"start": 60000, "end": 90000, "type": "time"}} + {"topic": "S2", "key": 0,"value": "0,0,30000", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,0,30000", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,30000,60000", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,30000,60000", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,30000,60000", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,30000,60000", "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,60000,90000", "timestamp": 70000, "window": {"start": 60000, "end": 90000, "type": "time"}} ] }, { "name": "table hopping", "statements": [ - "CREATE STREAM TEST (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, WindowStart(), WindowEnd() FROM test WINDOW HOPPING (SIZE 30 SECONDS, ADVANCE BY 5 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 0}, - {"topic": "test_topic", "key": "100", "value": "100", "timestamp": 2000}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 4999}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 5000} + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 0}, + {"topic": "test_topic", "key": 100,"value": "100", "timestamp": 2000}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 4999}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 5000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0,30000", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": 
"S2", "key": "100", "value": "100,0,30000", "timestamp": 2000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,0,30000", "timestamp": 4999, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,0,30000", "timestamp": 5000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,5000,35000", "timestamp": 5000, "window": {"start": 5000, "end": 35000, "type": "time"}} + {"topic": "S2", "key": 0,"value": "0,0,30000", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,0,30000", "timestamp": 2000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,0,30000", "timestamp": 4999, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,0,30000", "timestamp": 5000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,5000,35000", "timestamp": 5000, "window": {"start": 5000, "end": 35000, "type": "time"}} ] }, { "name": "none", "comment" : "Without a WINDOW statement the methods will return NULL", "statements": [ - "CREATE STREAM TEST (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, WindowStart(), WindowEnd() FROM test group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 0}, - {"topic": "test_topic", "key": "100", "value": "100", "timestamp": 2000}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 4999}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 5000} + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 0}, + {"topic": "test_topic", "key": 100,"value": "100", "timestamp": 2000}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 4999}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 5000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,,", "timestamp": 0}, - {"topic": "S2", "key": "100", "value": "100,,", "timestamp": 2000}, - {"topic": "S2", "key": "0", "value": "0,,", "timestamp": 4999}, - {"topic": "S2", "key": "0", "value": "0,,", "timestamp": 5000} + {"topic": "S2", "key": 0,"value": "0,,", "timestamp": 0}, + {"topic": "S2", "key": 100,"value": "100,,", "timestamp": 2000}, + {"topic": "S2", "key": 0,"value": "0,,", "timestamp": 4999}, + {"topic": "S2", "key": 0,"value": "0,,", "timestamp": 5000} ] }, { "name": "in expressions", "statements": [ - "CREATE STREAM TEST (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, WindowStart() / 2, WindowEnd() / 2 FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;" ], "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 0}, - {"topic": "test_topic", "key": "100", "value": "100", "timestamp": 2000}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 4999}, - {"topic": "test_topic", "key": "0", "value": "0", "timestamp": 5000} + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 0}, + {"topic": "test_topic", "key": 100,"value": "100", "timestamp": 2000}, + {"topic": "test_topic", "key": 0,"value": "0", 
"timestamp": 4999}, + {"topic": "test_topic", "key": 0,"value": "0", "timestamp": 5000} ], "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0,15000", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,0,15000", "timestamp": 2000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,0,15000", "timestamp": 4999, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,0,15000", "timestamp": 5000, "window": {"start": 0, "end": 30000, "type": "time"}} + {"topic": "S2", "key": 0,"value": "0,0,15000", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,0,15000", "timestamp": 2000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,0,15000", "timestamp": 4999, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,0,15000", "timestamp": 5000, "window": {"start": 0, "end": 30000, "type": "time"}} ] } ] diff --git a/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/input.json b/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/input.json index a35aeee50243..bf232407bf63 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/input.json +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/input.json @@ -1,11 +1,11 @@ { "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0", "timestamp": 0}, - {"topic": "test_topic", "key": "0", "value": "0,100,5", "timestamp": 10000}, - {"topic": "test_topic", "key": "100", "value": "100,100,100", "timestamp": 30000}, - {"topic": "test_topic", "key": "100", "value": "100,100,6", "timestamp": 45000}, - {"topic": "test_topic", "key": "100", "value": "100,100,300", "timestamp": 50000}, - {"topic": "test_topic", "key": "0", "value": "0,zero,100", "timestamp": 35000}, - {"topic": "test_topic", "key": "0", "value": "0,100,2000", "timestamp": 40000} + {"topic": "test_topic", "key": 0,"value": "0,zero,0", "timestamp": 0}, + {"topic": "test_topic", "key": 0,"value": "0,100,5", "timestamp": 10000}, + {"topic": "test_topic", "key": 100,"value": "100,100,100", "timestamp": 30000}, + {"topic": "test_topic", "key": 100,"value": "100,100,6", "timestamp": 45000}, + {"topic": "test_topic", "key": 100,"value": "100,100,300", "timestamp": 50000}, + {"topic": "test_topic", "key": 0,"value": "0,zero,100", "timestamp": 35000}, + {"topic": "test_topic", "key": 0,"value": "0,100,2000", "timestamp": 40000} ] } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/output.json b/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/output.json index 1cd6a70ae2b0..c294728b7a89 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/output.json +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/output.json @@ -1,19 +1,19 @@ { "outputs": [ - {"topic": "S2", "key": "0", "value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,5", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,100", "timestamp": 
45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "100", "value": "100,300", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,100", "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "S2", "key": "0", "value": "0,2000", "timestamp": 40000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,0", "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,5", "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,100", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 100,"value": "100,300", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,100", "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "S2", "key": 0,"value": "0,2000", "timestamp": 40000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "FOO", "key": "0", "value": null, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "FOO", "key": "0", "value": null, "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, - {"topic": "FOO", "key": "100", "value": "100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "FOO", "key": "100", "value": "100", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "FOO", "key": "100", "value": "100", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "FOO", "key": "0", "value": null, "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, - {"topic": "FOO", "key": "0", "value": null, "timestamp": 40000, "window": {"start": 30000, "end": 60000, "type": "time"}} + {"topic": "FOO", "key": 0,"value": null, "timestamp": 0, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "FOO", "key": 0,"value": null, "timestamp": 10000, "window": {"start": 0, "end": 30000, "type": "time"}}, + {"topic": "FOO", "key": 100,"value": "100", "timestamp": 30000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "FOO", "key": 100,"value": "100", "timestamp": 45000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "FOO", "key": 100,"value": "100", "timestamp": 50000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "FOO", "key": 0,"value": null, "timestamp": 35000, "window": {"start": 30000, "end": 60000, "type": "time"}}, + {"topic": "FOO", "key": 0,"value": null, "timestamp": 40000, "window": {"start": 30000, "end": 60000, "type": "time"}} ] } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/statements.sql b/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/statements.sql index ff629e735d02..21d708beed31 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/statements.sql +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/aggregate/statements.sql @@ -1,3 +1,3 @@ -CREATE STREAM TEST (ID bigint, NAME varchar, VALUE 
bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');
+CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');
 CREATE TABLE S2 as SELECT id, max(value) FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;
 CREATE TABLE foo AS SELECT id from s2 where id = 100;
\ No newline at end of file
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityFactoryTest.java
index 88db0813a8df..e7a41469e992 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityFactoryTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityFactoryTest.java
@@ -28,6 +28,7 @@
 import io.confluent.ksql.execution.streams.materialization.Window;
 import io.confluent.ksql.execution.streams.materialization.WindowedRow;
 import io.confluent.ksql.execution.util.StructKeyUtil;
+import io.confluent.ksql.execution.util.StructKeyUtil.KeyBuilder;
 import io.confluent.ksql.model.WindowType;
 import io.confluent.ksql.name.ColumnName;
 import io.confluent.ksql.schema.ksql.LogicalSchema;
@@ -41,6 +42,8 @@
 public class TableRowsEntityFactoryTest {
 
+  private static final KeyBuilder STRING_KEY_BUILDER = StructKeyUtil.keyBuilder(SqlTypes.STRING);
+
   private static final LogicalSchema SIMPLE_SCHEMA = LogicalSchema.builder()
       .valueColumn(ColumnName.of("v0"), SqlTypes.BOOLEAN)
       .build();
@@ -68,7 +71,7 @@ public void shouldAddNonWindowedRowToValues() {
     final List<Row> input = ImmutableList.of(
         Row.of(
             SIMPLE_SCHEMA,
-            StructKeyUtil.asStructKey("x"),
+            STRING_KEY_BUILDER.build("x"),
            new GenericRow(false),
            ROWTIME
        )
    )
@@ -92,14 +95,14 @@ public void shouldAddWindowedRowToValues() {
     final List<WindowedRow> input = ImmutableList.of(
         WindowedRow.of(
             SIMPLE_SCHEMA,
-            StructKeyUtil.asStructKey("x"),
+            STRING_KEY_BUILDER.build("x"),
             window0,
             new GenericRow(true),
             ROWTIME
         ),
         WindowedRow.of(
             SIMPLE_SCHEMA,
-            StructKeyUtil.asStructKey("y"),
+            STRING_KEY_BUILDER.build("y"),
             window1,
             new GenericRow(false),
             ROWTIME
         )
@@ -127,7 +130,7 @@ public void shouldSupportNullColumns() {
     GenericRow row = new GenericRow(newColumns);
 
     final Builder<Row> builder = ImmutableList.builder();
-    builder.add(Row.of(SCHEMA_NULL, StructKeyUtil.asStructKey("k"), row, ROWTIME));
+    builder.add(Row.of(SCHEMA_NULL, STRING_KEY_BUILDER.build("k"), row, ROWTIME));
 
     // When:
     final List<List<?>> output = TableRowsEntityFactory.createRows(builder.build());
diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateParamsFactory.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateParamsFactory.java
index 117ccbdfe49f..96cd1d5742d6 100644
--- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateParamsFactory.java
+++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateParamsFactory.java
@@ -78,33 +78,28 @@ private AggregateParams create(
       final boolean table
   ) {
     final List<Integer> nonAggColumnIndexes = nonAggColumnIndexes(schema, nonAggregateColumns);
-    final List<KsqlAggregateFunction<?, ?, ?>> functions = ImmutableList.copyOf(
-        functionList.stream().map(
-            funcCall -> UdafUtil.resolveAggregateFunction(
-                functionRegistry,
-                funcCall,
-                schema)
-        ).collect(Collectors.toList()));
+
+    final List<KsqlAggregateFunction<?, ?, ?>> functions =
+        resolveAggregateFunctions(schema, functionRegistry, functionList);
+
     final List<Supplier<?>> initialValueSuppliers = functions.stream()
         .map(KsqlAggregateFunction::getInitialValueSupplier)
         .collect(Collectors.toList());
-    final Optional<KudafUndoAggregator> undoAggregator;
-    if (table) {
-      final List<TableAggregationFunction<?, ?, ?>> tableFunctions = new LinkedList<>();
-      for (final KsqlAggregateFunction<?, ?, ?> function : functions) {
-        tableFunctions.add((TableAggregationFunction<?, ?, ?>) function);
-      }
-      undoAggregator = Optional.of(new KudafUndoAggregator(nonAggColumnIndexes, tableFunctions));
-    } else {
-      undoAggregator = Optional.empty();
-    }
+
+    final Optional<KudafUndoAggregator> undoAggregator =
+        buildUndoAggregators(nonAggColumnIndexes, table, functions);
+
+    final LogicalSchema aggregateSchema = buildSchema(schema, nonAggregateColumns, functions, true);
+
+    final LogicalSchema outputSchema = buildSchema(schema, nonAggregateColumns, functions, false);
+
     return new AggregateParams(
         new KudafInitializer(nonAggregateColumns.size(), initialValueSuppliers),
         aggregatorFactory.create(nonAggColumnIndexes, functions),
         undoAggregator,
         new WindowSelectMapper(nonAggregateColumns.size(), functions),
-        buildSchema(schema, nonAggregateColumns, functions, true),
-        buildSchema(schema, nonAggregateColumns, functions, false)
+        aggregateSchema,
+        outputSchema
     );
   }
@@ -121,21 +116,55 @@ private static List<Integer> nonAggColumnIndexes(
     return Collections.unmodifiableList(indexes);
   }
 
+  private static Optional<KudafUndoAggregator> buildUndoAggregators(
+      final List<Integer> nonAggColumnIndexes,
+      final boolean table,
+      final List<KsqlAggregateFunction<?, ?, ?>> functions
+  ) {
+    if (!table) {
+      return Optional.empty();
+    }
+
+    final List<TableAggregationFunction<?, ?, ?>> tableFunctions = new LinkedList<>();
+    for (final KsqlAggregateFunction<?, ?, ?> function : functions) {
+      tableFunctions.add((TableAggregationFunction<?, ?, ?>) function);
+    }
+    return Optional.of(new KudafUndoAggregator(nonAggColumnIndexes, tableFunctions));
+  }
+
+  private static List<KsqlAggregateFunction<?, ?, ?>> resolveAggregateFunctions(
+      final LogicalSchema schema,
+      final FunctionRegistry functionRegistry,
+      final List<FunctionCall> functionList
+  ) {
+    return ImmutableList.copyOf(
+        functionList.stream().map(
+            funcCall -> UdafUtil.resolveAggregateFunction(
+                functionRegistry,
+                funcCall,
+                schema)
+        ).collect(Collectors.toList()));
+  }
+
   private static LogicalSchema buildSchema(
       final LogicalSchema schema,
       final List<ColumnRef> nonAggregateColumns,
       final List<KsqlAggregateFunction<?, ?, ?>> aggregateFunctions,
-      final boolean useAggregate) {
+      final boolean useAggregate
+  ) {
     final LogicalSchema.Builder schemaBuilder = LogicalSchema.builder();
 
     schemaBuilder.keyColumns(schema.key());
 
     for (final ColumnRef columnRef : nonAggregateColumns) {
-      schemaBuilder.valueColumn(schema.findValueColumn(columnRef).get());
+      final Column col = schema.findValueColumn(columnRef)
+          .orElseThrow(IllegalArgumentException::new);
+
+      schemaBuilder.valueColumn(col);
     }
 
     for (int i = 0; i < aggregateFunctions.size(); i++) {
-      final KsqlAggregateFunction aggregateFunction = aggregateFunctions.get(i);
+      final KsqlAggregateFunction<?, ?, ?> aggregateFunction = aggregateFunctions.get(i);
       final ColumnName colName = ColumnName.aggregateColumn(i);
       final SqlType fieldType = useAggregate
           ? aggregateFunction.getAggregateType()
@@ -147,7 +176,7 @@
   }
 
   interface KudafAggregatorFactory {
-    KudafAggregator create(
+    KudafAggregator<?> create(
         List<Integer> nonAggColumnIndexes,
         List<KsqlAggregateFunction<?, ?, ?>> functions
     );
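The buildUndoAggregators extraction above isolates the table-only retraction path: a table changelog updates a key in place, so the aggregator must first undo the key's previous value before applying the new one, while a stream only ever contributes new values. The following is a minimal, self-contained sketch of that idea in plain Java; the class and variable names (UndoAggregationSketch, lastValue) are illustrative only and are not ksqlDB's API.

import java.util.HashMap;
import java.util.Map;

// Maintains a SUM over a table-style changelog. When key "a" changes from 5
// to 7, the old 5 is subtracted (the undo) before the new 7 is added. A
// stream aggregation would skip the undo and simply add every value.
public final class UndoAggregationSketch {

  public static void main(final String[] args) {
    final Map<String, Long> lastValue = new HashMap<>();
    long sum = 0;

    // (key, newValue) pairs as they would arrive on a table's changelog.
    final String[][] changelog = {{"a", "5"}, {"b", "3"}, {"a", "7"}};

    for (final String[] update : changelog) {
      final Long old = lastValue.put(update[0], Long.parseLong(update[1]));
      if (old != null) {
        sum -= old; // the undo step; only meaningful for tables
      }
      sum += Long.parseLong(update[1]); // the ordinary aggregation step
    }

    System.out.println(sum); // 10: a=7 replaced a=5 rather than adding to it
  }
}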
- * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package io.confluent.ksql.execution.streams; - -import com.google.common.collect.ImmutableList; -import io.confluent.ksql.GenericRow; -import io.confluent.ksql.execution.codegen.ExpressionMetadata; -import io.confluent.ksql.execution.util.StructKeyUtil; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.apache.kafka.connect.data.Struct; -import org.apache.kafka.streams.kstream.KeyValueMapper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -class GroupByMapper implements KeyValueMapper { - - private static final Logger LOG = LoggerFactory.getLogger(GroupByMapper.class); - - private static final String GROUP_BY_VALUE_SEPARATOR = "|+|"; - - private final List expressions; - - GroupByMapper(final List expressions) { - this.expressions = ImmutableList.copyOf(Objects.requireNonNull(expressions, "expressions")); - if (expressions.isEmpty()) { - throw new IllegalArgumentException("Empty group by"); - } - } - - @Override - public Struct apply(final K key, final GenericRow row) { - final String stringRowKey = IntStream.range(0, expressions.size()) - .mapToObj(idx -> processColumn(idx, expressions.get(idx), row)) - .collect(Collectors.joining(GROUP_BY_VALUE_SEPARATOR)); - - return StructKeyUtil.asStructKey(stringRowKey); - } - - private static String processColumn( - final int index, - final ExpressionMetadata exp, - final GenericRow row - ) { - try { - return String.valueOf(exp.evaluate(row)); - } catch (final Exception e) { - LOG.error("Error calculating group-by field with index {}", index, e); - return "null"; - } - } - - List getExpressionMetadata() { - return expressions; - } -} diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParams.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParams.java new file mode 100644 index 000000000000..c91d9d8a3f90 --- /dev/null +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParams.java @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */
+
+package io.confluent.ksql.execution.streams;
+
+import static java.util.Objects.requireNonNull;
+
+import io.confluent.ksql.GenericRow;
+import io.confluent.ksql.schema.ksql.LogicalSchema;
+import java.util.function.Function;
+import org.apache.kafka.connect.data.Struct;
+
+public class GroupByParams {
+
+  private final LogicalSchema schema;
+  private final Function<GenericRow, Struct> mapper;
+
+  GroupByParams(final LogicalSchema schema, final Function<GenericRow, Struct> mapper) {
+    this.schema = requireNonNull(schema, "schema");
+    this.mapper = requireNonNull(mapper, "mapper");
+  }
+
+  public LogicalSchema getSchema() {
+    return schema;
+  }
+
+  public Function<GenericRow, Struct> getMapper() {
+    return mapper;
+  }
+}
diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParamsFactory.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParamsFactory.java
new file mode 100644
index 000000000000..96706bd0c651
--- /dev/null
+++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParamsFactory.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.execution.streams;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import io.confluent.ksql.GenericRow;
+import io.confluent.ksql.execution.codegen.ExpressionMetadata;
+import io.confluent.ksql.execution.util.StructKeyUtil;
+import io.confluent.ksql.execution.util.StructKeyUtil.KeyBuilder;
+import io.confluent.ksql.schema.ksql.LogicalSchema;
+import io.confluent.ksql.schema.ksql.types.SqlType;
+import io.confluent.ksql.schema.ksql.types.SqlTypes;
+import io.confluent.ksql.util.SchemaUtil;
+import java.util.List;
+import java.util.function.Function;
+import org.apache.kafka.connect.data.Struct;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class GroupByParamsFactory {
+
+  private static final Logger LOG = LoggerFactory.getLogger(GroupByParamsFactory.class);
+
+  private static final String GROUP_BY_VALUE_SEPARATOR = "|+|";
+
+  private GroupByParamsFactory() {
+  }
+
+  public static GroupByParams build(
+      final LogicalSchema sourceSchema,
+      final List<ExpressionMetadata> expressions
+  ) {
+    final Function<GenericRow, Struct> mapper = expressions.size() == 1
+        ? new SingleExpressionGrouper(expressions.get(0))::apply
+        : new MultiExpressionGrouper(expressions)::apply;
+
+    final LogicalSchema schema = expressions.size() == 1
+        ? singleExpressionSchema(sourceSchema, expressions.get(0).getExpressionType())
+        : multiExpressionSchema(sourceSchema);
+
+    return new GroupByParams(schema, mapper);
+  }
+
+  static LogicalSchema multiExpressionSchema(
+      final LogicalSchema sourceSchema
+  ) {
+    return buildSchema(sourceSchema, SqlTypes.STRING);
+  }
+
+  static LogicalSchema singleExpressionSchema(
+      final LogicalSchema sourceSchema,
+      final SqlType rowKeyType
+  ) {
+    return buildSchema(sourceSchema, rowKeyType);
+  }
+
+  private static LogicalSchema buildSchema(
+      final LogicalSchema sourceSchema,
+      final SqlType rowKeyType
+  ) {
+    return LogicalSchema.builder()
+        .keyColumn(SchemaUtil.ROWKEY_NAME, rowKeyType)
+        .valueColumns(sourceSchema.value())
+        .build();
+  }
+
+  private static Object processColumn(
+      final int index,
+      final ExpressionMetadata exp,
+      final GenericRow row
+  ) {
+    try {
+      final Object result = exp.evaluate(row);
+      if (result == null) {
+        LOG.error("Group-by column with index {} resolved to null. "
+            + "The source row will be excluded from the table.", index);
+      }
+      return result;
+    } catch (final Exception e) {
+      LOG.error("Error calculating group-by column with index {}. "
+          + "The source row will be excluded from the table.", index, e);
+      return null;
+    }
+  }
+
+  private static final class SingleExpressionGrouper {
+
+    private final KeyBuilder keyBuilder;
+    private final ExpressionMetadata expression;
+
+    SingleExpressionGrouper(final ExpressionMetadata expression) {
+      this.expression = requireNonNull(expression, "expression");
+      this.keyBuilder = StructKeyUtil.keyBuilder(expression.getExpressionType());
+    }
+
+    public Struct apply(final GenericRow row) {
+      final Object rowKey = processColumn(0, expression, row);
+      if (rowKey == null) {
+        return null;
+      }
+      return keyBuilder.build(rowKey);
+    }
+  }
+
+  private static final class MultiExpressionGrouper {
+
+    private final KeyBuilder keyBuilder;
+    private final ImmutableList<ExpressionMetadata> expressions;
+
+    MultiExpressionGrouper(final List<ExpressionMetadata> expressions) {
+      this.expressions = ImmutableList.copyOf(requireNonNull(expressions, "expressions"));
+      this.keyBuilder = StructKeyUtil.keyBuilder(SqlTypes.STRING);
+
+      if (expressions.isEmpty()) {
+        throw new IllegalArgumentException("Empty group by");
+      }
+    }
+
+    public Struct apply(final GenericRow row) {
+      final StringBuilder rowKey = new StringBuilder();
+      for (int i = 0; i < expressions.size(); i++) {
+        final Object result = processColumn(i, expressions.get(i), row);
+        if (result == null) {
+          return null;
+        }
+
+        if (rowKey.length() > 0) {
+          rowKey.append(GROUP_BY_VALUE_SEPARATOR);
+        }
+
+        rowKey.append(result);
+      }
+
+      return keyBuilder.build(rowKey.toString());
+    }
+  }
+}
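Before the schema-resolution and builder changes below, the key-construction rules that GroupByParamsFactory encodes can be seen stripped of ksqlDB's types: one grouping expression keeps its evaluated value (and hence its SQL type) as the key, several expressions are stringified and joined, and a null result drops the row. In this plain-Java sketch only the |+| separator and the null-drops-the-row rule come from the file above; buildKey and the sample values are hypothetical.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Rough model of the single- vs multi-expression grouping logic.
public final class GroupByKeySketch {

  private static final String SEPARATOR = "|+|";

  static Object buildKey(final List<?> evaluated) {
    if (evaluated.stream().anyMatch(v -> v == null)) {
      return null; // row excluded, as the factory's log messages describe
    }
    if (evaluated.size() == 1) {
      return evaluated.get(0); // preserves the expression's own type, e.g. a BIGINT
    }
    return evaluated.stream()
        .map(String::valueOf)
        .collect(Collectors.joining(SEPARATOR));
  }

  public static void main(final String[] args) {
    System.out.println(buildKey(Arrays.asList(42L)));       // 42 (still a Long)
    System.out.println(buildKey(Arrays.asList("a", 1)));    // a|+|1
    System.out.println(buildKey(Arrays.asList("a", null))); // null -> row dropped
  }
}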
java.util.List; import java.util.Objects; import java.util.Optional; @@ -62,7 +64,7 @@ public final class StepSchemaResolver { .put(StreamWindowedAggregate.class, StepSchemaResolver::handleStreamWindowedAggregate) .put(StreamFilter.class, StepSchemaResolver::sameSchema) .put(StreamFlatMap.class, StepSchemaResolver::handleStreamFlatMap) - .put(StreamGroupBy.class, StepSchemaResolver::sameSchema) + .put(StreamGroupBy.class, StepSchemaResolver::handleGroupBy) .put(StreamGroupByKey.class, StepSchemaResolver::sameSchema) .put(StreamSelect.class, StepSchemaResolver::handleStreamSelect) .put(StreamSelectKey.class, StepSchemaResolver::handleSelectKey) @@ -160,6 +162,22 @@ private LogicalSchema handleStreamFlatMap( ); } + private LogicalSchema handleGroupBy( + final LogicalSchema sourceSchema, + final StreamGroupBy streamGroupBy + ) { + final List groupBy = streamGroupBy.getGroupByExpressions(); + + if (groupBy.size() != 1) { + return GroupByParamsFactory.multiExpressionSchema(sourceSchema); + } + + final SqlType rowKeyType = new ExpressionTypeManager(sourceSchema, functionRegistry) + .getExpressionSqlType(groupBy.get(0)); + + return GroupByParamsFactory.singleExpressionSchema(sourceSchema, rowKeyType); + } + private LogicalSchema handleStreamSelect( final LogicalSchema schema, final StreamSelect streamSelect diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamGroupByBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamGroupByBuilder.java index 60f284e431e1..b1c689192ade 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamGroupByBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamGroupByBuilder.java @@ -31,6 +31,7 @@ import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.streams.kstream.Grouped; +import org.apache.kafka.streams.kstream.KGroupedStream; public final class StreamGroupByBuilder { private StreamGroupByBuilder() { @@ -64,13 +65,7 @@ public static KGroupedStreamHolder build( final LogicalSchema sourceSchema = stream.getSchema(); final QueryContext queryContext = step.getProperties().getQueryContext(); final Formats formats = step.getInternalFormats(); - final Grouped grouped = buildGrouped( - formats, - sourceSchema, - queryContext, - queryBuilder, - groupedFactory - ); + final List groupBy = CodeGenRunner.compileExpressions( step.getGroupByExpressions().stream(), "Group By", @@ -78,11 +73,22 @@ public static KGroupedStreamHolder build( queryBuilder.getKsqlConfig(), queryBuilder.getFunctionRegistry() ); - final GroupByMapper mapper = new GroupByMapper<>(groupBy); - return KGroupedStreamHolder.of( - stream.getStream().filter((key, value) -> value != null).groupBy(mapper, grouped), - stream.getSchema() + + final GroupByParams params = GroupByParamsFactory.build(sourceSchema, groupBy); + + final Grouped grouped = buildGrouped( + formats, + params.getSchema(), + queryContext, + queryBuilder, + groupedFactory ); + + final KGroupedStream groupedStream = stream.getStream() + .filter((k, v) -> v != null) + .groupBy((k, v) -> params.getMapper().apply(v), grouped); + + return KGroupedStreamHolder.of(groupedStream, params.getSchema()); } private static Grouped buildGrouped( diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilder.java index 5f158db9fcd3..6594ea9ecc39 100644 --- 
a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilder.java @@ -51,7 +51,7 @@ public static KStreamHolder build( final LogicalSchema resultSchema = new StepSchemaResolver(queryBuilder.getKsqlConfig(), queryBuilder.getFunctionRegistry()).resolve(selectKey, sourceSchema); - final KeyBuilder keyBuilder = StructKeyUtil.keySchema(resultSchema); + final KeyBuilder keyBuilder = StructKeyUtil.keyBuilder(resultSchema); final KStream kstream = stream.getStream(); final KStream rekeyed = kstream diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/TableGroupByBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/TableGroupByBuilder.java index f81d6fd5f5fc..43f52b245ae0 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/TableGroupByBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/TableGroupByBuilder.java @@ -28,10 +28,12 @@ import io.confluent.ksql.schema.ksql.PhysicalSchema; import java.util.List; import java.util.Objects; +import java.util.function.Function; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.kstream.Grouped; +import org.apache.kafka.streams.kstream.KGroupedTable; import org.apache.kafka.streams.kstream.KeyValueMapper; public final class TableGroupByBuilder { @@ -47,8 +49,19 @@ public static KGroupedTableHolder build( final LogicalSchema sourceSchema = table.getSchema(); final QueryContext queryContext = step.getProperties().getQueryContext(); final Formats formats = step.getInternalFormats(); - final PhysicalSchema physicalSchema = PhysicalSchema.from( + + final List groupBy = CodeGenRunner.compileExpressions( + step.getGroupByExpressions().stream(), + "Group By", sourceSchema, + queryBuilder.getKsqlConfig(), + queryBuilder.getFunctionRegistry() + ); + + final GroupByParams params = GroupByParamsFactory.build(sourceSchema, groupBy); + + final PhysicalSchema physicalSchema = PhysicalSchema.from( + params.getSchema(), formats.getOptions() ); final Serde keySerde = queryBuilder.buildKeySerde( @@ -66,37 +79,26 @@ public static KGroupedTableHolder build( keySerde, valSerde ); - final List groupBy = CodeGenRunner.compileExpressions( - step.getGroupByExpressions().stream(), - "Group By", - sourceSchema, - queryBuilder.getKsqlConfig(), - queryBuilder.getFunctionRegistry() - ); - final GroupByMapper mapper = new GroupByMapper<>(groupBy); - return KGroupedTableHolder.of( - table.getTable() - .filter((key, value) -> value != null) - .groupBy(new TableKeyValueMapper<>(mapper), grouped), - table.getSchema() - ); + + final KGroupedTable groupedTable = table.getTable() + .filter((k, v) -> v != null) + .groupBy(new TableKeyValueMapper<>(params.getMapper()), grouped); + + return KGroupedTableHolder.of(groupedTable, params.getSchema()); } public static final class TableKeyValueMapper implements KeyValueMapper> { - private final GroupByMapper groupByMapper; - private TableKeyValueMapper(final GroupByMapper groupByMapper) { + private final Function groupByMapper; + + private TableKeyValueMapper(final Function groupByMapper) { this.groupByMapper = Objects.requireNonNull(groupByMapper, "groupByMapper"); } @Override public KeyValue apply(final K key, final GenericRow value) { - return new KeyValue<>(groupByMapper.apply(key, value), value); - } - - 
GroupByMapper getGroupByMapper() { - return groupByMapper; + return new KeyValue<>(groupByMapper.apply(value), value); } } } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java index 90e4d86bb780..5a02a3782f57 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java @@ -4,7 +4,6 @@ import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -40,16 +39,19 @@ @RunWith(MockitoJUnitRunner.class) public class AggregateParamsFactoryTest { + private static final LogicalSchema INPUT_SCHEMA = LogicalSchema.builder() .valueColumn(ColumnName.of("REQUIRED0"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ARGUMENT0"), SqlTypes.INTEGER) .valueColumn(ColumnName.of("REQUIRED1"), SqlTypes.STRING) .valueColumn(ColumnName.of("ARGUMENT1"), SqlTypes.DOUBLE) .build(); + private static final List NON_AGG_COLUMNS = ImmutableList.of( INPUT_SCHEMA.value().get(0).ref(), INPUT_SCHEMA.value().get(2).ref() ); + private static final FunctionCall AGG0 = new FunctionCall( FunctionName.of("AGG0"), ImmutableList.of(new ColumnReferenceExp(ColumnRef.withoutSource(ColumnName.of("ARGUMENT0")))) diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/GroupByMapperTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/GroupByMapperTest.java deleted file mode 100644 index 6e5d413c81a1..000000000000 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/GroupByMapperTest.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2018 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.execution.streams; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.is; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.google.common.collect.ImmutableList; -import io.confluent.ksql.GenericRow; -import io.confluent.ksql.execution.codegen.ExpressionMetadata; -import io.confluent.ksql.execution.util.StructKeyUtil; -import java.util.Collections; -import org.apache.kafka.connect.data.Struct; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class GroupByMapperTest { - - @Mock - private ExpressionMetadata groupBy0; - - @Mock - private ExpressionMetadata groupBy1; - - @Mock - private Struct key; - @Mock - private GenericRow value; - - private GroupByMapper mapper; - - @Before - public void setUp() { - mapper = new GroupByMapper<>(ImmutableList.of(groupBy0, groupBy1)); - - when(groupBy0.evaluate(any())).thenReturn("result0"); - when(groupBy1.evaluate(any())).thenReturn("result1"); - } - - @Test(expected = NullPointerException.class) - public void shouldThrowOnNullParam() { - new GroupByMapper(null); - } - - @Test(expected = IllegalArgumentException.class) - public void shouldThrowOnEmptyParam() { - new GroupByMapper(Collections.emptyList()); - } - - @Test - public void shouldInvokeEvaluatorsWithCorrectParams() { - // When: - mapper.apply(key, value); - - // Then: - verify(groupBy0).evaluate(value); - verify(groupBy1).evaluate(value); - } - - @Test - public void shouldGenerateGroupByKey() { - // When: - final Struct result = mapper.apply(key, value); - - // Then: - assertThat(result, is(StructKeyUtil.asStructKey("result0|+|result1"))); - } - - @Test - public void shouldSupportNullValues() { - // Given: - when(groupBy0.evaluate(any())).thenReturn(null); - - // When: - final Struct result = mapper.apply(key, value); - - // Then: - assertThat(result, is(StructKeyUtil.asStructKey("null|+|result1"))); - } - - @Test - public void shouldUseNullIfExpressionThrows() { - // Given: - when(groupBy0.evaluate(any())).thenThrow(new RuntimeException("Boom")); - - // When: - final Struct result = mapper.apply(key, value); - - // Then: - assertThat(result, is(StructKeyUtil.asStructKey("null|+|result1"))); - } -} diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/GroupByParamsFactoryTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/GroupByParamsFactoryTest.java new file mode 100644 index 000000000000..2f47919a1a3e --- /dev/null +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/GroupByParamsFactoryTest.java @@ -0,0 +1,175 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.streams; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.NullPointerTester; +import com.google.common.testing.NullPointerTester.Visibility; +import io.confluent.ksql.GenericRow; +import io.confluent.ksql.execution.codegen.ExpressionMetadata; +import io.confluent.ksql.execution.util.StructKeyUtil; +import io.confluent.ksql.execution.util.StructKeyUtil.KeyBuilder; +import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.types.SqlType; +import io.confluent.ksql.schema.ksql.types.SqlTypes; +import java.util.Collections; +import java.util.List; +import org.apache.kafka.connect.data.Struct; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class GroupByParamsFactoryTest { + + private static final KeyBuilder INT_KEY_BUILDER = StructKeyUtil.keyBuilder(SqlTypes.INTEGER); + private static final KeyBuilder STRING_KEY_BUILDER = StructKeyUtil.keyBuilder(SqlTypes.STRING); + + private static final LogicalSchema SOURCE_SCHEMA = LogicalSchema.builder() + .valueColumn(ColumnName.of("v0"), SqlTypes.DOUBLE) + .build(); + + @Mock + private ExpressionMetadata groupBy0; + + @Mock + private ExpressionMetadata groupBy1; + + @Mock + private GenericRow value; + + private GroupByParams singlePrams; + private GroupByParams multiParams; + + @Before + public void setUp() { + when(groupBy0.getExpressionType()).thenReturn(SqlTypes.INTEGER); + + singlePrams = GroupByParamsFactory.build(SOURCE_SCHEMA, ImmutableList.of(groupBy0)); + multiParams = GroupByParamsFactory.build(SOURCE_SCHEMA, ImmutableList.of(groupBy0, groupBy1)); + + when(groupBy0.evaluate(any())).thenReturn(0); + when(groupBy1.evaluate(any())).thenReturn(0L); + } + + @SuppressWarnings("UnstableApiUsage") + @Test + public void shouldThrowOnNullParam() { + new NullPointerTester() + .setDefault(List.class, ImmutableList.of(groupBy0)) + .setDefault(LogicalSchema.class, SOURCE_SCHEMA) + .setDefault(SqlType.class, SqlTypes.BIGINT) + .testStaticMethods(GroupByParamsFactory.class, Visibility.PACKAGE); + } + + @Test(expected = IllegalArgumentException.class) + public void shouldThrowOnEmptyParam() { + GroupByParamsFactory.build(SOURCE_SCHEMA, Collections.emptyList()); + } + + @Test + public void shouldInvokeEvaluatorsWithCorrectParams() { + // When: + multiParams.getMapper().apply(value); + + // Then: + verify(groupBy0).evaluate(value); + verify(groupBy1).evaluate(value); + } + + @Test + public void shouldGenerateSingleExpressionGroupByKey() { + // Given: + when(groupBy0.evaluate(any())).thenReturn(10); + + // When: + final Struct result = singlePrams.getMapper().apply(value); + + // Then: + assertThat(result, is(INT_KEY_BUILDER.build(10))); + } + + @Test + public void shouldGenerateMultiExpressionGroupByKey() { + // Given: + when(groupBy0.evaluate(any())).thenReturn(99); + when(groupBy1.evaluate(any())).thenReturn(-100L); + + // When: + final Struct result = multiParams.getMapper().apply(value); + + // Then: + assertThat(result, is(STRING_KEY_BUILDER.build("99|+|-100"))); + } + + @Test + 
public void shouldReturnNullIfSingleExpressionResolvesToNull() { + // Given: + when(groupBy0.evaluate(any())).thenReturn(null); + + // When: + final Struct result = singlePrams.getMapper().apply(value); + + // Then: + assertThat(result, is(nullValue())); + } + + @Test + public void shouldReturnNullIfAnyMultiExpressionResolvesToNull() { + // Given: + when(groupBy0.evaluate(any())).thenReturn(null); + + // When: + final Struct result = multiParams.getMapper().apply(value); + + // Then: + assertThat(result, is(nullValue())); + } + + @Test + public void shouldReturnNullIfExpressionThrowsInSingle() { + // Given: + when(groupBy0.evaluate(any())).thenThrow(new RuntimeException("Boom")); + + // When: + final Struct result = singlePrams.getMapper().apply(value); + + // Then: + assertThat(result, is(nullValue())); + } + + @Test + public void shouldReturnNullExpressionThrowsInMulti() { + // Given: + when(groupBy0.evaluate(any())).thenThrow(new RuntimeException("Boom")); + + // When: + final Struct result = multiParams.getMapper().apply(value); + + // Then: + assertThat(result, is(nullValue())); + } +} diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamGroupByBuilderTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamGroupByBuilderTest.java index bd91b1c9cab2..f705ddd4eaea 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamGroupByBuilderTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamGroupByBuilderTest.java @@ -1,7 +1,5 @@ package io.confluent.ksql.execution.streams; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import static org.mockito.ArgumentMatchers.any; @@ -27,6 +25,7 @@ import io.confluent.ksql.execution.plan.StreamGroupBy; import io.confluent.ksql.execution.plan.StreamGroupByKey; import io.confluent.ksql.execution.util.StructKeyUtil; +import io.confluent.ksql.execution.util.StructKeyUtil.KeyBuilder; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; @@ -38,6 +37,7 @@ import io.confluent.ksql.serde.FormatInfo; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.SchemaUtil; import java.util.List; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.connect.data.Struct; @@ -56,15 +56,29 @@ import org.mockito.junit.MockitoRule; public class StreamGroupByBuilderTest { + + private static final KeyBuilder STRING_KEY_BUILDER = StructKeyUtil.keyBuilder(SqlTypes.STRING); + private static final SourceName ALIAS = SourceName.of("SOURCE"); + private static final LogicalSchema SCHEMA = LogicalSchema.builder() .valueColumn(ColumnName.of("PAC"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("MAN"), SqlTypes.STRING) .build() .withAlias(SourceName.of(ALIAS.name())) .withMetaAndKeyColsInValue(); + + private static final LogicalSchema REKEYED_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) + .valueColumns(SCHEMA.value()) + .build(); + private static final PhysicalSchema PHYSICAL_SCHEMA = PhysicalSchema.from(SCHEMA, SerdeOption.none()); + + private static final PhysicalSchema REKEYED_PHYSICAL_SCHEMA = + PhysicalSchema.from(REKEYED_SCHEMA, SerdeOption.none()); + private static final List GROUP_BY_EXPRESSIONS = ImmutableList.of( columnReference("PAC"), 
columnReference("MAN") @@ -107,8 +121,6 @@ public class StreamGroupByBuilderTest { @Mock private KGroupedStream groupedStream; @Captor - private ArgumentCaptor> mapperCaptor; - @Captor private ArgumentCaptor> predicateCaptor; private PlanBuilder planBuilder; @@ -162,18 +174,8 @@ public void shouldPerformGroupByCorrectly() { // Then: assertThat(result.getGroupedStream(), is(groupedStream)); verify(sourceStream).filter(any()); - verify(filteredStream).groupBy(mapperCaptor.capture(), same(grouped)); + verify(filteredStream).groupBy(any(), same(grouped)); verifyNoMoreInteractions(filteredStream, sourceStream); - final GroupByMapper mapper = mapperCaptor.getValue(); - assertThat(mapper.getExpressionMetadata(), hasSize(2)); - assertThat( - mapper.getExpressionMetadata().get(0).getExpression(), - equalTo(GROUP_BY_EXPRESSIONS.get(0)) - ); - assertThat( - mapper.getExpressionMetadata().get(1).getExpression(), - equalTo(GROUP_BY_EXPRESSIONS.get(1)) - ); } @Test @@ -184,8 +186,8 @@ public void shouldFilterNullRowsBeforeGroupBy() { // Then: verify(sourceStream).filter(predicateCaptor.capture()); final Predicate predicate = predicateCaptor.getValue(); - assertThat(predicate.test(StructKeyUtil.asStructKey("foo"), new GenericRow()), is(true)); - assertThat(predicate.test(StructKeyUtil.asStructKey("foo"), null), is(false)); + assertThat(predicate.test(STRING_KEY_BUILDER.build("foo"), new GenericRow()), is(true)); + assertThat(predicate.test(STRING_KEY_BUILDER.build("foo"), null), is(false)); } @Test @@ -203,7 +205,10 @@ public void shouldReturnCorrectSchemaForGroupBy() { final KGroupedStreamHolder result = streamGroupBy.build(planBuilder); // Then: - assertThat(result.getSchema(), is(SCHEMA)); + assertThat(result.getSchema(), is(LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) + .valueColumns(SCHEMA.value()) + .build())); } @Test @@ -214,7 +219,7 @@ public void shouldBuildKeySerdeCorrectlyForGroupBy() { // Then: verify(queryBuilder).buildKeySerde( FORMATS.getKeyFormat(), - PHYSICAL_SCHEMA, + REKEYED_PHYSICAL_SCHEMA, STEP_CTX ); } @@ -227,7 +232,7 @@ public void shouldBuildValueSerdeCorrectlyForGroupBy() { // Then: verify(queryBuilder).buildValueSerde( FORMATS.getValueFormat(), - PHYSICAL_SCHEMA, + REKEYED_PHYSICAL_SCHEMA, STEP_CTX ); } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilderTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilderTest.java index 8907c820d8d7..3f9b0e23bad4 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilderTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StreamSelectKeyBuilderTest.java @@ -15,7 +15,6 @@ package io.confluent.ksql.execution.streams; -import static io.confluent.ksql.execution.util.StructKeyUtil.asStructKey; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import static org.mockito.ArgumentMatchers.any; @@ -86,12 +85,14 @@ public class StreamSelectKeyBuilderTest { .valueColumn(ALIAS, ColumnName.of("BOI"), SqlTypes.BIGINT) .build(); - private static final KeyBuilder RESULT_KEY_BUILDER = StructKeyUtil.keySchema(RESULT_SCHEMA); + private static final KeyBuilder RESULT_KEY_BUILDER = StructKeyUtil.keyBuilder(RESULT_SCHEMA); private static final long A_BOI = 5000; private static final long A_BIG = 3000; - private static final Struct SOURCE_KEY = asStructKey("dre"); + + private static final Struct SOURCE_KEY = 
StructKeyUtil.keyBuilder(SqlTypes.STRING) + .build("dre"); @Mock private KStream kstream; diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/TableGroupByBuilderTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/TableGroupByBuilderTest.java index 64f6df6c7478..005d083da4e2 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/TableGroupByBuilderTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/TableGroupByBuilderTest.java @@ -1,7 +1,5 @@ package io.confluent.ksql.execution.streams; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import static org.mockito.ArgumentMatchers.any; @@ -39,6 +37,7 @@ import io.confluent.ksql.serde.FormatInfo; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.SchemaUtil; import java.util.List; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.connect.data.Struct; @@ -64,7 +63,17 @@ public class TableGroupByBuilderTest { .build() .withAlias(ALIAS) .withMetaAndKeyColsInValue(); - private static final PhysicalSchema PHYSICAL_SCHEMA = PhysicalSchema.from(SCHEMA, SerdeOption.none()); + + private static final LogicalSchema REKEYED_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) + .valueColumns(SCHEMA.value()) + .build(); + + private static final PhysicalSchema PHYSICAL_SCHEMA = + PhysicalSchema.from(SCHEMA, SerdeOption.none()); + + private static final PhysicalSchema REKEYED_PHYSICAL_SCHEMA = + PhysicalSchema.from(REKEYED_SCHEMA, SerdeOption.none()); private static final List GROUPBY_EXPRESSIONS = ImmutableList.of( columnReference("PAC"), @@ -85,6 +94,8 @@ public class TableGroupByBuilderTest { SerdeOption.none() ); + private static final Struct KEY = StructKeyUtil.keyBuilder(SqlTypes.STRING).build("key"); + @Mock private KsqlQueryBuilder queryBuilder; @Mock @@ -163,16 +174,6 @@ public void shouldPerformGroupByCorrectly() { verify(sourceTable).filter(any()); verify(filteredTable).groupBy(mapperCaptor.capture(), same(grouped)); verifyNoMoreInteractions(filteredTable, sourceTable); - final GroupByMapper mapper = mapperCaptor.getValue().getGroupByMapper(); - assertThat(mapper.getExpressionMetadata(), hasSize(2)); - assertThat( - mapper.getExpressionMetadata().get(0).getExpression(), - equalTo(GROUPBY_EXPRESSIONS.get(0)) - ); - assertThat( - mapper.getExpressionMetadata().get(1).getExpression(), - equalTo(GROUPBY_EXPRESSIONS.get(1)) - ); } @Test @@ -181,7 +182,10 @@ public void shouldReturnCorrectSchema() { final KGroupedTableHolder result = groupBy.build(planBuilder); // Then: - assertThat(result.getSchema(), is(SCHEMA)); + assertThat(result.getSchema(), is(is(LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) + .valueColumns(SCHEMA.value()) + .build()))); } @Test @@ -192,8 +196,8 @@ public void shouldFilterNullRowsBeforeGroupBy() { // Then: verify(sourceTable).filter(predicateCaptor.capture()); final Predicate predicate = predicateCaptor.getValue(); - assertThat(predicate.test(StructKeyUtil.asStructKey("key"), new GenericRow()), is(true)); - assertThat(predicate.test(StructKeyUtil.asStructKey("key"), null), is(false)); + assertThat(predicate.test(KEY, new GenericRow()), is(true)); + assertThat(predicate.test(KEY, null), is(false)); } @Test @@ -213,7 +217,7 @@ public void 
shouldBuildKeySerdeCorrectlyForGroupBy() { // Then: verify(queryBuilder).buildKeySerde( FORMATS.getKeyFormat(), - PHYSICAL_SCHEMA, + REKEYED_PHYSICAL_SCHEMA, STEP_CONTEXT ); } @@ -226,7 +230,7 @@ public void shouldBuildValueSerdeCorrectlyForGroupBy() { // Then: verify(queryBuilder).buildValueSerde( FORMATS.getValueFormat(), - PHYSICAL_SCHEMA, + REKEYED_PHYSICAL_SCHEMA, STEP_CONTEXT ); } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/KsqlMaterializationTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/KsqlMaterializationTest.java index 96e606fe7d8a..6be413a4c56a 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/KsqlMaterializationTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/KsqlMaterializationTest.java @@ -62,7 +62,7 @@ public class KsqlMaterializationTest { .valueColumn(ColumnName.of("v1"), SqlTypes.STRING) .build(); - private static final Struct A_KEY = StructKeyUtil.asStructKey("k"); + private static final Struct A_KEY = StructKeyUtil.keyBuilder(SqlTypes.STRING).build("k"); private static final long A_ROWTIME = 12335L; private static final Range WINDOW_START_BOUNDS = Range.closed( Instant.now(), diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedSessionTableTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedSessionTableTest.java index 4d9672a550ab..34909f92b145 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedSessionTableTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedSessionTableTest.java @@ -64,7 +64,7 @@ public class KsMaterializedSessionTableTest { .valueColumn(ColumnName.of("v0"), SqlTypes.STRING) .build(); - private static final Struct A_KEY = StructKeyUtil.asStructKey("x"); + private static final Struct A_KEY = StructKeyUtil.keyBuilder(SqlTypes.STRING).build("x"); private static final GenericRow A_VALUE = new GenericRow("c0l"); private static final Instant LOWER_INSTANT = Instant.now(); diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedTableTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedTableTest.java index f98557e4e3a4..c1299a88068b 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedTableTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedTableTest.java @@ -55,7 +55,7 @@ public class KsMaterializedTableTest { .valueColumn(ColumnName.of("v0"), SqlTypes.STRING) .build(); - private static final Struct A_KEY = StructKeyUtil.asStructKey("x"); + private static final Struct A_KEY = StructKeyUtil.keyBuilder(SqlTypes.STRING).build("x"); @Rule public final ExpectedException expectedException = ExpectedException.none(); diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedWindowTableTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedWindowTableTest.java index 98122045724d..673d8fa86083 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedWindowTableTest.java +++ 
b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializedWindowTableTest.java @@ -63,7 +63,7 @@ public class KsMaterializedWindowTableTest { .valueColumn(ColumnName.of("v0"), SqlTypes.STRING) .build(); - private static final Struct A_KEY = StructKeyUtil.asStructKey("x"); + private static final Struct A_KEY = StructKeyUtil.keyBuilder(SqlTypes.STRING).build("x"); private static final Range WINDOW_START_BOUNDS = Range.closed( Instant.now(), From 584c4a79d6f227913a387250a06e293bda4500ea Mon Sep 17 00:00:00 2001 From: Andy Coates Date: Thu, 12 Dec 2019 10:26:32 +0000 Subject: [PATCH 027/123] chore: reserve KLIP numbers --- design-proposals/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/design-proposals/README.md b/design-proposals/README.md index aaaff7eacf8e..e7bc51691254 100644 --- a/design-proposals/README.md +++ b/design-proposals/README.md @@ -54,6 +54,10 @@ Next KLIP number: **14** | [KLIP-11: Redesign KSQL query language](klip-11-DQL.md) | Proposal | N/A | | [KLIP-12: Implement High-Availability for Pull queries](klip-12-pull-high-availability.md)| Proposal | N/A | | [KLIP-13: Introduce KSQL command to print connect worker properties to the console](klip-13-introduce-KSQL-command-to-print-connect-worker-properties-to-the-console.md) | Proposal | N/A | -| [KLIP-14: ROWTIME as Pseudocolumn](klip-14-rowtime-as-pseudocolumn.md) | Proposal | N/A | +| [KLIP-14: ROWTIME as Pseudocolumn](klip-14-rowtime-as-pseudocolumn.md) | Approved | N/A | | [KLIP-15: KSQLDB new API and Client(klip-15-new-api-and-client.md | Proposal | N/A | | [KLIP-16: Introduce 'K$' dynamic views | Proposal | N/A | +| [KLIP-17: Remove 'INSERT INTO' in favour of SQL Union | Proposal | N/A | +| [KLIP-18: Distributed Metastore | Proposal | N/A | +| [KLIP-19: Introduce Materialize Views | Proposal | N/A | +| [KLIP-20: Remove 'TERMINATE' statements | Proposal | N/A | From d95f420dba00b99061679d44c2663736c3b1c8f4 Mon Sep 17 00:00:00 2001 From: Andy Coates Date: Thu, 12 Dec 2019 12:16:33 +0000 Subject: [PATCH 028/123] chore: reserve KLIP numbers --- design-proposals/README.md | 1 + design-proposals/klip-14-rowtime-as-pseudocolumn.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/design-proposals/README.md b/design-proposals/README.md index e7bc51691254..3c85361427b7 100644 --- a/design-proposals/README.md +++ b/design-proposals/README.md @@ -61,3 +61,4 @@ Next KLIP number: **14** | [KLIP-18: Distributed Metastore | Proposal | N/A | | [KLIP-19: Introduce Materialize Views | Proposal | N/A | | [KLIP-20: Remove 'TERMINATE' statements | Proposal | N/A | +| [KLIP-21: Correct 'INSERT VALUES' semantics | Proposal | N/A | diff --git a/design-proposals/klip-14-rowtime-as-pseudocolumn.md b/design-proposals/klip-14-rowtime-as-pseudocolumn.md index 6a4d84385366..58e832361cc9 100644 --- a/design-proposals/klip-14-rowtime-as-pseudocolumn.md +++ b/design-proposals/klip-14-rowtime-as-pseudocolumn.md @@ -2,7 +2,7 @@ **Author**: @big-andy-coates | **Release Target**: TBD | -**Status**: In Discussion | +**Status**: Design Approved | **Discussion**: https://github.com/confluentinc/ksql/pull/4026 **tl;dr:** From da962599c17c47108dce702071ee0b13270e9fc6 Mon Sep 17 00:00:00 2001 From: Bill Bejeck Date: Thu, 12 Dec 2019 10:07:35 -0500 Subject: [PATCH 029/123] chore: Update KafkaStreams.close to take a duration parameter (#4110) Updated QueryMetadata#close method to use KafkaStreams#close(Duration.ofXXX) vs. the no-arg version. 
When calling KafkaStreams#close() the default timeout is set to Long.MAX_VALUE so the close call will block forever. Reviewers: Guozhang Wang , Andy Coates , Almog Gavra --- .../java/io/confluent/ksql/util/KsqlConfig.java | 13 +++++++++++++ .../io/confluent/ksql/query/QueryExecutor.java | 10 ++++++---- .../ksql/util/PersistentQueryMetadata.java | 7 ++++--- .../java/io/confluent/ksql/util/QueryMetadata.java | 10 +++++++--- .../ksql/util/TransientQueryMetadata.java | 7 ++++--- .../io/confluent/ksql/util/QueryMetadataTest.java | 11 ++++++----- .../rest/entity/QueryDescriptionFactoryTest.java | 14 ++++++++++---- .../streaming/StreamedQueryResourceTest.java | 6 ++++-- 8 files changed, 54 insertions(+), 24 deletions(-) diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java index 279436dccd0d..919ac0216b94 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java +++ b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java @@ -187,6 +187,13 @@ public class KsqlConfig extends AbstractConfig { public static final Collection COMPATIBLY_BREAKING_CONFIG_DEFS = ImmutableList.of(); + public static final String KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG = + "ksql.streams.shutdown.timeout.ms"; + public static final Long KSQL_SHUTDOWN_TIMEOUT_MS_DEFAULT = 300_000L; + public static final String KSQL_SHUTDOWN_TIMEOUT_MS_DOC = "Timeout in " + + "milliseconds to block waiting for the underlying streams instance to exit"; + + private enum ConfigGeneration { LEGACY, CURRENT @@ -499,6 +506,12 @@ private static ConfigDef buildConfigDef(final ConfigGeneration generation) { KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_DEFAULT, Importance.MEDIUM, KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_DOC + ).define( + KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG, + Type.LONG, + KSQL_SHUTDOWN_TIMEOUT_MS_DEFAULT, + Importance.MEDIUM, + KSQL_SHUTDOWN_TIMEOUT_MS_DOC ) .withClientSslSupport(); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java index 44dce8de7184..34301d205afd 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java @@ -15,6 +15,8 @@ package io.confluent.ksql.query; +import static io.confluent.ksql.util.KsqlConfig.KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG; + import io.confluent.ksql.GenericRow; import io.confluent.ksql.errors.ProductionExceptionHandlerUtil; import io.confluent.ksql.execution.builder.KsqlQueryBuilder; @@ -176,8 +178,8 @@ public TransientQueryMetadata buildTransientQuery( built.topology, streamsProperties, overrides, - queryCloseCallback - ); + queryCloseCallback, + ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG)); } private static Optional getMaterializationInfo(final Object result) { @@ -237,8 +239,8 @@ public PersistentQueryMetadata buildQuery( ksqlQueryBuilder.getSchemas(), streamsProperties, overrides, - queryCloseCallback - ); + queryCloseCallback, + ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG)); } private TransientQueryQueue buildTransientQueryQueue( diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java index 3947e378bdf6..b44dc9c0dde0 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java +++ 
b/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java @@ -63,8 +63,8 @@ public PersistentQueryMetadata( final QuerySchemas schemas, final Map streamsProperties, final Map overriddenProperties, - final Consumer closeCallback - ) { + final Consumer closeCallback, + final Long closeTimeout) { // CHECKSTYLE_RULES.ON: ParameterNumberCheck super( statementString, @@ -76,7 +76,8 @@ public PersistentQueryMetadata( topology, streamsProperties, overriddenProperties, - closeCallback); + closeCallback, + closeTimeout); this.id = requireNonNull(id, "id"); this.resultTopic = requireNonNull(resultTopic, "resultTopic"); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java index d51706afa860..47414605ab6f 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java @@ -20,6 +20,7 @@ import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.LogicalSchema; import java.lang.Thread.UncaughtExceptionHandler; +import java.time.Duration; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -44,6 +45,7 @@ public class QueryMetadata { private final Consumer closeCallback; private final Set sourceNames; private final LogicalSchema logicalSchema; + private final Long closeTimeout; private Optional queryStateListener = Optional.empty(); private boolean everStarted = false; @@ -59,8 +61,8 @@ protected QueryMetadata( final Topology topology, final Map streamsProperties, final Map overriddenProperties, - final Consumer closeCallback - ) { + final Consumer closeCallback, + final Long closeTimeout) { // CHECKSTYLE_RULES.ON: ParameterNumberCheck this.statementString = Objects.requireNonNull(statementString, "statementString"); this.kafkaStreams = Objects.requireNonNull(kafkaStreams, "kafkaStreams"); @@ -76,6 +78,7 @@ protected QueryMetadata( this.closeCallback = Objects.requireNonNull(closeCallback, "closeCallback"); this.sourceNames = Objects.requireNonNull(sourceNames, "sourceNames"); this.logicalSchema = Objects.requireNonNull(logicalSchema, "logicalSchema"); + this.closeTimeout = Objects.requireNonNull(closeTimeout, "closeTimeout"); } protected QueryMetadata(final QueryMetadata other, final Consumer closeCallback) { @@ -89,6 +92,7 @@ protected QueryMetadata(final QueryMetadata other, final Consumer this.sourceNames = other.sourceNames; this.logicalSchema = other.logicalSchema; this.closeCallback = Objects.requireNonNull(closeCallback, "closeCallback"); + this.closeTimeout = other.closeTimeout; } public void registerQueryStateListener(final QueryStateListener queryStateListener) { @@ -141,7 +145,7 @@ public boolean hasEverBeenStarted() { } public void close() { - kafkaStreams.close(); + kafkaStreams.close(Duration.ofMillis(closeTimeout)); kafkaStreams.cleanUp(); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java index 9cb451f42e7e..8f3f33be693f 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java @@ -51,7 +51,8 @@ public TransientQueryMetadata( final Topology topology, final Map streamsProperties, final Map overriddenProperties, - final Consumer closeCallback) { + final Consumer closeCallback, + final Long closeTimeout) { // 
CHECKSTYLE_RULES.ON: ParameterNumberCheck super( statementString, @@ -63,8 +64,8 @@ public TransientQueryMetadata( topology, streamsProperties, overriddenProperties, - closeCallback - ); + closeCallback, + closeTimeout); this.limitHandlerSetter = Objects.requireNonNull(limitHandlerSetter, "limitHandlerSetter"); this.rowQueue = Objects.requireNonNull(rowQueue, "rowQueue"); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/QueryMetadataTest.java b/ksql-engine/src/test/java/io/confluent/ksql/util/QueryMetadataTest.java index 6d6576838a5a..44d89e7baee0 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/QueryMetadataTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/QueryMetadataTest.java @@ -15,7 +15,6 @@ package io.confluent.ksql.util; -import static io.confluent.ksql.metastore.model.DataSource.DataSourceType; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.inOrder; @@ -28,6 +27,7 @@ import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; +import java.time.Duration; import java.util.Collections; import java.util.Set; import java.util.function.Consumer; @@ -49,6 +49,7 @@ public class QueryMetadataTest { .valueColumn(ColumnName.of("f0"), SqlTypes.STRING) .build(); private static final Set SOME_SOURCES = ImmutableSet.of(SourceName.of("s1"), SourceName.of("s2")); + private static final Long closeTimeout = KsqlConfig.KSQL_SHUTDOWN_TIMEOUT_MS_DEFAULT; @Mock private Topology topoplogy; @@ -72,8 +73,8 @@ public void setup() { topoplogy, Collections.emptyMap(), Collections.emptyMap(), - closeCallback - ); + closeCallback, + closeTimeout); } @Test @@ -131,7 +132,7 @@ public void shouldCloseKStreamsAppOnCloseThenCloseCallback() { // Then: final InOrder inOrder = inOrder(kafkaStreams, closeCallback); - inOrder.verify(kafkaStreams).close(); + inOrder.verify(kafkaStreams).close(Duration.ofMillis(closeTimeout)); inOrder.verify(closeCallback).accept(query); } @@ -142,7 +143,7 @@ public void shouldCleanUpKStreamsAppAfterCloseOnClose() { // Then: final InOrder inOrder = inOrder(kafkaStreams); - inOrder.verify(kafkaStreams).close(); + inOrder.verify(kafkaStreams).close(Duration.ofMillis(closeTimeout)); inOrder.verify(kafkaStreams).cleanUp(); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java index 1040803963cd..a77244684401 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java @@ -33,6 +33,7 @@ import io.confluent.ksql.schema.ksql.SqlBaseType; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.SerdeOption; +import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.PersistentQueryMetadata; import io.confluent.ksql.util.QueryMetadata; import io.confluent.ksql.util.QuerySchemas; @@ -74,6 +75,7 @@ public class QueryDescriptionFactoryTest { private static final ImmutableSet SOURCE_NAMES = ImmutableSet.of(SourceName.of("s1"), SourceName.of("s2")); private static final String SQL_TEXT = "test statement"; private static final String TOPOLOGY_TEXT = "Topology Text"; + private static final Long closeTimeout = KsqlConfig.KSQL_SHUTDOWN_TIMEOUT_MS_DEFAULT; @Mock private Consumer 
queryCloseCallback; @@ -108,7 +110,8 @@ public void setUp() { topology, STREAMS_PROPS, PROP_OVERRIDES, - queryCloseCallback); + queryCloseCallback, + closeTimeout); transientQueryDescription = QueryDescriptionFactory.forQueryMetadata(transientQuery); @@ -128,7 +131,8 @@ public void setUp() { QuerySchemas.of(new LinkedHashMap<>()), STREAMS_PROPS, PROP_OVERRIDES, - queryCloseCallback); + queryCloseCallback, + closeTimeout); persistentQueryDescription = QueryDescriptionFactory.forQueryMetadata(persistentQuery); } @@ -221,7 +225,8 @@ public void shouldHandleRowTimeInValueSchemaForTransientQuery() { topology, STREAMS_PROPS, PROP_OVERRIDES, - queryCloseCallback); + queryCloseCallback, + closeTimeout); // When: transientQueryDescription = QueryDescriptionFactory.forQueryMetadata(transientQuery); @@ -255,7 +260,8 @@ public void shouldHandleRowKeyInValueSchemaForTransientQuery() { topology, STREAMS_PROPS, PROP_OVERRIDES, - queryCloseCallback); + queryCloseCallback, + closeTimeout); // When: transientQueryDescription = QueryDescriptionFactory.forQueryMetadata(transientQuery); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index 26d17805f7e4..43655bb03ccd 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -110,6 +110,7 @@ public class StreamedQueryResourceTest { private static final KsqlConfig VALID_CONFIG = new KsqlConfig(ImmutableMap.of( StreamsConfig.APPLICATION_SERVER_CONFIG, "something:1" )); + private static final Long closeTimeout = KsqlConfig.KSQL_SHUTDOWN_TIMEOUT_MS_DEFAULT; private static final String TOPIC_NAME = "test_stream"; private static final String PUSH_QUERY_STRING = "SELECT * FROM " + TOPIC_NAME + " EMIT CHANGES;"; @@ -378,7 +379,8 @@ public void shouldStreamRowsCorrectly() throws Throwable { mock(Topology.class), Collections.emptyMap(), Collections.emptyMap(), - queryCloseCallback); + queryCloseCallback, + closeTimeout); when(mockKsqlEngine.executeQuery(serviceContext, ConfiguredStatement.of(query, requestStreamsProperties, VALID_CONFIG))) @@ -451,7 +453,7 @@ public void shouldStreamRowsCorrectly() throws Throwable { verify(mockKafkaStreams).start(); verify(mockKafkaStreams).setUncaughtExceptionHandler(any()); verify(mockKafkaStreams).cleanUp(); - verify(mockKafkaStreams).close(); + verify(mockKafkaStreams).close(Duration.ofMillis(closeTimeout)); // If one of the other threads has somehow managed to throw an exception without breaking things up until this // point, we throw that exception now in the main thread and cause the test to fail From 54bc2bea2d39314dd300b6396bff6efbf1cfe684 Mon Sep 17 00:00:00 2001 From: Andy Coates Date: Thu, 12 Dec 2019 16:03:13 +0000 Subject: [PATCH 030/123] chore: reserve klip number and update some links --- design-proposals/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/design-proposals/README.md b/design-proposals/README.md index 3c85361427b7..403641fcbbc1 100644 --- a/design-proposals/README.md +++ b/design-proposals/README.md @@ -57,8 +57,8 @@ Next KLIP number: **14** | [KLIP-14: ROWTIME as Pseudocolumn](klip-14-rowtime-as-pseudocolumn.md) | Approved | N/A | | [KLIP-15: KSQLDB new API and Client(klip-15-new-api-and-client.md | Proposal | 
N/A |
 | [KLIP-16: Introduce 'K$' dynamic views | Proposal | N/A |
-| [KLIP-17: Remove 'INSERT INTO' in favour of SQL Union | Proposal | N/A |
-| [KLIP-18: Distributed Metastore | Proposal | N/A |
-| [KLIP-19: Introduce Materialize Views | Proposal | N/A |
-| [KLIP-20: Remove 'TERMINATE' statements | Proposal | N/A |
-| [KLIP-21: Correct 'INSERT VALUES' semantics | Proposal | N/A |
+| [KLIP-17: Remove 'INSERT INTO' in favour of SQL Union](klip-17-sql-union.md) | Proposed [Discussion](https://github.com/confluentinc/ksql/pull/4125) | N/A |
+| [KLIP-18: Distributed Metastore](klip-18-distributed-metastore.md) | Proposal | N/A |
+| [KLIP-19: Introduce Materialize Views](klip-19-materialize-views.md) | Proposal | N/A |
+| [KLIP-20: Remove 'TERMINATE' statements](klip-20_remove_terminate.md) | Proposal | N/A |
+| [KLIP-21: Correct 'INSERT VALUES' semantics](klip-21_correct_insert_values_semantics.md) | Proposal | N/A |

From 858e4dc4cfe8a5e6a514d1df632ea9942f5545b6 Mon Sep 17 00:00:00 2001
From: Rohan
Date: Thu, 12 Dec 2019 21:39:17 -0800
Subject: [PATCH 031/123] feat: support writing plans to command topic (#4106)

This patch adds support for writing the physical plan to the command topic in
place of the original KSQL statements. Part of this change is a small refactor
to our rest-app-to-engine communication module (ksql.rest.server.computation):

- CommandQueue.enqueue now accepts CommandId and Command as parameters rather
  than computing them.
- DistributingExecutor no longer takes a RequestValidator. Instead, it accepts
  a factory for creating validated commands. A validated command is one that
  has been validated to execute successfully against an engine snapshot.
  RequestValidator also uses an instance of this factory to validate
  statements at the API endpoint layer.

For now, writing of query plans is enabled by a config
(ksql.execution.plan.enable) which defaults to false. We'll drop this config
once all the plan schema changes are merged. A rough sketch of the resulting
enqueue flow follows.
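The sketch below is illustrative only: the `EnqueueSketch` class, its
`enqueue` method, and the `plan` parameter are stand-ins rather than code from
this patch. It shows how a caller might choose between a plan-based and a
statement-based Command before handing both the id and the command to the
queue:

    package example.sketch; // hypothetical package, not part of this patch

    import io.confluent.ksql.engine.KsqlPlan;
    import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan;
    import io.confluent.ksql.rest.entity.CommandId;
    import io.confluent.ksql.rest.server.computation.Command;
    import io.confluent.ksql.rest.server.computation.CommandQueue;
    import io.confluent.ksql.statement.ConfiguredStatement;
    import io.confluent.ksql.util.KsqlConfig;
    import org.apache.kafka.clients.producer.Producer;

    final class EnqueueSketch {

      // The queue no longer computes the CommandId or Command itself; the
      // caller supplies both. With the feature flag on, the Command wraps the
      // physical plan; with it off, it carries the original statement text.
      static void enqueue(
          final CommandQueue queue,
          final CommandId commandId,
          final ConfiguredStatement<?> statement,
          final KsqlPlan plan, // assumed: `statement` planned against an engine snapshot
          final Producer<CommandId, Command> producer
      ) {
        final KsqlConfig config = statement.getConfig();

        final Command command = config.getBoolean(KsqlConfig.KSQL_EXECUTION_PLANS_ENABLE)
            ? Command.of(ConfiguredKsqlPlan.of(plan, statement.getOverrides(), config))
            : Command.of(statement);

        queue.enqueueCommand(commandId, command, producer);
      }
    }

Keeping id assignment and validation at the API layer leaves the queue as a
plain transport, which fits the transactional producer that enqueueCommand
already receives.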
--- .../io/confluent/ksql/util/KsqlConfig.java | 9 + .../ksql/planner/plan/ConfiguredKsqlPlan.java | 22 +- .../planner/plan/ConfiguredKsqlPlanTest.java | 53 ++++ .../ksql/rest/server/CommandTopic.java | 6 +- .../ksql/rest/server/computation/Command.java | 46 +++- .../rest/server/computation/CommandQueue.java | 7 +- .../rest/server/computation/CommandStore.java | 28 +-- .../computation/DistributingExecutor.java | 41 ++- .../InteractiveStatementExecutor.java | 92 +++---- .../computation/InternalTopicSerdes.java | 65 +++++ .../server/computation/KafkaConfigStore.java | 21 +- .../computation/ValidatedCommandFactory.java | 116 +++++++++ .../rest/server/execution/RequestHandler.java | 10 +- .../rest/server/resources/KsqlResource.java | 6 +- .../server/validation/CustomValidators.java | 8 +- .../server/validation/RequestValidator.java | 13 +- .../validation/TerminateQueryValidator.java | 52 ---- .../rest/util/InternalTopicJsonSerdeUtil.java | 52 ---- .../server/computation/CommandStoreTest.java | 60 ++--- .../rest/server/computation/CommandTest.java | 18 +- .../computation/ConfigTopicKeyTest.java | 8 +- .../computation/DistributingExecutorTest.java | 125 +++++----- .../InteractiveStatementExecutorTest.java | 199 ++++++++++++--- .../computation/InternalTopicSerdesTest.java | 75 ++++++ .../computation/KafkaConfigStoreTest.java | 30 ++- .../rest/server/computation/RecoveryTest.java | 24 +- .../ValidatedCommandFactoryTest.java | 233 ++++++++++++++++++ .../server/execution/RequestHandlerTest.java | 6 +- .../server/resources/KsqlResourceTest.java | 169 ++++++++----- .../ksql/rest/server/utils/TestUtils.java | 7 +- .../validation/RequestValidatorTest.java | 24 +- .../TerminateQueryValidatorTest.java | 118 --------- .../ksql/rest/entity/CommandStatus.java | 5 + 33 files changed, 1140 insertions(+), 608 deletions(-) create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/planner/plan/ConfiguredKsqlPlanTest.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InternalTopicSerdes.java create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/ValidatedCommandFactory.java delete mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/TerminateQueryValidator.java delete mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/InternalTopicJsonSerdeUtil.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InternalTopicSerdesTest.java create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/ValidatedCommandFactoryTest.java delete mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/TerminateQueryValidatorTest.java diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java index 919ac0216b94..913571c6dc58 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java +++ b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java @@ -184,6 +184,9 @@ public class KsqlConfig extends AbstractConfig { public static final String KSQL_QUERY_PULL_STREAMSTORE_REBALANCING_TIMEOUT_MS_DOC = "Timeout in " + "milliseconds when waiting for rebalancing of the stream store during a pull query"; + public static final String KSQL_EXECUTION_PLANS_ENABLE = "ksql.execution.plan.enable"; + public static final boolean KSQL_EXECUTION_PLANS_ENABLE_DEFAULT = false; + public static final Collection 
COMPATIBLY_BREAKING_CONFIG_DEFS = ImmutableList.of(); @@ -512,6 +515,12 @@ private static ConfigDef buildConfigDef(final ConfigGeneration generation) { KSQL_SHUTDOWN_TIMEOUT_MS_DEFAULT, Importance.MEDIUM, KSQL_SHUTDOWN_TIMEOUT_MS_DOC + ).define( + KSQL_EXECUTION_PLANS_ENABLE, + Type.BOOLEAN, + KSQL_EXECUTION_PLANS_ENABLE_DEFAULT, + Importance.LOW, + "Feature flag to enable writing KSQL execution plans. For testing only." ) .withClientSslSupport(); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/ConfiguredKsqlPlan.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/ConfiguredKsqlPlan.java index 29fadfdce7ef..182aa02419a6 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/ConfiguredKsqlPlan.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/ConfiguredKsqlPlan.java @@ -15,6 +15,7 @@ package io.confluent.ksql.planner.plan; +import com.google.common.collect.ImmutableMap; import io.confluent.ksql.engine.KsqlPlan; import io.confluent.ksql.util.KsqlConfig; import java.util.Map; @@ -39,7 +40,7 @@ private ConfiguredKsqlPlan( final KsqlConfig config ) { this.plan = Objects.requireNonNull(plan, "plan"); - this.overrides = Objects.requireNonNull(overrides, "overrides"); + this.overrides = ImmutableMap.copyOf(Objects.requireNonNull(overrides, "overrides")); this.config = Objects.requireNonNull(config, "config"); } @@ -54,4 +55,23 @@ public Map getOverrides() { public KsqlConfig getConfig() { return config; } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ConfiguredKsqlPlan that = (ConfiguredKsqlPlan) o; + return Objects.equals(plan, that.plan) + && Objects.equals(overrides, that.overrides) + && Objects.equals(config, that.config); + } + + @Override + public int hashCode() { + return Objects.hash(plan, overrides, config); + } } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/ConfiguredKsqlPlanTest.java b/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/ConfiguredKsqlPlanTest.java new file mode 100644 index 000000000000..78492f475e65 --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/ConfiguredKsqlPlanTest.java @@ -0,0 +1,53 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.planner.plan; + +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.engine.KsqlPlan; +import io.confluent.ksql.util.KsqlConfig; +import java.util.Collections; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class ConfiguredKsqlPlanTest { + private static final Map OVERRIDES = Collections.singletonMap("foo", "bar"); + private static final Map OVERRIDES2 = Collections.singletonMap("biz", "baz"); + + @Mock + private KsqlPlan plan; + @Mock + private KsqlPlan plan2; + @Mock + private KsqlConfig ksqlConfig; + @Mock + private KsqlConfig ksqlConfig2; + + @Test + public void testEquality() { + new EqualsTester() + .addEqualityGroup( + ConfiguredKsqlPlan.of(plan, OVERRIDES, ksqlConfig), + ConfiguredKsqlPlan.of(plan, OVERRIDES, ksqlConfig)) + .addEqualityGroup(ConfiguredKsqlPlan.of(plan2, OVERRIDES, ksqlConfig)) + .addEqualityGroup(ConfiguredKsqlPlan.of(plan, OVERRIDES, ksqlConfig2)) + .addEqualityGroup(ConfiguredKsqlPlan.of(plan, OVERRIDES2, ksqlConfig)) + .testEquals(); + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/CommandTopic.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/CommandTopic.java index 42e9fca96082..160f3890bb86 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/CommandTopic.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/CommandTopic.java @@ -18,8 +18,8 @@ import com.google.common.collect.Lists; import io.confluent.ksql.rest.entity.CommandId; import io.confluent.ksql.rest.server.computation.Command; +import io.confluent.ksql.rest.server.computation.InternalTopicSerdes; import io.confluent.ksql.rest.server.computation.QueuedCommand; -import io.confluent.ksql.rest.util.InternalTopicJsonSerdeUtil; import java.time.Duration; import java.util.Collections; import java.util.List; @@ -50,8 +50,8 @@ public CommandTopic( commandTopicName, new KafkaConsumer<>( Objects.requireNonNull(kafkaConsumerProperties, "kafkaClientProperties"), - InternalTopicJsonSerdeUtil.getJsonDeserializer(CommandId.class, true), - InternalTopicJsonSerdeUtil.getJsonDeserializer(Command.class, false) + InternalTopicSerdes.deserializer(CommandId.class), + InternalTopicSerdes.deserializer(Command.class) ) ); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/Command.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/Command.java index bbcca43493c9..51ddf183df4d 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/Command.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/Command.java @@ -18,41 +18,73 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonSubTypes; +import io.confluent.ksql.engine.KsqlPlan; +import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan; +import io.confluent.ksql.statement.ConfiguredStatement; import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.Optional; @JsonSubTypes({}) public class Command { private final String statement; private final Map overwriteProperties; private final Map originalProperties; + private final Optional plan; @JsonCreator - public Command(@JsonProperty("statement") final String statement, - 
@JsonProperty("streamsProperties") final Map overwriteProperties, - @JsonProperty("originalProperties") final Map originalProperties) { + public Command( + @JsonProperty(value = "statement", required = true) final String statement, + @JsonProperty(value = "streamsProperties", required = true) + final Map overwriteProperties, + @JsonProperty(value = "originalProperties", required = true) + final Map originalProperties, + @JsonProperty("plan") + final Optional plan + ) { this.statement = statement; this.overwriteProperties = Collections.unmodifiableMap(overwriteProperties); this.originalProperties = originalProperties == null ? Collections.emptyMap() : originalProperties; + this.plan = Objects.requireNonNull(plan, "plan"); } - @JsonProperty("statement") public String getStatement() { return statement; } @JsonProperty("streamsProperties") - Map getOverwriteProperties() { + public Map getOverwriteProperties() { return Collections.unmodifiableMap(overwriteProperties); } - @JsonProperty("originalProperties") - Map getOriginalProperties() { + public Map getOriginalProperties() { return originalProperties; } + public Optional getPlan() { + return plan; + } + + public static Command of(final ConfiguredKsqlPlan configuredPlan) { + return new Command( + configuredPlan.getPlan().getStatementText(), + configuredPlan.getOverrides(), + configuredPlan.getConfig().getAllConfigPropsWithSecretsObfuscated(), + Optional.of(configuredPlan.getPlan()) + ); + } + + public static Command of(final ConfiguredStatement configuredStatement) { + return new Command( + configuredStatement.getStatementText(), + configuredStatement.getOverrides(), + configuredStatement.getConfig().getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); + } + @Override public boolean equals(final Object o) { return diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandQueue.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandQueue.java index 01bb5b753a1d..e97e7795e27f 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandQueue.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandQueue.java @@ -16,7 +16,6 @@ package io.confluent.ksql.rest.server.computation; import io.confluent.ksql.rest.entity.CommandId; -import io.confluent.ksql.statement.ConfiguredStatement; import java.io.Closeable; import java.time.Duration; import java.util.List; @@ -36,13 +35,15 @@ public interface CommandQueue extends Closeable { * it is guaranteed that the command has been persisted, without regard * for the {@link io.confluent.ksql.rest.entity.CommandStatus CommandStatus}. 
   *
-  * @param statement The statement to be distributed
+  * @param commandId The id of the command to be distributed
+  * @param command The command to be distributed
   * @param transactionalProducer The transactional producer used to enqueue the command
   * @return an asynchronous tracker that can be used to determine the current
   *         state of the command
   */
  QueuedCommandStatus enqueueCommand(
-      ConfiguredStatement<?> statement,
+      CommandId commandId,
+      Command command,
       Producer<CommandId, Command> transactionalProducer
  );
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandStore.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandStore.java
index 668c9cddf3f0..c80d8b66c46b 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandStore.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandStore.java
@@ -22,8 +22,6 @@
 import io.confluent.ksql.rest.entity.KsqlEntityList;
 import io.confluent.ksql.rest.server.CommandTopic;
 import io.confluent.ksql.rest.server.resources.KsqlRestException;
-import io.confluent.ksql.rest.util.InternalTopicJsonSerdeUtil;
-import io.confluent.ksql.statement.ConfiguredStatement;
 import io.confluent.ksql.util.KsqlException;
 import java.io.Closeable;
 import java.time.Duration;
@@ -59,7 +57,6 @@ public class CommandStore implements CommandQueue, Closeable {
   private static final int COMMAND_TOPIC_PARTITION = 0;
 
   private final CommandTopic commandTopic;
-  private final CommandIdAssigner commandIdAssigner;
   private final Map<CommandId, CommandStatusFuture> commandStatusMap;
   private final SequenceNumberFutureStore sequenceNumberFutureStore;
@@ -96,7 +93,6 @@ public static CommandStore create(
     return new CommandStore(
         commandTopicName,
         new CommandTopic(commandTopicName, kafkaConsumerProperties),
-        new CommandIdAssigner(),
         new SequenceNumberFutureStore(),
         kafkaConsumerProperties,
         kafkaProducerProperties,
@@ -108,14 +104,12 @@
   CommandStore(
       final String commandTopicName,
       final CommandTopic commandTopic,
-      final CommandIdAssigner commandIdAssigner,
       final SequenceNumberFutureStore sequenceNumberFutureStore,
       final Map<String, Object> kafkaConsumerProperties,
       final Map<String, Object> kafkaProducerProperties,
       final Duration commandQueueCatchupTimeout
   ) {
     this.commandTopic = Objects.requireNonNull(commandTopic, "commandTopic");
-    this.commandIdAssigner = Objects.requireNonNull(commandIdAssigner, "commandIdAssigner");
     this.commandStatusMap = Maps.newConcurrentMap();
     this.sequenceNumberFutureStore =
         Objects.requireNonNull(sequenceNumberFutureStore, "sequenceNumberFutureStore");
@@ -151,18 +145,10 @@ public void close() {
 
   @Override
   public QueuedCommandStatus enqueueCommand(
-      final ConfiguredStatement<?> statement,
+      final CommandId commandId,
+      final Command command,
       final Producer<CommandId, Command> transactionalProducer
   ) {
-    final CommandId commandId = commandIdAssigner.getCommandId(statement.getStatement());
-
-    // new commands that generate queries will use the new query id generation method from now on
-    final Command command = new Command(
-        statement.getStatementText(),
-        statement.getOverrides(),
-        statement.getConfig().getAllConfigPropsWithSecretsObfuscated()
-    );
-
     final CommandStatusFuture statusFuture = commandStatusMap.compute(
         commandId,
         (k, v) -> {
@@ -193,7 +179,7 @@ public QueuedCommandStatus enqueueCommand(
           String.format(
               "Could not write the statement '%s' into the "
                   + "command topic"
-                  + ".", statement.getStatementText()
+                  + ".", command.getStatement()
           ),
           e
       );
@@ -257,8 +243,8 @@ public void
ensureConsumedPast(final long seqNum, final Duration timeout) public Producer createTransactionalProducer() { return new KafkaProducer<>( kafkaProducerProperties, - InternalTopicJsonSerdeUtil.getJsonSerializer(true), - InternalTopicJsonSerdeUtil.getJsonSerializer(false) + InternalTopicSerdes.serializer(), + InternalTopicSerdes.serializer() ); } @@ -289,8 +275,8 @@ private long getCommandTopicOffset() { try (Consumer commandConsumer = new KafkaConsumer<>( kafkaConsumerProperties, - InternalTopicJsonSerdeUtil.getJsonDeserializer(CommandId.class, true), - InternalTopicJsonSerdeUtil.getJsonDeserializer(Command.class, false) + InternalTopicSerdes.deserializer(CommandId.class), + InternalTopicSerdes.deserializer(Command.class) )) { commandConsumer.assign(Collections.singleton(commandTopicPartition)); return commandConsumer.endOffsets(Collections.singletonList(commandTopicPartition)) diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/DistributingExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/DistributingExecutor.java index 1aa548f5834a..ae3c75c0089d 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/DistributingExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/DistributingExecutor.java @@ -16,23 +16,17 @@ import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.metastore.MetaStore; -import io.confluent.ksql.parser.KsqlParser.ParsedStatement; import io.confluent.ksql.parser.tree.Statement; import io.confluent.ksql.rest.entity.CommandId; import io.confluent.ksql.rest.entity.CommandStatus; import io.confluent.ksql.rest.entity.CommandStatusEntity; import io.confluent.ksql.rest.entity.KsqlEntity; -import io.confluent.ksql.rest.server.validation.RequestValidator; -import io.confluent.ksql.rest.util.TerminateCluster; import io.confluent.ksql.security.KsqlAuthorizationValidator; -import io.confluent.ksql.services.SandboxedServiceContext; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.statement.Injector; import io.confluent.ksql.util.KsqlServerException; import java.time.Duration; -import java.util.Collections; -import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.function.BiFunction; @@ -48,19 +42,19 @@ * {@code distributedCmdResponseTimeout}. 
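 * <p>The transactional happy path, roughly (a hedged sketch assembled from this
 * class's own calls below, not verbatim source):
 * <pre>{@code
 * transactionalProducer.initTransactions();
 * transactionalProducer.beginTransaction();
 * commandQueue.waitForCommandConsumer();
 * final Command command = validatedCommandFactory.create(injected, sandbox);
 * commandQueue.enqueueCommand(commandId, command, transactionalProducer);
 * transactionalProducer.commitTransaction();
 * }</pre>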
*/ public class DistributingExecutor { - private final CommandQueue commandQueue; private final Duration distributedCmdResponseTimeout; private final BiFunction injectorFactory; private final Optional authorizationValidator; - private final RequestValidator requestValidator; + private final ValidatedCommandFactory validatedCommandFactory; + private final CommandIdAssigner commandIdAssigner; public DistributingExecutor( final CommandQueue commandQueue, final Duration distributedCmdResponseTimeout, final BiFunction injectorFactory, final Optional authorizationValidator, - final RequestValidator requestValidator + final ValidatedCommandFactory validatedCommandFactory ) { this.commandQueue = Objects.requireNonNull(commandQueue, "commandQueue"); this.distributedCmdResponseTimeout = @@ -68,8 +62,11 @@ public DistributingExecutor( this.injectorFactory = Objects.requireNonNull(injectorFactory, "injectorFactory"); this.authorizationValidator = Objects.requireNonNull(authorizationValidator, "authorizationValidator"); - this.requestValidator = - Objects.requireNonNull(requestValidator, "requestValidator"); + this.validatedCommandFactory = Objects.requireNonNull( + validatedCommandFactory, + "validatedCommandFactory" + ); + this.commandIdAssigner = new CommandIdAssigner(); } /** @@ -83,9 +80,7 @@ public DistributingExecutor( * the old producer will be fenced off and unable to continue with its transaction. */ public Optional execute( - final ConfiguredStatement statement, - final ParsedStatement parsedStatement, - final Map mutableScopedProperties, + final ConfiguredStatement statement, final KsqlExecutionContext executionContext, final ServiceContext serviceContext ) { @@ -101,20 +96,14 @@ public Optional execute( transactionalProducer.initTransactions(); transactionalProducer.beginTransaction(); commandQueue.waitForCommandConsumer(); - - // Don't perform validation on Terminate Cluster statements - if (!parsedStatement.getStatementText() - .equals(TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT)) { - requestValidator.validate( - SandboxedServiceContext.create(serviceContext), - Collections.singletonList(parsedStatement), - mutableScopedProperties, - parsedStatement.getStatementText() - ); - } + final CommandId commandId = commandIdAssigner.getCommandId(statement.getStatement()); + final Command command = validatedCommandFactory.create( + injected, + executionContext.createSandbox(executionContext.getServiceContext()) + ); final QueuedCommandStatus queuedCommandStatus = - commandQueue.enqueueCommand(injected, transactionalProducer); + commandQueue.enqueueCommand(commandId, command, transactionalProducer); transactionalProducer.commitTransaction(); final CommandStatus commandStatus = queuedCommandStatus diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java index 8cf58aa3faaa..89ca6eb41a94 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java @@ -16,10 +16,10 @@ package io.confluent.ksql.rest.server.computation; import com.google.common.annotations.VisibleForTesting; +import io.confluent.ksql.KsqlExecutionContext.ExecuteResult; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.engine.KsqlPlan; import io.confluent.ksql.exception.ExceptionUtil; 
-import io.confluent.ksql.parser.KsqlParser.ParsedStatement; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.parser.tree.CreateAsSelect; import io.confluent.ksql.parser.tree.CreateTableAsSelect; @@ -33,17 +33,13 @@ import io.confluent.ksql.rest.entity.CommandStatus; import io.confluent.ksql.rest.server.StatementParser; import io.confluent.ksql.rest.server.resources.KsqlConfigurable; -import io.confluent.ksql.rest.util.QueryCapacityUtil; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlConstants; import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.PersistentQueryMetadata; import io.confluent.ksql.util.QueryMetadata; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -194,6 +190,10 @@ private void handleStatementWithTerminatedQueries( final long offset ) { try { + if (command.getPlan().isPresent()) { + executePlan(command, commandId, commandStatusFuture, command.getPlan().get(), mode); + return; + } final String statementString = command.getStatement(); putStatus( commandId, @@ -219,6 +219,42 @@ private void handleStatementWithTerminatedQueries( } } + private void executePlan( + final Command command, + final CommandId commandId, + final Optional commandStatusFuture, + final KsqlPlan plan, + final Mode mode + ) { + final KsqlConfig mergedConfig = buildMergedConfig(command); + final ConfiguredKsqlPlan configured = ConfiguredKsqlPlan.of( + plan, + command.getOverwriteProperties(), + mergedConfig + ); + putStatus( + commandId, + commandStatusFuture, + new CommandStatus(CommandStatus.Status.EXECUTING, "Executing statement") + ); + final ExecuteResult result = ksqlEngine.execute(serviceContext, configured); + if (mode == Mode.EXECUTE) { + result.getQuery().ifPresent(QueryMetadata::start); + } + final String successMessage = getSuccessMessage(result); + final CommandStatus successStatus = + new CommandStatus(CommandStatus.Status.SUCCESS, successMessage); + putFinalStatus(commandId, commandStatusFuture, successStatus); + } + + private String getSuccessMessage(final ExecuteResult result) { + if (result.getCommandResult().isPresent()) { + return result.getCommandResult().get(); + } + return "Created query with ID " + + ((PersistentQueryMetadata) result.getQuery().get()).getQueryId(); + } + @SuppressWarnings("unchecked") private void executeStatement( final PreparedStatement statement, @@ -270,52 +306,6 @@ private String executeDdlStatement(final PreparedStatement statement, final C .get(); } - /** - * @deprecated deprecate since 5.2. `RUN SCRIPT` will be removed from syntax in later release. 
- */ - @SuppressWarnings("DeprecatedIsStillUsed") - @Deprecated - private void handleLegacyRunScript(final Command command, final Mode mode) { - - final String sql = (String) command.getOverwriteProperties() - .get(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT); - - if (sql == null) { - throw new KsqlException("No statements received for LOAD FROM FILE."); - } - - final Map overriddenProperties = new HashMap<>( - command.getOverwriteProperties()); - - final KsqlConfig mergedConfig = buildMergedConfig(command); - - final List queries = new ArrayList<>(); - for (final ParsedStatement parsed : ksqlEngine.parse(sql)) { - final PreparedStatement prepared = ksqlEngine.prepare(parsed); - final ConfiguredStatement configured = - ConfiguredStatement.of(prepared, overriddenProperties, ksqlConfig); - ksqlEngine.execute(serviceContext, configured) - .getQuery() - .ifPresent(queries::add); - } - - if (QueryCapacityUtil.exceedsPersistentQueryCapacity(ksqlEngine, mergedConfig, 0)) { - queries.forEach(QueryMetadata::close); - QueryCapacityUtil.throwTooManyActivePersistentQueriesException( - ksqlEngine, mergedConfig, command.getStatement()); - } - - if (mode == Mode.EXECUTE) { - for (final QueryMetadata queryMetadata : queries) { - if (queryMetadata instanceof PersistentQueryMetadata) { - final PersistentQueryMetadata persistentQueryMd = - (PersistentQueryMetadata) queryMetadata; - persistentQueryMd.start(); - } - } - } - } - private PersistentQueryMetadata startQuery( final PreparedStatement statement, final Command command, diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InternalTopicSerdes.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InternalTopicSerdes.java new file mode 100644 index 000000000000..e36d9394f3ea --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InternalTopicSerdes.java @@ -0,0 +1,65 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */
+
+package io.confluent.ksql.rest.server.computation;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.confluent.ksql.execution.json.PlanJsonMapper;
+import java.io.IOException;
+import java.util.Objects;
+import org.apache.kafka.common.errors.SerializationException;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serializer;
+
+public final class InternalTopicSerdes {
+  private static final ObjectMapper MAPPER = PlanJsonMapper.create();
+
+  private InternalTopicSerdes() {
+  }
+
+  public static <T> Serializer<T> serializer() {
+    return new InternalTopicSerializer<>();
+  }
+
+  public static <T> Deserializer<T> deserializer(final Class<T> clazz) {
+    return new InternalTopicDeserializer<>(clazz);
+  }
+
+  private static final class InternalTopicSerializer<T> implements Serializer<T> {
+    public byte[] serialize(final String topic, final T obj) {
+      try {
+        return MAPPER.writeValueAsBytes(obj);
+      } catch (final IOException e) {
+        throw new SerializationException(e);
+      }
+    }
+  }
+
+  private static final class InternalTopicDeserializer<T> implements Deserializer<T> {
+    private final Class<T> clazz;
+
+    private InternalTopicDeserializer(final Class<T> clazz) {
+      this.clazz = Objects.requireNonNull(clazz, "clazz");
+    }
+
+    public T deserialize(final String topic, final byte[] serialized) {
+      try {
+        return MAPPER.readValue(serialized, clazz);
+      } catch (final IOException e) {
+        throw new SerializationException(e);
+      }
+    }
+  }
+}
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/KafkaConfigStore.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/KafkaConfigStore.java
index cee851e2d6f6..c264cd339cef 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/KafkaConfigStore.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/KafkaConfigStore.java
@@ -19,7 +19,6 @@
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.google.common.collect.ImmutableMap;
 import io.confluent.ksql.rest.server.computation.ConfigTopicKey.StringKey;
-import io.confluent.ksql.rest.util.InternalTopicJsonSerdeUtil;
 import io.confluent.ksql.util.KsqlConfig;
 import java.time.Duration;
 import java.time.temporal.ChronoUnit;
@@ -61,8 +60,8 @@ private static KafkaProducer<ConfigTopicKey, KsqlProperties> createProducer(
       final KsqlConfig ksqlConfig) {
     return new KafkaProducer<>(
         ksqlConfig.getKsqlStreamConfigProps(),
-        InternalTopicJsonSerdeUtil.getJsonSerializer(true),
-        InternalTopicJsonSerdeUtil.getJsonSerializer(false)
+        InternalTopicSerdes.serializer(),
+        InternalTopicSerdes.serializer()
     );
   }
@@ -96,8 +95,8 @@ public KafkaConfigStore(final String topicName, final KsqlConfig currentConfig)
     final KsqlProperties savedProperties = new KafkaWriteOnceStore<>(
         topicName,
         new StringKey(CONFIG_MSG_KEY),
-        InternalTopicJsonSerdeUtil.getJsonDeserializer(ConfigTopicKey.class, true),
-        InternalTopicJsonSerdeUtil.getJsonDeserializer(KsqlProperties.class, false),
+        InternalTopicSerdes.deserializer(ConfigTopicKey.class),
+        InternalTopicSerdes.deserializer(KsqlProperties.class),
         consumer,
         producer
     ).readMaybeWrite(currentProperties);
@@ -115,13 +114,13 @@ public static class KsqlProperties {
 
   @JsonCreator
   KsqlProperties(
-      @JsonProperty("ksqlProperties") final Map<String, String> ksqlProperties) {
-    this.ksqlProperties = ksqlProperties == null
-        ? Collections.emptyMap()
-        : ksqlProperties.entrySet()
+      @JsonProperty("ksqlProperties") final Optional<Map<String, String>> ksqlProperties) {
+    this.ksqlProperties = ksqlProperties.isPresent()
+        ?
ksqlProperties.get().entrySet() .stream() .filter(kv -> kv.getValue() != null) - .collect(ImmutableMap.toImmutableMap(Entry::getKey, Entry::getValue)); + .collect(ImmutableMap.toImmutableMap(Entry::getKey, Entry::getValue)) + : Collections.emptyMap(); } public Map getKsqlProperties() { @@ -129,7 +128,7 @@ public Map getKsqlProperties() { } static KsqlProperties createFor(final KsqlConfig ksqlConfig) { - return new KsqlProperties(ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + return new KsqlProperties(Optional.of(ksqlConfig.getAllConfigPropsWithSecretsObfuscated())); } @Override diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/ValidatedCommandFactory.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/ValidatedCommandFactory.java new file mode 100644 index 000000000000..010b79c0d5c9 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/ValidatedCommandFactory.java @@ -0,0 +1,116 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.rest.server.computation; + +import io.confluent.ksql.KsqlExecutionContext; +import io.confluent.ksql.engine.KsqlPlan; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.parser.tree.TerminateQuery; +import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan; +import io.confluent.ksql.query.QueryId; +import io.confluent.ksql.rest.util.TerminateCluster; +import io.confluent.ksql.services.ServiceContext; +import io.confluent.ksql.statement.ConfiguredStatement; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlStatementException; +import io.confluent.ksql.util.PersistentQueryMetadata; +import java.util.Objects; +import java.util.Optional; + +/** + * Creates commands that have been validated to successfully execute against + * the given engine snapshot. Validated commands are safe to enqueue onto the + * command queue. + */ +public final class ValidatedCommandFactory { + private final KsqlConfig config; + + public ValidatedCommandFactory(final KsqlConfig config) { + this.config = Objects.requireNonNull(config, "config"); + } + + /** + * Create a validated command. + * @param statement The KSQL statement to create the command for. + * @param context The KSQL engine snapshot to validate the command against. + * @return A validated command, which is safe to enqueue onto the command topic. + */ + public Command create( + final ConfiguredStatement statement, + final KsqlExecutionContext context) { + return create(statement, context.getServiceContext(), context); + } + + /** + * Create a validated command using the supplied service context + * @param statement The KSQL statement to create the command for. + * @param serviceContext The KSQL service context. + * @param context The KSQL engine snapshot to validate the command against. + * @return A validated command, which is safe to enqueue onto the command topic. 
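+   *
+   * <p>For example (a hedged sketch; the {@code sandbox} snapshot and
+   * {@code config} wiring are assumed for illustration):
+   * <pre>{@code
+   * final Command command = new ValidatedCommandFactory(config)
+   *     .create(statement, sandbox.getServiceContext(), sandbox);
+   * }</pre>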
+ */ + public Command create( + final ConfiguredStatement statement, + final ServiceContext serviceContext, + final KsqlExecutionContext context) { + if (statement.getStatementText().equals(TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT)) { + return Command.of(statement); + } else if (statement.getStatement() instanceof TerminateQuery) { + return createForTerminateQuery(statement, context); + } + return createForPlannedQuery(statement, serviceContext, context); + } + + private Command createForTerminateQuery( + final ConfiguredStatement statement, + final KsqlExecutionContext context + ) { + final TerminateQuery terminateQuery = (TerminateQuery) statement.getStatement(); + final Optional queryId = terminateQuery.getQueryId(); + + if (!queryId.isPresent()) { + context.getPersistentQueries().forEach(PersistentQueryMetadata::close); + return Command.of(statement); + } + + context.getPersistentQuery(queryId.get()) + .orElseThrow(() -> new KsqlStatementException( + "Unknown queryId: " + queryId.get(), + statement.getStatementText())) + .close(); + return Command.of(statement); + } + + private Command createForPlannedQuery( + final ConfiguredStatement statement, + final ServiceContext serviceContext, + final KsqlExecutionContext context + ) { + final KsqlPlan plan = context.plan(serviceContext, statement); + context.execute( + serviceContext, + ConfiguredKsqlPlan.of( + plan, + statement.getOverrides(), + statement.getConfig() + ) + ); + if (!config.getBoolean(KsqlConfig.KSQL_EXECUTION_PLANS_ENABLE)) { + return Command.of(statement); + } + return Command.of( + ConfiguredKsqlPlan.of(plan, statement.getOverrides(), statement.getConfig())); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java index bf9f1c479c6e..d473db0e53f4 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java @@ -81,7 +81,6 @@ public KsqlEntityList execute( executeStatement( serviceContext, configured, - parsed, scopedPropertyOverrides, entities ).ifPresent(entities::add); @@ -93,7 +92,6 @@ public KsqlEntityList execute( private Optional executeStatement( final ServiceContext serviceContext, final ConfiguredStatement configured, - final ParsedStatement parsed, final Map mutableScopedProperties, final KsqlEntityList entities ) { @@ -101,10 +99,10 @@ private Optional executeStatement( commandQueueSync.waitFor(new KsqlEntityList(entities), statementClass); - final StatementExecutor executor = (StatementExecutor) - customExecutors.getOrDefault(statementClass, - (stmt, props, ctx, svcCtx) -> - distributor.execute(stmt, parsed, props, ctx, svcCtx)); + final StatementExecutor executor = (StatementExecutor) customExecutors.getOrDefault( + statementClass, + (stmt, props, ctx, svcCtx) -> distributor.execute(stmt, ctx, svcCtx) + ); return executor.execute( configured, diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java index f828fbf14371..cf4cc872aaec 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java @@ -36,6 +36,7 @@ import io.confluent.ksql.rest.entity.Versions; import 
io.confluent.ksql.rest.server.computation.CommandQueue; import io.confluent.ksql.rest.server.computation.DistributingExecutor; +import io.confluent.ksql.rest.server.computation.ValidatedCommandFactory; import io.confluent.ksql.rest.server.execution.CustomExecutors; import io.confluent.ksql.rest.server.execution.DefaultCommandQueueSync; import io.confluent.ksql.rest.server.execution.RequestHandler; @@ -149,7 +150,8 @@ public void configure(final KsqlConfig config) { CustomValidators.VALIDATOR_MAP, injectorFactory, ksqlEngine::createSandbox, - config + config, + new ValidatedCommandFactory(config) ); this.handler = new RequestHandler( @@ -159,7 +161,7 @@ public void configure(final KsqlConfig config) { distributedCmdResponseTimeout, injectorFactory, authorizationValidator, - this.validator + new ValidatedCommandFactory(config) ), ksqlEngine, config, diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/CustomValidators.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/CustomValidators.java index 6c62dfd9b74f..a3ec2a9d9263 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/CustomValidators.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/CustomValidators.java @@ -37,7 +37,6 @@ import io.confluent.ksql.parser.tree.SetProperty; import io.confluent.ksql.parser.tree.ShowColumns; import io.confluent.ksql.parser.tree.Statement; -import io.confluent.ksql.parser.tree.TerminateQuery; import io.confluent.ksql.parser.tree.UnsetProperty; import io.confluent.ksql.rest.server.execution.DescribeConnectorExecutor; import io.confluent.ksql.rest.server.execution.DescribeFunctionExecutor; @@ -61,7 +60,6 @@ */ @SuppressWarnings({"unchecked", "rawtypes"}) public enum CustomValidators { - QUERY_ENDPOINT(Query.class, PullQueryExecutor::validate), PRINT_TOPIC(PrintTopic.class, PrintTopicValidator::validate), @@ -82,9 +80,7 @@ public enum CustomValidators { DESCRIBE_FUNCTION(DescribeFunction.class, DescribeFunctionExecutor::execute), DESCRIBE_CONNECTOR(DescribeConnector.class, new DescribeConnectorExecutor()::execute), SET_PROPERTY(SetProperty.class, PropertyExecutor::set), - UNSET_PROPERTY(UnsetProperty.class, PropertyExecutor::unset), - - TERMINATE_QUERY(TerminateQuery.class, TerminateQueryValidator::validate); + UNSET_PROPERTY(UnsetProperty.class, PropertyExecutor::unset); public static final Map, StatementValidator> VALIDATOR_MAP = ImmutableMap.copyOf( @@ -121,4 +117,4 @@ public void validate( final ServiceContext serviceContext) throws KsqlException { validator.validate(statement, mutableScopedProperties, executionContext, serviceContext); } -} \ No newline at end of file +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java index 7716e7f49b5a..5e8aed556cad 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java @@ -25,6 +25,8 @@ import io.confluent.ksql.parser.tree.CreateAsSelect; import io.confluent.ksql.parser.tree.InsertInto; import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.parser.tree.TerminateQuery; +import io.confluent.ksql.rest.server.computation.ValidatedCommandFactory; import io.confluent.ksql.rest.util.QueryCapacityUtil; import io.confluent.ksql.services.ServiceContext; import 
io.confluent.ksql.statement.ConfiguredStatement; @@ -49,6 +51,7 @@ public class RequestValidator { private final BiFunction injectorFactory; private final Function snapshotSupplier; private final KsqlConfig ksqlConfig; + private final ValidatedCommandFactory distributedStatementValidator; /** * @param customValidators a map describing how to validate each statement of type @@ -62,12 +65,15 @@ public RequestValidator( final Map, StatementValidator> customValidators, final BiFunction injectorFactory, final Function snapshotSupplier, - final KsqlConfig ksqlConfig + final KsqlConfig ksqlConfig, + final ValidatedCommandFactory distributedStatementValidator ) { this.customValidators = requireNonNull(customValidators, "customValidators"); this.injectorFactory = requireNonNull(injectorFactory, "injectorFactory"); this.snapshotSupplier = requireNonNull(snapshotSupplier, "snapshotSupplier"); this.ksqlConfig = requireNonNull(ksqlConfig, "ksqlConfig"); + this.distributedStatementValidator = requireNonNull( + distributedStatementValidator, "distributedStatementValidator"); } /** @@ -134,9 +140,10 @@ private int validate( if (customValidator != null) { customValidator .validate(configured, mutableScopedProperties, executionContext, serviceContext); - } else if (KsqlEngine.isExecutableStatement(configured.getStatement())) { + } else if (KsqlEngine.isExecutableStatement(configured.getStatement()) + || configured.getStatement() instanceof TerminateQuery) { final ConfiguredStatement statementInjected = injector.inject(configured); - executionContext.execute(serviceContext, statementInjected); + distributedStatementValidator.create(statementInjected, serviceContext, executionContext); } else { throw new KsqlStatementException( "Do not know how to validate statement of type: " + statementClass diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/TerminateQueryValidator.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/TerminateQueryValidator.java deleted file mode 100644 index 33ab62e5843e..000000000000 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/TerminateQueryValidator.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2019 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.rest.server.validation; - -import io.confluent.ksql.KsqlExecutionContext; -import io.confluent.ksql.parser.tree.TerminateQuery; -import io.confluent.ksql.query.QueryId; -import io.confluent.ksql.services.ServiceContext; -import io.confluent.ksql.statement.ConfiguredStatement; -import io.confluent.ksql.util.KsqlStatementException; -import io.confluent.ksql.util.PersistentQueryMetadata; -import java.util.Map; -import java.util.Optional; - -public final class TerminateQueryValidator { - - private TerminateQueryValidator() { } - - public static void validate( - final ConfiguredStatement statement, - final Map sessionProperties, - final KsqlExecutionContext context, - final ServiceContext serviceContext - ) { - final TerminateQuery terminateQuery = (TerminateQuery) statement.getStatement(); - final Optional queryId = terminateQuery.getQueryId(); - - if (!queryId.isPresent()) { - context.getPersistentQueries().forEach(PersistentQueryMetadata::close); - return; - } - - context.getPersistentQuery(queryId.get()) - .orElseThrow(() -> new KsqlStatementException( - "Unknown queryId: " + queryId.get(), - statement.getStatementText())) - .close(); - } -} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/InternalTopicJsonSerdeUtil.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/InternalTopicJsonSerdeUtil.java deleted file mode 100644 index bf2d5598e83d..000000000000 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/InternalTopicJsonSerdeUtil.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2018 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package io.confluent.ksql.rest.util; - -import io.confluent.kafka.serializers.KafkaJsonDeserializer; -import io.confluent.kafka.serializers.KafkaJsonDeserializerConfig; -import io.confluent.kafka.serializers.KafkaJsonSerializer; -import java.util.Collections; -import java.util.Map; -import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.serialization.Serializer; - -public final class InternalTopicJsonSerdeUtil { - - private InternalTopicJsonSerdeUtil(){} - - public static Serializer getJsonSerializer(final boolean isKey) { - final Serializer result = new KafkaJsonSerializer<>(); - result.configure(Collections.emptyMap(), isKey); - return result; - } - - public static Deserializer getJsonDeserializer( - final Class classs, - final boolean isKey) { - final Deserializer result = new KafkaJsonDeserializer<>(); - final String typeConfigProperty = isKey - ? 
KafkaJsonDeserializerConfig.JSON_KEY_TYPE - : KafkaJsonDeserializerConfig.JSON_VALUE_TYPE; - - final Map props = Collections.singletonMap( - typeConfigProperty, - classs - ); - result.configure(props, isKey); - return result; - } - -} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandStoreTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandStoreTest.java index 9256eae4c23a..84b357047d0d 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandStoreTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandStoreTest.java @@ -25,6 +25,7 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -42,6 +43,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; @@ -72,12 +74,7 @@ public class CommandStoreTest { private static final String COMMAND_TOPIC_NAME = "command"; private static final TopicPartition COMMAND_TOPIC_PARTITION = new TopicPartition(COMMAND_TOPIC_NAME, 0); - private static final KsqlConfig KSQL_CONFIG = new KsqlConfig( - Collections.singletonMap(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG, "foo")); - private static final Map OVERRIDE_PROPERTIES = - Collections.singletonMap(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); private static final Duration TIMEOUT = Duration.ofMillis(1000); - private static final AtomicInteger COUNTER = new AtomicInteger(); private static final String statementText = "test-statement"; private static final Duration NEW_CMDS_TIMEOUT = Duration.ofSeconds(30); @@ -91,18 +88,16 @@ public class CommandStoreTest { @Mock private CommandTopic commandTopic; @Mock - private Statement statement; - @Mock - private CommandIdAssigner commandIdAssigner; - @Mock private Producer transactionalProducer; - private ConfiguredStatement configured; - private final CommandId commandId = new CommandId(CommandId.Type.STREAM, "foo", CommandId.Action.CREATE); - private final Command command = - new Command(statementText, Collections.emptyMap(), Collections.emptyMap()); + private final Command command = new Command( + statementText, + Collections.emptyMap(), + Collections.emptyMap(), + Optional.empty() + ); private final RecordMetadata recordMetadata = new RecordMetadata( COMMAND_TOPIC_PARTITION, 0, 0, RecordBatch.NO_TIMESTAMP, 0L, 0, 0); @@ -137,10 +132,6 @@ public RecordMetadata get(final long timeout, final TimeUnit unit) { @Before public void setUp() { - when(commandIdAssigner.getCommandId(any())) - .thenAnswer(invocation -> new CommandId( - CommandId.Type.STREAM, "foo" + COUNTER.getAndIncrement(), CommandId.Action.CREATE)); - when(transactionalProducer.send(any(ProducerRecord.class))).thenReturn(testFuture); when(commandTopic.getNewCommands(any())).thenReturn(buildRecords(commandId, command)); @@ -149,13 +140,9 @@ public void setUp() { when(sequenceNumberFutureStore.getFutureForSequenceNumber(anyLong())).thenReturn(future); - configured = ConfiguredStatement.of( - PreparedStatement.of(statementText, statement), OVERRIDE_PROPERTIES, KSQL_CONFIG); - commandStore = new CommandStore( 
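        // note: the store is now constructed without a CommandIdAssigner;
        // command id assignment moved to DistributingExecutor in this change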
COMMAND_TOPIC_NAME, commandTopic, - commandIdAssigner, sequenceNumberFutureStore, Collections.emptyMap(), Collections.emptyMap(), @@ -166,13 +153,12 @@ public void setUp() { @Test public void shouldFailEnqueueIfCommandWithSameIdRegistered() { // Given: - when(commandIdAssigner.getCommandId(any())).thenReturn(commandId); - commandStore.enqueueCommand(configured, transactionalProducer); + commandStore.enqueueCommand(commandId, command, transactionalProducer); expectedException.expect(IllegalStateException.class); // When: - commandStore.enqueueCommand(configured, transactionalProducer); + commandStore.enqueueCommand(commandId, command, transactionalProducer); } @Test @@ -183,27 +169,25 @@ public void shouldCleanupCommandStatusOnProduceError() { .thenReturn(testFuture); expectedException.expect(KsqlException.class); expectedException.expectMessage("Could not write the statement 'test-statement' into the command topic."); - commandStore.enqueueCommand(configured, transactionalProducer); + commandStore.enqueueCommand(commandId, command, transactionalProducer); - // When: - commandStore.enqueueCommand(configured, transactionalProducer); + // When (Then: don't throw): + commandStore.enqueueCommand(commandId, command, transactionalProducer); } @Test public void shouldEnqueueNewAfterHandlingExistingCommand() { // Given: - when(commandIdAssigner.getCommandId(any())).thenReturn(commandId); - commandStore.enqueueCommand(configured, transactionalProducer); + commandStore.enqueueCommand(commandId, command, transactionalProducer); commandStore.getNewCommands(NEW_CMDS_TIMEOUT); // Should: - commandStore.enqueueCommand(configured, transactionalProducer); + commandStore.enqueueCommand(commandId, command, transactionalProducer); } @Test public void shouldRegisterBeforeDistributeAndReturnStatusOnGetNewCommands() { // Given: - when(commandIdAssigner.getCommandId(any())).thenReturn(commandId); when(transactionalProducer.send(any(ProducerRecord.class))).thenAnswer( invocation -> { final QueuedCommand queuedCommand = commandStore.getNewCommands(NEW_CMDS_TIMEOUT).get(0); @@ -218,7 +202,7 @@ public void shouldRegisterBeforeDistributeAndReturnStatusOnGetNewCommands() { ); // When: - commandStore.enqueueCommand(configured, transactionalProducer); + commandStore.enqueueCommand(commandId, command, transactionalProducer); // Then: verify(transactionalProducer).send(any(ProducerRecord.class)); @@ -244,21 +228,25 @@ public void shouldFilterNullCommands() { @Test public void shouldDistributeCommand() { - when(commandIdAssigner.getCommandId(any())).thenReturn(commandId); when(transactionalProducer.send(any(ProducerRecord.class))).thenReturn(testFuture); // When: - commandStore.enqueueCommand(configured, transactionalProducer); + commandStore.enqueueCommand(commandId, command, transactionalProducer); // Then: - //verify(transactionalProducer).send(same(commandId), any()); + verify(transactionalProducer).send(new ProducerRecord<>( + COMMAND_TOPIC_NAME, + COMMAND_TOPIC_PARTITION.partition(), + commandId, + command + )); } @Test public void shouldIncludeCommandSequenceNumberInSuccessfulQueuedCommandStatus() { // When: final QueuedCommandStatus commandStatus = - commandStore.enqueueCommand(configured, transactionalProducer); + commandStore.enqueueCommand(commandId, command, transactionalProducer); // Then: assertThat(commandStatus.getCommandSequenceNumber(), equalTo(recordMetadata.offset())); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandTest.java 
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandTest.java index 78e19491a35b..2090db341666 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandTest.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.Collections; import java.util.Map; +import java.util.Optional; import org.junit.Test; public class CommandTest { @@ -45,20 +46,6 @@ public void shouldDeserializeCorrectly() throws IOException { assertThat(command.getOriginalProperties(), equalTo(expectedOriginalProperties)); } - @Test - public void shouldDeserializeWithoutKsqlConfigCorrectly() throws IOException { - final String commandStr = "{" + - "\"statement\": \"test statement;\", " + - "\"streamsProperties\": {\"foo\": \"bar\"}" + - "}"; - final ObjectMapper mapper = JsonMapper.INSTANCE.mapper; - final Command command = mapper.readValue(commandStr, Command.class); - assertThat(command.getStatement(), equalTo("test statement;")); - final Map expecteOverwriteProperties = Collections.singletonMap("foo", "bar"); - assertThat(command.getOverwriteProperties(), equalTo(expecteOverwriteProperties)); - assertThat(command.getOriginalProperties(), equalTo(Collections.emptyMap())); - } - private void grep(final String string, final String regex) { assertThat(string.matches(regex), is(true)); } @@ -67,7 +54,8 @@ private void grep(final String string, final String regex) { public void shouldSerializeDeserializeCorrectly() throws IOException { final Command command = new Command( "test statement;", - Collections.singletonMap("foo", "bar"), Collections.singletonMap("biz", "baz")); + Collections.singletonMap("foo", "bar"), Collections.singletonMap("biz", "baz"), + Optional.empty()); final ObjectMapper mapper = JsonMapper.INSTANCE.mapper; final String serialized = mapper.writeValueAsString(command); grep(serialized, ".*\"streamsProperties\" *: *\\{ *\"foo\" *: *\"bar\" *\\}.*"); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/ConfigTopicKeyTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/ConfigTopicKeyTest.java index 357054e8e115..2df056c481dc 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/ConfigTopicKeyTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/ConfigTopicKeyTest.java @@ -21,7 +21,6 @@ import com.fasterxml.jackson.databind.exc.InvalidDefinitionException; import com.google.common.testing.EqualsTester; import io.confluent.ksql.rest.server.computation.ConfigTopicKey.StringKey; -import io.confluent.ksql.rest.util.InternalTopicJsonSerdeUtil; import java.nio.charset.StandardCharsets; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.serialization.Deserializer; @@ -37,10 +36,9 @@ public class ConfigTopicKeyTest { private final byte[] SERIALIZED = "{\"string\":{\"value\":\"string-key-value\"}}".getBytes(StandardCharsets.UTF_8); - private final Serializer serializer - = InternalTopicJsonSerdeUtil.getJsonSerializer(false); + private final Serializer serializer = InternalTopicSerdes.serializer(); private final Deserializer deserializer - = InternalTopicJsonSerdeUtil.getJsonDeserializer(ConfigTopicKey.class, false); + = InternalTopicSerdes.deserializer(ConfigTopicKey.class); @Rule public final ExpectedException expectedException = ExpectedException.none(); @@ -104,7 +102,7 @@ private 
IllegalArgumentMatcher illegalString( @Test public void shouldThrowOnStringKeyWithNoValue() { // Then: - expectedException.expect(illegalString(NullPointerException.class, "")); + expectedException.expect(SerializationException.class); // When: deserializer.deserialize("", "{\"string\":{}}".getBytes(StandardCharsets.UTF_8)); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java index 83abe615b4f1..f4f133caae6d 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java @@ -29,18 +29,22 @@ import com.google.common.collect.ImmutableMap; import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.exception.KsqlTopicAuthorizationException; +import io.confluent.ksql.execution.expression.tree.StringLiteral; import io.confluent.ksql.metastore.MetaStore; -import io.confluent.ksql.parser.KsqlParser.ParsedStatement; +import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; +import io.confluent.ksql.parser.properties.with.CreateSourceProperties; +import io.confluent.ksql.parser.tree.CreateStream; import io.confluent.ksql.parser.tree.ListProperties; import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.parser.tree.TableElements; +import io.confluent.ksql.properties.with.CommonCreateConfigs; import io.confluent.ksql.rest.entity.CommandId; import io.confluent.ksql.rest.entity.CommandId.Action; import io.confluent.ksql.rest.entity.CommandId.Type; import io.confluent.ksql.rest.entity.CommandStatus; import io.confluent.ksql.rest.entity.CommandStatus.Status; import io.confluent.ksql.rest.entity.CommandStatusEntity; -import io.confluent.ksql.rest.server.validation.RequestValidator; import io.confluent.ksql.security.KsqlAuthorizationValidator; import io.confluent.ksql.services.SandboxedServiceContext; import io.confluent.ksql.services.ServiceContext; @@ -52,7 +56,6 @@ import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.KsqlServerException; import java.time.Duration; -import java.util.Collections; import java.util.HashMap; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; @@ -70,33 +73,54 @@ @RunWith(MockitoJUnitRunner.class) public class DistributingExecutorTest { - private static final String SQL_STRING = "some ksql statement;"; private static final Duration DURATION_10_MS = Duration.ofMillis(10); private static final CommandId CS_COMMAND = new CommandId(Type.STREAM, "stream", Action.CREATE); private static final CommandStatus SUCCESS_STATUS = new CommandStatus(Status.SUCCESS, ""); private static final KsqlConfig KSQL_CONFIG = new KsqlConfig(new HashMap<>()); - private static final ConfiguredStatement EMPTY_STATEMENT = + private static final Statement STATEMENT = new CreateStream( + SourceName.of("TEST"), + TableElements.of(), + false, + CreateSourceProperties.from(ImmutableMap.of( + CommonCreateConfigs.KAFKA_TOPIC_NAME_PROPERTY, new StringLiteral("topic"), + CommonCreateConfigs.VALUE_FORMAT_PROPERTY, new StringLiteral("json") + )) + ); + private static final ConfiguredStatement CONFIGURED_STATEMENT = ConfiguredStatement.of( - PreparedStatement.of("", new ListProperties(Optional.empty())), + PreparedStatement.of("statement", STATEMENT), ImmutableMap.of(), KSQL_CONFIG ); 
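+  // A concrete CREATE STREAM statement (rather than the old empty placeholder)
+  // so that CommandIdAssigner can derive a deterministic CommandId, verified
+  // against IDGEN.getCommandId(...) in the enqueue expectations below.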
+ private static final CommandIdAssigner IDGEN = new CommandIdAssigner(); @Rule public ExpectedException expectedException = ExpectedException.none(); - @Mock CommandQueue queue; - @Mock QueuedCommandStatus status; - @Mock ServiceContext serviceContext; - @Mock Injector schemaInjector; - @Mock Injector topicInjector; - @Mock KsqlAuthorizationValidator authorizationValidator; - @Mock KsqlExecutionContext executionContext; - @Mock MetaStore metaStore; - @Mock RequestValidator requestValidator; - @Mock ParsedStatement parsedStatement; @Mock - Producer transactionalProducer; + private CommandQueue queue; + @Mock + private QueuedCommandStatus status; + @Mock + private ServiceContext serviceContext; + @Mock + private Injector schemaInjector; + @Mock + private Injector topicInjector; + @Mock + private KsqlAuthorizationValidator authorizationValidator; + @Mock + private KsqlExecutionContext executionContext; + @Mock + private KsqlExecutionContext sandboxContext; + @Mock + private MetaStore metaStore; + @Mock + private ValidatedCommandFactory validatedCommandFactory; + @Mock + private Producer transactionalProducer; + @Mock + private Command command; private DistributingExecutor distributor; private AtomicLong scnCounter; @@ -106,16 +130,15 @@ public void setUp() throws InterruptedException { scnCounter = new AtomicLong(); when(schemaInjector.inject(any())).thenAnswer(inv -> inv.getArgument(0)); when(topicInjector.inject(any())).thenAnswer(inv -> inv.getArgument(0)); - when(queue.enqueueCommand(EMPTY_STATEMENT, transactionalProducer)).thenReturn(status); + when(queue.enqueueCommand(any(), any(), any())).thenReturn(status); when(status.tryWaitForFinalStatus(any())).thenReturn(SUCCESS_STATUS); when(status.getCommandId()).thenReturn(CS_COMMAND); when(status.getCommandSequenceNumber()).thenAnswer(inv -> scnCounter.incrementAndGet()); when(executionContext.getMetaStore()).thenReturn(metaStore); + when(executionContext.createSandbox(any())).thenReturn(sandboxContext); serviceContext = SandboxedServiceContext.create(TestServiceContext.create()); when(executionContext.getServiceContext()).thenReturn(serviceContext); - when(requestValidator.validate( - serviceContext, Collections.singletonList(parsedStatement), ImmutableMap.of(), SQL_STRING)).thenReturn(1); - when(parsedStatement.getStatementText()).thenReturn(SQL_STRING); + when(validatedCommandFactory.create(any(), any())).thenReturn(command); when(queue.createTransactionalProducer()).thenReturn(transactionalProducer); distributor = new DistributingExecutor( @@ -123,37 +146,40 @@ public void setUp() throws InterruptedException { DURATION_10_MS, (ec, sc) -> InjectorChain.of(schemaInjector, topicInjector), Optional.of(authorizationValidator), - requestValidator + validatedCommandFactory ); } @Test public void shouldEnqueueSuccessfulCommandTransactionally() { // When: - distributor.execute(EMPTY_STATEMENT, parsedStatement, ImmutableMap.of(), executionContext, serviceContext); + distributor.execute(CONFIGURED_STATEMENT, executionContext, serviceContext); // Then: - InOrder inOrder = Mockito.inOrder(transactionalProducer, queue, requestValidator); - inOrder.verify(transactionalProducer, times(1)).initTransactions(); - inOrder.verify(transactionalProducer, times(1)).beginTransaction(); - inOrder.verify(queue, times(1)).waitForCommandConsumer(); - inOrder.verify(requestValidator).validate( - serviceContext, - Collections.singletonList(parsedStatement), - ImmutableMap.of(), - SQL_STRING); - inOrder.verify(queue, times(1)).enqueueCommand(EMPTY_STATEMENT, 
transactionalProducer); - inOrder.verify(transactionalProducer, times(1)).commitTransaction(); - inOrder.verify(transactionalProducer, times(1)).close(); + InOrder inOrder = Mockito.inOrder(transactionalProducer, queue, validatedCommandFactory); + inOrder.verify(transactionalProducer).initTransactions(); + inOrder.verify(transactionalProducer).beginTransaction(); + inOrder.verify(queue).waitForCommandConsumer(); + inOrder.verify(validatedCommandFactory).create( + CONFIGURED_STATEMENT, + sandboxContext + ); + inOrder.verify(queue).enqueueCommand( + IDGEN.getCommandId(CONFIGURED_STATEMENT.getStatement()), + command, + transactionalProducer + ); + inOrder.verify(transactionalProducer).commitTransaction(); + inOrder.verify(transactionalProducer).close(); } @Test public void shouldInferSchemas() { // When: - distributor.execute(EMPTY_STATEMENT, parsedStatement, ImmutableMap.of(), executionContext, serviceContext); + distributor.execute(CONFIGURED_STATEMENT, executionContext, serviceContext); // Then: - verify(schemaInjector, times(1)).inject(eq(EMPTY_STATEMENT)); + verify(schemaInjector, times(1)).inject(eq(CONFIGURED_STATEMENT)); } @Test @@ -161,9 +187,7 @@ public void shouldReturnCommandStatus() { // When: final CommandStatusEntity commandStatusEntity = (CommandStatusEntity) distributor.execute( - EMPTY_STATEMENT, - parsedStatement, - ImmutableMap.of(), + CONFIGURED_STATEMENT, executionContext, serviceContext ) @@ -179,24 +203,15 @@ public void shouldThrowExceptionOnFailureToEnqueue() { // Given: final KsqlException cause = new KsqlException("fail"); - final PreparedStatement preparedStatement = - PreparedStatement.of("x", new ListProperties(Optional.empty())); - - final ConfiguredStatement configured = - ConfiguredStatement.of( - preparedStatement, - ImmutableMap.of(), - KSQL_CONFIG); - - when(queue.enqueueCommand(configured, transactionalProducer)).thenThrow(cause); + when(queue.enqueueCommand(any(), any(), any())).thenThrow(cause); // Expect: expectedException.expect(KsqlServerException.class); expectedException.expectMessage( - "Could not write the statement 'x' into the command topic: fail"); + "Could not write the statement 'statement' into the command topic: fail"); expectedException.expectCause(is(cause)); // When: - distributor.execute(configured, parsedStatement, ImmutableMap.of(), executionContext, serviceContext); + distributor.execute(CONFIGURED_STATEMENT, executionContext, serviceContext); verify(transactionalProducer, times(1)).abortTransaction(); } @@ -214,7 +229,7 @@ public void shouldThrowFailureIfCannotInferSchema() { expectedException.expectMessage("Could not infer!"); // When: - distributor.execute(configured, parsedStatement, ImmutableMap.of(), executionContext, serviceContext); + distributor.execute(configured, executionContext, serviceContext); } @Test @@ -232,7 +247,7 @@ public void shouldThrowExceptionIfUserServiceContextIsDeniedAuthorization() { expectedException.expect(KsqlTopicAuthorizationException.class); // When: - distributor.execute(configured, parsedStatement, ImmutableMap.of(), executionContext, userServiceContext); + distributor.execute(configured, executionContext, userServiceContext); } @Test @@ -251,6 +266,6 @@ public void shouldThrowServerExceptionIfServerServiceContextIsDeniedAuthorizatio expectedException.expectCause(is(instanceOf(KsqlTopicAuthorizationException.class))); // When: - distributor.execute(configured, parsedStatement, ImmutableMap.of(), executionContext, userServiceContext); + distributor.execute(configured, executionContext, 
userServiceContext); } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java index 6856ab54b2cc..d3ebec489c5b 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java @@ -33,6 +33,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.confluent.ksql.KsqlConfigTestUtil; import io.confluent.ksql.KsqlExecutionContext.ExecuteResult; import io.confluent.ksql.engine.KsqlEngine; @@ -56,6 +57,7 @@ import io.confluent.ksql.rest.entity.CommandId.Action; import io.confluent.ksql.rest.entity.CommandId.Type; import io.confluent.ksql.rest.entity.CommandStatus; +import io.confluent.ksql.rest.entity.CommandStatus.Status; import io.confluent.ksql.rest.server.StatementParser; import io.confluent.ksql.rest.server.utils.TestUtils; import io.confluent.ksql.services.FakeKafkaTopicClient; @@ -93,13 +95,13 @@ @RunWith(MockitoJUnitRunner.class) public class InteractiveStatementExecutorTest { - - private static final Map PRE_VERSION_5_NULL_ORIGINAL_PROPS = null; private static final String CREATE_STREAM_FOO_STATMENT = "CREATE STREAM foo (" + "biz bigint," + " baz varchar) " + "WITH (kafka_topic = 'foo', " + "value_format = 'json');"; + private static final CommandId COMMAND_ID = new CommandId(Type.STREAM, "foo", Action.CREATE); + private static final QueryId QUERY_ID = new QueryId("qid"); private KsqlEngine ksqlEngine; private InteractiveStatementExecutor statementExecutor; @@ -122,6 +124,12 @@ public class InteractiveStatementExecutorTest { private PersistentQueryMetadata mockQueryMetadata; @Mock private QueuedCommand queuedCommand; + @Mock + private KsqlPlan plan; + @Mock + private CommandStatusFuture status; + + private Command plannedCommand; @Before public void setUp() { @@ -160,6 +168,13 @@ public void setUp() { statementExecutor.configure(ksqlConfig); statementExecutorWithMocks.configure(ksqlConfig); + + plannedCommand = new Command( + CREATE_STREAM_FOO_STATMENT, + emptyMap(), + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.of(plan) + ); } @After @@ -222,7 +237,9 @@ public void shouldThrowOnUnexpectedException() { final Command command = new Command( statementText, emptyMap(), - emptyMap()); + emptyMap(), + Optional.empty() + ); final CommandId commandId = new CommandId( CommandId.Type.STREAM, "_CSASGen", CommandId.Action.CREATE); @@ -266,7 +283,9 @@ public void shouldBuildQueriesWithPersistedConfig() { final Command csasCommand = new Command( statementText, emptyMap(), - originalConfig.getAllConfigPropsWithSecretsObfuscated()); + originalConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId csasCommandId = new CommandId( CommandId.Type.STREAM, "_CSASGen", @@ -294,14 +313,12 @@ public void shouldCompleteFutureOnSuccess() { final Command command = new Command( CREATE_STREAM_FOO_STATMENT, emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); - final CommandId commandId = new CommandId(CommandId.Type.STREAM, - "foo", - CommandId.Action.CREATE); - final CommandStatusFuture status = mock(CommandStatusFuture.class); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + 
Optional.empty() + ); // When: - handleStatement(command, commandId, Optional.of(status), 0L); + handleStatement(command, COMMAND_ID, Optional.of(status), 0L); // Then: InOrder inOrder = Mockito.inOrder(status); @@ -316,6 +333,99 @@ public void shouldCompleteFutureOnSuccess() { assertEquals(CommandStatus.Status.SUCCESS, argFinalCommandStatus.getValue().getStatus()); } + @Test + public void shouldExecutePlannedCommand() { + // Given: + givenMockPlannedQuery(); + + // When: + handleStatement(statementExecutorWithMocks, plannedCommand, COMMAND_ID, Optional.empty(), 0L); + + // Then: + verify(mockEngine).execute(serviceContext, ConfiguredKsqlPlan.of(plan, emptyMap(), ksqlConfig)); + } + + @Test + public void shouldUpdateStatusOnCompletedPlannedCommand() { + // Given: + givenMockPlannedQuery(); + + // When: + handleStatement( + statementExecutorWithMocks, + plannedCommand, + COMMAND_ID, + Optional.of(status), + 0L + ); + + // Then: + final InOrder inOrder = Mockito.inOrder(status, mockEngine); + inOrder.verify(status).setStatus( + new CommandStatus(Status.EXECUTING, "Executing statement")); + inOrder.verify(mockEngine).execute(any(), any(ConfiguredKsqlPlan.class)); + inOrder.verify(status).setFinalStatus( + new CommandStatus(Status.SUCCESS, "Created query with ID qid")); + } + + @Test + public void shouldSetCorrectFinalStatusOnCompletedPlannedDDLCommand() { + // Given: + when(mockEngine.execute(any(), any(ConfiguredKsqlPlan.class))) + .thenReturn(ExecuteResult.of("result")); + + // When: + handleStatement( + statementExecutorWithMocks, + plannedCommand, + COMMAND_ID, + Optional.of(status), + 0L + ); + + // Then: + verify(status).setFinalStatus(new CommandStatus(Status.SUCCESS, "result")); + } + + @Test + public void shouldStartQueryForPlannedCommand() { + // Given: + givenMockPlannedQuery(); + + // When: + handleStatement(statementExecutorWithMocks, plannedCommand, COMMAND_ID, Optional.empty(), 0L); + + // Then: + verify(mockQueryMetadata).start(); + } + + @Test + @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_INFERRED") + public void shouldExecutePlannedCommandWithMergedConfig() { + // Given: + final Map savedConfigs = ImmutableMap.of("biz", "baz"); + plannedCommand = new Command( + CREATE_STREAM_FOO_STATMENT, + emptyMap(), + savedConfigs, + Optional.of(plan) + ); + final KsqlConfig mockConfig = mock(KsqlConfig.class); + when(mockConfig.getKsqlStreamConfigProps()).thenReturn( + ImmutableMap.of(StreamsConfig.APPLICATION_SERVER_CONFIG, "appid")); + final KsqlConfig mergedConfig = mock(KsqlConfig.class); + when(mockConfig.overrideBreakingConfigsWithOriginalValues(any())).thenReturn(mergedConfig); + givenMockPlannedQuery(); + + // When: + statementExecutorWithMocks.configure(mockConfig); + handleStatement(statementExecutorWithMocks, plannedCommand, COMMAND_ID, Optional.empty(), 0L); + + // Then: + verify(mockConfig).overrideBreakingConfigsWithOriginalValues(savedConfigs); + verify(mockEngine).execute(any(), eq(ConfiguredKsqlPlan.of(plan, emptyMap(), mergedConfig))); + } + @Test public void shouldThrowExceptionIfCommandFails() { // Given: @@ -324,15 +434,14 @@ public void shouldThrowExceptionIfCommandFails() { final Command command = new Command( CREATE_STREAM_FOO_STATMENT, emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); - final CommandId commandId = new CommandId(CommandId.Type.STREAM, - "foo", - CommandId.Action.CREATE); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandStatusFuture status = mock(CommandStatusFuture.class); // When: try 
{ - handleStatement(command, commandId, Optional.of(status), 0L); + handleStatement(command, COMMAND_ID, Optional.of(status), 0L); } catch (KsqlStatementException e) { // Then: assertEquals("Cannot add stream 'FOO': A stream with the same name already exists\n" + @@ -428,7 +537,9 @@ public void shouldEnforceReferentialIntegrity() { final Command dropTableCommand2 = new Command( "drop table table1;", emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId dropTableCommandId2 = new CommandId(CommandId.Type.TABLE, "_TABLE1", CommandId.Action.DROP); handleStatement( @@ -445,7 +556,9 @@ public void shouldEnforceReferentialIntegrity() { final Command dropStreamCommand3 = new Command( "drop stream pageview;", emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId dropStreamCommandId3 = new CommandId(CommandId.Type.STREAM, "_user1pv", CommandId.Action.DROP); handleStatement( @@ -468,7 +581,7 @@ public void shouldSkipStartWhenReplayingLog() { statementExecutorWithMocks.handleRestore( new QueuedCommand( new CommandId(Type.STREAM, name, Action.CREATE), - new Command("CSAS", emptyMap(), emptyMap()), + new Command("CSAS", emptyMap(), emptyMap(), Optional.empty()), Optional.empty(), 0L ) @@ -532,7 +645,7 @@ public void shouldNotCascadeDropStreamCommand() { statementExecutorWithMocks.handleRestore( new QueuedCommand( new CommandId(Type.STREAM, "foo", Action.DROP), - new Command(drop, emptyMap(), emptyMap()), + new Command(drop, emptyMap(), emptyMap(), Optional.empty()), Optional.empty(), 0L ) @@ -559,7 +672,7 @@ public void shouldTerminateAll() { statementExecutorWithMocks.handleStatement( new QueuedCommand( new CommandId(Type.TERMINATE, "-", Action.EXECUTE), - new Command("terminate all", emptyMap(), emptyMap()), + new Command("terminate all", emptyMap(), emptyMap(), Optional.empty()), Optional.empty(), 0L ) @@ -578,7 +691,10 @@ private void createStreamsAndStartTwoPersistentQueries() { + "userid varchar) " + "WITH (kafka_topic = 'pageview_topic_json', " + "value_format = 'json');", - emptyMap(), ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + emptyMap(), + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId csCommandId = new CommandId(CommandId.Type.STREAM, "_CSASStreamGen", CommandId.Action.CREATE); @@ -588,7 +704,10 @@ private void createStreamsAndStartTwoPersistentQueries() { "CREATE STREAM user1pv AS " + "select * from pageview" + " WHERE userid = 'user1';", - emptyMap(), ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + emptyMap(), + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId csasCommandId = new CommandId(CommandId.Type.STREAM, "_CSASGen", @@ -602,7 +721,9 @@ private void createStreamsAndStartTwoPersistentQueries() { + "WINDOW TUMBLING ( SIZE 10 SECONDS) " + "GROUP BY pageid;", emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId ctasCommandId = new CommandId(CommandId.Type.TABLE, "_CTASGen", @@ -618,7 +739,9 @@ private void tryDropThatViolatesReferentialIntegrity() { final Command dropStreamCommand1 = new Command( "drop stream pageview;", emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + 
Optional.empty() + ); final CommandId dropStreamCommandId1 = new CommandId(CommandId.Type.STREAM, "_PAGEVIEW", CommandId.Action.DROP); @@ -657,7 +780,9 @@ private void tryDropThatViolatesReferentialIntegrity() { final Command dropStreamCommand2 = new Command( "drop stream user1pv;", emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId dropStreamCommandId2 = new CommandId(CommandId.Type.STREAM, "_user1pv", CommandId.Action.DROP); handleStatement( @@ -692,7 +817,9 @@ private void tryDropThatViolatesReferentialIntegrity() { final Command dropTableCommand1 = new Command( "drop table table1;", emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId dropTableCommandId1 = new CommandId(CommandId.Type.TABLE, "_TABLE1", CommandId.Action.DROP); handleStatement( @@ -745,7 +872,10 @@ private static void handleStatement( private void terminateQueries() { final Command terminateCommand1 = new Command( "TERMINATE CSAS_USER1PV_1;", - emptyMap(), ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + emptyMap(), + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId terminateCommandId1 = new CommandId(CommandId.Type.STREAM, "_TerminateGen", CommandId.Action.CREATE); handleStatement( @@ -756,7 +886,9 @@ private void terminateQueries() { final Command terminateCommand2 = new Command( "TERMINATE CTAS_TABLE1_2;", emptyMap(), - ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + ksqlConfig.getAllConfigPropsWithSecretsObfuscated(), + Optional.empty() + ); final CommandId terminateCommandId2 = new CommandId(CommandId.Type.TABLE, "_TerminateGen", CommandId.Action.CREATE); handleStatement( @@ -773,12 +905,9 @@ private CommandStatus getCommandStatus(CommandId commandId) { return commandStatus.get(); } - private static KsqlConfig givenCommandConfig(final String name, final Object value) { - return new KsqlConfig(Collections.singletonMap(name, value)); - } - - private static Command givenCommand(final String statementStr, final KsqlConfig ksqlConfig) { - return new Command( - statementStr, emptyMap(), ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + private void givenMockPlannedQuery() { + when(mockQueryMetadata.getQueryId()).thenReturn(QUERY_ID); + when(mockEngine.execute(any(), any(ConfiguredKsqlPlan.class))) + .thenReturn(ExecuteResult.of(mockQueryMetadata)); } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InternalTopicSerdesTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InternalTopicSerdesTest.java new file mode 100644 index 000000000000..19dcd38e356c --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InternalTopicSerdesTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.rest.server.computation; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThat; + +import com.google.common.base.Charsets; +import io.confluent.ksql.execution.expression.tree.ArithmeticBinaryExpression; +import io.confluent.ksql.execution.expression.tree.Expression; +import io.confluent.ksql.execution.expression.tree.IntegerLiteral; +import io.confluent.ksql.schema.Operator; +import java.nio.charset.Charset; +import org.apache.kafka.common.errors.SerializationException; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class InternalTopicSerdesTest { + + private static final Expression EXPRESSION = new ArithmeticBinaryExpression( + Operator.ADD, + new IntegerLiteral(123), + new IntegerLiteral(456) + ); + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + @Test + public void shouldUsePlanMapperForSerialize() { + // When: + final byte[] serialized = InternalTopicSerdes.serializer().serialize("", EXPRESSION); + + // Then: + assertThat(new String(serialized, Charsets.UTF_8), equalTo("\"(123 + 456)\"")); + } + + @Test + public void shouldUsePlanMapperForDeserialize() { + // When: + final Expression deserialized = InternalTopicSerdes.deserializer(Expression.class).deserialize( + "", + "\"(123 + 456)\"".getBytes(Charset.defaultCharset()) + ); + + // Then: + assertThat(deserialized, equalTo(EXPRESSION)); + } + + @Test + public void shouldThrowSerializationExceptionOnSerializeError() { + // Expect: + expectedException.expect(SerializationException.class); + + // When: + InternalTopicSerdes.deserializer(Command.class).deserialize( + "", + "{abc".getBytes(Charset.defaultCharset()) + ); + } +} diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/KafkaConfigStoreTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/KafkaConfigStoreTest.java index 7865dba5b69c..bcf98fa40e3f 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/KafkaConfigStoreTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/KafkaConfigStoreTest.java @@ -29,7 +29,6 @@ import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.confluent.ksql.rest.server.computation.ConfigTopicKey.StringKey; import io.confluent.ksql.rest.server.computation.KafkaConfigStore.KsqlProperties; -import io.confluent.ksql.rest.util.InternalTopicJsonSerdeUtil; import io.confluent.ksql.util.KsqlConfig; import java.nio.charset.StandardCharsets; import java.util.Arrays; @@ -39,6 +38,7 @@ import java.util.ListIterator; import java.util.Map; import java.util.Map.Entry; +import java.util.Optional; import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -70,23 +70,21 @@ public class KafkaConfigStoreTest { ImmutableMap.of(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG, "bad")); private final KsqlProperties properties = new KsqlProperties( - currentConfig.getAllConfigPropsWithSecretsObfuscated() + Optional.of(currentConfig.getAllConfigPropsWithSecretsObfuscated()) ); private final KsqlProperties savedProperties = new KsqlProperties( - savedConfig.getAllConfigPropsWithSecretsObfuscated() + Optional.of(savedConfig.getAllConfigPropsWithSecretsObfuscated()) ); private final KsqlProperties badProperties = new KsqlProperties( - badConfig.getAllConfigPropsWithSecretsObfuscated() + 
Optional.of(badConfig.getAllConfigPropsWithSecretsObfuscated()) ); private final TopicPartition topicPartition = new TopicPartition(TOPIC_NAME, 0); private final List topicPartitionAsList = Collections.singletonList(topicPartition); private final List> log = new LinkedList<>(); - private final Serializer keySerializer - = InternalTopicJsonSerdeUtil.getJsonSerializer(true); - private final Serializer serializer - = InternalTopicJsonSerdeUtil.getJsonSerializer(false); + private final Serializer keySerializer = InternalTopicSerdes.serializer(); + private final Serializer serializer = InternalTopicSerdes.serializer(); @Mock private KafkaConsumer consumerBefore; @@ -369,7 +367,7 @@ public void shouldMergeExistingConfigIfExists() { public void shouldDeserializeEmptyContentsToEmptyProps() { // When: final Deserializer deserializer - = InternalTopicJsonSerdeUtil.getJsonDeserializer(KsqlProperties.class, false); + = InternalTopicSerdes.deserializer(KsqlProperties.class); final KafkaConfigStore.KsqlProperties ksqlProperties = deserializer.deserialize(TOPIC_NAME, "{}".getBytes(StandardCharsets.UTF_8)); @@ -377,6 +375,20 @@ public void shouldDeserializeEmptyContentsToEmptyProps() { assertThat(ksqlProperties.getKsqlProperties(), equalTo(Collections.emptyMap())); } + @Test + public void shouldDeserializeProps() { + // When: + final Deserializer deserializer + = InternalTopicSerdes.deserializer(KsqlProperties.class); + final KafkaConfigStore.KsqlProperties ksqlProperties = deserializer.deserialize( + TOPIC_NAME, + "{\"ksqlProperties\": {\"foo\": \"bar\"}}".getBytes(StandardCharsets.UTF_8) + ); + + // Then: + assertThat(ksqlProperties.getKsqlProperties(), equalTo(ImmutableMap.of("foo", "bar"))); + } + private static Map filterNullValues(final Map map) { return map.entrySet() .stream() diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java index cee9143c7c0f..a39474f64f47 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java @@ -116,27 +116,25 @@ private KsqlEngine createKsqlEngine() { private static class FakeCommandQueue implements CommandQueue { private final List commandLog; - private final CommandIdAssigner commandIdAssigner; private int offset; private Producer transactionalProducer; FakeCommandQueue(final List commandLog, final Producer transactionalProducer) { - this.commandIdAssigner = new CommandIdAssigner(); this.commandLog = commandLog; this.transactionalProducer = transactionalProducer; } @Override - public QueuedCommandStatus enqueueCommand(final ConfiguredStatement statement, final Producer transactionalProducer) { - final CommandId commandId = commandIdAssigner.getCommandId(statement.getStatement()); + public QueuedCommandStatus enqueueCommand( + final CommandId commandId, + final Command command, + final Producer transactionalProducer + ) { final long commandSequenceNumber = commandLog.size(); commandLog.add( new QueuedCommand( commandId, - new Command( - statement.getStatementText(), - Collections.emptyMap(), - statement.getConfig().getAllConfigPropsWithSecretsObfuscated()), + command, Optional.empty(), commandSequenceNumber)); return new QueuedCommandStatus(commandSequenceNumber, new CommandStatusFuture(commandId)); @@ -615,7 +613,7 @@ public void shouldNotDeleteTopicsOnRecoveryEvenIfLegacyDropCommandAlreadyInComma 
shouldRecover(ImmutableList.of( new QueuedCommand( new CommandId(Type.STREAM, "B", Action.DROP), - new Command("DROP STREAM B DELETE TOPIC;", ImmutableMap.of(), ImmutableMap.of()), + new Command("DROP STREAM B DELETE TOPIC;", ImmutableMap.of(), ImmutableMap.of(), Optional.empty()), Optional.empty(), 0L ) @@ -625,7 +623,7 @@ public void shouldNotDeleteTopicsOnRecoveryEvenIfLegacyDropCommandAlreadyInComma } @Test - public void shouldRecoverQueryIDsByOffset() { + public void shouldRecoverQueryIDs() { commands.addAll( ImmutableList.of( new QueuedCommand( @@ -634,7 +632,8 @@ public void shouldRecoverQueryIDsByOffset() { "CREATE STREAM A (COLUMN STRING) " + "WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", Collections.emptyMap(), - null + null, + Optional.empty() ), Optional.empty(), 2L @@ -644,7 +643,8 @@ public void shouldRecoverQueryIDsByOffset() { new Command( "CREATE STREAM C AS SELECT * FROM A;", Collections.emptyMap(), - null + null, + Optional.empty() ), Optional.empty(), 7L diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/ValidatedCommandFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/ValidatedCommandFactoryTest.java new file mode 100644 index 000000000000..b8040e8519b2 --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/ValidatedCommandFactoryTest.java @@ -0,0 +1,233 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.rest.server.computation; + +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableList; +import io.confluent.ksql.KsqlExecutionContext; +import io.confluent.ksql.engine.KsqlPlan; +import io.confluent.ksql.parser.KsqlParser.PreparedStatement; +import io.confluent.ksql.parser.tree.CreateStream; +import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.parser.tree.TerminateQuery; +import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan; +import io.confluent.ksql.query.QueryId; +import io.confluent.ksql.rest.util.TerminateCluster; +import io.confluent.ksql.services.ServiceContext; +import io.confluent.ksql.statement.ConfiguredStatement; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlStatementException; +import io.confluent.ksql.util.PersistentQueryMetadata; +import java.util.Map; +import java.util.Optional; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class ValidatedCommandFactoryTest { + private static final QueryId QUERY_ID = new QueryId("FOO"); + + @Mock + private KsqlExecutionContext executionContext; + @Mock + private ServiceContext serviceContext; + @Mock + private TerminateQuery terminateQuery; + @Mock + private CreateStream plannedQuery; + @Mock + private KsqlConfig config; + @Mock + private Map overrides; + @Mock + private KsqlPlan plan; + @Mock + private PersistentQueryMetadata query1; + @Mock + private PersistentQueryMetadata query2; + + private ConfiguredStatement configuredStatement; + private ValidatedCommandFactory commandFactory; + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + @Before + public void setup() { + commandFactory = new ValidatedCommandFactory(config); + } + + @Test + public void shouldValidateTerminateCluster() { + // Given: + configuredStatement = configuredStatement( + TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT, + terminateQuery + ); + + // When: + final Command command = commandFactory.create(configuredStatement, executionContext); + + // Then: + assertThat(command, is(Command.of(configuredStatement))); + } + + @Test + public void shouldFailValidationForTerminateUnknownQuery() { + // Given: + configuredStatement = configuredStatement("TERMINATE X", terminateQuery); + when(terminateQuery.getQueryId()).thenReturn(Optional.of(QUERY_ID)); + when(executionContext.getPersistentQuery(QUERY_ID)).thenReturn(Optional.empty()); + + // Then: + expectedException.expect(KsqlStatementException.class); + expectedException.expectMessage("Unknown queryId"); + + // When: + commandFactory.create(configuredStatement, executionContext); + + } + + @Test + public void shouldCreateCommandForTerminateQuery() { + // Given: + givenTerminate(); + + // When: + final Command command = commandFactory.create(configuredStatement, executionContext); + + // Then: + assertThat(command, is(Command.of(configuredStatement))); + } + + @Test + public void shouldValidateTerminateQuery() { + // Given: + givenTerminate(); + + // When: + commandFactory.create(configuredStatement, executionContext); + + // Then: + 
verify(executionContext).getPersistentQuery(QUERY_ID); + verify(query1).close(); + } + + @Test + public void shouldValidateTerminateAllQuery() { + // Given: + givenTerminateAll(); + + // When: + commandFactory.create(configuredStatement, executionContext); + + // Then: + verify(query1).close(); + verify(query2).close(); + } + + @Test + public void shouldCreateCommandForTerminateAllQuery() { + // Given: + givenTerminateAll(); + + // When: + final Command command = commandFactory.create(configuredStatement, executionContext); + + // Then: + assertThat(command, is(Command.of(configuredStatement))); + } + + @Test + public void shouldValidatePlannedQuery() { + // Given: + givenPlannedQuery(); + + // When: + commandFactory.create(configuredStatement, executionContext); + + // Then: + verify(executionContext).plan(serviceContext, configuredStatement); + verify(executionContext).execute( + serviceContext, + ConfiguredKsqlPlan.of(plan, overrides, config) + ); + } + + @Test + public void shouldCreateCommandForPlannedQuery() { + // Given: + givenPlannedQuery(); + + // When: + final Command command = commandFactory.create(configuredStatement, executionContext); + + // Then: + assertThat(command, is(Command.of(ConfiguredKsqlPlan.of(plan, overrides, config)))); + } + + @Test + public void shouldCreateCommandWithoutPlanForPlannedQueryIfFeatureOff() { + // Given: + givenPlannedQuery(); + when(config.getBoolean(KsqlConfig.KSQL_EXECUTION_PLANS_ENABLE)).thenReturn(false); + + // When: + final Command command = commandFactory.create(configuredStatement, executionContext); + + // Then: + assertThat(command, is(Command.of(configuredStatement))); + } + + private void givenTerminate() { + configuredStatement = configuredStatement("TERMINATE FOO", terminateQuery); + when(terminateQuery.getQueryId()).thenReturn(Optional.of(QUERY_ID)); + when(executionContext.getPersistentQuery(any())).thenReturn(Optional.of(query1)); + } + + private void givenTerminateAll() { + configuredStatement = configuredStatement("TERMINATE ALL", terminateQuery); + when(terminateQuery.getQueryId()).thenReturn(Optional.empty()); + when(executionContext.getPersistentQueries()).thenReturn(ImmutableList.of(query1, query2)); + } + + private void givenPlannedQuery() { + configuredStatement = configuredStatement("CREATE STREAM", plannedQuery); + when(executionContext.plan(any(), any())).thenReturn(plan); + when(executionContext.getServiceContext()).thenReturn(serviceContext); + when(config.getBoolean(KsqlConfig.KSQL_EXECUTION_PLANS_ENABLE)).thenReturn(true); + } + + private ConfiguredStatement configuredStatement( + final String text, + final T statement + ) { + return ConfiguredStatement.of( + PreparedStatement.of(text, statement), + overrides, + config + ); + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java index 12026386224e..cb5e539da48f 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java @@ -83,7 +83,7 @@ public void setUp() { when(ksqlEngine.prepare(any())) .thenAnswer(invocation -> new DefaultKsqlParser().prepare(invocation.getArgument(0), metaStore)); - when(distributor.execute(any(), any(), any(), any(), any())).thenReturn(Optional.of(entity)); + when(distributor.execute(any(), any(), 
any())).thenReturn(Optional.of(entity)); doNothing().when(sync).waitFor(any(), any()); } @@ -130,8 +130,6 @@ public void shouldDefaultToDistributor() { preparedStatement(instanceOf(CreateStream.class)), ImmutableMap.of(), ksqlConfig))), - eq(statements.get(0)), - eq(ImmutableMap.of()), eq(ksqlEngine), eq(serviceContext) ); @@ -159,8 +157,6 @@ public void shouldDistributeProperties() { preparedStatement(instanceOf(CreateStream.class)), ImmutableMap.of("x", "y"), ksqlConfig))), - eq(statements.get(0)), - eq(ImmutableMap.of("x", "y")), eq(ksqlEngine), eq(serviceContext) ); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java index b9c512376480..813371df7b54 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -16,7 +16,6 @@ package io.confluent.ksql.rest.server.resources; import static io.confluent.ksql.parser.ParserMatchers.configured; -import static io.confluent.ksql.parser.ParserMatchers.preparedStatement; import static io.confluent.ksql.parser.ParserMatchers.preparedStatementText; import static io.confluent.ksql.rest.entity.CommandId.Action.CREATE; import static io.confluent.ksql.rest.entity.CommandId.Action.DROP; @@ -67,6 +66,7 @@ import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.engine.KsqlEngineTestUtil; +import io.confluent.ksql.engine.KsqlPlan; import io.confluent.ksql.exception.KsqlTopicAuthorizationException; import io.confluent.ksql.execution.ddl.commands.KsqlTopic; import io.confluent.ksql.execution.expression.tree.StringLiteral; @@ -81,12 +81,10 @@ import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.parser.properties.with.CreateSourceProperties; import io.confluent.ksql.parser.tree.CreateStream; -import io.confluent.ksql.parser.tree.CreateStreamAsSelect; import io.confluent.ksql.parser.tree.Statement; import io.confluent.ksql.parser.tree.TableElement; import io.confluent.ksql.parser.tree.TableElement.Namespace; import io.confluent.ksql.parser.tree.TableElements; -import io.confluent.ksql.parser.tree.TerminateQuery; import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.entity.ClusterTerminateRequest; import io.confluent.ksql.rest.entity.CommandId; @@ -169,7 +167,10 @@ import org.apache.kafka.streams.StreamsConfig; import org.eclipse.jetty.http.HttpStatus.Code; import org.hamcrest.CoreMatchers; +import org.hamcrest.Description; +import org.hamcrest.Matcher; import org.hamcrest.Matchers; +import org.hamcrest.TypeSafeMatcher; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -310,7 +311,7 @@ public void setUp() throws IOException, RestClientException { addTestTopicAndSources(); - when(commandStore.enqueueCommand(any(), any(Producer.class))) + when(commandStore.enqueueCommand(any(), any(), any(Producer.class))) .thenReturn(commandStatus) .thenReturn(commandStatus1) .thenReturn(commandStatus2); @@ -689,29 +690,32 @@ public void shouldFailPrintTopic() { @Test public void shouldDistributePersistentQuery() { + // Given: + final String sql = "CREATE STREAM S AS SELECT * FROM test_stream;"; + // When: - makeSingleRequest( - "CREATE STREAM S AS SELECT * FROM test_stream;", CommandStatusEntity.class); + makeSingleRequest(sql, CommandStatusEntity.class); // Then: 
verify(commandStore).enqueueCommand( - argThat(is( - configured( - preparedStatement( - "CREATE STREAM S AS SELECT * FROM test_stream;", - CreateStreamAsSelect.class))) - ), any(Producer.class) + any(), + argThat(is(commandWithStatement(sql))), + any(Producer.class) ); } @Test - public void shouldDistributeWithConfig() { + public void shouldDistributeWithStreamsProperties() { // When: makeSingleRequest(VALID_EXECUTABLE_REQUEST, KsqlEntity.class); // Then: verify(commandStore).enqueueCommand( - argThat(configured(VALID_EXECUTABLE_REQUEST.getStreamsProperties(), ksqlConfig)), any(Producer.class)); + any(), + argThat(is(commandWithOverwrittenProperties( + VALID_EXECUTABLE_REQUEST.getStreamsProperties()))), + any(Producer.class) + ); } @Test @@ -809,7 +813,7 @@ public void shouldNotDistributeCreateStatementIfTopicDoesNotExist() { } @Test - public void shouldDistributeAvoCreateStatementWithColumns() { + public void shouldDistributeAvroCreateStatementWithColumns() { // When: makeSingleRequest( "CREATE STREAM S (foo INT) WITH(VALUE_FORMAT='AVRO', KAFKA_TOPIC='orders-topic');", @@ -817,10 +821,10 @@ public void shouldDistributeAvoCreateStatementWithColumns() { // Then: verify(commandStore).enqueueCommand( - argThat(is(configured(preparedStatement( - "CREATE STREAM S (foo INT) WITH(VALUE_FORMAT='AVRO', KAFKA_TOPIC='orders-topic');", - CreateStream.class) - ))), any(Producer.class) + any(), + argThat(is(commandWithStatement( + "CREATE STREAM S (foo INT) WITH(VALUE_FORMAT='AVRO', KAFKA_TOPIC='orders-topic');"))), + any(Producer.class) ); } @@ -845,8 +849,12 @@ public void shouldSupportTopicInferenceInVerification() { makeRequest(sql); // Then: - verify(sandbox, times(2)).execute(any(SandboxedServiceContext.class), eq(configuredStatement)); - verify(commandStore).enqueueCommand(argThat(configured(preparedStatementText(sql))), any(Producer.class)); + verify(sandbox).plan(any(SandboxedServiceContext.class), eq(configuredStatement)); + verify(commandStore).enqueueCommand( + any(), + argThat(is(commandWithStatement(sql))), + any(Producer.class) + ); } @Test @@ -870,7 +878,11 @@ public void shouldSupportTopicInferenceInExecution() { makeRequest(sql); // Then: - verify(commandStore).enqueueCommand(eq(configured), any(Producer.class)); + verify(commandStore).enqueueCommand( + any(), + argThat(is(commandWithStatement(sqlWithTopic))), + any() + ); } @Test @@ -927,8 +939,12 @@ public void shouldSupportSchemaInference() { makeRequest(sql); // Then: - verify(sandbox, times(2)).execute(any(SandboxedServiceContext.class), eq(CFG_0_WITH_SCHEMA)); - verify(commandStore).enqueueCommand(eq(CFG_1_WITH_SCHEMA), any(Producer.class)); + verify(sandbox).plan(any(SandboxedServiceContext.class), eq(CFG_0_WITH_SCHEMA)); + verify(commandStore).enqueueCommand( + any(), + argThat(is(commandWithStatement(CFG_1_WITH_SCHEMA.getStatementText()))), + any() + ); } @Test @@ -1133,7 +1149,7 @@ public void shouldFailMultipleStatementsAtomically() { ); // Then: - verify(commandStore, never()).enqueueCommand(any(), any(Producer.class)); + verify(commandStore, never()).enqueueCommand(any(), any(), any(Producer.class)); } @Test @@ -1149,10 +1165,11 @@ public void shouldDistributeTerminateQuery() { final CommandStatusEntity result = makeSingleRequest(terminateSql, CommandStatusEntity.class); // Then: - verify(commandStore) - .enqueueCommand( - argThat(is(configured(preparedStatement(terminateSql, TerminateQuery.class)))), - any(Producer.class)); + verify(commandStore).enqueueCommand( + any(), + argThat(is(commandWithStatement(terminateSql))), 
+ any() + ); assertThat(result.getStatementText(), is(terminateSql)); } @@ -1170,13 +1187,11 @@ public void shouldDistributeTerminateAllQueries() { final CommandStatusEntity result = makeSingleRequest(terminateSql, CommandStatusEntity.class); // Then: - verify(commandStore) - .enqueueCommand( - argThat(is(configured(preparedStatement( - is(terminateSql), - is(TerminateQuery.all(Optional.empty())))) - )), - any(Producer.class)); + verify(commandStore).enqueueCommand( + any(), + argThat(is(commandWithStatement(terminateSql))), + any() + ); assertThat(result.getStatementText(), is(terminateSql)); } @@ -1307,7 +1322,7 @@ public void shouldReturn5xxOnStatementSystemError() { final String ksqlString = "CREATE STREAM test_explain AS SELECT * FROM test_stream;"; givenMockEngine(); - when(sandbox.execute(any(), any(ConfiguredStatement.class))) + when(sandbox.plan(any(), any(ConfiguredStatement.class))) .thenThrow(new RuntimeException("internal error")); // When: @@ -1332,11 +1347,11 @@ public void shouldSetProperty() { // Then: verify(commandStore).enqueueCommand( - argThat(is(configured( - preparedStatementText(csas), - ImmutableMap.of(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"), - ksqlConfig))), - any(Producer.class)); + any(), + argThat(is(commandWithOverwrittenProperties( + ImmutableMap.of(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")))), + any() + ); assertThat(results, hasSize(1)); assertThat(results.get(0).getStatementText(), is(csas)); @@ -1355,11 +1370,10 @@ public void shouldSetPropertyOnlyOnCommandsFollowingTheSetStatement() { // Then: verify(commandStore).enqueueCommand( - argThat(is(configured( - preparedStatementText(csas), - ImmutableMap.of(), - ksqlConfig))), - any(Producer.class)); + any(), + argThat(is(commandWithOverwrittenProperties(emptyMap()))), + any() + ); } @Test @@ -1406,8 +1420,10 @@ public void shouldUnsetProperty() { // Then: verify(commandStore).enqueueCommand( - argThat(is(configured(preparedStatementText(csas), emptyMap(), ksqlConfig))), - any(Producer.class)); + any(), + argThat(is(commandWithOverwrittenProperties(emptyMap()))), + any(Producer.class) + ); assertThat(result.getStatementText(), is(csas)); } @@ -1438,8 +1454,10 @@ public void shouldScopeSetPropertyToSingleRequest() { // Then: verify(commandStore).enqueueCommand( - argThat(is(configured(preparedStatementText(csas), emptyMap(), ksqlConfig))), - any(Producer.class)); + any(), + argThat(is(commandWithOverwrittenProperties(emptyMap()))), + any() + ); } @Test @@ -1485,7 +1503,7 @@ public void shouldFailAllCommandsIfWouldReachActivePersistentQueriesLimit() { containsString("would cause the number of active, persistent queries " + "to exceed the configured limit")); - verify(commandStore, never()).enqueueCommand(any(), any(Producer.class)); + verify(commandStore, never()).enqueueCommand(any(), any(), any()); } @Test @@ -1712,18 +1730,16 @@ public void shouldHandleTerminateRequestCorrectly() { equalTo(CommandStatus.Status.QUEUED)); verify(transactionalProducer, times(1)).initTransactions(); verify(commandStore).enqueueCommand( - argThat(is(configured( - preparedStatementText(TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT), - Collections.singletonMap( - ClusterTerminateRequest.DELETE_TOPIC_LIST_PROP, ImmutableList.of("Foo")), - ksqlConfig))), - any(Producer.class)); + any(), + argThat(is(commandWithStatement(TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT))), + any() + ); } @Test public void shouldFailIfCannotWriteTerminateCommand() { // Given: - when(commandStore.enqueueCommand(any(), 
any(Producer.class))) + when(commandStore.enqueueCommand(any(), any(), any(Producer.class))) .thenThrow(new KsqlException("")); // When: @@ -1767,7 +1783,7 @@ public void shouldNeverEnqueueIfErrorIsThrown() { Code.BAD_REQUEST); // Then: - verify(commandStore, never()).enqueueCommand(any(), any(Producer.class)); + verify(commandStore, never()).enqueueCommand(any(), any(), any(Producer.class)); } @Test @@ -1848,7 +1864,7 @@ public void shouldFailIfCreateAsSelectExistingSourceTable() { @Test public void shouldThrowServerErrorOnFailedToDistribute() { // Given: - when(commandStore.enqueueCommand(any(), any(Producer.class))) + when(commandStore.enqueueCommand(any(), any(), any(Producer.class))) .thenThrow(new KsqlException("blah")); final String statement = "CREATE STREAM " + streamName + " AS SELECT * FROM test_stream;"; @@ -1900,6 +1916,7 @@ private void givenMockEngine() { .thenAnswer(invocation -> realEngine.prepare(invocation.getArgument(0))); when(sandbox.prepare(any())) .thenAnswer(invocation -> realEngine.createSandbox(serviceContext).prepare(invocation.getArgument(0))); + when(sandbox.plan(any(), any())).thenReturn(mock(KsqlPlan.class)); when(ksqlEngine.createSandbox(any())).thenReturn(sandbox); when(ksqlEngine.getMetaStore()).thenReturn(metaStore); when(topicInjectorFactory.apply(ksqlEngine)).thenReturn(topicInjector); @@ -2186,6 +2203,36 @@ private static void registerSchema(final SchemaRegistryClient schemaRegistryClie avroSchema); } + private static Matcher commandWithStatement(final String statement) { + return new TypeSafeMatcher() { + @Override + protected boolean matchesSafely(final Command command) { + return command.getStatement().equals(statement); + } + + @Override + public void describeTo(final Description description) { + description.appendText(statement); + } + }; + } + + private static Matcher commandWithOverwrittenProperties( + final Map properties + ) { + return new TypeSafeMatcher() { + @Override + protected boolean matchesSafely(final Command command) { + return command.getOverwriteProperties().equals(properties); + } + + @Override + public void describeTo(final Description description) { + description.appendText(properties.toString()); + } + }; + } + @SuppressWarnings("SameParameterValue") private void givenAvroSchemaNotEvolveable(final String topicName) { final org.apache.avro.Schema schema = org.apache.avro.Schema.create(Type.INT); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/utils/TestUtils.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/utils/TestUtils.java index 6281cf2ed45d..2f62dcc837eb 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/utils/TestUtils.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/utils/TestUtils.java @@ -23,6 +23,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Optional; public class TestUtils { @@ -32,13 +33,13 @@ public List> getAllPriorCommandRecords() { final Command csCommand = new Command("CREATE STREAM pageview " + "(viewtime bigint, pageid varchar, userid varchar) " + "WITH (kafka_topic='pageview_topic_json', value_format='json');", - Collections.emptyMap(), Collections.emptyMap()); + Collections.emptyMap(), Collections.emptyMap(), Optional.empty()); final CommandId csCommandId = new CommandId(CommandId.Type.STREAM, "_CSASStreamGen", CommandId.Action.CREATE); priorCommands.add(new Pair<>(csCommandId, csCommand)); final Command csasCommand = new Command("CREATE STREAM user1pv " + " AS select * 
from pageview WHERE userid = 'user1';", - Collections.emptyMap(), Collections.emptyMap()); + Collections.emptyMap(), Collections.emptyMap(), Optional.empty()); final CommandId csasCommandId = new CommandId(CommandId.Type.STREAM, "_CSASGen", CommandId.Action.CREATE); priorCommands.add(new Pair<>(csasCommandId, csasCommand)); @@ -48,7 +49,7 @@ public List> getAllPriorCommandRecords() { + " AS select * from pageview window tumbling(size 5 " + "second) WHERE userid = " + "'user1' group by pageid;", - Collections.emptyMap(), Collections.emptyMap()); + Collections.emptyMap(), Collections.emptyMap(), Optional.empty()); final CommandId ctasCommandId = new CommandId(CommandId.Type.TABLE, "_CTASGen", CommandId.Action.CREATE); priorCommands.add(new Pair<>(ctasCommandId, ctasCommand)); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java index c796eaf33b42..4a53092733a9 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java @@ -21,6 +21,7 @@ import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -41,13 +42,13 @@ import io.confluent.ksql.parser.tree.CreateStream; import io.confluent.ksql.parser.tree.Explain; import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.rest.server.computation.ValidatedCommandFactory; import io.confluent.ksql.services.SandboxedServiceContext; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.services.TestServiceContext; import io.confluent.ksql.statement.Injector; import io.confluent.ksql.statement.InjectorChain; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlConstants; import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.KsqlStatementException; import io.confluent.ksql.util.Sandbox; @@ -79,6 +80,8 @@ public class RequestValidatorTest { private Injector schemaInjector; @Mock private Injector topicInjector; + @Mock + private ValidatedCommandFactory distributedStatementValidator; private ServiceContext serviceContext; private MutableMetaStore metaStore; @@ -88,8 +91,6 @@ public class RequestValidatorTest { @Before public void setUp() { metaStore = new MetaStoreImpl(new InternalFunctionRegistry()); - when(ksqlEngine.parse(any())) - .thenAnswer(inv -> new DefaultKsqlParser().parse(inv.getArgument(0))); when(ksqlEngine.prepare(any())) .thenAnswer(invocation -> new DefaultKsqlParser().prepare(invocation.getArgument(0), metaStore)); @@ -135,7 +136,7 @@ public void shouldCallStatementValidator() { } @Test - public void shouldExecuteOnEngineIfNoCustomExecutor() { + public void shouldExecuteOnDistributedStatementValidatorIfNoCustomExecutor() { // Given: final List statements = givenParsed("CREATE STREAM foo WITH (kafka_topic='foo', value_format='json');"); @@ -144,9 +145,10 @@ public void shouldExecuteOnEngineIfNoCustomExecutor() { validator.validate(serviceContext, statements, ImmutableMap.of(), "sql"); // Then: - verify(ksqlEngine, times(1)).execute( + verify(distributedStatementValidator).create( + 
argThat(configured(preparedStatement(instanceOf(CreateStream.class)))), eq(serviceContext), - argThat(configured(preparedStatement(instanceOf(CreateStream.class)))) + eq(executionContext) ); } @@ -263,9 +265,10 @@ public void shouldExecuteWithSpecifiedServiceContext() { validator.validate(otherServiceContext, statements, ImmutableMap.of(), "sql"); // Then: - verify(executionContext, times(1)).execute( - argThat(is(otherServiceContext)), - argThat(configured(preparedStatement(instanceOf(CreateStream.class)))) + verify(distributedStatementValidator).create( + argThat(configured(preparedStatement(instanceOf(CreateStream.class)))), + same(otherServiceContext), + any() ); } @@ -280,7 +283,8 @@ private void givenRequestValidator( customValidators, (ec, sc) -> InjectorChain.of(schemaInjector, topicInjector), (sc) -> executionContext, - ksqlConfig + ksqlConfig, + distributedStatementValidator ); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/TerminateQueryValidatorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/TerminateQueryValidatorTest.java deleted file mode 100644 index 8963ef4d6a83..000000000000 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/TerminateQueryValidatorTest.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2019 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.rest.server.validation; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import io.confluent.ksql.engine.KsqlEngine; -import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.tree.TerminateQuery; -import io.confluent.ksql.query.QueryId; -import io.confluent.ksql.services.ServiceContext; -import io.confluent.ksql.statement.ConfiguredStatement; -import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlStatementException; -import io.confluent.ksql.util.PersistentQueryMetadata; -import java.util.Optional; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class TerminateQueryValidatorTest { - - private static final KsqlConfig KSQL_CONFIG = new KsqlConfig(ImmutableMap.of()); - - @Rule - public final ExpectedException expectedException = ExpectedException.none(); - - @Mock - private PersistentQueryMetadata query0; - @Mock - private PersistentQueryMetadata query1; - @Mock - private KsqlEngine engine; - @Mock - private ServiceContext serviceContext; - - @Test - public void shouldFailOnTerminateUnknownQueryId() { - // Expect: - expectedException.expect(KsqlStatementException.class); - expectedException.expectMessage("Unknown queryId"); - - // When: - CustomValidators.TERMINATE_QUERY.validate( - configuredStmt(TerminateQuery.query(Optional.empty(), new QueryId("id"))), - ImmutableMap.of(), - engine, - serviceContext - ); - } - - @Test - public void shouldValidateKnownQueryId() { - // Given: - when(engine.getPersistentQuery(any())).thenReturn(Optional.of(query0)); - - // When: - CustomValidators.TERMINATE_QUERY.validate( - configuredStmt(TerminateQuery.query(Optional.empty(), new QueryId("id"))), - ImmutableMap.of(), - engine, - serviceContext - ); - - // Then: - verify(query0).close(); - } - - @Test - public void shouldValidateTerminateAllQueries() { - // Given: - when(engine.getPersistentQueries()).thenReturn(ImmutableList.of(query0, query1)); - - // When: - CustomValidators.TERMINATE_QUERY.validate( - configuredStmt(TerminateQuery.all(Optional.empty())), - ImmutableMap.of(), - engine, - serviceContext - ); - - // Then: - verify(query0).close(); - verify(query1).close(); - } - - private static ConfiguredStatement configuredStmt( - final TerminateQuery terminateQuery - ) { - return ConfiguredStatement.of( - PreparedStatement.of("meh", terminateQuery), - ImmutableMap.of(), - KSQL_CONFIG - ); - } -} - diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/CommandStatus.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/CommandStatus.java index 0d6fb5a08168..ec0c21710a37 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/CommandStatus.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/CommandStatus.java @@ -64,4 +64,9 @@ public boolean equals(final Object o) { public int hashCode() { return Objects.hash(getStatus(), getMessage()); } + + @Override + public String toString() { + return status.name() + ": " + message; + } } From e3a7279f8b320f862dee0d1e447b8b4ecb625b18 Mon Sep 17 00:00:00 2001 From: Confluent Jenkins Bot Date: Fri, 13 Dec 2019 16:48:47 +0000 Subject: 
[PATCH 032/123] Set Confluent to 5.3.2, Kafka to 5.3.2.

---
 build-tools/pom.xml                 |  2 +-
 docs/conf.py                        |  2 +-
 ksql-benchmark/pom.xml              |  2 +-
 ksql-cli/pom.xml                    |  2 +-
 ksql-clickstream-demo/pom.xml       |  2 +-
 ksql-common/pom.xml                 |  2 +-
 ksql-console-scripts/pom.xml        |  2 +-
 ksql-engine/pom.xml                 |  2 +-
 ksql-etc/pom.xml                    |  2 +-
 ksql-examples/pom.xml               |  2 +-
 ksql-functional-tests/pom.xml       |  2 +-
 ksql-metastore/pom.xml              |  2 +-
 ksql-package/pom.xml                |  2 +-
 ksql-parser/pom.xml                 |  4 ++--
 ksql-rest-app/pom.xml               |  2 +-
 ksql-rocksdb-config-setter/pom.xml  |  2 +-
 ksql-serde/pom.xml                  |  2 +-
 ksql-test-util/pom.xml              |  2 +-
 ksql-tools/pom.xml                  |  2 +-
 ksql-udf-quickstart/pom.xml         |  2 +-
 ksql-udf/pom.xml                    |  2 +-
 ksql-version-metrics-client/pom.xml |  2 +-
 licenses/licenses.html              | 12 ++++++------
 pom.xml                             |  4 ++--
 24 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/build-tools/pom.xml b/build-tools/pom.xml
index f0f3f8a73704..f18e7016af75 100644
--- a/build-tools/pom.xml
+++ b/build-tools/pom.xml
@@ -19,6 +19,6 @@
   4.0.0
   io.confluent
   build-tools
-  5.3.2-SNAPSHOT
+  5.3.2
   Build Tools
diff --git a/docs/conf.py b/docs/conf.py
index 6cfe7ae2c868..74b91dbd9845 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,7 +59,7 @@ def setup(app):
 # The short X.Y version.
 version = '5.3'
 # The full version, including alpha/beta/rc tags.
-release = '5.3.2-SNAPSHOT'
+release = '5.3.2'

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/ksql-benchmark/pom.xml b/ksql-benchmark/pom.xml
index 1c9e620ce989..b7e2fcada262 100644
--- a/ksql-benchmark/pom.xml
+++ b/ksql-benchmark/pom.xml
@@ -47,7 +47,7 @@ questions.
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-benchmark
diff --git a/ksql-cli/pom.xml b/ksql-cli/pom.xml
index f0807e6b167b..3c9b5956a0a0 100644
--- a/ksql-cli/pom.xml
+++ b/ksql-cli/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-cli
diff --git a/ksql-clickstream-demo/pom.xml b/ksql-clickstream-demo/pom.xml
index bdc701df5816..5257cd4f6324 100644
--- a/ksql-clickstream-demo/pom.xml
+++ b/ksql-clickstream-demo/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   io.confluent.ksql
diff --git a/ksql-common/pom.xml b/ksql-common/pom.xml
index 3f8adc0adbc0..5af3229a8103 100644
--- a/ksql-common/pom.xml
+++ b/ksql-common/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-common
diff --git a/ksql-console-scripts/pom.xml b/ksql-console-scripts/pom.xml
index 630280e8388b..7f994090cae2 100644
--- a/ksql-console-scripts/pom.xml
+++ b/ksql-console-scripts/pom.xml
@@ -22,7 +22,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   io.confluent.ksql
diff --git a/ksql-engine/pom.xml b/ksql-engine/pom.xml
index 794547724c89..906f5d9d3d93 100644
--- a/ksql-engine/pom.xml
+++ b/ksql-engine/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-engine
diff --git a/ksql-etc/pom.xml b/ksql-etc/pom.xml
index 095c7869457c..d467982ba98a 100644
--- a/ksql-etc/pom.xml
+++ b/ksql-etc/pom.xml
@@ -22,7 +22,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   io.confluent.ksql
diff --git a/ksql-examples/pom.xml b/ksql-examples/pom.xml
index 17ab44b46eaa..1f39fca850e5 100644
--- a/ksql-examples/pom.xml
+++ b/ksql-examples/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-examples
diff --git a/ksql-functional-tests/pom.xml b/ksql-functional-tests/pom.xml
index 91e445ed365e..97657057c5f2 100644
--- a/ksql-functional-tests/pom.xml
+++ b/ksql-functional-tests/pom.xml
@@ -5,7 +5,7 @@
   ksql-parent
   io.confluent.ksql
-  5.3.2-SNAPSHOT
+  5.3.2
   4.0.0
diff --git a/ksql-metastore/pom.xml b/ksql-metastore/pom.xml
index 7db629e3ebf1..5bb7a504fc57 100644
--- a/ksql-metastore/pom.xml
+++ b/ksql-metastore/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-metastore
diff --git a/ksql-package/pom.xml b/ksql-package/pom.xml
index 1c18d0f7c619..f8220dc2f0f6 100644
--- a/ksql-package/pom.xml
+++ b/ksql-package/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-package
diff --git a/ksql-parser/pom.xml b/ksql-parser/pom.xml
index 6b4dae6eb549..2cee985e9d4e 100644
--- a/ksql-parser/pom.xml
+++ b/ksql-parser/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-parser
@@ -40,7 +40,7 @@
   io.confluent.ksql
   ksql-metastore
-  5.3.2-SNAPSHOT
+  5.3.2
   test-jar
   test
diff --git a/ksql-rest-app/pom.xml b/ksql-rest-app/pom.xml
index d89d37a5f078..10c34d6f0c0d 100644
--- a/ksql-rest-app/pom.xml
+++ b/ksql-rest-app/pom.xml
@@ -23,7 +23,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-rest-app
diff --git a/ksql-rocksdb-config-setter/pom.xml b/ksql-rocksdb-config-setter/pom.xml
index 3b1c9ee3a5e2..f44457573047 100644
--- a/ksql-rocksdb-config-setter/pom.xml
+++ b/ksql-rocksdb-config-setter/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-rocksdb-config-setter
diff --git a/ksql-serde/pom.xml b/ksql-serde/pom.xml
index aeb95f7820d3..f64ff296cf7e 100644
--- a/ksql-serde/pom.xml
+++ b/ksql-serde/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-serde
diff --git a/ksql-test-util/pom.xml b/ksql-test-util/pom.xml
index 14fe1eb92e99..fa3c5c5583c2 100644
--- a/ksql-test-util/pom.xml
+++ b/ksql-test-util/pom.xml
@@ -20,7 +20,7 @@
   ksql-parent
   io.confluent.ksql
-  5.3.2-SNAPSHOT
+  5.3.2
   4.0.0
diff --git a/ksql-tools/pom.xml b/ksql-tools/pom.xml
index e466d0427d38..6a81b35dc643 100644
--- a/ksql-tools/pom.xml
+++ b/ksql-tools/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-tools
diff --git a/ksql-udf-quickstart/pom.xml b/ksql-udf-quickstart/pom.xml
index 15b24d795cb9..5a31f517f51e 100644
--- a/ksql-udf-quickstart/pom.xml
+++ b/ksql-udf-quickstart/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-udf-quickstart
diff --git a/ksql-udf/pom.xml b/ksql-udf/pom.xml
index 03fb8b31c65b..7eb8f902ae0a 100644
--- a/ksql-udf/pom.xml
+++ b/ksql-udf/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-udf
diff --git a/ksql-version-metrics-client/pom.xml b/ksql-version-metrics-client/pom.xml
index b2a37b04dc25..53ac798ebf0e 100644
--- a/ksql-version-metrics-client/pom.xml
+++ b/ksql-version-metrics-client/pom.xml
@@ -22,7 +22,7 @@
   io.confluent.ksql
   ksql-parent
-  5.3.2-SNAPSHOT
+  5.3.2
   ksql-version-metrics-client
diff --git a/licenses/licenses.html b/licenses/licenses.html
index 38d8722d80bf..8fd8b97ac810 100644
--- a/licenses/licenses.html
+++ b/licenses/licenses.html
@@ -67,15 +67,15 @@
License Report


slice-0.29jar0.29 -common-config-5.3.2-SNAPSHOTjar5.3.2-SNAPSHOT +common-config-5.3.2jar5.3.2 -common-utils-5.3.2-SNAPSHOTjar5.3.2-SNAPSHOT +common-utils-5.3.2jar5.3.2 -kafka-avro-serializer-5.3.2-SNAPSHOTjar5.3.2-SNAPSHOT +kafka-avro-serializer-5.3.2jar5.3.2 -kafka-connect-avro-converter-5.3.2-SNAPSHOTjar5.3.2-SNAPSHOT +kafka-connect-avro-converter-5.3.2jar5.3.2 -kafka-schema-registry-client-5.3.2-SNAPSHOTjar5.3.2-SNAPSHOT +kafka-schema-registry-client-5.3.2jar5.3.2 ksql-engine-0.1-SNAPSHOTjar0.1-SNAPSHOT @@ -123,7 +123,7 @@

License Report


kafka-streams-0.11.0.0-cp1jarincluded file
-kafka_2.11-5.3.2-ccs-SNAPSHOTjarincluded file
+kafka_2.11-5.3.2-ccsjarincluded file
lz4-1.3.0jar1.3.0
diff --git a/pom.xml b/pom.xml
index 5e9a6dafd420..0b4e25ce38a0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -22,14 +22,14 @@
io.confluent
rest-utils-parent
- 5.3.2-SNAPSHOT
+ 5.3.2
io.confluent.ksql
ksql-parent
pom
ksql-parent
- 5.3.2-SNAPSHOT
+ 5.3.2
Confluent Community License

From b23dae95a82fc49d1615264d718c7d0d6ee0a7c5 Mon Sep 17 00:00:00 2001
From: Alex Dukhno <5074607+alex-dukhno@users.noreply.github.com>
Date: Fri, 13 Dec 2019 19:01:56 +0200
Subject: [PATCH 033/123] feat: implementation of KLIP-13 (#4099)

---
 design-proposals/README.md                    |  2 +-
 .../builder/PropertiesListTableBuilder.java   | 30 ++++----
 .../java/io/confluent/ksql/cli/CliTest.java   |  3 +
 .../ksql/cli/console/ConsoleTest.java         | 41 ++++++-----
 .../PropertiesListTableBuilderTest.java       | 21 +++---
 .../execution/ListPropertiesExecutor.java     | 61 ++++++++++++++---
 .../execution/ListPropertiesExecutorTest.java | 27 ++++++--
 .../server/resources/KsqlResourceTest.java    |  8 ++-
 .../ksql/rest/entity/PropertiesList.java      | 68 +++++++++++++++++--
 9 files changed, 196 insertions(+), 65 deletions(-)

diff --git a/design-proposals/README.md b/design-proposals/README.md
index 403641fcbbc1..13bbdb1e3227 100644
--- a/design-proposals/README.md
+++ b/design-proposals/README.md
@@ -53,7 +53,7 @@ Next KLIP number: **14**
| [KLIP-10: Suppress](klip-10-suppress.md) | Proposal | N/A |
| [KLIP-11: Redesign KSQL query language](klip-11-DQL.md) | Proposal | N/A |
| [KLIP-12: Implement High-Availability for Pull queries](klip-12-pull-high-availability.md)| Proposal | N/A |
-| [KLIP-13: Introduce KSQL command to print connect worker properties to the console](klip-13-introduce-KSQL-command-to-print-connect-worker-properties-to-the-console.md) | Proposal | N/A |
+| [KLIP-13: Introduce KSQL command to print connect worker properties to the console](klip-13-introduce-KSQL-command-to-print-connect-worker-properties-to-the-console.md) | Merged | 5.5 |
| [KLIP-14: ROWTIME as Pseudocolumn](klip-14-rowtime-as-pseudocolumn.md) | Approved | N/A |
| [KLIP-15: KSQLDB new API and Client(klip-15-new-api-and-client.md | Proposal | N/A |
| [KLIP-16: Introduce 'K$' dynamic views | Proposal | N/A |
diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilder.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilder.java
index b633635a9e35..ba183411651e 100644
--- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilder.java
+++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilder.java
@@ -19,9 +19,10 @@
import io.confluent.ksql.cli.console.table.Table;
import io.confluent.ksql.cli.console.table.Table.Builder;
import io.confluent.ksql.rest.entity.PropertiesList;
+import io.confluent.ksql.rest.entity.PropertiesList.Property;
+
import java.util.Comparator;
import java.util.List;
-import java.util.Map.Entry;
import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -29,7 +30,7 @@
public class PropertiesListTableBuilder implements TableBuilder {
private static final List HEADERS =
- ImmutableList.of("Property", "Default override", "Effective Value");
+ ImmutableList.of("Property", "Scope", "Default override", "Effective Value");
@Override
public Table buildTable(final PropertiesList entity) {
@@ -42,27 +43,29 @@ public Table
buildTable(final PropertiesList entity) { private static List> defRowValues(final List properties) { return properties.stream() .sorted(Comparator.comparing(propertyDef -> propertyDef.propertyName)) - .map( - def -> ImmutableList.of(def.propertyName, def.overrideType, def.effectiveValue)) + .map(def -> ImmutableList.of( + def.propertyName, def.scope, def.overrideType, def.effectiveValue)) .collect(Collectors.toList()); } private static List propertiesListWithOverrides(final PropertiesList properties) { - final Function, PropertyDef> toPropertyDef = e -> { - final String value = e.getValue() == null ? "NULL" : e.getValue().toString(); - if (properties.getOverwrittenProperties().contains(e.getKey())) { - return new PropertyDef(e.getKey(), "SESSION", value); + final Function toPropertyDef = property -> { + final String value = property.getValue() == null ? "NULL" : property.getValue(); + final String name = property.getName(); + final String scope = property.getScope(); + if (properties.getOverwrittenProperties().contains(name)) { + return new PropertyDef(name, scope, "SESSION", value); } - if (properties.getDefaultProperties().contains(e.getKey())) { - return new PropertyDef(e.getKey(), "", value); + if (properties.getDefaultProperties().contains(name)) { + return new PropertyDef(name, scope, "", value); } - return new PropertyDef(e.getKey(), "SERVER", value); + return new PropertyDef(name, scope, "SERVER", value); }; - return properties.getProperties().entrySet().stream() + return properties.getProperties().stream() .map(toPropertyDef) .collect(Collectors.toList()); } @@ -70,14 +73,17 @@ private static List propertiesListWithOverrides(final PropertiesLis private static class PropertyDef { private final String propertyName; + private final String scope; private final String overrideType; private final String effectiveValue; PropertyDef( final String propertyName, + final String scope, final String overrideType, final String effectiveValue) { this.propertyName = Objects.requireNonNull(propertyName, "propertyName"); + this.scope = Objects.requireNonNull(scope, "scope"); this.overrideType = Objects.requireNonNull(overrideType, "overrideType"); this.effectiveValue = Objects.requireNonNull(effectiveValue, "effectiveValue"); } diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java index 3748548907c6..69a54a567f3f 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java @@ -443,17 +443,20 @@ public void testPropertySetUnset() { // SERVER OVERRIDES: row( KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.NUM_STREAM_THREADS_CONFIG, + "KSQL", SERVER_OVERRIDE, "4" ), row( KsqlConfig.SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_MS_PROPERTY, + "KSQL", SERVER_OVERRIDE, "" + (KsqlConstants.defaultSinkWindowChangeLogAdditionalRetention + 1) ), // SESSION OVERRIDES: row( KsqlConfig.KSQL_STREAMS_PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, + "KSQL", SESSION_OVERRIDE, "latest" ) diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java index 14d2feafbd20..35cc00762dc4 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java @@ -56,6 +56,7 @@ import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.KsqlWarning; import 
io.confluent.ksql.rest.entity.PropertiesList; +import io.confluent.ksql.rest.entity.PropertiesList.Property; import io.confluent.ksql.rest.entity.Queries; import io.confluent.ksql.rest.entity.RunningQuery; import io.confluent.ksql.rest.entity.SchemaInfo; @@ -78,9 +79,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.function.Supplier; import org.apache.commons.lang3.StringUtils; import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo; @@ -233,10 +232,10 @@ public void testPrintCommandStatus() { @Test public void testPrintPropertyList() { // Given: - final Map properties = new HashMap<>(); - properties.put("k1", 1); - properties.put("k2", "v2"); - properties.put("k3", true); + final List properties = new ArrayList<>(); + properties.add(new Property("k1", "KSQL", "1")); + properties.add(new Property("k2", "KSQL", "v2")); + properties.add(new Property("k3", "KSQL", "true")); final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of( new PropertiesList("e", properties, Collections.emptyList(), Collections.emptyList()) @@ -251,23 +250,31 @@ public void testPrintPropertyList() { assertThat(output, is("[ {\n" + " \"@type\" : \"properties\",\n" + " \"statementText\" : \"e\",\n" - + " \"properties\" : {\n" - + " \"k1\" : 1,\n" - + " \"k2\" : \"v2\",\n" - + " \"k3\" : true\n" - + " },\n" + + " \"properties\" : [ {\n" + + " \"name\" : \"k1\",\n" + + " \"scope\" : \"KSQL\",\n" + + " \"value\" : \"1\"\n" + + " }, {\n" + + " \"name\" : \"k2\",\n" + + " \"scope\" : \"KSQL\",\n" + + " \"value\" : \"v2\"\n" + + " }, {\n" + + " \"name\" : \"k3\",\n" + + " \"scope\" : \"KSQL\",\n" + + " \"value\" : \"true\"\n" + + " } ],\n" + " \"overwrittenProperties\" : [ ],\n" + " \"defaultProperties\" : [ ],\n" + " \"warnings\" : [ ]\n" + "} ]\n")); } else { assertThat(output, is("\n" - + " Property | Default override | Effective Value \n" - + "-----------------------------------------------\n" - + " k1 | SERVER | 1 \n" - + " k2 | SERVER | v2 \n" - + " k3 | SERVER | true \n" - + "-----------------------------------------------\n")); + + " Property | Scope | Default override | Effective Value \n" + + "-------------------------------------------------------\n" + + " k1 | KSQL | SERVER | 1 \n" + + " k2 | KSQL | SERVER | v2 \n" + + " k3 | KSQL | SERVER | true \n" + + "-------------------------------------------------------\n")); } } diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilderTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilderTest.java index daf916496dba..7af16d3fdc95 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilderTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilderTest.java @@ -22,10 +22,10 @@ import static org.mockito.Mockito.when; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import io.confluent.ksql.cli.console.Console; import io.confluent.ksql.cli.console.table.Table; import io.confluent.ksql.rest.entity.PropertiesList; +import io.confluent.ksql.rest.entity.PropertiesList.Property; import io.confluent.ksql.util.KsqlConfig; import java.io.PrintWriter; import java.util.Collections; @@ -61,7 +61,7 @@ public void setUp() { public void shouldHandleClientOverwrittenProperties() { // Given: 
final PropertiesList propList = new PropertiesList("list properties;", - ImmutableMap.of(SOME_KEY, "earliest"), + ImmutableList.of(new Property(SOME_KEY, "KSQL", "earliest")), ImmutableList.of(SOME_KEY), Collections.emptyList() ); @@ -70,14 +70,14 @@ public void shouldHandleClientOverwrittenProperties() { final Table table = builder.buildTable(propList); // Then: - assertThat(getRows(table), contains(row(SOME_KEY, "SESSION", "earliest"))); + assertThat(getRows(table), contains(row(SOME_KEY, "KSQL", "SESSION", "earliest"))); } @Test public void shouldHandleServerOverwrittenProperties() { // Given: final PropertiesList propList = new PropertiesList("list properties;", - ImmutableMap.of(SOME_KEY, "earliest"), + ImmutableList.of(new Property(SOME_KEY, "KSQL", "earliest")), Collections.emptyList(), Collections.emptyList() ); @@ -86,14 +86,14 @@ public void shouldHandleServerOverwrittenProperties() { final Table table = builder.buildTable(propList); // Then: - assertThat(getRows(table), contains(row(SOME_KEY, "SERVER", "earliest"))); + assertThat(getRows(table), contains(row(SOME_KEY, "KSQL", "SERVER", "earliest"))); } @Test public void shouldHandleDefaultProperties() { // Given: final PropertiesList propList = new PropertiesList("list properties;", - ImmutableMap.of(SOME_KEY, "earliest"), + ImmutableList.of(new Property(SOME_KEY, "KSQL", "earliest")), Collections.emptyList(), ImmutableList.of(SOME_KEY) ); @@ -102,14 +102,14 @@ public void shouldHandleDefaultProperties() { final Table table = builder.buildTable(propList); // Then: - assertThat(getRows(table), contains(row(SOME_KEY, "", "earliest"))); + assertThat(getRows(table), contains(row(SOME_KEY, "KSQL", "", "earliest"))); } @Test public void shouldHandlePropertiesWithNullValue() { // Given: final PropertiesList propList = new PropertiesList("list properties;", - Collections.singletonMap(SOME_KEY, null), + Collections.singletonList(new Property(SOME_KEY, "KSQL", null)), Collections.emptyList(), ImmutableList.of(SOME_KEY) ); @@ -118,7 +118,7 @@ public void shouldHandlePropertiesWithNullValue() { final Table table = builder.buildTable(propList); // Then: - assertThat(getRows(table), contains(row(SOME_KEY, "", "NULL"))); + assertThat(getRows(table), contains(row(SOME_KEY, "KSQL", "", "NULL"))); } private List> getRows(final Table table) { @@ -130,9 +130,10 @@ private List> getRows(final Table table) { @SuppressWarnings("SameParameterValue") private static List row( final String property, + final String scope, final String defaultValue, final String actualValue ) { - return ImmutableList.of(property, defaultValue, actualValue); + return ImmutableList.of(property, scope, defaultValue, actualValue); } } \ No newline at end of file diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java index 258ad638a2b4..be70d4963140 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java @@ -20,15 +20,23 @@ import io.confluent.ksql.parser.tree.ListProperties; import io.confluent.ksql.rest.entity.KsqlEntity; import io.confluent.ksql.rest.entity.PropertiesList; +import io.confluent.ksql.rest.entity.PropertiesList.Property; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; +import 
io.confluent.ksql.util.KsqlConfig; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; +import java.util.Properties; import java.util.stream.Collectors; +import org.apache.kafka.common.utils.Utils; + public final class ListPropertiesExecutor { private ListPropertiesExecutor() { } @@ -44,25 +52,56 @@ public static Optional execute( final Map engineProperties = statement.getConfig().getAllConfigPropsWithSecretsObfuscated(); - final Map mergedProperties = statement.getConfig() - .cloneWithPropertyOverwrite(statement.getOverrides()) - .getAllConfigPropsWithSecretsObfuscated(); + final List mergedProperties = mergedProperties(statement); - final List overwritten = mergedProperties.entrySet() + final List overwritten = mergedProperties .stream() - .filter(e -> !Objects.equals(engineProperties.get(e.getKey()), e.getValue())) - .map(Entry::getKey) + .filter(property -> !Objects.equals( + engineProperties.get(property.getName()), property.getValue())) + .map(Property::getName) .collect(Collectors.toList()); - final List defaultProps = mergedProperties.entrySet().stream() - .filter(e -> resolver.resolve(e.getKey(), false) - .map(resolved -> resolved.isDefaultValue(e.getValue())) + final List defaultProps = mergedProperties.stream() + .filter(property -> resolver.resolve(property.getName(), false) + .map(resolved -> resolved.isDefaultValue(property.getValue())) .orElse(false)) - .map(Entry::getKey) + .map(Property::getName) .collect(Collectors.toList()); return Optional.of(new PropertiesList( statement.getStatementText(), mergedProperties, overwritten, defaultProps)); } + private static List mergedProperties( + ConfiguredStatement statement) { + final List mergedProperties = new ArrayList<>(); + + statement.getConfig() + .cloneWithPropertyOverwrite(statement.getOverrides()) + .getAllConfigPropsWithSecretsObfuscated() + .forEach((key, value) -> mergedProperties.add(new Property(key, "KSQL", value))); + + embeddedConnectWorkerProperties(statement) + .forEach((key, value) -> + mergedProperties.add(new Property(key, "EMBEDDED CONNECT WORKER", value))); + + return mergedProperties; + } + + private static Map embeddedConnectWorkerProperties( + ConfiguredStatement statement) { + String configFile = statement.getConfig() + .getString(KsqlConfig.CONNECT_WORKER_CONFIG_FILE_PROPERTY); + return !configFile.isEmpty() + ? 
Utils.propsToStringMap(getWorkerProps(configFile)) + : Collections.emptyMap(); + } + + private static Properties getWorkerProps(String configFile) { + try { + return Utils.loadProps(configFile); + } catch (IOException e) { + return new Properties(); + } + } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java index 164f3d7635b1..208c97191940 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java @@ -19,7 +19,6 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; @@ -27,6 +26,7 @@ import com.google.common.collect.ImmutableMap; import io.confluent.ksql.rest.entity.PropertiesList; +import io.confluent.ksql.rest.entity.PropertiesList.Property; import io.confluent.ksql.rest.server.TemporaryEngine; import io.confluent.ksql.util.KsqlConfig; import org.junit.Rule; @@ -34,6 +34,9 @@ import org.junit.runner.RunWith; import org.mockito.junit.MockitoJUnitRunner; +import java.util.HashMap; +import java.util.Map; + @RunWith(MockitoJUnitRunner.class) public class ListPropertiesExecutorTest { @@ -50,11 +53,20 @@ public void shouldListProperties() { ).orElseThrow(IllegalStateException::new); // Then: - assertThat(properties.getProperties(), + assertThat( + toMap(properties), equalTo(engine.getKsqlConfig().getAllConfigPropsWithSecretsObfuscated())); assertThat(properties.getOverwrittenProperties(), is(empty())); } + private Map toMap(PropertiesList properties) { + Map map = new HashMap<>(); + for (Property property : properties.getProperties()) { + map.put(property.getName(), property.getValue()); + } + return map; + } + @Test public void shouldListPropertiesWithOverrides() { // When: @@ -67,8 +79,9 @@ public void shouldListPropertiesWithOverrides() { ).orElseThrow(IllegalStateException::new); // Then: - assertThat(properties.getProperties(), - hasEntry("ksql.streams.auto.offset.reset", "latest")); + assertThat( + properties.getProperties(), + hasItem(new Property("ksql.streams.auto.offset.reset", "KSQL", "latest"))); assertThat(properties.getOverwrittenProperties(), hasItem("ksql.streams.auto.offset.reset")); } @@ -83,8 +96,8 @@ public void shouldNotListSslProperties() { ).orElseThrow(IllegalStateException::new); // Then: - assertThat(properties.getProperties(), not(hasKey(isIn(KsqlConfig.SSL_CONFIG_NAMES)))); + assertThat( + toMap(properties), + not(hasKey(isIn(KsqlConfig.SSL_CONFIG_NAMES)))); } - - } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java index 813371df7b54..1355df1c4a68 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -98,6 +98,7 @@ import io.confluent.ksql.rest.entity.KsqlRequest; import io.confluent.ksql.rest.entity.KsqlStatementErrorMessage; import io.confluent.ksql.rest.entity.PropertiesList; +import 
io.confluent.ksql.rest.entity.PropertiesList.Property; import io.confluent.ksql.rest.entity.Queries; import io.confluent.ksql.rest.entity.QueryDescription; import io.confluent.ksql.rest.entity.QueryDescriptionEntity; @@ -1516,7 +1517,9 @@ public void shouldListPropertiesWithOverrides() { new KsqlRequest("list properties;", overrides, null), PropertiesList.class); // Then: - assertThat(props.getProperties().get("ksql.streams.auto.offset.reset"), is("latest")); + assertThat( + props.getProperties(), + hasItem(new Property("ksql.streams.auto.offset.reset", "KSQL", "latest"))); assertThat(props.getOverwrittenProperties(), hasItem("ksql.streams.auto.offset.reset")); } @@ -1661,7 +1664,8 @@ public void shouldNotIncludeSslPropertiesInListPropertiesOutput() { final PropertiesList props = makeSingleRequest("list properties;", PropertiesList.class); // Then: - assertThat(props.getProperties().keySet(), + assertThat(props.getProperties().stream().map(Property::getName).collect( + Collectors.toList()), not(hasItems(KsqlConfig.SSL_CONFIG_NAMES.toArray(new String[0])))); } diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java index 62ae517d6c93..62bbe63a008d 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java @@ -18,34 +18,92 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; + import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Objects; @JsonIgnoreProperties(ignoreUnknown = true) public class PropertiesList extends KsqlEntity { - private final Map properties; + @JsonIgnoreProperties(ignoreUnknown = true) + public static class Property { + private final String name; + private final String scope; + private final String value; + + @JsonCreator + public Property( + @JsonProperty("name") final String name, + @JsonProperty("scope") final String scope, + @JsonProperty("value") final String value + ) { + this.name = name; + this.scope = scope; + this.value = value; + } + + public String getName() { + return name; + } + + public String getScope() { + return scope; + } + + public String getValue() { + return value; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + Property that = (Property) object; + return Objects.equals(name, that.name) + && Objects.equals(scope, that.scope) + && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(name, scope, value); + } + + @Override + public String toString() { + return "Property{" + + "name='" + name + '\'' + + ", scope='" + scope + '\'' + + ", value='" + value + '\'' + + '}'; + } + } + + private final List properties; private final List overwrittenProperties; private final List defaultProperties; @JsonCreator public PropertiesList( @JsonProperty("statementText") final String statementText, - @JsonProperty("properties") final Map properties, + @JsonProperty("properties") final List properties, @JsonProperty("overwrittenProperties") final List overwrittenProperties, @JsonProperty("defaultProperties") final List defaultProperties ) { super(statementText); this.properties = properties == null - 
? Collections.emptyMap() : properties;
+ ? Collections.emptyList() : properties;
this.overwrittenProperties = overwrittenProperties == null
? Collections.emptyList() : overwrittenProperties;
this.defaultProperties = defaultProperties == null
? Collections.emptyList() : defaultProperties;
}
- public Map getProperties() {
+ public List getProperties() {
return properties;
}

From ebac1042aa6366c7d951e88f16dab6fff3e8a168 Mon Sep 17 00:00:00 2001
From: Alex Dukhno <5074607+alex-dukhno@users.noreply.github.com>
Date: Fri, 13 Dec 2019 19:03:27 +0200
Subject: [PATCH 034/123] feat: show properties now includes embedded connect
 properties and scope (#4099)

From 3187a4c7f4b0ff42a7b7e4b570e444deac63588c Mon Sep 17 00:00:00 2001
From: Jim Galasyn
Date: Fri, 13 Dec 2019 14:22:57 -0800
Subject: [PATCH 035/123] docs: update push and pull query topics with
 feedback (DOCS-3092) (#4117)

* docs: update push and pull query topics with feedback (DOCS-3092)

* docs: incorporate feedback
---
 docs-md/concepts/queries/index.md             | 21 +++----
 docs-md/concepts/queries/pull.md              | 57 +++++++++++++++----
 docs-md/concepts/queries/push.md              | 50 +++++++++++-----
 .../ksqldb-reference/select-pull-query.md     | 14 ++---
 .../ksqldb-reference/select-push-query.md     |  6 +-
 5 files changed, 103 insertions(+), 45 deletions(-)

diff --git a/docs-md/concepts/queries/index.md b/docs-md/concepts/queries/index.md
index 8b0fbf8c89e0..457589ba5700 100644
--- a/docs-md/concepts/queries/index.md
+++ b/docs-md/concepts/queries/index.md
@@ -1,27 +1,28 @@
---
layout: page
title: Queries
-tagline: Query materialized views of event streams
-description: Learn how to query materialized views of event streams by using the SELECT statement.
+tagline: Query event streams
+description: Learn how to query event streams by using the SELECT statement.
keywords: ksqldb, query, select, pull, push, materialized view
---

ksqlDB has a rich set of constructs for both storing events in collections
and deriving new ones through stream processing. Sometimes, you need to process
-your events by aggregating them together into a materialized view. In this
-case, you need a way for your applications or microservices to leverage this
-view. That's where queries come in.
+your events in a real-time stream or by aggregating them together into a
+materialized table. In these cases, you need a way for your applications or
+microservices to leverage these collections. That's where queries come in.

Push and pull queries
---------------------

-Queries enable you to ask questions about materialized views. ksqlDB supports
-two different kinds of client-issued queries: push and pull.
+Queries enable you to ask questions about collections and materialized views.
+ksqlDB supports two different kinds of client-issued queries: push and pull.

-- [Push Queries](push.md) enable you to subscribe to a result as it changes in
- real-time.
- [Pull Queries](pull.md) enable you to look up information at a point in time.
-
+- [Push Queries](push.md) enable you to subscribe to a result as it changes in
+ real-time. You can subscribe to the output of any query, including those that
+ returns a stream or a materialized aggregate table.
+
ksqlDB supports both kinds of queries by using SQL over its REST API.
Combining them enables you to build powerful real-time applications.
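To make the contrast concrete, the two query forms can be run against the same
materialized table. The sketch below is illustrative only: it assumes a
materialized aggregate table named `user_location`, keyed by user id, matching
the example used in the pull-query topic that follows.

```sql
-- Pull query: returns the current row for the given key as a finite
-- response, then closes the connection (request/response flow).
SELECT * FROM user_location WHERE ROWKEY = 'user19r7t33';

-- Push query: the same lookup with EMIT CHANGES subscribes to every
-- subsequent change over a long-lived connection (asynchronous flow).
SELECT * FROM user_location WHERE ROWKEY = 'user19r7t33' EMIT CHANGES;
```

The only syntactic difference is the EMIT CHANGES clause; the semantic
difference is a single finite answer versus an open-ended subscription.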
diff --git a/docs-md/concepts/queries/pull.md b/docs-md/concepts/queries/pull.md
index b6c78c03c2e3..4099f3e7c6dd 100644
--- a/docs-md/concepts/queries/pull.md
+++ b/docs-md/concepts/queries/pull.md
@@ -8,18 +8,15 @@ keywords: ksqldb, pull, query, select

![Illustration of a pull query](../../img/ksqldb-pull-query.svg)

-Pull queries are a form of query issued by a client that retrieve a result as
-of "now". As a dual to the push query example, a pull query for a credit score
-would be asking for the current score of a particular user. Because it is a
-pull query, it returns immediately with a finite result and closes its
-connection. This is ideal for rendering a user interface once at page load
-time. It's generally a good fit for any sort of synchronous control flow.
-
-Pull queries are expressed using a strict subset of ANSI SQL. They can only be
-used to query tables that have been processed into materialized views.
-Currently, pull queries only support looking up events by key. They're executed
-by running over the ksqlDB REST API. The result of a pull query isn't persisted
-anywhere.
+A pull query is a form of query issued by a client that retrieves a result as
+of "now", like a query against a traditional RDBMS.
+
+As a dual to the [push query](push.md) example, a pull query for a geographic
+location would ask for the current map coordinates of a particular user.
+Because it's a pull query, it returns immediately with a finite result and
+closes its connection. This is ideal for rendering a user interface once, at
+page load time. It's generally a good fit for any sort of synchronous control
+flow.

Pull queries enable you to fetch the current state of a materialized view.
Because materialized views are incrementally updated as new events arrive,
@@ -30,4 +27,40 @@ request/response flows. For asynchronous application flows, see
Execute a pull query by sending an HTTP request to the ksqlDB REST API, and
the API responds with a single response.

+Pull query features and limitations
+-----------------------------------
+
+- Pull queries are expressed using a strict subset of ANSI SQL.
+- Pull queries are currently available only against materialized aggregate
+  tables, which means tables that are created by using a persistent query
+  with a GROUP BY clause.
+- For non-windowed aggregations, pull queries only support looking up events
+  by key.
+- WHERE clauses can only have `ROWKEY=x`-style bounds for non-windowed tables.
+- Windowed tables support bounds on WINDOWSTART using operators
+  `<=`, `<`, `=`, `>`, `>=`.
+- JOIN, PARTITION BY, GROUP BY and WINDOW clauses aren't supported.
+- SELECT statements can contain column arithmetic and function calls.
+- The result of a pull query isn't persisted anywhere.
+
+Example pull query
+------------------
+
+The following pull query gets all events for the specified user that have a
+timestamp within the specified time window. The WHERE clause must contain a
+single value of `ROWKEY` to retrieve and may optionally include bounds on
+WINDOWSTART if the materialized table is windowed.
+ +```sql +SELECT * FROM user_location + WHERE ROWKEY = 'user19r7t33' + AND '2019-10-02T21:31:16' <= WINDOWSTART AND WINDOWSTART <= '2019-10-03T21:31:16'; +``` + +API Reference +------------- + +- [SELECT (Pull Query)](../../developer-guide/ksqldb-reference/select-pull-query.md) +- [SELECT (Push Query)](../../developer-guide/ksqldb-reference/select-push-query.md) + Page last revised on: {{ git_revision_date }} \ No newline at end of file diff --git a/docs-md/concepts/queries/push.md b/docs-md/concepts/queries/push.md index ed53d2fb74df..7eb14cbbcf04 100644 --- a/docs-md/concepts/queries/push.md +++ b/docs-md/concepts/queries/push.md @@ -8,26 +8,48 @@ keywords: ksqldb, push, query, select ![Illustration of a push query](../../img/ksqldb-push-query.svg) -Push queries are a form of query issued by a client that subscribe to a result +A push query is a form of query issued by a client that subscribes to a result as it changes in real-time. A good example of a push query is subscribing to a -particular user’s credit score. The query requests the value of the credit -score. Because it's a push query, any change to the credit score is "pushed" -to the client as soon as it occurs over a long-lived connection. This is useful -for building programmatically controlled microservices, real-time apps, or any -sort of asynchronous control flow. +particular user's geographic location. The query requests the map coordinates, +and because it's a push query, any change to the location is "pushed" over a +long-lived connection to the client as soon as it occurs. This is useful for +building programmatically controlled microservices, real-time apps, or any sort +of asynchronous control flow. Push queries are expressed using a SQL-like language. They can be used to query -either streams or tables for a particular key. They’re executed by running over -the ksqlDB REST API. The result of a push query isn't persisted to a backing -{{ site.ak }} topic. +either streams or tables for a particular key. Also, push queries aren't limited +to key look-ups. They support a full set of SQL, including filters, selects, +group bys, partition bys, and joins. -Push queries enable you to query a materialized view with a subscription to -the results. Push queries emit refinements to materialized views, which enable -reacting to new information in real-time. They’re a good fit for asynchronous -application flows. For request/response flows, see -[Pull Query](pull.md). +Push queries enable you to query a stream or materialized table with a +subscription to the results. You can subscribe to the output of any query, +including one that returns a stream. A push query emits refinements to a stream +or materialized table, which enables reacting to new information in real-time. +They’re a good fit for asynchronous application flows. For request/response +flows, see [Pull Query](pull.md). Execute a push query by sending an HTTP request to the ksqlDB REST API, and the API sends back a chunked response of indefinite length. +The result of a push query isn't persisted to a backing {{ site.ak }} topic. +If you need to persist the result of a query to a {{ site.ak }} topic, use a +CREATE TABLE AS SELECT or CREATE STREAM AS SELECT statement. + +Example push query +================== + +Specify a push query by using the EMIT CHANGES clause in a SELECT statement. +The following statement shows how to select five events from a `pageviews` +stream. 
+ +```sql +SELECT * FROM pageviews EMIT CHANGES LIMIT 5; +``` + +API Reference +============= + +- [SELECT (Push Query)](../../developer-guide/ksqldb-reference/select-push-query.md) +- [SELECT (Pull Query)](../../developer-guide/ksqldb-reference/select-pull-query.md) + Page last revised on: {{ git_revision_date }} \ No newline at end of file diff --git a/docs-md/developer-guide/ksqldb-reference/select-pull-query.md b/docs-md/developer-guide/ksqldb-reference/select-pull-query.md index 1f84ed837ba9..3140eb8cc294 100644 --- a/docs-md/developer-guide/ksqldb-reference/select-pull-query.md +++ b/docs-md/developer-guide/ksqldb-reference/select-pull-query.md @@ -35,7 +35,7 @@ request/response flows. For asynchronous application flows, see Execute a pull query by sending an HTTP request to the ksqlDB REST API, and the API responds with a single response. -The WHERE clause must contain a single value of `ROWKEY` to retieve and may +The WHERE clause must contain a single value of `ROWKEY` to retrieve and may optionally include bounds on WINDOWSTART if the materialized table is windowed. Example @@ -52,9 +52,9 @@ formatted datestrings to represent date times. For example, the previous query is equivalent to the following: ```sql - SELECT * FROM pageviews_by_region - WHERE ROWKEY = 'Region_1' - AND '2019-10-02T21:31:16' <= WINDOWSTART AND WINDOWSTART <= '2019-10-03T21:31:16'; +SELECT * FROM pageviews_by_region + WHERE ROWKEY = 'Region_1' + AND '2019-10-02T21:31:16' <= WINDOWSTART AND WINDOWSTART <= '2019-10-03T21:31:16'; ``` You can specify time zones within the datestring. For example, @@ -63,6 +63,6 @@ specified within the datestring, then timestamps are interpreted in the UTC time zone. If no bounds are placed on `WINDOWSTART`, rows are returned for all windows -in the windowed table. - -Page last revised on: {{ git_revision_date }} +in the windowed table. + +Page last revised on: {{ git_revision_date }} diff --git a/docs-md/developer-guide/ksqldb-reference/select-push-query.md b/docs-md/developer-guide/ksqldb-reference/select-push-query.md index 9dfab6ab1557..4be1862bde1e 100644 --- a/docs-md/developer-guide/ksqldb-reference/select-push-query.md +++ b/docs-md/developer-guide/ksqldb-reference/select-push-query.md @@ -59,7 +59,8 @@ stream that have timestamps between two values. ```sql SELECT * FROM pageviews WHERE ROWTIME >= 1510923225000 - AND ROWTIME <= 1510923228000; + AND ROWTIME <= 1510923228000 + EMIT CHANGES; ``` When writing logical expressions using `ROWTIME`, you can use ISO-8601 @@ -69,7 +70,8 @@ query is equivalent to the following: ```sql SELECT * FROM pageviews WHERE ROWTIME >= '2017-11-17T04:53:45' - AND ROWTIME <= '2017-11-17T04:53:48'; + AND ROWTIME <= '2017-11-17T04:53:48' + EMIT CHANGES; ``` If the datestring is inexact, the rest of the timestamp is assumed to be From a34c04a6df23a145a897675a4e75463c4d3d4338 Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Fri, 13 Dec 2019 14:29:39 -0800 Subject: [PATCH 036/123] docs: fix a typo in queries overview topic (DOCS-3105) (#4133) --- docs-md/concepts/queries/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-md/concepts/queries/index.md b/docs-md/concepts/queries/index.md index 457589ba5700..24f1117fd431 100644 --- a/docs-md/concepts/queries/index.md +++ b/docs-md/concepts/queries/index.md @@ -21,7 +21,7 @@ ksqlDB supports two different kinds of client-issued queries: push and pull. - [Pull Queries](pull.md) enable you to look up information at a point in time. 
- [Push Queries](push.md) enable you to subscribe to a result as it changes in real-time. You can subscribe to the output of any query, including those that - returns a stream or a materialized aggregate table. + return a stream or a materialized aggregate table. ksqlDB supports both kinds of queries by using SQL over its REST API. Combining them enables you to build powerful real-time applications. From 4a6141a9d0f72c7a9326f204493d1ce55d672031 Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Fri, 13 Dec 2019 15:38:58 -0800 Subject: [PATCH 037/123] docs: add functions index to markdown docs (DOCS-3049) (#4135) --- .../ksqldb-reference/functions.md | 16 ++++++++++++++++ .../developer-guide/ksqldb-reference/index.md | 2 ++ mkdocs.yml | 2 +- 3 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 docs-md/developer-guide/ksqldb-reference/functions.md diff --git a/docs-md/developer-guide/ksqldb-reference/functions.md b/docs-md/developer-guide/ksqldb-reference/functions.md new file mode 100644 index 000000000000..059c29b05c0e --- /dev/null +++ b/docs-md/developer-guide/ksqldb-reference/functions.md @@ -0,0 +1,16 @@ +--- +layout: page +title: ksqlDB Functions Index +tagline: ksqlDB Functions +description: Learn to use ksqlDB functions to build streaming applications. +keywords: ksqldb, api, reference, function +--- + +Functions +========= + +- [Aggregate Functions](aggregate-functions.md) +- [Scalar Functions](scalar-functions.md) +- [Table Functions](table-functions.md) + +Page last revised on: {{ git_revision_date }} \ No newline at end of file diff --git a/docs-md/developer-guide/ksqldb-reference/index.md b/docs-md/developer-guide/ksqldb-reference/index.md index c6a81a9181ec..ffdeee0e2048 100644 --- a/docs-md/developer-guide/ksqldb-reference/index.md +++ b/docs-md/developer-guide/ksqldb-reference/index.md @@ -68,3 +68,5 @@ Execution --------- - [RUN SCRIPT](run-script.md) + +Page last revised on: {{ git_revision_date }} diff --git a/mkdocs.yml b/mkdocs.yml index 4983c12417c5..bbe8d70757cf 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -100,7 +100,7 @@ nav: - TERMINATE: developer-guide/ksqldb-reference/terminate.md - Operators: developer-guide/ksqldb-reference/operators.md - Functions: - - Functions Index: developer-guide/ksqldb-reference/index.md + - Functions Index: developer-guide/ksqldb-reference/functions.md - Scalar functions: developer-guide/ksqldb-reference/scalar-functions.md - Aggregation functions: developer-guide/ksqldb-reference/aggregate-functions.md - Table Functions: developer-guide/ksqldb-reference/table-functions.md From d5974e1c4d3213daabf94fc6c58b5bc7794e1dfe Mon Sep 17 00:00:00 2001 From: Rohan Date: Sun, 15 Dec 2019 21:03:38 -0800 Subject: [PATCH 038/123] chore: fix join left right names (#4136) * chore: rename join left/right to leftSource/rightSource * chore: fix schema --- .../ksql/execution/plan/StreamStreamJoin.java | 32 ++++++++++--------- .../ksql/execution/plan/StreamTableJoin.java | 30 +++++++++-------- .../ksql/execution/plan/TableTableJoin.java | 30 +++++++++-------- .../resources/ksql-plan-schema/schema.json | 18 +++++------ .../ksql/execution/streams/KSPlanBuilder.java | 12 +++---- 5 files changed, 64 insertions(+), 58 deletions(-) diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamStreamJoin.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamStreamJoin.java index 51a326162a47..4f177071830b 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamStreamJoin.java +++ 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamStreamJoin.java @@ -29,8 +29,8 @@ public class StreamStreamJoin implements ExecutionStep> { private final JoinType joinType; private final Formats leftInternalFormats; private final Formats rightInternalFormats; - private final ExecutionStep> left; - private final ExecutionStep> right; + private final ExecutionStep> leftSource; + private final ExecutionStep> rightSource; private final Duration beforeMillis; private final Duration afterMillis; @@ -39,8 +39,10 @@ public StreamStreamJoin( @JsonProperty(value = "joinType", required = true) JoinType joinType, @JsonProperty(value = "leftInternalFormats", required = true) Formats leftInternalFormats, @JsonProperty(value = "rightInternalFormats", required = true) Formats rightInternalFormats, - @JsonProperty(value = "left", required = true) ExecutionStep> left, - @JsonProperty(value = "right", required = true) ExecutionStep> right, + @JsonProperty(value = "leftSource", required = true) + ExecutionStep> leftSource, + @JsonProperty(value = "rightSource", required = true) + ExecutionStep> rightSource, @JsonProperty(value = "beforeMillis", required = true) Duration beforeMillis, @JsonProperty(value = "afterMillis", required = true) Duration afterMillis) { this.properties = Objects.requireNonNull(properties, "properties"); @@ -49,8 +51,8 @@ public StreamStreamJoin( this.rightInternalFormats = Objects.requireNonNull(rightInternalFormats, "rightInternalFormats"); this.joinType = Objects.requireNonNull(joinType, "joinType"); - this.left = Objects.requireNonNull(left, "left"); - this.right = Objects.requireNonNull(right, "right"); + this.leftSource = Objects.requireNonNull(leftSource, "leftSource"); + this.rightSource = Objects.requireNonNull(rightSource, "rightSource"); this.beforeMillis = Objects.requireNonNull(beforeMillis, "beforeMillis"); this.afterMillis = Objects.requireNonNull(afterMillis, "afterMillis"); } @@ -63,7 +65,7 @@ public ExecutionStepPropertiesV1 getProperties() { @Override @JsonIgnore public List> getSources() { - return ImmutableList.of(left, right); + return ImmutableList.of(leftSource, rightSource); } public Formats getLeftInternalFormats() { @@ -74,12 +76,12 @@ public Formats getRightInternalFormats() { return rightInternalFormats; } - public ExecutionStep> getLeft() { - return left; + public ExecutionStep> getLeftSource() { + return leftSource; } - public ExecutionStep> getRight() { - return right; + public ExecutionStep> getRightSource() { + return rightSource; } public JoinType getJoinType() { @@ -113,8 +115,8 @@ public boolean equals(Object o) { && joinType == that.joinType && Objects.equals(leftInternalFormats, that.leftInternalFormats) && Objects.equals(rightInternalFormats, that.rightInternalFormats) - && Objects.equals(left, that.left) - && Objects.equals(right, that.right) + && Objects.equals(leftSource, that.leftSource) + && Objects.equals(rightSource, that.rightSource) && Objects.equals(beforeMillis, that.beforeMillis) && Objects.equals(afterMillis, that.afterMillis); } @@ -127,8 +129,8 @@ public int hashCode() { joinType, leftInternalFormats, rightInternalFormats, - left, - right, + leftSource, + rightSource, beforeMillis, afterMillis ); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamTableJoin.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamTableJoin.java index 3ff1b86e5f37..c284595f90f0 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamTableJoin.java +++ 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamTableJoin.java @@ -27,20 +27,22 @@ public class StreamTableJoin implements ExecutionStep> { private final ExecutionStepPropertiesV1 properties; private final JoinType joinType; private final Formats internalFormats; - private final ExecutionStep> left; - private final ExecutionStep> right; + private final ExecutionStep> leftSource; + private final ExecutionStep> rightSource; public StreamTableJoin( @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties, @JsonProperty(value = "joinType", required = true) JoinType joinType, @JsonProperty(value = "internalFormats", required = true) Formats internalFormats, - @JsonProperty(value = "left", required = true) ExecutionStep> left, - @JsonProperty(value = "right", required = true) ExecutionStep> right) { + @JsonProperty(value = "leftSource", required = true) + ExecutionStep> leftSource, + @JsonProperty(value = "rightSource", required = true) + ExecutionStep> rightSource) { this.properties = Objects.requireNonNull(properties, "properties"); this.internalFormats = Objects.requireNonNull(internalFormats, "internalFormats"); this.joinType = Objects.requireNonNull(joinType, "joinType"); - this.left = Objects.requireNonNull(left, "left"); - this.right = Objects.requireNonNull(right, "right"); + this.leftSource = Objects.requireNonNull(leftSource, "leftSource"); + this.rightSource = Objects.requireNonNull(rightSource, "rightSource"); } @Override @@ -51,19 +53,19 @@ public ExecutionStepPropertiesV1 getProperties() { @Override @JsonIgnore public List> getSources() { - return ImmutableList.of(left, right); + return ImmutableList.of(leftSource, rightSource); } public Formats getInternalFormats() { return internalFormats; } - public ExecutionStep> getLeft() { - return left; + public ExecutionStep> getLeftSource() { + return leftSource; } - public ExecutionStep> getRight() { - return right; + public ExecutionStep> getRightSource() { + return rightSource; } public JoinType getJoinType() { @@ -87,13 +89,13 @@ public boolean equals(Object o) { return Objects.equals(properties, that.properties) && joinType == that.joinType && Objects.equals(internalFormats, that.internalFormats) - && Objects.equals(left, that.left) - && Objects.equals(right, that.right); + && Objects.equals(leftSource, that.leftSource) + && Objects.equals(rightSource, that.rightSource); } @Override public int hashCode() { - return Objects.hash(properties, joinType, internalFormats, left, right); + return Objects.hash(properties, joinType, internalFormats, leftSource, rightSource); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableTableJoin.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableTableJoin.java index 6d0ead2e3f23..9ce53ac071ac 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableTableJoin.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableTableJoin.java @@ -25,18 +25,20 @@ public class TableTableJoin implements ExecutionStep> { private final ExecutionStepPropertiesV1 properties; private final JoinType joinType; - private final ExecutionStep> left; - private final ExecutionStep> right; + private final ExecutionStep> leftSource; + private final ExecutionStep> rightSource; public TableTableJoin( @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties, @JsonProperty(value = "joinType", required = true) JoinType joinType, - @JsonProperty(value = 
"left", required = true) ExecutionStep> left, - @JsonProperty(value = "right", required = true) ExecutionStep> right) { + @JsonProperty(value = "leftSource", required = true) + ExecutionStep> leftSource, + @JsonProperty(value = "rightSource", required = true) + ExecutionStep> rightSource) { this.properties = Objects.requireNonNull(properties, "properties"); this.joinType = Objects.requireNonNull(joinType, "joinType"); - this.left = Objects.requireNonNull(left, "left"); - this.right = Objects.requireNonNull(right, "right"); + this.leftSource = Objects.requireNonNull(leftSource, "leftSource"); + this.rightSource = Objects.requireNonNull(rightSource, "rightSource"); } @Override @@ -47,15 +49,15 @@ public ExecutionStepPropertiesV1 getProperties() { @Override @JsonIgnore public List> getSources() { - return ImmutableList.of(left, right); + return ImmutableList.of(leftSource, rightSource); } - public ExecutionStep> getLeft() { - return left; + public ExecutionStep> getLeftSource() { + return leftSource; } - public ExecutionStep> getRight() { - return right; + public ExecutionStep> getRightSource() { + return rightSource; } public JoinType getJoinType() { @@ -78,13 +80,13 @@ public boolean equals(Object o) { TableTableJoin that = (TableTableJoin) o; return Objects.equals(properties, that.properties) && joinType == that.joinType - && Objects.equals(left, that.left) - && Objects.equals(right, that.right); + && Objects.equals(leftSource, that.leftSource) + && Objects.equals(rightSource, that.rightSource); } @Override public int hashCode() { - return Objects.hash(properties, joinType, left, right); + return Objects.hash(properties, joinType, leftSource, rightSource); } } diff --git a/ksql-rest-app/src/test/resources/ksql-plan-schema/schema.json b/ksql-rest-app/src/test/resources/ksql-plan-schema/schema.json index ee20905190e9..24ef2407aa76 100644 --- a/ksql-rest-app/src/test/resources/ksql-plan-schema/schema.json +++ b/ksql-rest-app/src/test/resources/ksql-plan-schema/schema.json @@ -542,10 +542,10 @@ "rightInternalFormats" : { "$ref" : "#/definitions/Formats" }, - "left" : { + "leftSource" : { "$ref" : "#/definitions/ExecutionStep" }, - "right" : { + "rightSource" : { "$ref" : "#/definitions/ExecutionStep" }, "beforeMillis" : { @@ -556,7 +556,7 @@ } }, "title" : "streamStreamJoinV1", - "required" : [ "@type", "properties", "joinType", "leftInternalFormats", "rightInternalFormats", "left", "right", "beforeMillis", "afterMillis" ] + "required" : [ "@type", "properties", "joinType", "leftInternalFormats", "rightInternalFormats", "leftSource", "rightSource", "beforeMillis", "afterMillis" ] }, "StreamTableJoin" : { "type" : "object", @@ -577,15 +577,15 @@ "internalFormats" : { "$ref" : "#/definitions/Formats" }, - "left" : { + "leftSource" : { "$ref" : "#/definitions/ExecutionStep" }, - "right" : { + "rightSource" : { "$ref" : "#/definitions/ExecutionStep" } }, "title" : "streamTableJoinV1", - "required" : [ "@type", "properties", "joinType", "internalFormats", "left", "right" ] + "required" : [ "@type", "properties", "joinType", "internalFormats", "leftSource", "rightSource" ] }, "StreamWindowedAggregate" : { "type" : "object", @@ -839,15 +839,15 @@ "type" : "string", "enum" : [ "INNER", "LEFT", "OUTER" ] }, - "left" : { + "leftSource" : { "$ref" : "#/definitions/ExecutionStep" }, - "right" : { + "rightSource" : { "$ref" : "#/definitions/ExecutionStep" } }, "title" : "tableTableJoinV1", - "required" : [ "@type", "properties", "joinType", "left", "right" ] + "required" : [ "@type", "properties", 
"joinType", "leftSource", "rightSource" ] }, "ExecutionStep" : { "oneOf" : [ { diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/KSPlanBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/KSPlanBuilder.java index f73d151127c1..be7219027455 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/KSPlanBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/KSPlanBuilder.java @@ -168,8 +168,8 @@ public KStreamHolder> visitWindowedStreamSource( @Override public KStreamHolder visitStreamStreamJoin(final StreamStreamJoin join) { - final KStreamHolder left = join.getLeft().build(this); - final KStreamHolder right = join.getRight().build(this); + final KStreamHolder left = join.getLeftSource().build(this); + final KStreamHolder right = join.getRightSource().build(this); return StreamStreamJoinBuilder.build( left, right, @@ -181,8 +181,8 @@ public KStreamHolder visitStreamStreamJoin(final StreamStreamJoin join @Override public KStreamHolder visitStreamTableJoin(final StreamTableJoin join) { - final KTableHolder right = join.getRight().build(this); - final KStreamHolder left = join.getLeft().build(this); + final KTableHolder right = join.getRightSource().build(this); + final KStreamHolder left = join.getLeftSource().build(this); return StreamTableJoinBuilder.build( left, right, @@ -274,8 +274,8 @@ public KTableHolder visitTableSink(final TableSink tableSink) { @Override public KTableHolder visitTableTableJoin( final TableTableJoin tableTableJoin) { - final KTableHolder left = tableTableJoin.getLeft().build(this); - final KTableHolder right = tableTableJoin.getRight().build(this); + final KTableHolder left = tableTableJoin.getLeftSource().build(this); + final KTableHolder right = tableTableJoin.getRightSource().build(this); return TableTableJoinBuilder.build(left, right, tableTableJoin); } } From 04de30e74cd19dc541ad84e3efa0d33a4a4df116 Mon Sep 17 00:00:00 2001 From: Alberto Santini Date: Mon, 16 Dec 2019 14:29:54 +0100 Subject: [PATCH 039/123] fix: untracked file after cloning on Windows (#4122) * fix: untracked file after cloning on Windows This patch resolves a problem on Windows. After cloning the repo, git status shows one untracked file. This is a due to the trailing period in the file name. --- ..._the_sink_topic_if_default_partitions_and_replicas_were_set} | 0 .../query-validation-tests/sink-partitions-replicas.json | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/{sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set. => sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set} (100%) diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set. b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set similarity index 100% rename from ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set. 
rename to ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/sink-partitions-replicas_-_Use_the_legacy_default_sink_properties_for_the_sink_topic_if_default_partitions_and_replicas_were_set diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/sink-partitions-replicas.json b/ksql-functional-tests/src/test/resources/query-validation-tests/sink-partitions-replicas.json index 4f5ee2571fe9..d4d034b8d33d 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/sink-partitions-replicas.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/sink-partitions-replicas.json @@ -26,7 +26,7 @@ "outputs": [{"topic": "S", "value": {"C1": 4}}] }, { - "name": "Use the legacy default sink properties for the sink topic if default partitions and replicas were set.", + "name": "Use the legacy default sink properties for the sink topic if default partitions and replicas were set", "statements": [ "CREATE STREAM TEST WITH (kafka_topic='input', value_format='AVRO');", "CREATE STREAM S as SELECT * FROM test;" From 1146aa5c095a7acf73f20f25cfeb7db1388692ad Mon Sep 17 00:00:00 2001 From: Steven Zhang <35498506+stevenpyzhang@users.noreply.github.com> Date: Mon, 16 Dec 2019 13:57:53 -0800 Subject: [PATCH 040/123] feat: add source statement to SourceDescription (#4134) --- .../confluent/ksql/cli/console/Console.java | 1 + .../ksql/cli/console/ConsoleTest.java | 19 +++++-- .../rest/entity/SourceDescriptionFactory.java | 3 +- .../DescribeConnectorExecutorTest.java | 2 + .../ksql/rest/entity/SourceDescription.java | 56 ++++++++++++++----- .../rest/entity/SourceDescriptionTest.java | 54 ++++++++++++++++++ 6 files changed, 113 insertions(+), 22 deletions(-) create mode 100644 ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java index a6a0ce7e3fd2..f27bcffd0ba7 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java @@ -570,6 +570,7 @@ private void printSourceDescription(final SourceDescription source) { writer().println(String.format("%-20s : %s", "Type", source.getType())); printTopicInfo(source); + writer().println(String.format("%-20s : %s", "Statement", source.getStatement())); writer().println(""); printSchema(source.getFields(), source.getKey()); diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java index 35cc00762dc4..21e196b14faf 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java @@ -121,7 +121,8 @@ public class ConsoleTest { "avro", "kadka-topic", 2, - 1 + 1, + "statement" ); @Parameterized.Parameters(name = "{0}") @@ -356,7 +357,8 @@ public void testPrintSourceDescription() { "avro", "kadka-topic", 1, - 1 + 1, + "sql statement" ), Collections.emptyList() ) @@ -478,7 +480,8 @@ public void testPrintSourceDescription() { + " \"format\" : \"avro\",\n" + " \"topic\" : \"kadka-topic\",\n" + " \"partitions\" : 1,\n" - + " \"replication\" : 1\n" + + " \"replication\" : 1,\n" + + " \"statement\" : \"sql statement\"\n" + " },\n" + " \"warnings\" : [ ]\n" + "} ]\n")); @@ -620,7 +623,8 @@ public void testPrintConnectorDescription() { + " \"format\" : \"avro\",\n" + " 
\"topic\" : \"kadka-topic\",\n" + " \"partitions\" : 2,\n" - + " \"replication\" : 1\n" + + " \"replication\" : 1,\n" + + " \"statement\" : \"statement\"\n" + " } ],\n" + " \"topics\" : [ \"a-jdbc-topic\" ],\n" + " \"warnings\" : [ ]\n" @@ -997,7 +1001,8 @@ public void shouldPrintTopicDescribeExtended() { true, "avro", "kadka-topic", - 2, 1 + 2, 1, + "sql statement text" ), Collections.emptyList() )) @@ -1055,7 +1060,8 @@ public void shouldPrintTopicDescribeExtended() { + " \"format\" : \"avro\",\n" + " \"topic\" : \"kadka-topic\",\n" + " \"partitions\" : 2,\n" - + " \"replication\" : 1\n" + + " \"replication\" : 1,\n" + + " \"statement\" : \"sql statement text\"\n" + " },\n" + " \"warnings\" : [ ]\n" + "} ]\n")); @@ -1068,6 +1074,7 @@ public void shouldPrintTopicDescribeExtended() { + "Timestamp field : 2000-01-01\n" + "Value format : avro\n" + "Kafka topic : kadka-topic (partitions: 2, replication: 1)\n" + + "Statement : sql statement text\n" + "\n" + " Field | Type \n" + "-------------------------------------\n" diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescriptionFactory.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescriptionFactory.java index 1c0ca86be7ce..ea357831049a 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescriptionFactory.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescriptionFactory.java @@ -57,7 +57,8 @@ public static SourceDescription create( format, dataSource.getKafkaTopicName(), topicDescription.map(td -> td.partitions().size()).orElse(0), - topicDescription.map(td -> td.partitions().get(0).replicas().size()).orElse(0) + topicDescription.map(td -> td.partitions().get(0).replicas().size()).orElse(0), + dataSource.getSqlExpression() ); } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutorTest.java index f6f5eeaaca55..103957867e7a 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutorTest.java @@ -76,6 +76,7 @@ public class DescribeConnectorExecutorTest { private static final String TOPIC = "kafka-topic"; + private static final String STATEMENT = "statement"; private static final String CONNECTOR_NAME = "connector"; private static final String CONNECTOR_CLASS = "io.confluent.ConnectorClazz"; @@ -122,6 +123,7 @@ public void setUp() { when(serviceContext.getAdminClient()).thenReturn(adminClient); when(metaStore.getAllDataSources()).thenReturn(ImmutableMap.of(SourceName.of("source"), source)); when(source.getKafkaTopicName()).thenReturn(TOPIC); + when(source.getSqlExpression()).thenReturn(STATEMENT); when(source.getKsqlTopic()).thenReturn( new KsqlTopic( TOPIC, diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java index 5066c778fbd8..19416348a798 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java @@ -43,6 +43,7 @@ public class SourceDescription { private final String topic; private final int partitions; private final int replication; + private final String statement; // 
CHECKSTYLE_RULES.OFF: ParameterNumberCheck @JsonCreator @@ -60,23 +61,32 @@ public SourceDescription( @JsonProperty("format") final String format, @JsonProperty("topic") final String topic, @JsonProperty("partitions") final int partitions, - @JsonProperty("replication") final int replication + @JsonProperty("replication") final int replication, + @JsonProperty("statement") final String statement ) { // CHECKSTYLE_RULES.ON: ParameterNumberCheck - this.name = name; - this.readQueries = Collections.unmodifiableList(readQueries); - this.writeQueries = Collections.unmodifiableList(writeQueries); - this.fields = Collections.unmodifiableList(fields); - this.type = type; - this.key = key; - this.timestamp = timestamp; - this.statistics = statistics; - this.errorStats = errorStats; - this.extended = extended; - this.format = format; - this.topic = topic; + this.name = Objects.requireNonNull(name, "name"); + this.readQueries = + Collections.unmodifiableList(Objects.requireNonNull(readQueries, "readQueries")); + this.writeQueries = + Collections.unmodifiableList(Objects.requireNonNull(writeQueries, "writeQueries")); + this.fields = + Collections.unmodifiableList(Objects.requireNonNull(fields, "fields")); + this.type = Objects.requireNonNull(type, "type"); + this.key = Objects.requireNonNull(key, "key"); + this.timestamp = Objects.requireNonNull(timestamp, "timestamp"); + this.statistics = Objects.requireNonNull(statistics, "statistics"); + this.errorStats = Objects.requireNonNull(errorStats, "errorStats"); + this.extended = Objects.requireNonNull(extended, "extended"); + this.format = Objects.requireNonNull(format, "format"); + this.topic = Objects.requireNonNull(topic, "topic"); this.partitions = partitions; this.replication = replication; + this.statement = Objects.requireNonNull(statement, "statement"); + } + + public String getStatement() { + return statement; } public int getPartitions() { @@ -157,7 +167,7 @@ private boolean equals2(final SourceDescription that) { if (!Objects.equals(errorStats, that.errorStats)) { return false; } - return true; + return Objects.equals(statement, that.statement); } @Override @@ -189,6 +199,22 @@ public boolean equals(final Object o) { @Override public int hashCode() { - return Objects.hash(name, fields, type, key, timestamp); + return Objects.hash( + name, + readQueries, + writeQueries, + fields, + type, + key, + timestamp, + statistics, + errorStats, + extended, + format, + topic, + partitions, + replication, + statement + ); } } diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java new file mode 100644 index 000000000000..ba4c587c3caa --- /dev/null +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */
+
+package io.confluent.ksql.rest.entity;
+
+import com.google.common.testing.EqualsTester;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.util.Collections;
+
+@RunWith(MockitoJUnitRunner.class)
+public class SourceDescriptionTest {
+
+  private static final String SOME_STRING = "some string";
+  private static final int SOME_INT = 3;
+  private static final boolean SOME_BOOL = true;
+
+  @Mock
+  private RunningQuery runningQuery;
+
+  @Mock
+  private FieldInfo fieldInfo;
+
+  @Test
+  public void shouldImplementHashCodeAndEqualsProperty() {
+    new EqualsTester()
+        .addEqualityGroup(
+            new SourceDescription(
+                SOME_STRING, Collections.singletonList(runningQuery), Collections.singletonList(runningQuery),
+                Collections.singletonList(fieldInfo), SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING,
+                SOME_STRING, SOME_BOOL, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, SOME_STRING),
+            new SourceDescription(
+                SOME_STRING, Collections.singletonList(runningQuery), Collections.singletonList(runningQuery),
+                Collections.singletonList(fieldInfo), SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING,
+                SOME_STRING, SOME_BOOL, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, SOME_STRING)
+        )
+        .testEquals();
+  }
+}

From 2d5e68042fbce95240dfcaeeaf2daa1b9b598528 Mon Sep 17 00:00:00 2001
From: Almog Gavra
Date: Mon, 16 Dec 2019 15:34:30 -0800
Subject: [PATCH 041/123] feat: add COUNT_DISTINCT and allow generics in UDAFs (#4150)

---
 ksql-engine/pom.xml                           |   5 +
 .../io/confluent/ksql/function/UdafTypes.java |   8 +-
 .../function/udaf/count/CountDistinct.java    |  98 ++++++++++++++
 .../udaf/count/CountDistinctKudafTest.java    | 108 ++++++++++++++++++
 .../count-distinct.json                       |  27 +++++
 pom.xml                                       |   7 ++
 6 files changed, 247 insertions(+), 6 deletions(-)
 create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/function/udaf/count/CountDistinct.java
 create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/function/udaf/count/CountDistinctKudafTest.java
 create mode 100644 ksql-functional-tests/src/test/resources/query-validation-tests/count-distinct.json

diff --git a/ksql-engine/pom.xml b/ksql-engine/pom.xml
index dc97e6b85659..858a8feb004f 100644
--- a/ksql-engine/pom.xml
+++ b/ksql-engine/pom.xml
@@ -130,6 +130,11 @@
       <version>1.9.0</version>
     </dependency>
 
+    <dependency>
+      <groupId>com.clearspring.analytics</groupId>
+      <artifactId>stream</artifactId>
+    </dependency>
+
 
 
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/UdafTypes.java b/ksql-engine/src/main/java/io/confluent/ksql/function/UdafTypes.java
index eb5a2e1398cf..3471356f5031 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/function/UdafTypes.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/function/UdafTypes.java
@@ -25,6 +25,7 @@
 import java.lang.reflect.Method;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
+import java.lang.reflect.TypeVariable;
 import java.math.BigDecimal;
 import java.util.List;
 import java.util.Map;
@@ -84,11 +85,6 @@ class UdafTypes {
   ParameterInfo getInputSchema(final String inSchema) {
     validateStructAnnotation(inputType, inSchema, "paramSchema");
     final ParamType inputSchema = getSchemaFromType(inputType, inSchema);
-    //Currently, aggregate functions cannot have reified types as input parameters.
-    if (!GenericsUtil.constituentGenerics(inputSchema).isEmpty()) {
-      throw new KsqlException("Generic type parameters containing reified types are not currently"
-          + " supported. 
" + functionInfo); - } return new ParameterInfo("val", inputSchema, "", false); } @@ -103,7 +99,7 @@ ParamType getOutputSchema(final String outSchema) { } private void validateTypes(final Type t) { - if (isUnsupportedType((Class) getRawType(t))) { + if (!(t instanceof TypeVariable) && isUnsupportedType((Class) getRawType(t))) { throw new KsqlException(String.format(invalidClassErrorMsg, t)); } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/count/CountDistinct.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/count/CountDistinct.java new file mode 100644 index 000000000000..284a1caa0794 --- /dev/null +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/count/CountDistinct.java @@ -0,0 +1,98 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"; you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.function.udaf.count; + +import com.clearspring.analytics.stream.cardinality.HyperLogLog; +import com.clearspring.analytics.stream.cardinality.RegisterSet; +import com.google.common.primitives.Ints; +import io.confluent.ksql.function.udaf.Udaf; +import io.confluent.ksql.function.udaf.UdafDescription; +import io.confluent.ksql.function.udaf.UdafFactory; +import java.util.List; + +@UdafDescription( + name = "COUNT_DISTINCT", + description = CountDistinct.DESCRIPTION +) +public final class CountDistinct { + + static final String DESCRIPTION = "This function returns the number of items found in a group. 
" + + "The implementation is probabilistic with a typical accuracy (standard error) of less " + + "than 1%."; + + // magic number causes accuracy < .01 - we can consider making + // this configurable if the need arises + private static final int M = 1 << 14; + private static final int LOG_2_M = 14; + + private CountDistinct() { + } + + // NOTE: since our UDAF framework requires the aggregate values to + // be serializable, and we don't support serialization of native int[], + // this implementation can be optimized by avoiding conversions between + // int[] and List - since RegisterSet requires an int[], we would + // need to duplicate a lot of code to get this to be zero-copy + private static Udaf, Long> countDistinct() { + return new Udaf, Long>() { + + @Override + public List initialize() { + return Ints.asList(new int[RegisterSet.getSizeForCount(M)]); + } + + @Override + public List aggregate(T current, List aggregate) { + if (current == null) { + return aggregate; + } + + // this operation updates the underlying bytes + final int[] ints = Ints.toArray(aggregate); + final RegisterSet set = new RegisterSet(M, ints); + + // this modifies the underlying ints + toHyperLogLog(set).offer(current); + + return Ints.asList(ints); + } + + @Override + public List merge(List aggOne, List aggTwo) { + final RegisterSet registerSet = new RegisterSet(M, Ints.toArray(aggOne)); + registerSet.merge(new RegisterSet(M, Ints.toArray(aggTwo))); + + return Ints.asList(registerSet.bits()); + } + + @Override + public Long map(List agg) { + return toHyperLogLog(new RegisterSet(M, Ints.toArray(agg))).cardinality(); + } + }; + } + + @SuppressWarnings("deprecation") + private static HyperLogLog toHyperLogLog(final RegisterSet set) { + return new HyperLogLog(LOG_2_M, set); + } + + @UdafFactory(description = "Count distinct") + public static Udaf, Long> distinct() { + return countDistinct(); + } + +} diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/count/CountDistinctKudafTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/count/CountDistinctKudafTest.java new file mode 100644 index 000000000000..f00fd67c78fb --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/count/CountDistinctKudafTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"; you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.function.udaf.count; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; + +import com.google.common.primitives.Ints; +import io.confluent.ksql.function.udaf.Udaf; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.Test; + +public class CountDistinctKudafTest { + + @Test + public void shouldCountStrings() { + // Given: + final Udaf, Long> udaf = CountDistinct.distinct(); + final String[] values = IntStream + .range(0, 100) + .mapToObj(i -> String.valueOf(i % 4)) + .toArray(String[]::new); + + List agg = udaf.initialize(); + + // When: + for (String value : values) { + agg = udaf.aggregate(value, agg); + } + + // Then: + assertThat(udaf.map(agg), is(4L)); + } + + @Test + public void shouldCountList() { + // Given: + final Udaf, List, Long> udaf = CountDistinct.distinct(); + final List> values = IntStream + .range(0, 100) + .mapToObj(i -> Ints.asList(i % 4)) + .collect(Collectors.toList()); + + List agg = udaf.initialize(); + + // When: + for (List value : values) { + agg = udaf.aggregate(value, agg); + } + + // Then: + assertThat(udaf.map(agg), is(4L)); + } + + @Test + public void shouldIgnoreNulls() { + // Given: + final Udaf, Long> udaf = CountDistinct.distinct(); + List agg = udaf.initialize(); + + // When: + agg = udaf.aggregate(null, agg); + + // Then: + assertThat(udaf.map(agg), is(0L)); + } + + @Test + public void shouldMerge() { + // Given: + final Udaf, Long> udaf = CountDistinct.distinct(); + final String[] values1 = IntStream + .range(0, 100) + .mapToObj(i -> String.valueOf(i % 4)) + .toArray(String[]::new); + + List agg1 = udaf.initialize(); + List agg2 = udaf.initialize(); + + // When: + for (String value : values1) { + agg1 = udaf.aggregate(value, agg1); + } + + for (String value : new String[]{"5"}) { + agg2 = udaf.aggregate(value, agg2); + } + + // Then: + assertThat(udaf.map(udaf.merge(agg1, agg2)), is(5L)); + } + +} \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/count-distinct.json b/ksql-functional-tests/src/test/resources/query-validation-tests/count-distinct.json new file mode 100644 index 000000000000..fc8bce8e43e5 --- /dev/null +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/count-distinct.json @@ -0,0 +1,27 @@ +{ + "tests": [ + { + "name": "count distinct", + "statements": [ + "CREATE STREAM TEST (ID varchar, NAME varchar) WITH (kafka_topic='test_topic', value_format='JSON');", + "CREATE TABLE S2 as SELECT id, count_distinct(name) as count FROM test group by id;" + ], + "inputs": [ + {"topic": "test_topic", "key": "0", "value": {"id": "foo", "name": "one"}}, + {"topic": "test_topic", "key": "0", "value": {"id": "foo", "name": "two"}}, + {"topic": "test_topic", "key": "0", "value": {"id": "foo", "name": "one"}}, + {"topic": "test_topic", "key": "0", "value": {"id": "foo", "name": "two"}}, + {"topic": "test_topic", "key": "0", "value": {"id": "bar", "name": "one"}}, + {"topic": "test_topic", "key": "0", "value": {"id": "foo", "name": null}} + ], + "outputs": [ + {"topic": "S2", "key": "foo" ,"value": {"ID": "foo", "COUNT": 1}}, + {"topic": "S2", "key": "foo" ,"value": {"ID": "foo", "COUNT": 2}}, + {"topic": "S2", "key": "foo" ,"value": {"ID": "foo", "COUNT": 2}}, + {"topic": "S2", "key": "foo" ,"value": {"ID": "foo", "COUNT": 2}}, + {"topic": "S2", "key": "bar" ,"value": {"ID": "bar", "COUNT": 1}}, + {"topic": "S2", "key": "foo" ,"value": {"ID": 
"foo", "COUNT": 2}} + ] + } + ] +} \ No newline at end of file diff --git a/pom.xml b/pom.xml index e43ad21c0705..99fc03c77281 100644 --- a/pom.xml +++ b/pom.xml @@ -115,6 +115,7 @@ 0.2.2 2.9.0 2.24.0 + 2.9.5 true true @@ -372,6 +373,12 @@ ${javax-validation.version} + + com.clearspring.analytics + stream + ${clearspring-analytics.version} + + junit From d595985853703f19611bac1a63957b447260b38e Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Tue, 17 Dec 2019 14:36:49 +0000 Subject: [PATCH 042/123] Adds support for using primitive types in joins. (#4132) * chore: primitive key support in JOINs Adds support for using primitive types in joins. BREAKING CHANGE: Some existing joins may now fail and the type of `ROWKEY` in the result schema of joins may have changed. When `ROWKEY` was always a `STRING` it was possible to join an `INTEGER` column with a `BIGINT` column. This is no longer the case. A `JOIN` requires the join columns to be of the same type. (See https://github.com/confluentinc/ksql/issues/4130 which tracks adding support for being able to `CAST` join criteria). Where joining on two `INT` columns would previously have resulted in a schema containing `ROWKEY STRING KEY`, it would not result in `ROWKEY INT KEY`. --- .../ksql/planner/plan/DataSourceNode.java | 2 +- .../confluent/ksql/planner/plan/JoinNode.java | 133 +++++++----- .../physical/PhysicalPlanBuilderTest.java | 12 +- .../ksql/planner/LogicalPlannerTest.java | 8 +- .../PlanSourceExtractorVisitorTest.java | 2 +- .../ksql/planner/plan/JoinNodeTest.java | 162 ++++++--------- .../KsqlStructuredDataOutputNodeTest.java | 4 - .../ksql/structured/SchemaKStreamTest.java | 118 +---------- .../ksql/structured/SchemaKTableTest.java | 26 +-- .../query-validation-tests/joins.json | 196 ++++++++++++++++-- .../confluent/ksql/util/MetaStoreFixture.java | 4 +- .../execution/streams/JoinParamsFactory.java | 33 ++- .../streams/JoinParamsFactoryTest.java | 72 ++++++- 13 files changed, 425 insertions(+), 347 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/DataSourceNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/DataSourceNode.java index aa775549eb5c..399787de615c 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/DataSourceNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/DataSourceNode.java @@ -95,7 +95,7 @@ public DataSource getDataSource() { return dataSource; } - SourceName getAlias() { + public SourceName getAlias() { return alias; } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java index 358a0ee390bb..5f07e36f585a 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java @@ -25,13 +25,12 @@ import io.confluent.ksql.execution.streams.JoinParamsFactory; import io.confluent.ksql.metastore.model.DataSource.DataSourceType; import io.confluent.ksql.metastore.model.KeyField; -import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.tree.WithinExpression; import io.confluent.ksql.schema.ksql.Column; +import io.confluent.ksql.schema.ksql.Column.Namespace; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.LogicalSchema; -import io.confluent.ksql.schema.ksql.SqlBaseType; import 
io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.structured.SchemaKStream; @@ -93,11 +92,7 @@ public JoinNode( ? left.getKeyField() : KeyField.of(leftKeyCol.ref()); - this.schema = JoinParamsFactory.createSchema(left.getSchema(), right.getSchema()); - - if (schema.key().get(0).type().baseType() != SqlBaseType.STRING) { - throw new KsqlException("JOIN is not supported with non-STRING keys"); - } + this.schema = buildJoinSchema(left, leftJoinFieldName, right, rightJoinFieldName); } @Override @@ -237,11 +232,7 @@ SchemaKStream buildStream( } @SuppressWarnings("unchecked") - SchemaKTable buildTable( - final PlanNode node, - final ColumnRef joinFieldName, - final SourceName tableName - ) { + SchemaKTable buildTable(final PlanNode node) { final SchemaKStream schemaKStream = node.buildStream( builder.withKsqlConfig(builder.getKsqlConfig() .cloneWithPropertyOverwrite(Collections.singletonMap( @@ -252,37 +243,7 @@ SchemaKTable buildTable( throw new RuntimeException("Expected to find a Table, found a stream instead."); } - final Optional keyColumn = schemaKStream - .getKeyField() - .resolve(schemaKStream.getSchema()); - - final ColumnRef rowKey = ColumnRef.of( - tableName, - SchemaUtil.ROWKEY_NAME - ); - - final boolean namesMatch = keyColumn - .map(field -> field.ref().equals(joinFieldName)) - .orElse(false); - - if (namesMatch || joinFieldName.equals(rowKey)) { - return (SchemaKTable) schemaKStream; - } - - if (!keyColumn.isPresent()) { - throw new KsqlException( - "Source table (" + tableName.name() + ") has no key column defined. " - + "Only 'ROWKEY' is supported in the join criteria." - ); - } - - throw new KsqlException( - "Source table (" + tableName.toString(FormatOptions.noEscape()) + ") key column (" - + keyColumn.get().ref().toString(FormatOptions.noEscape()) + ") " - + "is not the column used in the join criteria (" - + joinFieldName.toString(FormatOptions.noEscape()) + "). " - + "Only the table's key column or 'ROWKEY' is supported in the join criteria." - ); + return (SchemaKTable) schemaKStream; } @SuppressWarnings("unchecked") @@ -378,8 +339,7 @@ public SchemaKStream join() { + " the WITHIN clause) and try to execute your join again."); } - final SchemaKTable rightTable = buildTable( - joinNode.getRight(), joinNode.rightJoinFieldName, joinNode.right.getAlias()); + final SchemaKTable rightTable = buildTable(joinNode.getRight()); final SchemaKStream leftStream = buildStream( joinNode.getLeft(), joinNode.leftJoinFieldName); @@ -428,10 +388,8 @@ public SchemaKTable join() { + "join again."); } - final SchemaKTable leftTable = buildTable( - joinNode.getLeft(), joinNode.leftJoinFieldName, joinNode.left.getAlias()); - final SchemaKTable rightTable = buildTable( - joinNode.getRight(), joinNode.rightJoinFieldName, joinNode.right.getAlias()); + final SchemaKTable leftTable = buildTable(joinNode.getLeft()); + final SchemaKTable rightTable = buildTable(joinNode.getRight()); switch (joinNode.joinType) { case LEFT: @@ -465,4 +423,81 @@ private static DataSourceType calculateSinkType( ? 
DataSourceType.KTABLE : DataSourceType.KSTREAM; } + + private static LogicalSchema buildJoinSchema( + final DataSourceNode left, + final ColumnRef leftJoinFieldName, + final DataSourceNode right, + final ColumnRef rightJoinFieldName + ) { + final LogicalSchema leftSchema = selectKey(left, leftJoinFieldName); + final LogicalSchema rightSchema = selectKey(right, rightJoinFieldName); + + return JoinParamsFactory.createSchema(leftSchema, rightSchema); + } + + /** + * Adjust the schema to take into account any change in key columns. + * + * @param source the source node + * @param joinColumnRef the join column + * @return the true source schema after any change of key columns. + */ + private static LogicalSchema selectKey( + final DataSourceNode source, + final ColumnRef joinColumnRef + ) { + final LogicalSchema sourceSchema = source.getSchema(); + + final Column joinCol = sourceSchema.findColumn(joinColumnRef) + .orElseThrow(() -> new KsqlException("Unknown join column: " + joinColumnRef)); + + if (sourceSchema.key().size() != 1) { + throw new UnsupportedOperationException("Only single key columns supported"); + } + + if (joinCol.namespace() == Namespace.KEY) { + // Join column is only key column, so no change of key columns required: + return sourceSchema; + } + + final Optional keyColumn = source + .getKeyField() + .resolve(sourceSchema); + + if (keyColumn.isPresent() && keyColumn.get().equals(joinCol)) { + // Join column is KEY field, which is an alias for the only key column, so no change of key + // columns required: + return sourceSchema; + } + + // Change of key columns required + + if (source.getDataSourceType() == DataSourceType.KTABLE) { + // Tables do not support rekey: + final String sourceName = source.getDataSource().getName().toString(FormatOptions.noEscape()); + + if (!keyColumn.isPresent()) { + throw new KsqlException( + "Invalid join criteria: Source table (" + sourceName + ") has no key column " + + "defined. Only 'ROWKEY' is supported in the join criteria for a TABLE." + ); + } + + throw new KsqlException( + "Invalid join criteria: Source table " + + "(" + sourceName + ") key column " + + "(" + keyColumn.get().ref().toString(FormatOptions.noEscape()) + ") " + + "is not the column used in the join criteria (" + + joinCol.ref().toString(FormatOptions.noEscape()) + "). " + + "Only the table's key column or 'ROWKEY' is supported in the join criteria " + + "for a TABLE." 
+ ); + } + + return LogicalSchema.builder() + .keyColumn(source.getAlias(), SchemaUtil.ROWKEY_NAME, joinCol.type()) + .valueColumns(sourceSchema.value()) + .build(); + } } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java index 6890b89091a3..fad0339c4198 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java @@ -68,19 +68,19 @@ public class PhysicalPlanBuilderTest { + "WITH (KAFKA_TOPIC = 'test1', VALUE_FORMAT = 'JSON');"; private static final String CREATE_STREAM_TEST2 = "CREATE STREAM TEST2 " - + "(ID BIGINT, COL0 VARCHAR, COL1 DOUBLE) " + + "(ROWKEY BIGINT KEY, ID BIGINT, COL0 VARCHAR, COL1 BIGINT) " + " WITH (KAFKA_TOPIC = 'test2', VALUE_FORMAT = 'JSON', KEY='ID');"; private static final String CREATE_STREAM_TEST3 = "CREATE STREAM TEST3 " - + "(ID BIGINT, COL0 VARCHAR, COL1 DOUBLE) " + + "(ROWKEY BIGINT KEY, ID BIGINT, COL0 BIGINT, COL1 DOUBLE) " + " WITH (KAFKA_TOPIC = 'test3', VALUE_FORMAT = 'JSON', KEY='ID');"; private static final String CREATE_TABLE_TEST4 = "CREATE TABLE TEST4 " - + "(ID BIGINT, COL0 VARCHAR, COL1 DOUBLE) " + + "(ROWKEY BIGINT KEY, ID BIGINT, COL0 VARCHAR, COL1 DOUBLE) " + " WITH (KAFKA_TOPIC = 'test4', VALUE_FORMAT = 'JSON', KEY='ID');"; private static final String CREATE_TABLE_TEST5 = "CREATE TABLE TEST5 " - + "(ID BIGINT, COL0 VARCHAR, COL1 DOUBLE) " + + "(ROWKEY BIGINT KEY, ID BIGINT, COL0 VARCHAR, COL1 DOUBLE) " + " WITH (KAFKA_TOPIC = 'test5', VALUE_FORMAT = 'JSON', KEY='ID');"; private static final String CREATE_STREAM_TEST6 = "CREATE STREAM TEST6 " @@ -316,7 +316,7 @@ public void shouldRepartitionLeftStreamIfNotCorrectKey() { .get(0); // Then: - assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [ROWKEY DOUBLE KEY, TEST2.")); + assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [ROWKEY BIGINT KEY, TEST2.")); } @Test @@ -332,7 +332,7 @@ public void shouldRepartitionRightStreamIfNotCorrectKey() { .get(0); // Then: - assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [ROWKEY STRING KEY, TEST3.")); + assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [ROWKEY BIGINT KEY, TEST3.")); } @Test diff --git a/ksql-engine/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java b/ksql-engine/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java index 1be149612fb8..c3d85e440171 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java @@ -87,7 +87,7 @@ public void testSimpleQueryLogicalPlan() { @Test public void testSimpleLeftJoinLogicalPlan() { - final String simpleQuery = "SELECT t1.col1, t2.col1, t1.col4, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON t1.col1 = t2.col1 EMIT CHANGES;"; + final String simpleQuery = "SELECT t1.col1, t2.col1, t1.col4, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON t1.col0 = t2.col0 EMIT CHANGES;"; final PlanNode logicalPlan = buildLogicalPlan(simpleQuery); assertThat(logicalPlan.getSources().get(0), instanceOf(ProjectNode.class)); @@ -106,13 +106,13 @@ public void testSimpleLeftJoinFilterLogicalPlan() { final String simpleQuery = "SELECT t1.col1, t2.col1, col5, t2.col4, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON " - + "t1.col1 = t2.col1 WHERE t1.col1 > 10 AND t2.col4 = 
10.8 EMIT CHANGES;"; + + "t1.col0 = t2.col0 WHERE t1.col1 > 10 AND t2.col4 = 10.8 EMIT CHANGES;"; final PlanNode logicalPlan = buildLogicalPlan(simpleQuery); assertThat(logicalPlan.getSources().get(0), instanceOf(ProjectNode.class)); final ProjectNode projectNode = (ProjectNode) logicalPlan.getSources().get(0); - assertThat(projectNode.getKeyField().ref(), is(Optional.of(ColumnRef.withoutSource(ColumnName.of("T1_COL1"))))); + assertThat(projectNode.getKeyField().ref(), is(Optional.empty())); assertThat(projectNode.getSchema().value().size(), equalTo(5)); assertThat(projectNode.getSources().get(0), instanceOf(FilterNode.class)); @@ -183,7 +183,7 @@ public void shouldCreateStreamOutputForStreamTableJoin() { final String simpleQuery = "SELECT t1.col1, t2.col1, col5, t2.col4, t2.col2 FROM test1 t1 LEFT JOIN test2 t2 ON " - + "t1.col1 = t2.col1 WHERE t1.col1 > 10 AND t2.col4 = 10.8 EMIT CHANGES;"; + + "t1.col0 = t2.col0 WHERE t1.col1 > 10 AND t2.col4 = 10.8 EMIT CHANGES;"; final PlanNode logicalPlan = buildLogicalPlan(simpleQuery); assertThat(logicalPlan.getNodeOutputType(), equalTo(DataSourceType.KSTREAM)); } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/planner/PlanSourceExtractorVisitorTest.java b/ksql-engine/src/test/java/io/confluent/ksql/planner/PlanSourceExtractorVisitorTest.java index 10fb078bda83..98449749ed40 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/planner/PlanSourceExtractorVisitorTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/planner/PlanSourceExtractorVisitorTest.java @@ -58,7 +58,7 @@ public void shouldExtractCorrectSourceForSimpleQuery() { public void shouldExtractCorrectSourceForJoinQuery() { final PlanNode planNode = buildLogicalPlan( "SELECT t1.col1, t2.col1, t1.col4, t2.col2 FROM test1 t1 LEFT JOIN " - + "test2 t2 ON t1.col1 = t2.col1 EMIT CHANGES;"); + + "test2 t2 ON t1.col0 = t2.col0 EMIT CHANGES;"); final PlanSourceExtractorVisitor planSourceExtractorVisitor = new PlanSourceExtractorVisitor(); planSourceExtractorVisitor.process(planNode, null); final Set sourceNames = planSourceExtractorVisitor.getSourceNames(); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/JoinNodeTest.java b/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/JoinNodeTest.java index b10278e272c9..3a5ecc64bd94 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/JoinNodeTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/JoinNodeTest.java @@ -22,7 +22,6 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -45,10 +44,8 @@ import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.tree.WithinExpression; import io.confluent.ksql.planner.plan.JoinNode.JoinType; -import io.confluent.ksql.query.QueryId; import io.confluent.ksql.schema.ksql.Column; import io.confluent.ksql.schema.ksql.ColumnRef; -import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.Format; @@ -74,7 +71,6 @@ import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartitionInfo; -import org.apache.kafka.common.utils.Utils; import 
org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.TopologyDescription; @@ -92,13 +88,15 @@ public class JoinNodeTest { private static final LogicalSchema LEFT_SOURCE_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("C0"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("L1"), SqlTypes.STRING) .build(); private static final LogicalSchema RIGHT_SOURCE_SCHEMA = LogicalSchema.builder() - .valueColumn(ColumnName.of("C0"), SqlTypes.BIGINT) - .valueColumn(ColumnName.of("R1"), SqlTypes.STRING) + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) + .valueColumn(ColumnName.of("C0"), SqlTypes.STRING) + .valueColumn(ColumnName.of("R1"), SqlTypes.BIGINT) .build(); private static final SourceName LEFT_ALIAS = SourceName.of("left"); @@ -112,8 +110,6 @@ public class JoinNodeTest { .withMetaAndKeyColsInValue() .withAlias(RIGHT_ALIAS); - private static final LogicalSchema JOIN_SCHEMA = joinSchema(); - private static final Optional NO_KEY_FIELD = Optional.empty(); private static final ValueFormat VALUE_FORMAT = ValueFormat.of(FormatInfo.of(Format.JSON)); private static final ValueFormat OTHER_FORMAT = ValueFormat.of(FormatInfo.of(Format.DELIMITED)); @@ -126,13 +122,10 @@ public class JoinNodeTest { private static final KeyField leftJoinField = KeyField.of(LEFT_JOIN_FIELD_REF); - private static final KeyField rightJoinField = KeyField.of(RIGHT_JOIN_FIELD_REF); - private static final Optional WITHIN_EXPRESSION = Optional.of(new WithinExpression(10, TimeUnit.SECONDS)); private static final PlanNodeId nodeId = new PlanNodeId("join"); - private static final QueryId queryId = new QueryId("join-query"); private static final QueryContext.Stacker CONTEXT_STACKER = new QueryContext.Stacker().push(nodeId.toString()); @@ -179,9 +172,6 @@ public void setUp() { new QueryContext.Stacker() .push(inv.getArgument(0).toString())); - when(left.getAlias()).thenReturn(LEFT_ALIAS); - when(right.getAlias()).thenReturn(RIGHT_ALIAS); - when(left.getDataSourceType()).thenReturn(DataSourceType.KSTREAM); when(right.getDataSourceType()).thenReturn(DataSourceType.KTABLE); @@ -191,11 +181,11 @@ public void setUp() { when(left.getPartitions(mockKafkaTopicClient)).thenReturn(2); when(right.getPartitions(mockKafkaTopicClient)).thenReturn(2); + when(left.getKeyField()).thenReturn(KeyField.of(LEFT_JOIN_FIELD_REF)); + when(right.getKeyField()).thenReturn(KeyField.of(RIGHT_JOIN_FIELD_REF)); + setUpSource(left, VALUE_FORMAT, leftSource, "Foobar1"); setUpSource(right, OTHER_FORMAT, rightSource, "Foobar2"); - - when(leftSchemaKTable.getKeyField()).thenReturn(leftJoinField); - when(rightSchemaKTable.getKeyField()).thenReturn(rightJoinField); } @Test @@ -274,10 +264,7 @@ public void shouldHaveLeftJoin() { final Topology topology = builder.build(); final TopologyDescription.Processor leftJoin = (TopologyDescription.Processor) getNodeByName(topology, "Join"); - final List predecessors = leftJoin.predecessors().stream() - .map(TopologyDescription.Node::name).collect(Collectors.toList()); - assertThat(leftJoin.stores(), equalTo(Utils.mkSet("KafkaTopic_Right-Reduce"))); - assertThat(predecessors, equalTo(Collections.singletonList("Join-repartition-source"))); + assertThat(leftJoin.stores(), equalTo(ImmutableSet.of("KafkaTopic_Right-Reduce"))); } @Test @@ -455,29 +442,24 @@ public void shouldFailJoinIfTableCriteriaColumnIsNotKey() { final ColumnRef rightCriteriaColumn = getNonKeyColumn(RIGHT_SOURCE_SCHEMA, RIGHT_ALIAS, 
RIGHT_JOIN_FIELD_REF); - final JoinNode joinNode = new JoinNode( + // Then: + expectedException.expect(KsqlException.class); + expectedException.expectMessage( + "Source table (Foobar2) key column (right.R1) is not the column used in the join criteria (right.C0). " + + "Only the table's key column or 'ROWKEY' is supported in the join criteria for a TABLE." + ); + + // When: + new JoinNode( nodeId, Collections.emptyList(), - JoinNode.JoinType.LEFT, + JoinType.LEFT, left, right, LEFT_JOIN_FIELD_REF, rightCriteriaColumn, Optional.empty() ); - - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage(String.format( - "Source table (%s) key column (%s) is not the column used in the join criteria (%s). " - + "Only the table's key column or 'ROWKEY' is supported in the join criteria.", - RIGHT_ALIAS.toString(FormatOptions.noEscape()), - RIGHT_JOIN_FIELD_REF.toString(FormatOptions.noEscape()), - rightCriteriaColumn.toString(FormatOptions.noEscape()) - )); - - // When: - joinNode.buildStream(ksqlStreamBuilder); } @Test @@ -486,7 +468,15 @@ public void shouldFailJoinIfTableHasNoKeyAndJoinFieldIsNotRowKey() { setupStream(left, leftSchemaKStream); setupTable(right, rightSchemaKTable, NO_KEY_FIELD); - final JoinNode joinNode = new JoinNode( + // Then: + expectedException.expect(KsqlException.class); + expectedException.expectMessage( + "Source table (Foobar2) has no key column defined. " + + "Only 'ROWKEY' is supported in the join criteria for a TABLE." + ); + + // When: + new JoinNode( nodeId, Collections.emptyList(), JoinNode.JoinType.LEFT, @@ -496,16 +486,6 @@ public void shouldFailJoinIfTableHasNoKeyAndJoinFieldIsNotRowKey() { RIGHT_JOIN_FIELD_REF, Optional.empty() ); - - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage( - "Source table (" + RIGHT_ALIAS.name() + ") has no key column defined. " - + "Only 'ROWKEY' is supported in the join criteria." - ); - - // When: - joinNode.buildStream(ksqlStreamBuilder); } @Test @@ -657,10 +637,18 @@ public void shouldFailTableTableJoinIfLeftCriteriaColumnIsNotKey() { setupTable(left, leftSchemaKTable); setupTable(right, rightSchemaKTable); - final ColumnRef leftCriteriaColumn = getNonKeyColumn(LEFT_SOURCE_SCHEMA, LEFT_ALIAS, - LEFT_JOIN_FIELD_REF); + final ColumnRef leftCriteriaColumn = + getNonKeyColumn(LEFT_SOURCE_SCHEMA, LEFT_ALIAS, LEFT_JOIN_FIELD_REF); - final JoinNode joinNode = new JoinNode( + // Then: + expectedException.expect(KsqlException.class); + expectedException.expectMessage( + "Source table (Foobar1) key column (left.C0) is not the column used in the join criteria (left.L1). " + + "Only the table's key column or 'ROWKEY' is supported in the join criteria for a TABLE." + ); + + // When: + new JoinNode( nodeId, Collections.emptyList(), JoinNode.JoinType.LEFT, @@ -670,19 +658,6 @@ public void shouldFailTableTableJoinIfLeftCriteriaColumnIsNotKey() { RIGHT_JOIN_FIELD_REF, Optional.empty() ); - - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage(String.format( - "Source table (%s) key column (%s) is not the column used in the join criteria (%s). 
" - + "Only the table's key column or 'ROWKEY' is supported in the join criteria.", - LEFT_ALIAS.toString(FormatOptions.noEscape()), - LEFT_JOIN_FIELD_REF.toString(FormatOptions.noEscape()), - leftCriteriaColumn.toString(FormatOptions.noEscape()) - )); - - // When: - joinNode.buildStream(ksqlStreamBuilder); } @Test @@ -694,7 +669,15 @@ public void shouldFailTableTableJoinIfRightCriteriaColumnIsNotKey() { final ColumnRef rightCriteriaColumn = getNonKeyColumn(RIGHT_SOURCE_SCHEMA, RIGHT_ALIAS, RIGHT_JOIN_FIELD_REF); - final JoinNode joinNode = new JoinNode( + // Then: + expectedException.expect(KsqlException.class); + expectedException.expectMessage( + "Source table (Foobar2) key column (right.R1) is not the column used in the join criteria (right.C0). " + + "Only the table's key column or 'ROWKEY' is supported in the join criteria for a TABLE." + ); + + // When: + new JoinNode( nodeId, Collections.emptyList(), JoinNode.JoinType.LEFT, @@ -704,19 +687,6 @@ public void shouldFailTableTableJoinIfRightCriteriaColumnIsNotKey() { rightCriteriaColumn, Optional.empty() ); - - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage(String.format( - "Source table (%s) key column (%s) is not the column used in the join criteria (%s). " - + "Only the table's key column or 'ROWKEY' is supported in the join criteria.", - RIGHT_ALIAS.toString(FormatOptions.noEscape()), - RIGHT_JOIN_FIELD_REF.toString(FormatOptions.noEscape()), - rightCriteriaColumn.toString(FormatOptions.noEscape()) - )); - - // When: - joinNode.buildStream(ksqlStreamBuilder); } @Test @@ -845,14 +815,15 @@ public void shouldHaveFullyQualifiedJoinSchema() { // When: assertThat(joinNode.getSchema(), is(LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(LEFT_ALIAS, ColumnName.of("ROWTIME"), SqlTypes.BIGINT) - .valueColumn(LEFT_ALIAS, ColumnName.of("ROWKEY"), SqlTypes.STRING) + .valueColumn(LEFT_ALIAS, ColumnName.of("ROWKEY"), SqlTypes.BIGINT) .valueColumn(LEFT_ALIAS, ColumnName.of("C0"), SqlTypes.BIGINT) .valueColumn(LEFT_ALIAS, ColumnName.of("L1"), SqlTypes.STRING) .valueColumn(RIGHT_ALIAS, ColumnName.of("ROWTIME"), SqlTypes.BIGINT) - .valueColumn(RIGHT_ALIAS, ColumnName.of("ROWKEY"), SqlTypes.STRING) - .valueColumn(RIGHT_ALIAS, ColumnName.of("C0"), SqlTypes.BIGINT) - .valueColumn(RIGHT_ALIAS, ColumnName.of("R1"), SqlTypes.STRING) + .valueColumn(RIGHT_ALIAS, ColumnName.of("ROWKEY"), SqlTypes.BIGINT) + .valueColumn(RIGHT_ALIAS, ColumnName.of("C0"), SqlTypes.STRING) + .valueColumn(RIGHT_ALIAS, ColumnName.of("R1"), SqlTypes.BIGINT) .build() )); } @@ -924,28 +895,30 @@ public void shouldReturnCorrectSchema() { ); // Then: - assertThat(joinNode.getSchema(), is(JOIN_SCHEMA)); + assertThat(joinNode.getSchema(), is(LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) + .valueColumns(LEFT_NODE_SCHEMA.value()) + .valueColumns(RIGHT_NODE_SCHEMA.value()) + .build())); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) private void setupTable( final DataSourceNode node, - final SchemaKTable table + final SchemaKTable table ) { - when(node.buildStream(ksqlStreamBuilder)).thenReturn(table); - final LogicalSchema schema = node.getSchema(); - when(table.getSchema()).thenReturn(schema); + when(node.buildStream(ksqlStreamBuilder)).thenReturn((SchemaKTable) table); when(node.getDataSourceType()).thenReturn(DataSourceType.KTABLE); } private void setupTable( final DataSourceNode node, - final SchemaKTable table, + final SchemaKTable 
table, final Optional keyFieldName ) { setupTable(node, table); - when(table.getKeyField()).thenReturn(KeyField.of(keyFieldName)); + when(node.getKeyField()).thenReturn(KeyField.of(keyFieldName)); } @SuppressWarnings("unchecked") @@ -958,20 +931,11 @@ private void setupStream( when(node.getDataSourceType()).thenReturn(DataSourceType.KSTREAM); } - @SuppressWarnings("Duplicates") - private static LogicalSchema joinSchema() { - return LogicalSchema.builder() - .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) - .valueColumns(LEFT_NODE_SCHEMA.value()) - .valueColumns(RIGHT_NODE_SCHEMA.value()) - .build(); - } - private void buildJoin() { buildJoin( "SELECT t1.col1, t2.col1, t2.col4, col5, t2.col2 " + "FROM test1 t1 LEFT JOIN test2 t2 " - + "ON t1.col1 = t2.col0 EMIT CHANGES;" + + "ON t1.col0 = t2.col0 EMIT CHANGES;" ); } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/KsqlStructuredDataOutputNodeTest.java b/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/KsqlStructuredDataOutputNodeTest.java index 782e7f8f4d66..b0a37a99c49f 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/KsqlStructuredDataOutputNodeTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/KsqlStructuredDataOutputNodeTest.java @@ -118,10 +118,6 @@ public void before() { when(sourceStream.into(any(), any(), any(), any())) .thenReturn((SchemaKStream) sinkStream); - when(sourceStream.selectKey(any(), any())) - .thenReturn((SchemaKStream) resultWithKeySelected); - when(resultWithKeySelected.into(any(), any(), any(), any())) - .thenReturn((SchemaKStream) sinkStreamWithKeySelected); when(ksqlStreamBuilder.buildNodeContext(any())).thenAnswer(inv -> new QueryContext.Stacker() diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java index 75447d9c6d7d..88453d24e37b 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java @@ -20,7 +20,6 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; import com.google.common.collect.ImmutableList; import io.confluent.ksql.execution.builder.KsqlQueryBuilder; @@ -63,7 +62,6 @@ import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.MetaStoreFixture; import io.confluent.ksql.util.Pair; -import java.time.Duration; import java.util.Collections; import java.util.List; import java.util.Optional; @@ -85,7 +83,7 @@ public class SchemaKStreamTest { private final KsqlConfig ksqlConfig = new KsqlConfig(Collections.emptyMap()); private final MetaStore metaStore = MetaStoreFixture.getNewMetaStore(new InternalFunctionRegistry()); private final KeyField validJoinKeyField = KeyField - .of(Optional.of(ColumnRef.of(SourceName.of("left"), ColumnName.of("COL1")))); + .of(Optional.of(ColumnRef.of(SourceName.of("left"), ColumnName.of("COL0")))); private final KeyFormat keyFormat = KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA)); private final ValueFormat valueFormat = ValueFormat.of(FormatInfo.of(Format.JSON)); private final ValueFormat rightFormat = ValueFormat.of(FormatInfo.of(Format.DELIMITED)); @@ -94,7 +92,6 @@ public class SchemaKStreamTest { private final QueryContext.Stacker childContextStacker = queryContext.push("child"); private SchemaKStream initialSchemaKStream; - private 
SchemaKStream secondSchemaKStream; private SchemaKTable schemaKTable; private KsqlStream ksqlStream; private InternalFunctionRegistry functionRegistry; @@ -112,11 +109,6 @@ public void init() { functionRegistry = new InternalFunctionRegistry(); schemaResolver = new StepSchemaResolver(ksqlConfig, functionRegistry); ksqlStream = (KsqlStream) metaStore.getSource(SourceName.of("TEST1")); - final KsqlStream secondKsqlStream = (KsqlStream) metaStore.getSource(SourceName.of("ORDERS")); - secondSchemaKStream = buildSchemaKStreamForJoin( - secondKsqlStream, - mock(ExecutionStep.class) - ); final KsqlTable ksqlTable = (KsqlTable) metaStore.getSource(SourceName.of("TEST2")); schemaKTable = new SchemaKTable( tableSourceStep, @@ -421,33 +413,6 @@ public void testSelectWithExpression() { } - @Test - public void shouldReturnSchemaKStreamWithCorrectSchemaForFilter() { - // Given: - final PlanNode logicalPlan = givenInitialKStreamOf( - "SELECT col0, col2, col3 FROM test1 WHERE col0 > 100 EMIT CHANGES;"); - final FilterNode filterNode = (FilterNode) logicalPlan.getSources().get(0).getSources().get(0); - - // When: - final SchemaKStream filteredSchemaKStream = initialSchemaKStream.filter( - filterNode.getPredicate(), - childContextStacker - ); - - // Then: - assertThat(filteredSchemaKStream.getSchema().value(), contains( - valueColumn(TEST1, ColumnName.of("ROWTIME"), SqlTypes.BIGINT), - valueColumn(TEST1, ColumnName.of("ROWKEY"), SqlTypes.STRING), - valueColumn(TEST1, ColumnName.of("COL0"), SqlTypes.BIGINT), - valueColumn(TEST1, ColumnName.of("COL1"), SqlTypes.STRING), - valueColumn(TEST1, ColumnName.of("COL2"), SqlTypes.STRING), - valueColumn(TEST1, ColumnName.of("COL3"), SqlTypes.DOUBLE), - valueColumn(TEST1, ColumnName.of("COL4"), SqlTypes.array(SqlTypes.DOUBLE)), - valueColumn(TEST1, ColumnName.of("COL5"), SqlTypes.map(SqlTypes.DOUBLE)) - )); - - } - @Test public void shouldRewriteTimeComparisonInFilter() { // Given: @@ -745,77 +710,6 @@ SchemaKStream join( ); } - @Test - public void shouldBuildStepForStreamStreamJoin() { - // Given: - final SchemaKStream initialSchemaKStream = buildSchemaKStreamForJoin(ksqlStream); - final JoinWindows joinWindow = JoinWindows.of(Duration.ofMillis(10L)); - - final List> cases = ImmutableList.of( - Pair.of(JoinType.LEFT, initialSchemaKStream::leftJoin), - Pair.of(JoinType.INNER, initialSchemaKStream::join), - Pair.of(JoinType.OUTER, initialSchemaKStream::outerJoin) - ); - - for (final Pair testcase : cases) { - final SchemaKStream joinedKStream = testcase.right.join( - secondSchemaKStream, - validJoinKeyField, - joinWindow, - valueFormat, - rightFormat, - childContextStacker - ); - - // Then: - assertThat( - joinedKStream.getSourceStep(), - equalTo( - ExecutionStepFactory.streamStreamJoin( - childContextStacker, - testcase.left, - Formats.of(keyFormat, valueFormat, SerdeOption.none()), - Formats.of(keyFormat, rightFormat, SerdeOption.none()), - initialSchemaKStream.getSourceStep(), - secondSchemaKStream.getSourceStep(), - joinWindow - ) - ) - ); - } - } - - @Test - public void shouldBuildSchemaForStreamStreamJoin() { - // Given: - final SchemaKStream initialSchemaKStream = buildSchemaKStreamForJoin(ksqlStream); - final JoinWindows joinWindow = JoinWindows.of(Duration.ofMillis(10L)); - - final List> cases = ImmutableList.of( - Pair.of(JoinType.LEFT, initialSchemaKStream::leftJoin), - Pair.of(JoinType.INNER, initialSchemaKStream::join), - Pair.of(JoinType.OUTER, initialSchemaKStream::outerJoin) - ); - - for (final Pair testcase : cases) { - final SchemaKStream joinedKStream 
= testcase.right.join( - secondSchemaKStream, - validJoinKeyField, - joinWindow, - valueFormat, - rightFormat, - childContextStacker - ); - - // Then: - assertThat(joinedKStream.getSchema(), is(schemaResolver.resolve( - joinedKStream.getSourceStep(), - initialSchemaKStream.getSchema(), - secondSchemaKStream.getSchema())) - ); - } - } - @FunctionalInterface private interface StreamTableJoin { SchemaKStream join( @@ -909,16 +803,6 @@ private SchemaKStream buildSchemaKStreamForJoin(final KsqlStream ksqlStream) { ); } - private SchemaKStream buildSchemaKStreamForJoin( - final KsqlStream ksqlStream, - final ExecutionStep sourceStep) { - return buildSchemaKStream( - ksqlStream.getSchema().withAlias(SourceName.of("left")), - ksqlStream.getKeyField().withAlias(SourceName.of("left")), - sourceStep - ); - } - private PlanNode givenInitialKStreamOf(final String selectQuery) { final PlanNode logicalPlan = AnalysisTestUtil.buildLogicalPlan( ksqlConfig, diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java index 1c0b08ba863f..b514f3ba8d33 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java @@ -29,7 +29,6 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; @@ -77,7 +76,6 @@ import io.confluent.ksql.planner.plan.FilterNode; import io.confluent.ksql.planner.plan.PlanNode; import io.confluent.ksql.planner.plan.ProjectNode; -import io.confluent.ksql.schema.ksql.Column; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PersistenceSchema; @@ -140,7 +138,6 @@ public class SchemaKTableTest { private KeyField validKeyField; private SchemaKTable firstSchemaKTable; private SchemaKTable secondSchemaKTable; - private LogicalSchema joinSchema; private StepSchemaResolver schemaResolver; private final QueryContext.Stacker queryContext = new QueryContext.Stacker().push("node"); @@ -186,7 +183,6 @@ public void init() { .of(Optional.of(ColumnRef.of(ksqlTable.getName(), ColumnName.of("COL1")))); firstSchemaKTable = buildSchemaKTableForJoin(ksqlTable, mockKTable); secondSchemaKTable = buildSchemaKTableForJoin(secondKsqlTable, secondKTable); - joinSchema = getJoinSchema(ksqlTable.getSchema(), secondKsqlTable.getSchema()); when(queryBuilder.getKsqlConfig()).thenReturn(ksqlConfig); when(queryBuilder.getFunctionRegistry()).thenReturn(functionRegistry); @@ -384,7 +380,7 @@ public void shouldBuildSchemaKTableWithCorrectSchemaForFilter() { final SourceName test2 = SourceName.of("TEST2"); assertThat(filteredSchemaKStream.getSchema().value(), contains( valueColumn(test2, ColumnName.of("ROWTIME"), SqlTypes.BIGINT), - valueColumn(test2, ColumnName.of("ROWKEY"), SqlTypes.STRING), + valueColumn(test2, ColumnName.of("ROWKEY"), SqlTypes.BIGINT), valueColumn(test2, ColumnName.of("COL0"), SqlTypes.BIGINT), valueColumn(test2, ColumnName.of("COL1"), SqlTypes.STRING), valueColumn(test2, ColumnName.of("COL2"), SqlTypes.STRING), @@ -609,7 +605,6 @@ public void shouldPerformTableToTableLeftJoin() { ((SchemaKTable) joinedKStream).getSourceTableStep().build(planBuilder); verify(mockKTable); 
assertThat(joinedKStream, instanceOf(SchemaKTable.class)); - assertEquals(joinSchema, joinedKStream.getSchema()); assertThat(joinedKStream.getKeyField(), is(validKeyField)); } @@ -630,7 +625,6 @@ public void shouldPerformTableToTableInnerJoin() { ((SchemaKTable) joinedKStream).getSourceTableStep().build(planBuilder); verify(mockKTable); assertThat(joinedKStream, instanceOf(SchemaKTable.class)); - assertEquals(joinSchema, joinedKStream.getSchema()); assertThat(joinedKStream.getKeyField(), is(validKeyField)); } @@ -651,7 +645,6 @@ public void shouldPerformTableToTableOuterJoin() { ((SchemaKTable) joinedKStream).getSourceTableStep().build(planBuilder); verify(mockKTable); assertThat(joinedKStream, instanceOf(SchemaKTable.class)); - assertEquals(joinSchema, joinedKStream.getSchema()); assertThat(joinedKStream.getKeyField(), is(validKeyField)); } @@ -840,23 +833,6 @@ public void shouldSetKeyOnGroupBySingleExpressionThatIsInProjection() { is(KeyField.of(ColumnRef.withoutSource(ColumnName.of("COL1"))))); } - private LogicalSchema getJoinSchema( - final LogicalSchema leftSchema, - final LogicalSchema rightSchema - ) { - final LogicalSchema.Builder schemaBuilder = LogicalSchema.builder(); - final SourceName leftAlias = ksqlTable.getName(); - final SourceName rightAlias = secondKsqlTable.getName(); - for (final Column field : leftSchema.value()) { - schemaBuilder.valueColumn(leftAlias, field.name(), field.type()); - } - - for (final Column field : rightSchema.value()) { - schemaBuilder.valueColumn(rightAlias, field.name(), field.type()); - } - return schemaBuilder.build(); - } - private List givenInitialKTableOf(final String selectQuery) { final PlanNode logicalPlan = AnalysisTestUtil.buildLogicalPlan( ksqlConfig, diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json index 3179a9cc501a..6e99039d73ee 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json @@ -74,13 +74,13 @@ {"topic": "left_topic", "key": "foo", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "T_ROWKEY": "foo","NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "T_ROWKEY": "foo", "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"T_ID": 10, "T_ROWKEY": "foo", "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "T_ROWKEY": "foo", "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "T_ROWKEY": "foo", "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"T_ID": 90, "T_ROWKEY": "foo", "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "T_ROWKEY": "foo", "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "T_ROWKEY": "foo","NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, 
"T_ROWKEY": "foo", "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "T_ROWKEY": "foo", "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "T_ROWKEY": "foo", "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "T_ROWKEY": "foo", "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"T_ID": 90, "T_ROWKEY": "foo", "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "T_ROWKEY": "foo", "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000} ], "post": { "sources": [ @@ -109,14 +109,14 @@ {"topic": "left_topic", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": "0", "value": {"ROWTIME": 0, "ROWKEY": "", "ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-right-repartition", "key": "0", "value": {"ROWTIME": 10000, "ROWKEY": "", "ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": "10", "value": {"ROWTIME": 11000, "ROWKEY": "", "ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": "0", "value": {"ROWTIME": 13000, "ROWKEY": "", "ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-right-repartition", "key": "0", "value": {"ROWTIME": 15000, "ROWKEY": "", "ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-right-repartition", "key": "100", "value": {"ROWTIME": 16000, "ROWKEY": "", "ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": "90", "value": {"ROWTIME": 17000, "ROWKEY": "", "ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": "0", "value": {"ROWTIME": 30000, "ROWKEY": "", "ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": 0, "value": {"ROWTIME": 0, "ROWKEY": "", "ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-right-repartition", "key": 0, "value": {"ROWTIME": 10000, "ROWKEY": "", "ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": 10, "value": {"ROWTIME": 11000, "ROWKEY": "", "ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": 0, "value": {"ROWTIME": 13000, "ROWKEY": "", "ID": 0, "NAME": "foo", 
"VALUE": 100}, "timestamp": 13000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-right-repartition", "key": 0, "value": {"ROWTIME": 15000, "ROWKEY": "", "ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-right-repartition", "key": 100, "value": {"ROWTIME": 16000, "ROWKEY": "", "ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": 90, "value": {"ROWTIME": 17000, "ROWKEY": "", "ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": 0, "value": {"ROWTIME": 30000, "ROWKEY": "", "ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001", "value": {"ROWTIME": 0, "ROWKEY": "", "ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000017-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000'\u0010\u0000\u0000\u0000\u0001", "value": {"ROWTIME": 10000, "ROWKEY": "", "ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "10\u0000\u0000\u0000\u0000\u0000\u0000*�\u0000\u0000\u0000\u0002", "value": {"ROWTIME": 11000, "ROWKEY": "", "ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, @@ -125,13 +125,13 @@ {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000017-store-changelog", "key": "100\u0000\u0000\u0000\u0000\u0000\u0000>�\u0000\u0000\u0000\u0003", "value": {"ROWTIME": 16000, "ROWKEY": "", "ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "90\u0000\u0000\u0000\u0000\u0000\u0000Bh\u0000\u0000\u0000\u0004", "value": {"ROWTIME": 17000, "ROWKEY": "", "ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000u0\u0000\u0000\u0000\u0005", "value": {"ROWTIME": 30000, "ROWKEY": "", "ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, - {"topic": 
"LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000} ], "post": { "sources": [ @@ -1286,7 +1286,7 @@ ], "expectedException": { "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Source table (T) has no key column defined. Only 'ROWKEY' is supported in the join criteria." + "message": "Source table (NO_KEY) has no key column defined. Only 'ROWKEY' is supported in the join criteria for a TABLE." } }, { @@ -1772,6 +1772,156 @@ {"name": "LEFT_OUTER_JOIN", "type": "stream", "keyField": "LEFT_ID"} ] } + }, + { + "name": "on non-STRING key", + "statements": [ + "CREATE STREAM INPUT_STREAM (ROWKEY BIGINT KEY, SF INT) WITH (kafka_topic='stream_topic', value_format='JSON');", + "CREATE TABLE INPUT_TABLE (ROWKEY BIGINT KEY, TF INT) WITH (kafka_topic='table_topic', value_format='JSON');", + "CREATE STREAM OUTPUT AS SELECT * FROM INPUT_STREAM S JOIN INPUT_TABLE T on S.ROWKEY = T.ROWKEY;" + ], + "inputs": [ + {"topic": "table_topic", "key": 26589, "value": {"TF": 1}, "timestamp": 0}, + {"topic": "stream_topic", "key": 12589, "value": {"SF": 0}, "timestamp": 100}, + {"topic": "table_topic", "key": 12589, "value": {"TF": 12}, "timestamp": 200}, + {"topic": "stream_topic", "key": 12589, "value": {"SF": 10}, "timestamp": 300} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 12589, "value": {"S_ROWKEY": 12589, "S_ROWTIME": 300, "S_SF": 10, "T_ROWKEY": 12589, "T_ROWTIME": 300, "T_TF": 12}, "timestamp": 300} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY BIGINT KEY, S_ROWTIME BIGINT, S_ROWKEY BIGINT, S_SF INT, T_ROWTIME BIGINT, T_ROWKEY BIGINT, T_TF INT" + } + ] + } + }, + { + "name": "on non-STRING value column", + "statements": [ + "CREATE STREAM INPUT_STREAM (ROWKEY STRING KEY, SF BIGINT) WITH (kafka_topic='stream_topic', value_format='JSON');", + "CREATE TABLE INPUT_TABLE (ROWKEY BIGINT KEY, ID BIGINT, TF INT) WITH (kafka_topic='table_topic', value_format='JSON', key='ID');", + "CREATE STREAM OUTPUT AS SELECT * FROM INPUT_STREAM S JOIN INPUT_TABLE T on S.SF = T.ID;" + ], + "inputs": [ + {"topic": "table_topic", "key": 26589, "value": {"ID": 26589, "TF": 1}, "timestamp": 0}, + {"topic": "stream_topic", "key": "a", "value": {"SF": 12589}, "timestamp": 100}, + {"topic": "table_topic", "key": 12589, "value": {"ID": 12589, "TF": 12}, "timestamp": 200}, + {"topic": "stream_topic", "key": "b", "value": {"SF": 12589}, "timestamp": 300} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 12589, "value": 
{"S_ROWKEY": "b", "S_ROWTIME": 300, "S_SF": 12589, "T_ROWKEY": 12589, "T_ROWTIME": 300, "T_ID": 12589, "T_TF": 12}, "timestamp": 300} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY BIGINT KEY, S_ROWTIME BIGINT, S_ROWKEY STRING, S_SF BIGINT, T_ROWTIME BIGINT, T_ROWKEY BIGINT, T_ID BIGINT, T_TF INT" + } + ] + } + }, + { + "name": "on non-key table column", + "statements": [ + "CREATE STREAM INPUT_STREAM (ROWKEY BIGINT KEY, SF BIGINT) WITH (kafka_topic='stream_topic', value_format='JSON');", + "CREATE TABLE INPUT_TABLE (ROWKEY BIGINT KEY, ID BIGINT, TF INT) WITH (kafka_topic='table_topic', value_format='JSON');", + "CREATE STREAM OUTPUT AS SELECT * FROM INPUT_STREAM S JOIN INPUT_TABLE T on S.ROWKEY = T.ID;" + ], + "expectedException": { + "type": "io.confluent.ksql.util.KsqlStatementException", + "message": "Invalid join criteria: Source table (INPUT_TABLE) has no key column defined. Only 'ROWKEY' is supported in the join criteria for a TABLE." + } + }, + { + "name": "on INT column - KAFKA", + "format": ["AVRO", "JSON"], + "statements": [ + "CREATE STREAM L (l0 INT, l1 INT) WITH (kafka_topic='left_topic', value_format='{FORMAT}');", + "CREATE STREAM R (r0 INT, r1 INT) WITH (kafka_topic='right_topic', value_format='{FORMAT}');", + "CREATE STREAM OUTPUT as SELECT L.ROWKEY, L1, R1 FROM L join R WITHIN 11 SECONDS ON L.l0 = R.r0;" + ], + "inputs": [ + {"topic": "left_topic", "key": "a", "value": {"L0": 10, "L1": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "b" ,"value": {"R0": 10, "R1": 2}, "timestamp": 10000} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 10, "value": {"L_ROWKEY": "a", "L1": 1, "R1": 2}, "timestamp": 10000} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "stream", "schema": "ROWKEY INT KEY, L_ROWKEY STRING, L1 INT, R1 INT"} + ] + } + }, + { + "name": "on BIGINT column - KAFKA", + "format": ["AVRO", "JSON"], + "statements": [ + "CREATE STREAM L (l0 BIGINT, l1 INT) WITH (kafka_topic='left_topic', value_format='{FORMAT}');", + "CREATE STREAM R (r0 BIGINT, r1 INT) WITH (kafka_topic='right_topic', value_format='{FORMAT}');", + "CREATE STREAM OUTPUT as SELECT L.ROWKEY, L1, R1 FROM L join R WITHIN 11 SECONDS ON L.l0 = R.r0;" + ], + "inputs": [ + {"topic": "left_topic", "key": "a", "value": {"L0": 1000000000, "L1": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "b" ,"value": {"R0": 1000000000, "R1": 2}, "timestamp": 10000} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 1000000000, "value": {"L_ROWKEY": "a", "L1": 1, "R1": 2}, "timestamp": 10000} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "stream", "schema": "ROWKEY BIGINT KEY, L_ROWKEY STRING, L1 INT, R1 INT"} + ] + } + }, + { + "name": "on DOUBLE column - KAFKA", + "format": ["AVRO", "JSON"], + "statements": [ + "CREATE STREAM L (l0 DOUBLE, l1 INT) WITH (kafka_topic='left_topic', value_format='{FORMAT}');", + "CREATE STREAM R (r0 DOUBLE, r1 INT) WITH (kafka_topic='right_topic', value_format='{FORMAT}');", + "CREATE STREAM OUTPUT as SELECT L.ROWKEY, L1, R1 FROM L join R WITHIN 11 SECONDS ON L.l0 = R.r0;" + ], + "inputs": [ + {"topic": "left_topic", "key": "a", "value": {"L0": 1.23, "L1": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "b" ,"value": {"R0": 1.23, "R1": 2}, "timestamp": 10000} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 1.23, "value": {"L_ROWKEY": "a", "L1": 1, "R1": 2}, "timestamp": 10000} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "stream", "schema": 
"ROWKEY DOUBLE KEY, L_ROWKEY STRING, L1 INT, R1 INT"} + ] + } + }, + { + "name": "on STRING column - KAFKA", + "format": ["AVRO", "JSON"], + "statements": [ + "CREATE STREAM L (l0 STRING, l1 INT) WITH (kafka_topic='left_topic', value_format='{FORMAT}');", + "CREATE STREAM R (r0 STRING, r1 INT) WITH (kafka_topic='right_topic', value_format='{FORMAT}');", + "CREATE STREAM OUTPUT as SELECT L.ROWKEY, L1, R1 FROM L join R WITHIN 11 SECONDS ON L.l0 = R.r0;" + ], + "inputs": [ + {"topic": "left_topic", "key": "a", "value": {"L0": "x", "L1": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "b" ,"value": {"R0": "x", "R1": 2}, "timestamp": 10000} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "x", "value": {"L_ROWKEY": "a", "L1": 1, "R1": 2}, "timestamp": 10000} + ], + "post": { + "sources": [ + {"name": "OUTPUT", "type": "stream", "schema": "ROWKEY STRING KEY, L_ROWKEY STRING, L1 INT, R1 INT"} + ] + } } ] } diff --git a/ksql-metastore/src/test/java/io/confluent/ksql/util/MetaStoreFixture.java b/ksql-metastore/src/test/java/io/confluent/ksql/util/MetaStoreFixture.java index c5a7fbb5301a..fb4e19b504d4 100644 --- a/ksql-metastore/src/test/java/io/confluent/ksql/util/MetaStoreFixture.java +++ b/ksql-metastore/src/test/java/io/confluent/ksql/util/MetaStoreFixture.java @@ -35,7 +35,6 @@ import io.confluent.ksql.serde.ValueFormat; import java.util.Optional; -@SuppressWarnings("OptionalGetWithoutIsPresent") public final class MetaStoreFixture { private MetaStoreFixture() { @@ -54,6 +53,7 @@ public static MutableMetaStore getNewMetaStore( final KeyFormat keyFormat = KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA)); final LogicalSchema test1Schema = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("COL0"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("COL1"), SqlTypes.STRING) .valueColumn(ColumnName.of("COL2"), SqlTypes.STRING) @@ -102,6 +102,7 @@ public static MutableMetaStore getNewMetaStore( metaStore.putSource(ksqlStream1); final LogicalSchema test2Schema = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("COL0"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("COL1"), SqlTypes.STRING) .valueColumn(ColumnName.of("COL2"), SqlTypes.STRING) @@ -178,6 +179,7 @@ public static MutableMetaStore getNewMetaStore( metaStore.putSource(ksqlStreamOrders); final LogicalSchema testTable3 = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("COL0"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("COL1"), SqlTypes.STRING) .valueColumn(ColumnName.of("COL2"), SqlTypes.STRING) diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/JoinParamsFactory.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/JoinParamsFactory.java index ec66bc96293f..76de06518608 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/JoinParamsFactory.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/JoinParamsFactory.java @@ -15,9 +15,10 @@ package io.confluent.ksql.execution.streams; +import io.confluent.ksql.schema.ksql.Column; import io.confluent.ksql.schema.ksql.LogicalSchema; -import io.confluent.ksql.schema.ksql.types.SqlTypes; -import io.confluent.ksql.util.SchemaUtil; +import io.confluent.ksql.util.KsqlException; +import java.util.List; public final class JoinParamsFactory { private JoinParamsFactory() { @@ -36,15 +37,31 @@ public static LogicalSchema createSchema( final LogicalSchema 
leftSchema, final LogicalSchema rightSchema ) { - final LogicalSchema.Builder joinSchema = LogicalSchema.builder(); + throwOnKeyMismatch(leftSchema, rightSchema); - // Hard-wire for now, until we support custom type/name of key fields: - joinSchema.keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING); + return LogicalSchema.builder() + .keyColumns(leftSchema.withoutAlias().key()) + .valueColumns(leftSchema.value()) + .valueColumns(rightSchema.value()) + .build(); + } - joinSchema.valueColumns(leftSchema.value()); + private static void throwOnKeyMismatch( + final LogicalSchema leftSchema, + final LogicalSchema rightSchema + ) { + final List leftCols = leftSchema.key(); + final List rightCols = rightSchema.key(); + if (leftCols.size() != 1 || rightCols.size() != 1) { + throw new UnsupportedOperationException("Multi-key joins not supported"); + } - joinSchema.valueColumns(rightSchema.value()); + final Column left = leftCols.get(0); + final Column right = rightCols.get(0); - return joinSchema.build(); + if (!left.type().equals(right.type())) { + throw new KsqlException("Invalid join. Key types differ: " + + left.type() + " vs " + right.type()); + } } } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/JoinParamsFactoryTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/JoinParamsFactoryTest.java index 7b15b49345d7..01e6d3f6b1f1 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/JoinParamsFactoryTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/JoinParamsFactoryTest.java @@ -1,25 +1,32 @@ package io.confluent.ksql.execution.streams; +import static io.confluent.ksql.schema.ksql.ColumnMatchers.keyColumn; import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.contains; import static org.junit.Assert.assertThat; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; +import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.SchemaUtil; -import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; public class JoinParamsFactoryTest { + private static final SourceName LEFT = SourceName.of("LEFT"); private static final SourceName RIGHT = SourceName.of("RIGHT"); + private static final LogicalSchema LEFT_SCHEMA = LogicalSchema.builder() .valueColumn(ColumnName.of("BLUE"), SqlTypes.STRING) .valueColumn(ColumnName.of("GREEN"), SqlTypes.INTEGER) .build() .withAlias(LEFT) .withMetaAndKeyColsInValue(); + private static final LogicalSchema RIGHT_SCHEMA = LogicalSchema.builder() .valueColumn(ColumnName.of("RED"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ORANGE"), SqlTypes.DOUBLE) @@ -27,16 +34,18 @@ public class JoinParamsFactoryTest { .withAlias(RIGHT) .withMetaAndKeyColsInValue(); - private JoinParams joinParams; + @Rule + public final ExpectedException expectedException = ExpectedException.none(); - @Before - public void init() { - joinParams = JoinParamsFactory.create(LEFT_SCHEMA, RIGHT_SCHEMA); - } + private JoinParams joinParams; @Test public void shouldBuildCorrectSchema() { - final LogicalSchema expected = LogicalSchema.builder() + // when: + joinParams = JoinParamsFactory.create(LEFT_SCHEMA, RIGHT_SCHEMA); + + // Then: + assertThat(joinParams.getSchema(), is(LogicalSchema.builder() .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) .valueColumn(LEFT, 
SchemaUtil.ROWTIME_NAME, SqlTypes.BIGINT) .valueColumn(LEFT, SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) @@ -46,7 +55,52 @@ public void shouldBuildCorrectSchema() { .valueColumn(RIGHT, SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) .valueColumn(RIGHT, ColumnName.of("RED"), SqlTypes.BIGINT) .valueColumn(RIGHT, ColumnName.of("ORANGE"), SqlTypes.DOUBLE) - .build(); - assertThat(joinParams.getSchema(), is(expected)); + .build()) + ); + } + + @Test + public void shouldThrowOnKeyTypeMismatch() { + // Given: + final LogicalSchema intKeySchema = LogicalSchema.builder() + .keyColumn(ColumnName.of("BOB"), SqlTypes.INTEGER) + .valueColumn(ColumnName.of("BLUE"), SqlTypes.STRING) + .valueColumn(ColumnName.of("GREEN"), SqlTypes.INTEGER) + .build() + .withAlias(LEFT) + .withMetaAndKeyColsInValue(); + + // Expect: + expectedException.expect(KsqlException.class); + expectedException.expectMessage("Invalid join. Key types differ: INTEGER vs STRING"); + + // When: + JoinParamsFactory.create(intKeySchema, RIGHT_SCHEMA); + } + + @Test + public void shouldGetKeyFromLeftSource() { + // Given: + final LogicalSchema leftSchema = LogicalSchema.builder() + .keyColumn(ColumnName.of("BOB"), SqlTypes.BIGINT) + .valueColumn(ColumnName.of("BLUE"), SqlTypes.STRING) + .build() + .withAlias(LEFT) + .withMetaAndKeyColsInValue(); + + final LogicalSchema rightSchema = LogicalSchema.builder() + .keyColumn(ColumnName.of("VIC"), SqlTypes.BIGINT) + .valueColumn(ColumnName.of("GREEN"), SqlTypes.DOUBLE) + .build() + .withAlias(RIGHT) + .withMetaAndKeyColsInValue(); + + // when: + joinParams = JoinParamsFactory.create(leftSchema, rightSchema); + + // Then: + assertThat(joinParams.getSchema().key(), contains( + keyColumn(ColumnName.of("BOB"), SqlTypes.BIGINT) + )); } } \ No newline at end of file From cedf47e23eccad9566e41b054a3ee869d03a1de4 Mon Sep 17 00:00:00 2001 From: Steven Zhang <35498506+stevenpyzhang@users.noreply.github.com> Date: Tue, 17 Dec 2019 12:20:31 -0800 Subject: [PATCH 043/123] feat: add config to make error messages configurable (#4121) * feat: add config to make error messages configurable * refactor the config * changes --- .../ksql/rest/server/KsqlRestApplication.java | 32 +++++-- .../ksql/rest/server/KsqlRestConfig.java | 14 +++ .../rest/server/resources/KsqlResource.java | 17 ++-- .../streaming/StreamedQueryResource.java | 17 ++-- .../resources/streaming/WSQueryEndpoint.java | 13 +++ .../rest/server/computation/RecoveryTest.java | 4 +- .../server/resources/KsqlResourceTest.java | 51 ++++++----- .../streaming/StreamedQueryResourceTest.java | 58 ++++-------- .../streaming/WSQueryEndpointTest.java | 31 +++++++ .../ksql/rest/DefaultErrorMessages.java | 22 ++--- .../io/confluent/ksql/rest/ErrorMessages.java | 21 +++++ .../java/io/confluent/ksql/rest/Errors.java | 43 +++++++-- .../io/confluent/ksql/rest/ErrorsTest.java | 89 +++++++++++++++++++ 13 files changed, 297 insertions(+), 115 deletions(-) rename ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/ErrorResponseUtil.java => ksql-rest-model/src/main/java/io/confluent/ksql/rest/DefaultErrorMessages.java (50%) create mode 100644 ksql-rest-model/src/main/java/io/confluent/ksql/rest/ErrorMessages.java create mode 100644 ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java index 40b894b08d29..24bff5b12850 100644 --- 
a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java @@ -39,6 +39,8 @@ import io.confluent.ksql.parser.KsqlParser.ParsedStatement; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.query.id.SpecificQueryIdGenerator; +import io.confluent.ksql.rest.ErrorMessages; +import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.client.RestResponse; import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.KsqlErrorMessage; @@ -381,6 +383,10 @@ protected void registerWebSocketEndpoints(final ServerContainer container) { final StatementParser statementParser = new StatementParser(ksqlEngine); final Optional authorizationValidator = KsqlAuthorizationValidatorFactory.create(ksqlConfigNoPort, serviceContext); + final Errors errorHandler = new Errors(restConfig.getConfiguredInstance( + KsqlRestConfig.KSQL_SERVER_ERROR_MESSAGES, + ErrorMessages.class + )); container.addEndpoint( ServerEndpointConfig.Builder @@ -403,6 +409,7 @@ public T getEndpointInstance(final Class endpointClass) { Duration.ofMillis(config.getLong( KsqlRestConfig.DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG)), authorizationValidator, + errorHandler, securityExtension, serverState ); @@ -498,6 +505,11 @@ static KsqlRestApplication buildApplication( final Optional authorizationValidator = KsqlAuthorizationValidatorFactory.create(ksqlConfig, serviceContext); + final Errors errorHandler = new Errors(restConfig.getConfiguredInstance( + KsqlRestConfig.KSQL_SERVER_ERROR_MESSAGES, + ErrorMessages.class + )); + final StreamedQueryResource streamedQueryResource = new StreamedQueryResource( ksqlEngine, commandStore, @@ -505,7 +517,17 @@ static KsqlRestApplication buildApplication( restConfig.getLong(KsqlRestConfig.STREAMED_QUERY_DISCONNECT_CHECK_MS_CONFIG)), Duration.ofMillis(restConfig.getLong(DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG)), versionChecker::updateLastRequestTime, - authorizationValidator + authorizationValidator, + errorHandler + ); + + final KsqlResource ksqlResource = new KsqlResource( + ksqlEngine, + commandStore, + Duration.ofMillis(restConfig.getLong(DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG)), + versionChecker::updateLastRequestTime, + authorizationValidator, + errorHandler ); final List managedTopics = new LinkedList<>(); @@ -526,14 +548,6 @@ static KsqlRestApplication buildApplication( metricsPrefix ); - final KsqlResource ksqlResource = new KsqlResource( - ksqlEngine, - commandStore, - Duration.ofMillis(restConfig.getLong(DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG)), - versionChecker::updateLastRequestTime, - authorizationValidator - ); - final List preconditions = restConfig.getConfiguredInstances( KsqlRestConfig.KSQL_SERVER_PRECONDITIONS, KsqlServerPrecondition.class diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java index a2e0e97ea7e2..e1971e44e749 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java @@ -15,6 +15,8 @@ package io.confluent.ksql.rest.server; +import io.confluent.ksql.rest.DefaultErrorMessages; +import io.confluent.ksql.rest.ErrorMessages; import io.confluent.ksql.util.KsqlException; import io.confluent.rest.RestConfig; import java.util.Map; @@ -64,6 +66,12 
@@ public class KsqlRestConfig extends RestConfig { + "will not start serving requests until all preconditions are satisfied. Until that time, " + "requests will return a 503 error"; + static final String KSQL_SERVER_ERROR_MESSAGES = + KSQL_CONFIG_PREFIX + "server.error.messages"; + private static final String KSQL_SERVER_ERRORS_DOC = + "A class implementing the " + ErrorMessages.class.getSimpleName() + " interface. " + + "This allows the KSQL server to return pluggable error messages."; + static final String KSQL_SERVER_ENABLE_UNCAUGHT_EXCEPTION_HANDLER = KSQL_CONFIG_PREFIX + "server.exception.uncaught.handler.enable"; @@ -136,6 +144,12 @@ public class KsqlRestConfig extends RestConfig { 15000L, Importance.LOW, KSQL_COMMAND_RUNNER_BLOCKED_THRESHHOLD_ERROR_MS_DOC + ).define( + KSQL_SERVER_ERROR_MESSAGES, + Type.CLASS, + DefaultErrorMessages.class, + Importance.LOW, + KSQL_SERVER_ERRORS_DOC ); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java index cf4cc872aaec..5258424d9793 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java @@ -43,7 +43,6 @@ import io.confluent.ksql.rest.server.validation.CustomValidators; import io.confluent.ksql.rest.server.validation.RequestValidator; import io.confluent.ksql.rest.util.CommandStoreUtil; -import io.confluent.ksql.rest.util.ErrorResponseUtil; import io.confluent.ksql.rest.util.TerminateCluster; import io.confluent.ksql.security.KsqlAuthorizationValidator; import io.confluent.ksql.services.SandboxedServiceContext; @@ -102,6 +101,7 @@ public class KsqlResource implements KsqlConfigurable { private final Optional authorizationValidator; private RequestValidator validator; private RequestHandler handler; + private Errors errorHandler; public KsqlResource( @@ -109,7 +109,8 @@ public KsqlResource( final CommandQueue commandQueue, final Duration distributedCmdResponseTimeout, final ActivenessRegistrar activenessRegistrar, - final Optional authorizationValidator + final Optional authorizationValidator, + final Errors errorHandler ) { this( ksqlEngine, @@ -117,7 +118,8 @@ public KsqlResource( distributedCmdResponseTimeout, activenessRegistrar, Injectors.DEFAULT, - authorizationValidator + authorizationValidator, + errorHandler ); } @@ -127,7 +129,8 @@ public KsqlResource( final Duration distributedCmdResponseTimeout, final ActivenessRegistrar activenessRegistrar, final BiFunction injectorFactory, - final Optional authorizationValidator + final Optional authorizationValidator, + final Errors errorHandler ) { this.ksqlEngine = Objects.requireNonNull(ksqlEngine, "ksqlEngine"); this.commandQueue = Objects.requireNonNull(commandQueue, "commandQueue"); @@ -138,6 +141,7 @@ public KsqlResource( this.injectorFactory = Objects.requireNonNull(injectorFactory, "injectorFactory"); this.authorizationValidator = Objects .requireNonNull(authorizationValidator, "authorizationValidator"); + this.errorHandler = Objects.requireNonNull(errorHandler, "errorHandler"); } @Override @@ -233,10 +237,9 @@ public Response handleKsqlStatements( } catch (final KsqlStatementException e) { return Errors.badStatement(e.getRawMessage(), e.getSqlStatement()); } catch (final KsqlException e) { - return ErrorResponseUtil.generateResponse( - e, Errors.badRequest(e)); + return errorHandler.generateResponse(e,
Errors.badRequest(e)); } catch (final Exception e) { - return ErrorResponseUtil.generateResponse( + return errorHandler.generateResponse( e, Errors.serverErrorForStatement(e, request.getKsql())); } } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java index 8d13a4b1b6b6..57f1df807025 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java @@ -35,7 +35,6 @@ import io.confluent.ksql.rest.server.resources.KsqlConfigurable; import io.confluent.ksql.rest.server.resources.KsqlRestException; import io.confluent.ksql.rest.util.CommandStoreUtil; -import io.confluent.ksql.rest.util.ErrorResponseUtil; import io.confluent.ksql.security.KsqlAuthorizationValidator; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; @@ -80,6 +79,7 @@ public class StreamedQueryResource implements KsqlConfigurable { private final ObjectMapper objectMapper; private final ActivenessRegistrar activenessRegistrar; private final Optional authorizationValidator; + private final Errors errorHandler; private KsqlConfig ksqlConfig; public StreamedQueryResource( @@ -88,7 +88,8 @@ public StreamedQueryResource( final Duration disconnectCheckInterval, final Duration commandQueueCatchupTimeout, final ActivenessRegistrar activenessRegistrar, - final Optional authorizationValidator + final Optional authorizationValidator, + final Errors errorHandler ) { this( ksqlEngine, @@ -97,7 +98,8 @@ public StreamedQueryResource( disconnectCheckInterval, commandQueueCatchupTimeout, activenessRegistrar, - authorizationValidator + authorizationValidator, + errorHandler ); } @@ -109,7 +111,8 @@ public StreamedQueryResource( final Duration disconnectCheckInterval, final Duration commandQueueCatchupTimeout, final ActivenessRegistrar activenessRegistrar, - final Optional authorizationValidator + final Optional authorizationValidator, + final Errors errorHandler ) { this.ksqlEngine = Objects.requireNonNull(ksqlEngine, "ksqlEngine"); this.statementParser = Objects.requireNonNull(statementParser, "statementParser"); @@ -122,6 +125,7 @@ public StreamedQueryResource( this.activenessRegistrar = Objects.requireNonNull(activenessRegistrar, "activenessRegistrar"); this.authorizationValidator = authorizationValidator; + this.errorHandler = Objects.requireNonNull(errorHandler, "errorHandler"); } @Override @@ -224,12 +228,11 @@ private Response handleStatement( "Statement type `%s' not supported for this resource", statement.getClass().getName())); } catch (final TopicAuthorizationException e) { - return Errors.accessDeniedFromKafka(e); + return errorHandler.accessDeniedFromKafkaResponse(e); } catch (final KsqlStatementException e) { return Errors.badStatement(e.getRawMessage(), e.getSqlStatement()); } catch (final KsqlException e) { - return ErrorResponseUtil.generateResponse( - e, Errors.badRequest(e)); + return errorHandler.generateResponse(e, Errors.badRequest(e)); } } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java index d987477ef372..0d9fd46614fc 100644 ---
a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java @@ -23,6 +23,7 @@ import io.confluent.ksql.parser.tree.PrintTopic; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.entity.KsqlErrorMessage; import io.confluent.ksql.rest.entity.KsqlRequest; import io.confluent.ksql.rest.entity.StreamedRow; @@ -60,6 +61,7 @@ import javax.websocket.Session; import javax.websocket.server.ServerEndpoint; import javax.ws.rs.core.Response; +import org.apache.kafka.common.errors.TopicAuthorizationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -93,6 +95,7 @@ public class WSQueryEndpoint { private final UserServiceContextFactory serviceContextFactory; private final DefaultServiceContextFactory defaultServiceContextFactory; private final ServerState serverState; + private final Errors errorHandler; private WebSocketSubscriber subscriber; private ServiceContext serviceContext; @@ -109,6 +112,7 @@ public WSQueryEndpoint( final ActivenessRegistrar activenessRegistrar, final Duration commandQueueCatchupTimeout, final Optional authorizationValidator, + final Errors errorHandler, final KsqlSecurityExtension securityExtension, final ServerState serverState ) { @@ -124,6 +128,7 @@ public WSQueryEndpoint( activenessRegistrar, commandQueueCatchupTimeout, authorizationValidator, + errorHandler, securityExtension, RestServiceContextFactory::create, RestServiceContextFactory::create, @@ -145,6 +150,7 @@ public WSQueryEndpoint( final ActivenessRegistrar activenessRegistrar, final Duration commandQueueCatchupTimeout, final Optional authorizationValidator, + final Errors errorHandler, final KsqlSecurityExtension securityExtension, final UserServiceContextFactory serviceContextFactory, final DefaultServiceContextFactory defaultServiceContextFactory, @@ -172,6 +178,7 @@ public WSQueryEndpoint( this.defaultServiceContextFactory = Objects.requireNonNull(defaultServiceContextFactory, "defaultServiceContextFactory"); this.serverState = Objects.requireNonNull(serverState, "serverState"); + this.errorHandler = Objects.requireNonNull(errorHandler, "errorHandler"); } @SuppressWarnings("unused") @@ -221,6 +228,12 @@ public void onOpen(final Session session, final EndpointConfig unused) { HANDLER_MAP .getOrDefault(type, WSQueryEndpoint::handleUnsupportedStatement) .handle(this, new RequestContext(session, request, serviceContext), statement); + } catch (final TopicAuthorizationException e) { + log.debug("Error processing request", e); + SessionUtil.closeSilently( + session, + CloseCodes.CANNOT_ACCEPT, + errorHandler.kafkaAuthorizationErrorMessage(e)); } catch (final Exception e) { log.debug("Error processing request", e); SessionUtil.closeSilently(session, CloseCodes.CANNOT_ACCEPT, e.getMessage()); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java index a39474f64f47..facc873f2875 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java @@ -38,6 +38,7 @@ import io.confluent.ksql.name.SourceName; import io.confluent.ksql.query.QueryId; import
io.confluent.ksql.query.id.SpecificQueryIdGenerator; +import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.entity.CommandId; import io.confluent.ksql.rest.entity.CommandId.Action; import io.confluent.ksql.rest.entity.CommandId.Type; @@ -218,7 +219,8 @@ private class KsqlServer { fakeCommandQueue, Duration.ofMillis(0), ()->{}, - Optional.of((sc, metastore, statement) -> { }) + Optional.of((sc, metastore, statement) -> { }), + mock(Errors.class) ); this.statementExecutor.configure(ksqlConfig); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java index 1355df1c4a68..10af99d142a6 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -17,6 +17,7 @@ import static io.confluent.ksql.parser.ParserMatchers.configured; import static io.confluent.ksql.parser.ParserMatchers.preparedStatementText; +import static io.confluent.ksql.rest.Errors.ERROR_CODE_FORBIDDEN_KAFKA_ACCESS; import static io.confluent.ksql.rest.entity.CommandId.Action.CREATE; import static io.confluent.ksql.rest.entity.CommandId.Action.DROP; import static io.confluent.ksql.rest.entity.CommandId.Action.EXECUTE; @@ -30,6 +31,7 @@ import static io.confluent.ksql.rest.server.resources.KsqlRestExceptionMatchers.exceptionStatementErrorMessage; import static io.confluent.ksql.rest.server.resources.KsqlRestExceptionMatchers.exceptionStatusCode; import static java.util.Collections.emptyMap; +import static javax.ws.rs.core.Response.Status.FORBIDDEN; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.hasItem; @@ -179,6 +181,7 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; +import org.mockito.invocation.InvocationOnMock; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; @@ -269,6 +272,8 @@ public class KsqlResourceTest { private KsqlAuthorizationValidator authorizationValidator; @Mock private Producer transactionalProducer; + @Mock + private Errors errorsHandler; private KsqlResource ksqlResource; private SchemaRegistryClient schemaRegistryClient; @@ -333,6 +338,14 @@ public void setUp() throws IOException, RestClientException { when(topicInjector.inject(any())) .thenAnswer(inv -> inv.getArgument(0)); + when(errorsHandler.generateResponse(any(), any())).thenAnswer(new Answer() { + @Override + public Response answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + return (Response) args[1]; + } + }); + setUpKsqlResource(); } @@ -363,7 +376,8 @@ public void shouldThrowOnHandleStatementIfNotConfigured() { schemaInjectorFactory.apply(sc), topicInjectorFactory.apply(ec), new TopicDeleteInjector(ec, sc)), - Optional.of(authorizationValidator) + Optional.of(authorizationValidator), + errorsHandler ); // Then: @@ -391,7 +405,8 @@ public void shouldThrowOnHandleTerminateIfNotConfigured() { schemaInjectorFactory.apply(sc), topicInjectorFactory.apply(ec), new TopicDeleteInjector(ec, sc)), - Optional.of(authorizationValidator) + Optional.of(authorizationValidator), + errorsHandler ); // Then: @@ -750,6 +765,11 @@ public void shouldFailIfCreateStatementMissingKafkaTopicName() { @Test public void 
shouldReturnForbiddenKafkaAccessIfKsqlTopicAuthorizationException() { // Given: + final String errorMsg = "some error"; + when(errorsHandler.generateResponse(any(), any())).thenReturn(Response + .status(FORBIDDEN) + .entity(new KsqlErrorMessage(ERROR_CODE_FORBIDDEN_KAFKA_ACCESS, errorMsg)) + .build()); doThrow(new KsqlTopicAuthorizationException( AclOperation.DELETE, Collections.singleton("topic"))).when(authorizationValidator).checkAuthorization(any(), any(), any()); @@ -762,29 +782,7 @@ public void shouldReturnForbiddenKafkaAccessIfKsqlTopicAuthorizationException() // Then: assertThat(result, is(instanceOf(KsqlErrorMessage.class))); assertThat(result.getErrorCode(), is(Errors.ERROR_CODE_FORBIDDEN_KAFKA_ACCESS)); - } - - @Test - public void shouldReturnForbiddenKafkaAccessIfRootCauseKsqlTopicAuthorizationException() { - // Given: - doThrow(new KsqlException("Could not delete the corresponding kafka topic: topic", - new KsqlTopicAuthorizationException( - AclOperation.DELETE, - Collections.singleton("topic")))) - .when(authorizationValidator).checkAuthorization(any(), any(), any()); - - - // When: - final KsqlErrorMessage result = makeFailingRequest( - "DROP STREAM TEST_STREAM DELETE TOPIC;", - Code.FORBIDDEN); - - // Then: - assertThat(result, is(instanceOf(KsqlErrorMessage.class))); - assertThat(result.getErrorCode(), is(Errors.ERROR_CODE_FORBIDDEN_KAFKA_ACCESS)); - assertThat(result.getMessage(), is( - "Could not delete the corresponding kafka topic: topic\n" + - "Caused by: Authorization denied to Delete on topic(s): [topic]")); + assertThat(result.getMessage(), is(errorMsg)); } @Test @@ -2099,7 +2097,8 @@ private void setUpKsqlResource() { schemaInjectorFactory.apply(sc), topicInjectorFactory.apply(ec), new TopicDeleteInjector(ec, sc)), - Optional.of(authorizationValidator) + Optional.of(authorizationValidator), + errorsHandler ); ksqlResource.configure(ksqlConfig); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index 43655bb03ccd..8f4ee101ae2b 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -17,8 +17,10 @@ import static io.confluent.ksql.rest.entity.KsqlErrorMessageMatchers.errorCode; import static io.confluent.ksql.rest.entity.KsqlErrorMessageMatchers.errorMessage; +import static io.confluent.ksql.rest.Errors.ERROR_CODE_FORBIDDEN_KAFKA_ACCESS; import static io.confluent.ksql.rest.server.resources.KsqlRestExceptionMatchers.exceptionErrorMessage; import static io.confluent.ksql.rest.server.resources.KsqlRestExceptionMatchers.exceptionStatusCode; +import static javax.ws.rs.core.Response.Status.FORBIDDEN; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -60,7 +62,6 @@ import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.QueryMetadata; import io.confluent.ksql.util.TransientQueryMetadata; import io.confluent.ksql.version.metrics.ActivenessRegistrar; @@ -111,6 +112,11 @@ public class StreamedQueryResourceTest { 
StreamsConfig.APPLICATION_SERVER_CONFIG, "something:1" )); private static final Long closeTimeout = KsqlConfig.KSQL_SHUTDOWN_TIMEOUT_MS_DEFAULT; + + private static Response AUTHORIZATION_ERROR_RESPONSE = Response + .status(FORBIDDEN) + .entity(new KsqlErrorMessage(ERROR_CODE_FORBIDDEN_KAFKA_ACCESS, "some error")) + .build(); private static final String TOPIC_NAME = "test_stream"; private static final String PUSH_QUERY_STRING = "SELECT * FROM " + TOPIC_NAME + " EMIT CHANGES;"; @@ -136,6 +142,8 @@ public class StreamedQueryResourceTest { private Consumer queryCloseCallback; @Mock private KsqlAuthorizationValidator authorizationValidator; + @Mock + private Errors errorsHandler; private StreamedQueryResource testResource; private PreparedStatement invalid; private PreparedStatement query; @@ -152,6 +160,7 @@ public void setup() { when(pullQuery.isPullQuery()).thenReturn(true); final PreparedStatement pullQueryStatement = PreparedStatement.of(PULL_QUERY_STRING, pullQuery); when(mockStatementParser.parseSingleStatement(PULL_QUERY_STRING)).thenReturn(pullQueryStatement); + when(errorsHandler.accessDeniedFromKafkaResponse(any(Exception.class))).thenReturn(AUTHORIZATION_ERROR_RESPONSE); testResource = new StreamedQueryResource( mockKsqlEngine, @@ -160,7 +169,8 @@ public void setup() { DISCONNECT_CHECK_INTERVAL, COMMAND_QUEUE_CATCHUP_TIMOEUT, activenessRegistrar, - Optional.of(authorizationValidator) + Optional.of(authorizationValidator), + errorsHandler ); testResource.configure(VALID_CONFIG); @@ -185,7 +195,8 @@ public void shouldThrowOnHandleStatementIfNotConfigured() { DISCONNECT_CHECK_INTERVAL, COMMAND_QUEUE_CATCHUP_TIMOEUT, activenessRegistrar, - Optional.of(authorizationValidator) + Optional.of(authorizationValidator), + errorsHandler ); // Then: @@ -550,39 +561,9 @@ public void shouldReturnForbiddenKafkaAccessIfKsqlTopicAuthorizationException() new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), null) ); - final Response expected = Errors.accessDeniedFromKafka( - new KsqlTopicAuthorizationException(AclOperation.READ, Collections.singleton(TOPIC_NAME))); - - final KsqlErrorMessage responseEntity = (KsqlErrorMessage) response.getEntity(); - final KsqlErrorMessage expectedEntity = (KsqlErrorMessage) expected.getEntity(); - assertEquals(response.getStatus(), expected.getStatus()); - assertEquals(responseEntity.getMessage(), expectedEntity.getMessage()); - } - - @Test - public void shouldReturnForbiddenKafkaAccessIfRootCauseKsqlTopicAuthorizationException() { - // Given: - when(mockStatementParser.parseSingleStatement(PUSH_QUERY_STRING)) - .thenReturn(query); - doThrow(new KsqlException( - "", - new KsqlTopicAuthorizationException(AclOperation.READ, Collections.singleton(TOPIC_NAME)))) - .when(authorizationValidator).checkAuthorization(any(), any(), any()); - - // When: - final Response response = testResource.streamQuery( - serviceContext, - new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), null) - ); - - final Response expected = Errors.accessDeniedFromKafka( - new KsqlException( - "", - new KsqlTopicAuthorizationException(AclOperation.READ, Collections.singleton(TOPIC_NAME)))); - final KsqlErrorMessage responseEntity = (KsqlErrorMessage) response.getEntity(); - final KsqlErrorMessage expectedEntity = (KsqlErrorMessage) expected.getEntity(); - assertEquals(response.getStatus(), expected.getStatus()); + final KsqlErrorMessage expectedEntity = (KsqlErrorMessage) AUTHORIZATION_ERROR_RESPONSE.getEntity(); + assertEquals(response.getStatus(), 
AUTHORIZATION_ERROR_RESPONSE.getStatus()); assertEquals(responseEntity.getMessage(), expectedEntity.getMessage()); } @@ -603,11 +584,8 @@ public void shouldReturnForbiddenKafkaAccessIfPrintTopicKsqlTopicAuthorizationEx new KsqlRequest(PRINT_TOPIC, Collections.emptyMap(), null) ); - final Response expected = Errors.accessDeniedFromKafka( - new KsqlTopicAuthorizationException(AclOperation.READ, Collections.singleton(TOPIC_NAME))); - - assertEquals(response.getStatus(), expected.getStatus()); - assertEquals(response.getEntity(), expected.getEntity()); + assertEquals(response.getStatus(), AUTHORIZATION_ERROR_RESPONSE.getStatus()); + assertEquals(response.getEntity(), AUTHORIZATION_ERROR_RESPONSE.getEntity()); } @Test diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java index 2d3bd33861b5..86a312ec679d 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java @@ -36,13 +36,16 @@ import com.google.common.util.concurrent.ListeningScheduledExecutorService; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.engine.KsqlEngine; +import io.confluent.ksql.exception.KsqlTopicAuthorizationException; import io.confluent.ksql.json.JsonMapper; +import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.Relation; import io.confluent.ksql.parser.tree.ResultMaterialization; import io.confluent.ksql.parser.tree.Select; import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.entity.KsqlErrorMessage; import io.confluent.ksql.rest.entity.KsqlRequest; import io.confluent.ksql.rest.entity.Versions; @@ -80,6 +83,8 @@ import javax.websocket.Session; import javax.ws.rs.core.Response; import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.common.acl.AclOperation; +import org.apache.kafka.common.errors.TopicAuthorizationException; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -147,12 +152,16 @@ public class WSQueryEndpointTest { @Mock private ServiceContext serviceContext; @Mock + private MetaStore metaStore; + @Mock private UserServiceContextFactory serviceContextFactory; @Mock private ServerState serverState; @Mock private KsqlUserContextProvider userContextProvider; @Mock + private Errors errorsHandler; + @Mock private DefaultServiceContextFactory defaultServiceContextProvider; @Captor private ArgumentCaptor closeReasonCaptor; @@ -177,6 +186,7 @@ public void setUp() { when(defaultServiceContextProvider.create(any(), any())).thenReturn(serviceContext); when(serviceContext.getTopicClient()).thenReturn(topicClient); when(serverState.checkReady()).thenReturn(Optional.empty()); + when(ksqlEngine.getMetaStore()).thenReturn(metaStore); givenRequest(VALID_REQUEST); wsQueryEndpoint = new WSQueryEndpoint( @@ -192,6 +202,7 @@ public void setUp() { activenessRegistrar, COMMAND_QUEUE_CATCHUP_TIMEOUT, Optional.of(authorizationValidator), + errorsHandler, securityExtension, serviceContextFactory, defaultServiceContextProvider, @@ -383,6 +394,26 @@ public void shouldHandlePushQuery() { any()); } + @Test + 
public void shouldReturnErrorMessageWhenTopicAuthorizationException() throws Exception { + // Given: + final String errorMessage = "authorization error"; + givenRequestIs(query); + when(errorsHandler.kafkaAuthorizationErrorMessage(any(TopicAuthorizationException.class))) + .thenReturn(errorMessage); + doThrow(new KsqlTopicAuthorizationException(AclOperation.CREATE, Collections.singleton("topic"))) + .when(authorizationValidator).checkAuthorization(serviceContext, metaStore, query); + + // When: + wsQueryEndpoint.onOpen(session, null); + + // Then: + verifyClosedContainingReason( + errorMessage, + CloseCodes.CANNOT_ACCEPT + ); + } + @Test public void shouldHandlePullQuery() { // Given: diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/ErrorResponseUtil.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/DefaultErrorMessages.java similarity index 50% rename from ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/ErrorResponseUtil.java rename to ksql-rest-model/src/main/java/io/confluent/ksql/rest/DefaultErrorMessages.java index 907262bba87a..d6ff2a273256 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/ErrorResponseUtil.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/DefaultErrorMessages.java @@ -13,24 +13,12 @@ * specific language governing permissions and limitations under the License. */ -package io.confluent.ksql.rest.util; +package io.confluent.ksql.rest; -import io.confluent.ksql.rest.Errors; -import javax.ws.rs.core.Response; -import org.apache.commons.lang3.exception.ExceptionUtils; -import org.apache.kafka.common.errors.TopicAuthorizationException; +public class DefaultErrorMessages implements ErrorMessages { - -public final class ErrorResponseUtil { - - private ErrorResponseUtil() { - } - - public static Response generateResponse(final Exception e, final Response defaultResponse) { - if (ExceptionUtils.indexOfType(e, TopicAuthorizationException.class) >= 0) { - return Errors.accessDeniedFromKafka(e); - } else { - return defaultResponse; - } + @Override + public String kafkaAuthorizationErrorMessage(final Exception e) { + return e.getMessage(); } } diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/ErrorMessages.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/ErrorMessages.java new file mode 100644 index 000000000000..064399ae4f9b --- /dev/null +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/ErrorMessages.java @@ -0,0 +1,21 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.rest; + +public interface ErrorMessages { + + String kafkaAuthorizationErrorMessage(Exception e); +} diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/Errors.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/Errors.java index b17890e06974..78b76a0d14a8 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/Errors.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/Errors.java @@ -25,15 +25,19 @@ import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.KsqlErrorMessage; import io.confluent.ksql.rest.entity.KsqlStatementErrorMessage; +import java.util.Objects; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; +import org.apache.commons.lang3.exception.ExceptionUtils; +import org.apache.kafka.common.errors.TopicAuthorizationException; + public final class Errors { private static final int HTTP_TO_ERROR_CODE_MULTIPLIER = 100; public static final int ERROR_CODE_BAD_REQUEST = toErrorCode(BAD_REQUEST.getStatusCode()); public static final int ERROR_CODE_BAD_STATEMENT = toErrorCode(BAD_REQUEST.getStatusCode()) + 1; - public static final int ERROR_CODE_QUERY_ENDPOINT = toErrorCode(BAD_REQUEST.getStatusCode()) + 2; + private static final int ERROR_CODE_QUERY_ENDPOINT = toErrorCode(BAD_REQUEST.getStatusCode()) + 2; public static final int ERROR_CODE_UNAUTHORIZED = toErrorCode(UNAUTHORIZED.getStatusCode()); @@ -52,12 +56,11 @@ public final class Errors { public static final int ERROR_CODE_SERVER_NOT_READY = toErrorCode(SERVICE_UNAVAILABLE.getStatusCode()) + 2; - private Errors() { - } - public static final int ERROR_CODE_SERVER_ERROR = toErrorCode(INTERNAL_SERVER_ERROR.getStatusCode()); + private final ErrorMessages errorMessages; + public static int toStatusCode(final int errorCode) { return errorCode / HTTP_TO_ERROR_CODE_MULTIPLIER; } @@ -81,10 +84,10 @@ public static Response accessDenied(final String msg) { .build(); } - public static Response accessDeniedFromKafka(final Throwable t) { + private Response constructAccessDeniedFromKafkaResponse(final String errorMessage) { return Response .status(FORBIDDEN) - .entity(new KsqlErrorMessage(ERROR_CODE_FORBIDDEN_KAFKA_ACCESS, t)) + .entity(new KsqlErrorMessage(ERROR_CODE_FORBIDDEN_KAFKA_ACCESS, errorMessage)) .build(); } @@ -106,7 +109,7 @@ public static Response badStatement(final String msg, final String statementText return badStatement(msg, statementText, new KsqlEntityList()); } - static Response badStatement( + public static Response badStatement( final String msg, final String statementText, final KsqlEntityList entities) { @@ -121,7 +124,7 @@ public static Response badStatement(final Throwable t, final String statementTex return badStatement(t, statementText, new KsqlEntityList()); } - static Response badStatement( + public static Response badStatement( final Throwable t, final String statementText, final KsqlEntityList entities) { @@ -190,4 +193,28 @@ public static Response serverNotReady(final KsqlErrorMessage error) { .entity(error) .build(); } + + + public Errors(final ErrorMessages errorMessages) { + this.errorMessages = Objects.requireNonNull(errorMessages, "errorMessages"); + } + + public Response accessDeniedFromKafkaResponse(final Exception e) { + return constructAccessDeniedFromKafkaResponse(errorMessages.kafkaAuthorizationErrorMessage(e)); + } + + public String kafkaAuthorizationErrorMessage(final Exception e) { + return errorMessages.kafkaAuthorizationErrorMessage(e); + } + + public Response 
generateResponse( + final Exception e, + final Response defaultResponse + ) { + if (ExceptionUtils.indexOfType(e, TopicAuthorizationException.class) >= 0) { + return accessDeniedFromKafkaResponse(e); + } else { + return defaultResponse; + } + } } diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java new file mode 100644 index 000000000000..f5f5ad0ac077 --- /dev/null +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java @@ -0,0 +1,89 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.rest; + +import static javax.ws.rs.core.Response.Status.FORBIDDEN; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import io.confluent.ksql.rest.entity.KsqlErrorMessage; +import io.confluent.ksql.util.KsqlException; +import org.apache.kafka.common.errors.TopicAuthorizationException; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +import javax.ws.rs.core.Response; + +@RunWith(MockitoJUnitRunner.class) +public class ErrorsTest { + + private static String SOME_ERROR = "error string"; + private static Response KAFKA_DENIED_ERROR = Response + .status(FORBIDDEN) + .entity(new KsqlErrorMessage(Errors.ERROR_CODE_FORBIDDEN_KAFKA_ACCESS, SOME_ERROR)) + .build(); + + @Mock + private ErrorMessages errorMessages; + @Mock + private Exception exception; + + private Errors errorHandler; + + @Before + public void setUp() { + when(errorMessages.kafkaAuthorizationErrorMessage(any(Exception.class))) + .thenReturn(SOME_ERROR); + errorHandler = new Errors(errorMessages); + } + + @Test + public void shouldReturnForbiddenKafkaResponse() { + final Response response = errorHandler.accessDeniedFromKafkaResponse(exception); + assertThat(response.getStatus(), is(403)); + assertThat(response.getEntity(), is(instanceOf(KsqlErrorMessage.class))); + assertThat(((KsqlErrorMessage) response.getEntity()).getMessage(), is(SOME_ERROR)); + } + + @Test + public void shouldReturnForbiddenKafkaErrorMessageString() { + final String error = errorHandler.kafkaAuthorizationErrorMessage(exception); + assertThat(error, is(SOME_ERROR)); + } + + @Test + public void shouldReturnForbiddenKafkaResponseIfRootCauseTopicAuthorizationException() { + final Response response = errorHandler.generateResponse(new KsqlException( + new TopicAuthorizationException("error")), Errors.badRequest("bad")); + assertThat(response.getStatus(), is(403)); + assertThat(response.getEntity(), is(instanceOf(KsqlErrorMessage.class))); + assertThat(((KsqlErrorMessage) response.getEntity()).getMessage(), is(SOME_ERROR)); + } + + @Test + public void 
shouldReturnResponseIfRootCauseNotTopicAuthorizationException() {
+    final Response response = errorHandler.generateResponse(new KsqlException(
+        new RuntimeException("error")), Errors.badRequest("bad"));
+    assertThat(response.getStatus(), is(400));
+  }
+}

From 2f41aac6f0b61f2c91a212708d82768f0e3e71f3 Mon Sep 17 00:00:00 2001
From: Jim Galasyn
Date: Tue, 17 Dec 2019 13:51:45 -0800
Subject: [PATCH 044/123] docs: update codeowners file for docs-md directory (DOCS-3120) (#4161)

---
 .github/CODEOWNERS | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 62d94aa60167..1cac0dbc4f80 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,5 +1,6 @@
 # assign ksql team as reviewers for all PRs:
 * @confluentinc/ksql
 
-# Doc's also need a review by Jim:
+# Docs also need a review by Jim:
 docs/ @confluentinc/ksql @JimGalasyn
+docs-md/ @confluentinc/ksql @JimGalasyn

From 0ac8747c67a7b8df5c4388bac2064573f9d03767 Mon Sep 17 00:00:00 2001
From: Steven Zhang <35498506+stevenpyzhang@users.noreply.github.com>
Date: Tue, 17 Dec 2019 13:56:57 -0800
Subject: [PATCH 045/123] fix: show topics doesn't display topics with different casing (#4159)

---
 .../server/execution/ListTopicsExecutor.java  |  2 +-
 .../execution/ListTopicsExecutorTest.java     | 59 ++++++++++++-------
 2 files changed, 40 insertions(+), 21 deletions(-)

diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListTopicsExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListTopicsExecutor.java
index c7d34ed66cdc..96da8ecec210 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListTopicsExecutor.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListTopicsExecutor.java
@@ -125,7 +125,7 @@ private static Map<String, TopicDescription> filterKsqlInternalTopics(
     for (final Map.Entry<String, TopicDescription> entry : kafkaTopicDescriptions.entrySet()) {
       if (!entry.getKey().startsWith(serviceId + persistentQueryPrefix)
           && !entry.getKey().startsWith(serviceId + transientQueryPrefix)) {
-        filteredKafkaTopics.put(entry.getKey().toLowerCase(), entry.getValue());
+        filteredKafkaTopics.put(entry.getKey(), entry.getValue());
       }
     }
     return filteredKafkaTopics;
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListTopicsExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListTopicsExecutorTest.java
index 0f0d2c3b99f8..008b7faedc07 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListTopicsExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListTopicsExecutorTest.java
@@ -34,15 +34,32 @@
 import org.apache.kafka.clients.admin.ConsumerGroupListing;
 import org.apache.kafka.clients.admin.ListConsumerGroupsResult;
 import org.apache.kafka.common.internals.KafkaFutureImpl;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.runner.RunWith;
+import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 
 @RunWith(MockitoJUnitRunner.class)
 public class ListTopicsExecutorTest {
 
   @Rule public final TemporaryEngine engine = new TemporaryEngine();
+  @Mock
+  private AdminClient adminClient;
+
+  private ServiceContext serviceContext;
+
+  @Before
+  public void setUp() {
+    serviceContext = TestServiceContext.create(
+        engine.getServiceContext().getKafkaClientSupplier(),
+        adminClient,
+        engine.getServiceContext().getTopicClient(),
+        engine.getServiceContext().getSchemaRegistryClientFactory(),
+        engine.getServiceContext().getConnectClient()
+    );
+  }
 
   @Test
   public void shouldListKafkaTopics() {
@@ -50,15 +67,27 @@
     engine.givenKafkaTopic("topic1");
     engine.givenKafkaTopic("topic2");
 
-    final AdminClient mockAdminClient = mock(AdminClient.class);
+    // When:
+    final KafkaTopicsList topicsList =
+        (KafkaTopicsList) CustomExecutors.LIST_TOPICS.execute(
+            engine.configure("LIST TOPICS;"),
+            ImmutableMap.of(),
+            engine.getEngine(),
+            serviceContext
+        ).orElseThrow(IllegalStateException::new);
 
-    final ServiceContext serviceContext = TestServiceContext.create(
-        engine.getServiceContext().getKafkaClientSupplier(),
-        mockAdminClient,
-        engine.getServiceContext().getTopicClient(),
-        engine.getServiceContext().getSchemaRegistryClientFactory(),
-        engine.getServiceContext().getConnectClient()
-    );
+    // Then:
+    assertThat(topicsList.getTopics(), containsInAnyOrder(
+        new KafkaTopicInfo("topic1", ImmutableList.of(1)),
+        new KafkaTopicInfo("topic2", ImmutableList.of(1))
+    ));
+  }
+
+  @Test
+  public void shouldListKafkaTopicsThatDifferByCase() {
+    // Given:
+    engine.givenKafkaTopic("topic1");
+    engine.givenKafkaTopic("toPIc1");
 
     // When:
     final KafkaTopicsList topicsList =
@@ -72,7 +101,7 @@
     // Then:
     assertThat(topicsList.getTopics(), containsInAnyOrder(
         new KafkaTopicInfo("topic1", ImmutableList.of(1)),
-        new KafkaTopicInfo("topic2", ImmutableList.of(1))
+        new KafkaTopicInfo("toPIc1", ImmutableList.of(1))
     ));
   }
 
@@ -82,22 +111,13 @@ public void shouldListKafkaTopicsExtended() {
     // Given:
     engine.givenKafkaTopic("topic1");
     engine.givenKafkaTopic("topic2");
 
-    final AdminClient mockAdminClient = mock(AdminClient.class);
     final ListConsumerGroupsResult result = mock(ListConsumerGroupsResult.class);
     final KafkaFutureImpl<Collection<ConsumerGroupListing>> groups = new KafkaFutureImpl<>();
     when(result.all()).thenReturn(groups);
-    when(mockAdminClient.listConsumerGroups()).thenReturn(result);
+    when(adminClient.listConsumerGroups()).thenReturn(result);
     groups.complete(ImmutableList.of());
 
-    final ServiceContext serviceContext = TestServiceContext.create(
-        engine.getServiceContext().getKafkaClientSupplier(),
-        mockAdminClient,
-        engine.getServiceContext().getTopicClient(),
-        engine.getServiceContext().getSchemaRegistryClientFactory(),
-        engine.getServiceContext().getConnectClient()
-    );
-
     // When:
     final KafkaTopicsListExtended topicsList =
         (KafkaTopicsListExtended) CustomExecutors.LIST_TOPICS.execute(
@@ -113,5 +133,4 @@
         new KafkaTopicInfoExtended("topic2", ImmutableList.of(1), 0, 0)
     ));
   }
-
 }

From e59a6fe7c850adab5d854e83588fe0dbcfba9f32 Mon Sep 17 00:00:00 2001
From: Victoria Xia
Date: Tue, 17 Dec 2019 15:20:36 -0800
Subject: [PATCH 046/123] test: do not mock config store in StandaloneExecutor integration test (#4128)

---
 .../io/confluent/ksql/util/KsqlConfig.java    |  4 ++--
 .../ksql/rest/server/StandaloneExecutor.java  |  7 ++++++-
 .../StandaloneExecutorFunctionalTest.java     |  9 ++-------
 .../rest/server/StandaloneExecutorTest.java   | 19 +++++++++++++++++++
 4 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java
index ef517b4b6761..5d2c4c3b5267 100644
--- a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java
+++ b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java
@@ -256,7 +256,7 @@ public class KsqlConfig extends AbstractConfig {
         new CompatibilityBreakingConfigDef(
             SINK_NUMBER_OF_PARTITIONS_PROPERTY,
             Type.INT,
-            4,
+            null,
             null,
             Importance.LOW,
             Optional.empty(),
@@ -266,7 +266,7 @@ public class KsqlConfig extends AbstractConfig {
         new CompatibilityBreakingConfigDef(
             SINK_NUMBER_OF_REPLICAS_PROPERTY,
             ConfigDef.Type.SHORT,
-            (short) 1,
+            null,
             null,
             ConfigDef.Importance.LOW,
             Optional.empty(),
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
index cb4ccd2cc033..8e7134223945 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
@@ -15,6 +15,7 @@
 
 package io.confluent.ksql.rest.server;
 
+import static java.util.Objects.nonNull;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.collect.ImmutableMap;
@@ -118,7 +119,11 @@ public void start() {
       processesQueryFile(readQueriesFile(queriesFile));
       showWelcomeMessage();
       final Properties properties = new Properties();
-      ksqlConfig.originals().forEach((key, value) -> properties.put(key, value.toString()));
+      ksqlConfig.originals().forEach((key, value) -> {
+        if (nonNull(value)) {
+          properties.put(key, value.toString());
+        }
+      });
       versionChecker.start(KsqlModuleType.SERVER, properties);
     } catch (final Exception e) {
       log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java
index e583259ac971..685731d09811 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java
@@ -17,14 +17,13 @@
 
 import static io.confluent.ksql.serde.Format.AVRO;
 import static io.confluent.ksql.serde.Format.JSON;
-import static org.mockito.Mockito.when;
 
 import com.google.common.collect.ImmutableMap;
 import io.confluent.common.utils.IntegrationTest;
 import io.confluent.ksql.KsqlConfigTestUtil;
 import io.confluent.ksql.integration.IntegrationTestHarness;
 import io.confluent.ksql.name.ColumnName;
-import io.confluent.ksql.rest.server.computation.ConfigStore;
+import io.confluent.ksql.rest.server.computation.KafkaConfigStore;
 import io.confluent.ksql.schema.ksql.LogicalSchema;
 import io.confluent.ksql.schema.ksql.PhysicalSchema;
 import io.confluent.ksql.schema.ksql.types.SqlTypes;
@@ -77,8 +76,6 @@ public class StandaloneExecutorFunctionalTest {
 
   @Mock
   private VersionCheckerAgent versionChecker;
-  @Mock
-  private ConfigStore configStore;
   private Path queryFile;
   private StandaloneExecutor standalone;
   private String s1;
@@ -106,8 +103,6 @@ public void setUp() throws Exception {
         .build();
     ksqlConfig = new KsqlConfig(properties);
 
-    when(configStore.getKsqlConfig()).thenReturn(ksqlConfig);
-
     final Function<KsqlConfig, ServiceContext> serviceContextFactory = config -> TestServiceContext.create(
         ksqlConfig,
@@ -119,7 +114,7 @@ public void setUp() throws Exception {
         queryFile.toString(),
         ".",
         serviceContextFactory,
-        (topicName, currentConfig) -> configStore,
+        KafkaConfigStore::new,
         activeQuerySupplier -> versionChecker,
         StandaloneExecutor::new
     );
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorTest.java 
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorTest.java index 8ce38b331b1d..48ad47ba1503 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorTest.java @@ -83,6 +83,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.OptionalInt; @@ -338,6 +339,24 @@ public void shouldLoadQueryFile() { verify(ksqlEngine).parse("This statement"); } + @Test + public void shouldNotThrowIfNullValueInKsqlConfig() { + standaloneExecutor = new StandaloneExecutor( + serviceContext, + processingLogConfig, + new KsqlConfig(Collections.singletonMap("test", null)), + ksqlEngine, + queriesFile.toString(), + udfLoader, + false, + versionChecker, + injectorFactory + ); + + // When: + standaloneExecutor.start(); + } + @Test public void shouldThrowIfCanNotLoadQueryFile() { // Given: From 75b539e8551b0b9a88577ef40933d90dba2c2dbd Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Wed, 18 Dec 2019 08:37:33 -0800 Subject: [PATCH 047/123] fix: decimals in structs should display as numeric (#4165) --- .../confluent/ksql/json/StructSerializationModule.java | 9 +++++++-- .../ksql/json/StructSerializationModuleTest.java | 10 +++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/ksql-common/src/main/java/io/confluent/ksql/json/StructSerializationModule.java b/ksql-common/src/main/java/io/confluent/ksql/json/StructSerializationModule.java index 4d8478a92cee..aca2cef1b0b3 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/json/StructSerializationModule.java +++ b/ksql-common/src/main/java/io/confluent/ksql/json/StructSerializationModule.java @@ -20,10 +20,12 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.module.SimpleModule; +import com.google.common.collect.ImmutableMap; import java.io.IOException; -import java.util.Collections; import org.apache.kafka.connect.data.Struct; +import org.apache.kafka.connect.json.DecimalFormat; import org.apache.kafka.connect.json.JsonConverter; +import org.apache.kafka.connect.json.JsonConverterConfig; public class StructSerializationModule extends SimpleModule { @@ -32,7 +34,10 @@ public class StructSerializationModule extends SimpleModule { public StructSerializationModule() { super(); - jsonConverter.configure(Collections.singletonMap("schemas.enable", false), false); + jsonConverter.configure(ImmutableMap.of( + JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, false, + JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name() + ), false); addSerializer(Struct.class, new StructSerializationModule.Serializer()); } diff --git a/ksql-common/src/test/java/io/confluent/ksql/json/StructSerializationModuleTest.java b/ksql-common/src/test/java/io/confluent/ksql/json/StructSerializationModuleTest.java index 381fa62c140b..af88bbaa5927 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/json/StructSerializationModuleTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/json/StructSerializationModuleTest.java @@ -20,6 +20,8 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; +import io.confluent.ksql.util.DecimalUtil; +import java.math.BigDecimal; import java.nio.charset.StandardCharsets; import java.util.ArrayList; 
import java.util.List; @@ -48,6 +50,7 @@ public class StructSerializationModuleTest { .field("ITEMID", Schema.INT64_SCHEMA) .field("NAME", Schema.STRING_SCHEMA) .field("CATEGORY", categorySchema) + .field("COST", DecimalUtil.builder(4, 2).build()) .optional().build(); private ObjectMapper objectMapper; @@ -82,9 +85,10 @@ public void shouldSerializeStructWithNestedStructCorrectly() throws JsonProcessi item.put("ITEMID", 1L); item.put("NAME", "ICE CREAM"); item.put("CATEGORY", category); + item.put("COST", new BigDecimal("10.01")); final byte[] serializedBytes = objectMapper.writeValueAsBytes(item); final String jsonString = new String(serializedBytes, StandardCharsets.UTF_8); - assertThat(jsonString, equalTo("{\"ITEMID\":1,\"NAME\":\"ICE CREAM\",\"CATEGORY\":{\"ID\":1,\"NAME\":\"Food\"}}")); + assertThat(jsonString, equalTo("{\"ITEMID\":1,\"NAME\":\"ICE CREAM\",\"CATEGORY\":{\"ID\":1,\"NAME\":\"Food\"},\"COST\":10.01}")); } @Test @@ -99,7 +103,7 @@ public void shouldSerializeStructWithNestedStructAndNullFieldsCorrectly() throws item.put("CATEGORY", null); final byte[] serializedBytes = objectMapper.writeValueAsBytes(item); final String jsonString = new String(serializedBytes, StandardCharsets.UTF_8); - assertThat(jsonString, equalTo("{\"ITEMID\":1,\"NAME\":\"ICE CREAM\",\"CATEGORY\":null}")); + assertThat(jsonString, equalTo("{\"ITEMID\":1,\"NAME\":\"ICE CREAM\",\"CATEGORY\":null,\"COST\":null}")); } @Test @@ -122,6 +126,6 @@ public void shouldSerializeStructInsideListCorrectly() throws JsonProcessingExce final byte[] serializedBytes = objectMapper.writeValueAsBytes(list); final String jsonString = new String(serializedBytes, StandardCharsets.UTF_8); - assertThat(jsonString, equalTo("[\"Hello\",1,1,1.0,{\"ITEMID\":1,\"NAME\":\"ICE CREAM\",\"CATEGORY\":null}]")); + assertThat(jsonString, equalTo("[\"Hello\",1,1,1.0,{\"ITEMID\":1,\"NAME\":\"ICE CREAM\",\"CATEGORY\":null,\"COST\":null}]")); } } \ No newline at end of file From e92d2f33578cac066dbc0defb2e01f3db2419e03 Mon Sep 17 00:00:00 2001 From: Robert Yokota Date: Wed, 18 Dec 2019 08:54:21 -0800 Subject: [PATCH 048/123] test: disambiguate some Schema Registry methods (#4166) As part of adding support to Schema Registry for additional schema types beyond Avro, some methods on SchemaRegistryClient will be overloaded. This change is to disambiguate these method calls in the KSQL tests. 
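
To make the overload ambiguity concrete, here is a minimal, hypothetical Mockito sketch (not part of this patch). It assumes only what the message above states: that `SchemaRegistryClient` gains a second `testCompatibility` overload taking a non-Avro schema type. Once that happens, an untyped matcher no longer resolves to a unique method, while the `Class`-token matcher pins the Avro overload:

```java
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import org.apache.avro.Schema;

public final class OverloadDisambiguationSketch {

  public static void main(final String[] args) throws Exception {
    final SchemaRegistryClient srClient = mock(SchemaRegistryClient.class);

    // Compiles while testCompatibility(String, Schema) is the only overload,
    // but becomes an ambiguous method call once a second overload is added:
    //   when(srClient.testCompatibility(anyString(), any())).thenReturn(true);

    // Pinning the argument type keeps the stubbing unambiguous:
    when(srClient.testCompatibility(anyString(), any(Schema.class))).thenReturn(true);

    final Schema avroSchema = new Schema.Parser().parse(
        "{\"type\": \"record\", \"name\": \"r\", \"fields\": []}");
    System.out.println(srClient.testCompatibility("subject-value", avroSchema)); // true
  }
}
```

The same reasoning applies to the `register(...)` and `anyObject(Schema.class)` call sites in the diffs below.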
--- .../confluent/ksql/engine/KsqlEngineTest.java | 2 +- .../io/confluent/ksql/util/AvroUtilTest.java | 29 ++++++++++--------- .../resources/streaming/TopicStreamTest.java | 4 ++- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlEngineTest.java b/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlEngineTest.java index 223f4651743d..c4eb645ce3a9 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlEngineTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlEngineTest.java @@ -1131,7 +1131,7 @@ public void shouldNotRegisterAnySchemasDuringSandboxExecute() throws Exception { ); // Then: - verify(schemaRegistryClient, never()).register(any(), any()); + verify(schemaRegistryClient, never()).register(any(), any(Schema.class)); } @Test diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/AvroUtilTest.java b/ksql-engine/src/test/java/io/confluent/ksql/util/AvroUtilTest.java index 6eaa30181cae..d90aa65f18f3 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/AvroUtilTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/AvroUtilTest.java @@ -48,7 +48,8 @@ import java.io.IOException; import java.util.Collections; import java.util.Optional; -import org.apache.kafka.connect.data.Schema; + +import org.apache.avro.Schema; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -128,13 +129,13 @@ public void setUp() { @Test public void shouldValidateSchemaEvolutionWithCorrectSubject() throws Exception { // Given: - when(srClient.testCompatibility(anyString(), any())).thenReturn(true); + when(srClient.testCompatibility(anyString(), any(Schema.class))).thenReturn(true); // When: AvroUtil.throwOnInvalidSchemaEvolution(STATEMENT_TEXT, ddlCommand, srClient, ksqlConfig); // Then: - verify(srClient).testCompatibility(eq(RESULT_TOPIC_NAME + "-value"), any()); + verify(srClient).testCompatibility(eq(RESULT_TOPIC_NAME + "-value"), any(Schema.class)); } @Test @@ -144,7 +145,7 @@ public void shouldValidateSchemaEvolutionWithCorrectSchema() throws Exception { final org.apache.avro.Schema expectedAvroSchema = AvroSchemas .getAvroSchema(schema.valueSchema(), SCHEMA_NAME, ksqlConfig); - when(srClient.testCompatibility(anyString(), any())).thenReturn(true); + when(srClient.testCompatibility(anyString(), any(Schema.class))).thenReturn(true); // When: AvroUtil.throwOnInvalidSchemaEvolution(STATEMENT_TEXT, ddlCommand, srClient, ksqlConfig); @@ -160,7 +161,7 @@ public void shouldValidateSchemaWithMaps() throws Exception { final PhysicalSchema schema = PhysicalSchema .from(SCHEMA_WITH_MAPS, SerdeOption.none()); - when(srClient.testCompatibility(anyString(), any())).thenReturn(true); + when(srClient.testCompatibility(anyString(), any(Schema.class))).thenReturn(true); final org.apache.avro.Schema expectedAvroSchema = AvroSchemas .getAvroSchema(schema.valueSchema(), SCHEMA_NAME, ksqlConfig); @@ -179,7 +180,7 @@ public void shouldValidateWrappedSingleFieldSchemaEvolution() throws Exception { final PhysicalSchema schema = PhysicalSchema .from(SINGLE_FIELD_SCHEMA, SerdeOption.none()); - when(srClient.testCompatibility(anyString(), any())).thenReturn(true); + when(srClient.testCompatibility(anyString(), any(Schema.class))).thenReturn(true); final org.apache.avro.Schema expectedAvroSchema = AvroSchemas .getAvroSchema(schema.valueSchema(), SCHEMA_NAME, ksqlConfig); @@ -204,7 +205,7 @@ public void shouldValidateUnwrappedSingleFieldSchemaEvolution() throws Exception final PhysicalSchema 
schema = PhysicalSchema .from(SINGLE_FIELD_SCHEMA, SerdeOption.of(SerdeOption.UNWRAP_SINGLE_VALUES)); - when(srClient.testCompatibility(anyString(), any())).thenReturn(true); + when(srClient.testCompatibility(anyString(), any(Schema.class))).thenReturn(true); final org.apache.avro.Schema expectedAvroSchema = AvroSchemas .getAvroSchema(schema.valueSchema(), SCHEMA_NAME, ksqlConfig); @@ -219,7 +220,7 @@ public void shouldValidateUnwrappedSingleFieldSchemaEvolution() throws Exception @Test public void shouldNotThrowInvalidEvolution() throws Exception { // Given: - when(srClient.testCompatibility(any(), any())).thenReturn(true); + when(srClient.testCompatibility(any(), any(Schema.class))).thenReturn(true); // When: AvroUtil.throwOnInvalidSchemaEvolution(STATEMENT_TEXT, ddlCommand, srClient, ksqlConfig); @@ -228,7 +229,7 @@ public void shouldNotThrowInvalidEvolution() throws Exception { @Test public void shouldReturnInvalidEvolution() throws Exception { // Given: - when(srClient.testCompatibility(any(), any())).thenReturn(false); + when(srClient.testCompatibility(any(), any(Schema.class))).thenReturn(false); expectedException.expect(KsqlException.class); expectedException.expectMessage("Cannot register avro schema for actual-name as the schema is incompatible with the current schema version registered for the topic"); @@ -240,7 +241,7 @@ public void shouldReturnInvalidEvolution() throws Exception { @Test public void shouldNotThrowInvalidEvolutionIfSubjectNotRegistered() throws Exception { // Given: - when(srClient.testCompatibility(any(), any())) + when(srClient.testCompatibility(any(), any(Schema.class))) .thenThrow(new RestClientException("Unknown subject", 404, 40401)); // When: @@ -250,7 +251,7 @@ public void shouldNotThrowInvalidEvolutionIfSubjectNotRegistered() throws Except @Test public void shouldThrowOnSrAuthorizationErrors() throws Exception { // Given: - when(srClient.testCompatibility(any(), any())) + when(srClient.testCompatibility(any(), any(Schema.class))) .thenThrow(new RestClientException("Unknown subject", 403, 40401)); // Expect: @@ -269,7 +270,7 @@ public void shouldThrowOnSrAuthorizationErrors() throws Exception { @Test public void shouldThrowOnAnyOtherEvolutionSrException() throws Exception { // Given: - when(srClient.testCompatibility(any(), any())) + when(srClient.testCompatibility(any(), any(Schema.class))) .thenThrow(new RestClientException("Unknown subject", 500, 40401)); // Expect: @@ -283,7 +284,7 @@ public void shouldThrowOnAnyOtherEvolutionSrException() throws Exception { @Test public void shouldThrowOnAnyOtherEvolutionIOException() throws Exception { // Given: - when(srClient.testCompatibility(any(), any())) + when(srClient.testCompatibility(any(), any(Schema.class))) .thenThrow(new IOException("something")); // Expect: @@ -298,7 +299,7 @@ private static LogicalSchema toKsqlSchema(final String avroSchemaString) { final org.apache.avro.Schema avroSchema = new org.apache.avro.Schema.Parser().parse(avroSchemaString); final AvroData avroData = new AvroData(new AvroDataConfig(Collections.emptyMap())); - final Schema connectSchema = new ConnectSchemaTranslator() + final org.apache.kafka.connect.data.Schema connectSchema = new ConnectSchemaTranslator() .toKsqlSchema(avroData.toConnectSchema(avroSchema)); final ConnectToSqlTypeConverter converter = SchemaConverters diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamTest.java 
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamTest.java index 527dd10ccfd6..4fbd639abd4e 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamTest.java @@ -49,6 +49,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.utils.Bytes; import org.junit.Before; +import org.junit.Ignore; import org.junit.Test; public class TopicStreamTest { @@ -65,6 +66,7 @@ public void setUp() { } @Test + @Ignore("Temporarily disable this test until new Schema Registry changes land") public void shouldMatchAvroFormatter() throws Exception { // Given: final Schema schema = parseAvroSchema( @@ -79,7 +81,7 @@ public void shouldMatchAvroFormatter() throws Exception { final GenericData.Record avroRecord = new GenericData.Record(schema); avroRecord.put("str1", "My first string"); - expect(schemaRegistryClient.register(anyString(), anyObject())).andReturn(1); + expect(schemaRegistryClient.register(anyString(), anyObject(Schema.class))).andReturn(1); expect(schemaRegistryClient.getById(anyInt())).andReturn(schema).times(2); replay(schemaRegistryClient); From 56ac607cbbdff0e8f5d866a23955b0606296d70f Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Wed, 18 Dec 2019 18:40:01 +0000 Subject: [PATCH 049/123] chore: simplify group by schema resolving code (#4154) As per comment here: https://github.com/confluentinc/ksql/pull/4108/files#r357023751 --- .../streams/GroupByParamsFactory.java | 4 ++-- .../execution/streams/StepSchemaResolver.java | 21 ++++++++++--------- .../streams/StepSchemaResolverTest.java | 12 +++++++++-- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParamsFactory.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParamsFactory.java index 96706bd0c651..e1fcd25b88a6 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParamsFactory.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/GroupByParamsFactory.java @@ -56,13 +56,13 @@ public static GroupByParams build( return new GroupByParams(schema, mapper); } - static LogicalSchema multiExpressionSchema( + private static LogicalSchema multiExpressionSchema( final LogicalSchema sourceSchema ) { return buildSchema(sourceSchema, SqlTypes.STRING); } - static LogicalSchema singleExpressionSchema( + private static LogicalSchema singleExpressionSchema( final LogicalSchema sourceSchema, final SqlType rowKeyType ) { diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java index 0bb2bb41a021..b4d5eefe7ae6 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java @@ -15,7 +15,8 @@ package io.confluent.ksql.execution.streams; -import io.confluent.ksql.execution.expression.tree.Expression; +import io.confluent.ksql.execution.codegen.CodeGenRunner; +import io.confluent.ksql.execution.codegen.ExpressionMetadata; import io.confluent.ksql.execution.plan.AbstractStreamSource; import io.confluent.ksql.execution.plan.ExecutionStep; import 
io.confluent.ksql.execution.plan.StreamAggregate;
@@ -54,6 +55,7 @@
 /**
  * Computes the schema produced by an execution step, given the schema(s) going into the step.
  */
+@SuppressWarnings("MethodMayBeStatic") // Methods can not be used in HANDLERS if static.
 public final class StepSchemaResolver {
 
   private static final HandlerMaps.ClassHandlerMapR2<ExecutionStep, StepSchemaResolver, LogicalSchema, LogicalSchema> HANDLERS
@@ -166,16 +168,15 @@ private LogicalSchema handleGroupBy(
       final LogicalSchema sourceSchema,
       final StreamGroupBy<?> streamGroupBy
   ) {
-    final List<Expression> groupBy = streamGroupBy.getGroupByExpressions();
-
-    if (groupBy.size() != 1) {
-      return GroupByParamsFactory.multiExpressionSchema(sourceSchema);
-    }
-
-    final SqlType rowKeyType = new ExpressionTypeManager(sourceSchema, functionRegistry)
-        .getExpressionSqlType(groupBy.get(0));
+    final List<ExpressionMetadata> compiledGroupBy = CodeGenRunner.compileExpressions(
+        streamGroupBy.getGroupByExpressions().stream(),
+        "Group By",
+        sourceSchema,
+        ksqlConfig,
+        functionRegistry
+    );
 
-    return GroupByParamsFactory.singleExpressionSchema(sourceSchema, rowKeyType);
+    return GroupByParamsFactory.build(sourceSchema, compiledGroupBy).getSchema();
   }
 
   private LogicalSchema handleStreamSelect(
diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java
index d0e8e674c3a8..7c3c4c64980d 100644
--- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java
+++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java
@@ -82,12 +82,17 @@
 @RunWith(MockitoJUnitRunner.class)
 public class StepSchemaResolverTest {
 
+  private static final KsqlConfig CONFIG = new KsqlConfig(Collections.emptyMap());
+
   private static final LogicalSchema SCHEMA = LogicalSchema.builder()
       .valueColumn(ColumnName.of("ORANGE"), SqlTypes.INTEGER)
       .valueColumn(ColumnName.of("APPLE"), SqlTypes.BIGINT)
       .valueColumn(ColumnName.of("BANANA"), SqlTypes.STRING)
       .build();
+
+  private static final ColumnRef ORANGE_COL_REF = ColumnRef.withoutSource(ColumnName.of("ORANGE"));
+
   private static final ExecutionStepPropertiesV1 PROPERTIES = new ExecutionStepPropertiesV1(
       new QueryContext.Stacker().getQueryContext()
   );
@@ -233,14 +238,17 @@ public void shouldResolveSchemaForStreamGroupBy() {
         PROPERTIES,
         streamSource,
         formats,
-        Collections.emptyList()
+        ImmutableList.of(new ColumnReferenceExp(Optional.empty(), ORANGE_COL_REF))
     );
 
     // When:
     final LogicalSchema result = resolver.resolve(step, SCHEMA);
 
     // Then:
-    assertThat(result, is(SCHEMA));
+    assertThat(result, is(LogicalSchema.builder()
+        .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.INTEGER)
+        .valueColumns(SCHEMA.value())
+        .build()));
   }
 
   @Test

From 6c6695cc9e7c47084673b117742707ffe15327fb Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Thu, 19 Dec 2019 12:59:19 +0000
Subject: [PATCH 050/123] chore: enforce WITH KEY column type matches ROWKEY type (#4147)

* chore: enforce WITH KEY column type matches ROWKEY type

BREAKING CHANGE: Any `KEY` column identified in the `WITH` clause must be of the same SQL type as `ROWKEY`.

Users can provide the name of a value column that matches the key column, e.g.

```sql
CREATE STREAM S (ID INT, NAME STRING) WITH (KEY='ID', ...);
```

Before primitive keys were introduced all keys were treated as `STRING`. With primitive keys `ROWKEY` can be types other than `STRING`, e.g. `BIGINT`.
It therefore follows that any `KEY` column identified in the `WITH` clause must have the same SQL type as the _actual_ key, i.e. `ROWKEY`. With this change the above example statement will fail with the error: ``` The KEY field (ID) identified in the WITH clause is of a different type to the actual key column. Either change the type of the KEY field to match ROWKEY, or explicitly set ROWKEY to the type of the KEY field by adding 'ROWKEY INTEGER KEY' in the schema. KEY field type: INTEGER ROWKEY type: STRING ``` As the error message says, the error can be resolved by changing the statement to: ```sql CREATE STREAM S (ROWKEY INT KEY, ID INT, NAME STRING) WITH (KEY='ID', ...); ``` --- .../builder/PropertiesListTableBuilder.java | 1 - .../java/io/confluent/ksql/cli/CliTest.java | 13 +- .../confluent/ksql/cli/SslFunctionalTest.java | 3 +- .../ksql/engine/InsertValuesExecutor.java | 4 +- .../ksql/ddl/commands/DdlCommandExecTest.java | 7 +- .../confluent/ksql/engine/KsqlEngineTest.java | 8 +- .../ksql/integration/JsonFormatTest.java | 3 +- .../integration/SecureIntegrationTest.java | 12 +- .../StreamsSelectAndProjectIntTest.java | 6 +- .../ksql/integration/WindowingIntTest.java | 6 +- .../io/confluent/ksql/util/AvroUtilTest.java | 1 - .../ddl/commands/CreateSourceCommand.java | 43 +- .../ddl/commands/CreateSourceCommandTest.java | 136 +++ .../io/confluent/ksql/test/tools/Record.java | 19 +- .../ksql/test/tools/TestExecutor.java | 109 +- .../ksql/test/TopologyFileRewriter.java | 73 +- .../ksql/test/rest/RestTestExecutor.java | 29 +- .../identifiers_-_aliased_join_source | 4 +- .../identifiers_-_aliased_join_source_with_AS | 4 +- ...identifiers_-_aliased_left_unaliased_right | 4 +- ...identifiers_-_unaliased_left_aliased_right | 4 +- ...mestamp_-_stream_stream_inner_join_with_ts | 4 +- ...am_inner_join_with_ts_extractor_both_sides | 4 +- ...am_table_join_with_ts_extractor_both_sides | 2 +- .../joins_-_join_using_ROWKEY_in_the_criteria | 2 +- ..._the_criteria_-_join_key_not_in_projection | 2 +- ...n_the_criteria_-_left_rowkey_in_projection | 4 +- ...he_criteria_-_right_join_key_in_projection | 2 +- ..._the_criteria_-_right_rowkey_in_projection | 4 +- .../joins_-_multiple_join_keys_in_projection | 4 +- .../joins_-_stream_stream_inner_join | 4 +- ...am_inner_join_-_join_key_not_in_projection | 4 +- ..._inner_join_-_right_join_key_in_projection | 4 +- ...oins_-_stream_stream_inner_join_all_fields | 6 +- ...ream_inner_join_all_left_fields_some_right | 6 +- ...ream_inner_join_all_right_fields_some_left | 6 +- ...in_with_different_before_and_after_windows | 4 +- ...ream_inner_join_with_out_of_order_messages | 4 +- .../0_6_0-pre/joins_-_stream_stream_left_join | 4 +- ...m_left_join_-_both_join_keys_in_projection | 107 -- ...eam_left_join_-_join_key_not_in_projection | 4 +- .../joins_-_stream_stream_left_join_-_rekey | 107 -- ...m_left_join_-_right_join_key_in_projection | 107 -- .../joins_-_stream_stream_outer_join | 4 +- ..._outer_join_-_right_join_key_in_projection | 4 +- .../0_6_0-pre/joins_-_stream_table_inner_join | 2 +- ...le_inner_join_-_join_key_not_in_projection | 2 +- ..._inner_join_-_right_join_key_in_projection | 2 +- .../0_6_0-pre/joins_-_stream_table_left_join | 2 +- ...ble_left_join_-_join_key_not_in_projection | 2 +- ...e_left_join_-_right_join_key_in_projection | 2 +- ...have_key_field_and_joining_by_table_ROWKEY | 82 -- ...have_key_field_and_joining_by_table_ROWKEY | 2 +- .../joins_-_unqualified_join_criteria | 4 +- ...multiple_copies_of_key_field_in_projection | 2 +- 
...field_-_where_only_rowkey_is_in_projection | 2 +- .../query-validation-tests/group-by.json | 156 +-- .../query-validation-tests/histogram.json | 2 +- .../hopping-windows.json | 2 +- .../query-validation-tests/identifiers.json | 40 +- .../join-with-custom-timestamp.json | 100 +- .../query-validation-tests/joins.json | 1068 ++++++++--------- .../query-validation-tests/key-field.json | 82 +- .../query-validation-tests/partition-by.json | 12 +- .../project-filter.json | 6 +- .../session-windows.json | 4 +- .../query-validation-tests/simple-struct.json | 16 +- .../query-validation-tests/stringdate.json | 2 +- .../stringtimestamp.json | 2 +- .../resources/query-validation-tests/sum.json | 16 +- .../table-functions.json | 14 +- .../query-validation-tests/table.json | 2 +- .../insert-values.json | 58 +- .../test-runner/correct/join/input.json | 16 +- .../test-runner/correct/join/output.json | 18 +- .../test-runner/correct/join/statements.sql | 4 +- .../correct/simple_project_filter/input.json | 6 +- .../correct/simple_project_filter/output.json | 2 +- .../simple_project_filter/statements.sql | 2 +- .../expected_mismatch/statements.sql | 2 +- .../test/resources/testing_tool_tests.json | 8 +- .../confluent/ksql/util/MetaStoreFixture.java | 2 + .../execution/ListPropertiesExecutor.java | 2 - .../StandaloneExecutorFunctionalTest.java | 4 +- .../rest/server/computation/RecoveryTest.java | 1 - .../execution/ListPropertiesExecutorTest.java | 5 +- .../server/resources/KsqlResourceTest.java | 7 +- .../streaming/StreamedQueryResourceTest.java | 2 +- .../ksql/rest/entity/PropertiesList.java | 1 - .../io/confluent/ksql/rest/ErrorsTest.java | 4 +- .../rest/entity/SourceDescriptionTest.java | 3 +- 91 files changed, 1271 insertions(+), 1396 deletions(-) create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommandTest.java delete mode 100644 ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_both_join_keys_in_projection delete mode 100644 ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_rekey delete mode 100644 ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_right_join_key_in_projection delete mode 100644 ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_neither_have_key_field_and_joining_by_table_ROWKEY diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilder.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilder.java index ba183411651e..b36d8207c0a1 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilder.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/PropertiesListTableBuilder.java @@ -20,7 +20,6 @@ import io.confluent.ksql.cli.console.table.Table.Builder; import io.confluent.ksql.rest.entity.PropertiesList; import io.confluent.ksql.rest.entity.PropertiesList.Property; - import java.util.Comparator; import java.util.List; import java.util.Objects; diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java index 69a54a567f3f..73a74bfc29ae 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java @@ -113,7 +113,7 @@ */ 
@SuppressWarnings("SameParameterValue") @RunWith(MockitoJUnitRunner.class) -@Category({IntegrationTest.class}) +@Category(IntegrationTest.class) public class CliTest { private static final EmbeddedSingleNodeKafkaCluster CLUSTER = EmbeddedSingleNodeKafkaCluster.build(); @@ -171,7 +171,7 @@ public class CliTest { private String tableName; @BeforeClass - public static void classSetUp() throws Exception { + public static void classSetUp() { restClient = KsqlRestClient.create( REST_APP.getHttpListener().toString(), ImmutableMap.of(), @@ -236,15 +236,14 @@ private static void run(final String command, final Cli localCli) { } } - private static void produceInputStream(final TestDataProvider dataProvider) throws Exception { + private static void produceInputStream(final TestDataProvider dataProvider) { topicProducer.produceInputData(dataProvider); } private static void createKStream(final TestDataProvider dataProvider, final Cli cli) { run(String.format( - "CREATE STREAM %s %s WITH (value_format = 'json', kafka_topic = '%s' , key='%s');", - dataProvider.kstreamName(), dataProvider.ksqlSchemaString(), dataProvider.topicName(), - dataProvider.key()), + "CREATE STREAM %s %s WITH (value_format = 'json', kafka_topic = '%s');", + dataProvider.kstreamName(), dataProvider.ksqlSchemaString(), dataProvider.topicName()), cli); } @@ -1206,7 +1205,7 @@ private static Matcher>> isRow( } @SafeVarargs - @SuppressWarnings({"varargs", "unchecked"}) + @SuppressWarnings({"varargs", "unchecked", "rawtypes"}) private static Matcher>> hasRow( final Matcher... expected ) { diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java index 89e6574d81ab..07acd2145b31 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java @@ -103,13 +103,12 @@ public class SslFunctionalTest { private SslContextFactory sslContextFactory; @BeforeClass - public static void classSetUp() throws Exception { + public static void classSetUp() { final OrderDataProvider dataProvider = new OrderDataProvider(); CLUSTER.createTopic(TOPIC_NAME); new TopicProducer(CLUSTER).produceInputData(dataProvider); } - @SuppressWarnings("deprecation") @Before public void setUp() { clientProps = Collections.emptyMap(); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/engine/InsertValuesExecutor.java b/ksql-engine/src/main/java/io/confluent/ksql/engine/InsertValuesExecutor.java index 23b029efc055..8fb24f558ad2 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/engine/InsertValuesExecutor.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/engine/InsertValuesExecutor.java @@ -350,9 +350,9 @@ private static void handleExplicitKeyField( if (keyValue == null) { values.put(key.name(), rowKeyValue); } else { - values.put(SchemaUtil.ROWKEY_NAME, keyValue.toString()); + values.put(SchemaUtil.ROWKEY_NAME, keyValue); } - } else if (keyValue != null && !Objects.equals(keyValue.toString(), rowKeyValue)) { + } else if (keyValue != null && !Objects.equals(keyValue, rowKeyValue)) { throw new KsqlException(String.format( "Expected ROWKEY and %s to match but got %s and %s respectively.", key.toString(FormatOptions.noEscape()), rowKeyValue, keyValue)); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/DdlCommandExecTest.java b/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/DdlCommandExecTest.java index 9558261866f2..83ddfc31f9ce 100644 --- 
a/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/DdlCommandExecTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/DdlCommandExecTest.java @@ -21,7 +21,6 @@ import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.LogicalSchema; -import io.confluent.ksql.schema.ksql.types.SqlPrimitiveType; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.Format; import io.confluent.ksql.serde.FormatInfo; @@ -30,6 +29,7 @@ import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.serde.WindowInfo; import io.confluent.ksql.util.MetaStoreFixture; +import io.confluent.ksql.util.SchemaUtil; import java.util.Optional; import java.util.Set; import org.hamcrest.MatcherAssert; @@ -47,8 +47,9 @@ public class DdlCommandExecTest { private static final SourceName TABLE_NAME = SourceName.of("t1"); private static final String TOPIC_NAME = "topic"; private static final LogicalSchema SCHEMA = new LogicalSchema.Builder() - .valueColumn(ColumnName.of("F1"), SqlPrimitiveType.of("INTEGER")) - .valueColumn(ColumnName.of("F2"), SqlPrimitiveType.of("VARCHAR")) + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) + .valueColumn(ColumnName.of("F1"), SqlTypes.BIGINT) + .valueColumn(ColumnName.of("F2"), SqlTypes.STRING) .build(); private static final ValueFormat VALUE_FORMAT = ValueFormat.of(FormatInfo.of(Format.JSON)); private static final KeyFormat KEY_FORMAT = KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA)); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlEngineTest.java b/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlEngineTest.java index c4eb645ce3a9..371376f8900b 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlEngineTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlEngineTest.java @@ -279,15 +279,15 @@ public void shouldThrowOnInsertIntoWithKeyMismatch() { expectedException.expect(rawMessage(containsString( "Incompatible key fields for sink and results. 
" + "Sink key field is ORDERTIME (type: BIGINT) " - + "while result key field is ITEMID (type: STRING)"))); + + "while result key field is ORDERID (type: BIGINT)"))); expectedException.expect(statementText( - is("insert into bar select * from orders partition by itemid;"))); + is("insert into bar select * from orders partition by orderid;"))); // When: KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, - "insert into bar select * from orders partition by itemid;", + "insert into bar select * from orders partition by orderid;", KSQL_CONFIG, Collections.emptyMap() ); @@ -767,7 +767,7 @@ public void shouldHandleMultipleStatements() { + "CREATE STREAM S0 (a INT, b VARCHAR) " + " WITH (kafka_topic='s0_topic', value_format='DELIMITED');\n" + "\n" - + "CREATE TABLE T1 (f0 BIGINT, f1 DOUBLE) " + + "CREATE TABLE T1 (ROWKEY BIGINT KEY, f0 BIGINT, f1 DOUBLE) " + " WITH (kafka_topic='t1_topic', value_format='JSON', key = 'f0');\n" + "\n" + "CREATE STREAM S1 AS SELECT * FROM S0;\n" diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/JsonFormatTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/JsonFormatTest.java index 84f2230a0395..935da945bda4 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/JsonFormatTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/JsonFormatTest.java @@ -144,7 +144,8 @@ private void produceInitData() throws Exception { } private void execInitCreateStreamQueries() { - final String ordersStreamStr = String.format("CREATE STREAM %s (ORDERTIME bigint, ORDERID varchar, " + final String ordersStreamStr = String.format("CREATE STREAM %s (" + + "ROWKEY BIGINT KEY, ORDERTIME bigint, ORDERID varchar, " + "ITEMID varchar, ORDERUNITS double, PRICEARRAY array, KEYVALUEMAP " + "map) WITH (value_format = 'json', " + "kafka_topic='%s' , " diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/SecureIntegrationTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/SecureIntegrationTest.java index 1d2d198ae2fc..a089d702c815 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/SecureIntegrationTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/SecureIntegrationTest.java @@ -320,7 +320,7 @@ private static Map getKsqlConfig(final Credentials user) { return configs; } - private void produceInitData() throws Exception { + private void produceInitData() { if (topicClient.isTopicExists(INPUT_TOPIC)) { return; } @@ -340,11 +340,11 @@ private void awaitAsyncInputTopicCreation() { } private void execInitCreateStreamQueries() { - final String ordersStreamStr = String.format("CREATE STREAM %s (ORDERTIME bigint, ORDERID varchar, " - + "ITEMID varchar, ORDERUNITS double, PRICEARRAY array, KEYVALUEMAP " - + "map) WITH (value_format = 'json', " - + "kafka_topic='%s' , " - + "key='ordertime');", INPUT_STREAM, INPUT_TOPIC); + final String ordersStreamStr = + "CREATE STREAM " + INPUT_STREAM + " (ORDERTIME bigint, ORDERID varchar, " + + "ITEMID varchar, ORDERUNITS double, PRICEARRAY array, KEYVALUEMAP " + + "map) WITH (value_format = 'json', " + + "kafka_topic='" + INPUT_TOPIC + "');"; KsqlEngineTestUtil.execute( serviceContext, diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/StreamsSelectAndProjectIntTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/StreamsSelectAndProjectIntTest.java index e99f711a147f..35dac0d8a615 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/StreamsSelectAndProjectIntTest.java +++ 
@@ -432,13 +432,13 @@ private void createOrdersStream() {
         + " KEYVALUEMAP map<varchar, double>";
 
     ksqlContext.sql("CREATE STREAM " + JSON_STREAM_NAME + " (" + columns + ") WITH "
-        + "(kafka_topic='" + jsonTopicName + "', value_format='JSON', key='ordertime');");
+        + "(kafka_topic='" + jsonTopicName + "', value_format='JSON');");
 
     ksqlContext.sql("CREATE STREAM " + AVRO_STREAM_NAME + " (" + columns + ") WITH "
-        + "(kafka_topic='" + avroTopicName + "', value_format='AVRO', key='ordertime');");
+        + "(kafka_topic='" + avroTopicName + "', value_format='AVRO');");
 
     ksqlContext.sql("CREATE STREAM " + AVRO_TIMESTAMP_STREAM_NAME + " (" + columns + ") WITH "
-        + "(kafka_topic='" + avroTopicName + "', value_format='AVRO', key='ordertime', "
+        + "(kafka_topic='" + avroTopicName + "', value_format='AVRO', "
         + "timestamp='timestamp', timestamp_format='yyyy-MM-dd');");
   }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/WindowingIntTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/WindowingIntTest.java
index 23e5c39f8b15..e791e5fc53fe 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/integration/WindowingIntTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/WindowingIntTest.java
@@ -269,9 +269,9 @@ private Set<String> getTopicNames() {
     return names;
   }
 
-  private static Deserializer getKeyDeserializerFor(final Object key) {
+  private static Deserializer<?> getKeyDeserializerFor(final Object key) {
     if (key instanceof Windowed) {
-      if (((Windowed) key).window() instanceof SessionWindow) {
+      if (((Windowed<?>) key).window() instanceof SessionWindow) {
         return SESSION_WINDOWED_DESERIALIZER;
       }
       return TIME_WINDOWED_DESERIALIZER;
@@ -288,6 +288,6 @@ private void createOrdersStream() {
         + "ORDERUNITS double, "
         + "PRICEARRAY array<double>, "
         + "KEYVALUEMAP map<varchar, double>) "
-        + "WITH (kafka_topic='" + sourceTopicName + "', value_format='JSON', key='ordertime');");
+        + "WITH (kafka_topic='" + sourceTopicName + "', value_format='JSON');");
   }
 }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/AvroUtilTest.java b/ksql-engine/src/test/java/io/confluent/ksql/util/AvroUtilTest.java
index d90aa65f18f3..94193b495cf1 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/util/AvroUtilTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/util/AvroUtilTest.java
@@ -48,7 +48,6 @@
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Optional;
-
 import org.apache.avro.Schema;
 import org.junit.Before;
 import org.junit.Rule;
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommand.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommand.java
index c4aae635222f..32a80dcaebbf 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommand.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommand.java
@@ -19,9 +19,13 @@
 import io.confluent.ksql.execution.timestamp.TimestampColumn;
 import io.confluent.ksql.name.ColumnName;
 import io.confluent.ksql.name.SourceName;
+import io.confluent.ksql.schema.ksql.Column;
 import io.confluent.ksql.schema.ksql.ColumnRef;
+import io.confluent.ksql.schema.ksql.FormatOptions;
 import io.confluent.ksql.schema.ksql.LogicalSchema;
+import io.confluent.ksql.schema.ksql.types.SqlType;
 import io.confluent.ksql.serde.WindowInfo;
+import io.confluent.ksql.util.KsqlException;
 import 
io.confluent.ksql.util.SchemaUtil; import java.util.Objects; import java.util.Optional; @@ -56,10 +60,7 @@ public abstract class CreateSourceCommand implements DdlCommand { this.formats = Objects.requireNonNull(formats, "formats"); this.windowInfo = Objects.requireNonNull(windowInfo, "windowInfo"); - if (schema.findValueColumn(ColumnRef.withoutSource(SchemaUtil.ROWKEY_NAME)).isPresent() - || schema.findValueColumn(ColumnRef.withoutSource(SchemaUtil.ROWTIME_NAME)).isPresent()) { - throw new IllegalArgumentException("Schema contains implicit columns in value schema"); - } + validate(schema, keyField); } public SourceName getSourceName() { @@ -89,4 +90,38 @@ public Formats getFormats() { public Optional getWindowInfo() { return windowInfo; } + + private static void validate(final LogicalSchema schema, final Optional keyField) { + if (schema.findValueColumn(ColumnRef.withoutSource(SchemaUtil.ROWKEY_NAME)).isPresent() + || schema.findValueColumn(ColumnRef.withoutSource(SchemaUtil.ROWTIME_NAME)).isPresent()) { + throw new IllegalArgumentException("Schema contains implicit columns in value schema"); + } + + if (schema.key().size() != 1) { + throw new UnsupportedOperationException("Only single key columns supported"); + } + + if (keyField.isPresent()) { + final SqlType keyFieldType = schema.findColumn(ColumnRef.withoutSource(keyField.get())) + .map(Column::type) + .orElseThrow(IllegalArgumentException::new); + + final SqlType keyType = schema.key().get(0).type(); + + if (!keyFieldType.equals(keyType)) { + throw new KsqlException("The KEY field (" + + keyField.get().toString(FormatOptions.noEscape()) + + ") identified in the WITH clause is of a different type to the actual key column." + + System.lineSeparator() + + "Either change the type of the KEY field to match ROWKEY, " + + "or explicitly set ROWKEY to the type of the KEY field by adding " + + "'ROWKEY " + keyFieldType + " KEY' in the schema." + + System.lineSeparator() + + "KEY field type: " + keyFieldType + + System.lineSeparator() + + "ROWKEY type: " + keyType + ); + } + } + } } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommandTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommandTest.java new file mode 100644 index 000000000000..e83462fca350 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommandTest.java @@ -0,0 +1,136 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.ddl.commands; + +import static org.mockito.Mockito.mock; + +import io.confluent.ksql.execution.plan.Formats; +import io.confluent.ksql.execution.timestamp.TimestampColumn; +import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.types.SqlTypes; +import io.confluent.ksql.serde.WindowInfo; +import io.confluent.ksql.util.KsqlException; +import java.util.Optional; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class CreateSourceCommandTest { + + private static final SourceName SOURCE_NAME = SourceName.of("bob"); + private static final String TOPIC_NAME = "vic"; + private static final Formats FORMATS = mock(Formats.class); + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + private static final ColumnName KEY_FIELD = ColumnName.of("keyField"); + + @Test(expected = UnsupportedOperationException.class) + public void shouldThrowOnMultipleKeyColumns() { + // Given: + final LogicalSchema schema = LogicalSchema.builder() + .keyColumn(ColumnName.of("k0"), SqlTypes.STRING) + .keyColumn(ColumnName.of("k1"), SqlTypes.STRING) + .build(); + + // When: + new TestCommand( + SOURCE_NAME, + schema, + Optional.empty(), + Optional.empty(), + TOPIC_NAME, + FORMATS, + Optional.empty() + ); + } + + @Test + public void shouldThrowIfKeyFieldDoesNotMatchRowKeyType() { + // Given: + final ColumnName keyField = ColumnName.of("keyField"); + + final LogicalSchema schema = LogicalSchema.builder() + .keyColumn(ColumnName.of("k0"), SqlTypes.INTEGER) + .valueColumn(keyField, SqlTypes.STRING) + .build(); + + // Expect: + expectedException.expect(KsqlException.class); + expectedException.expectMessage("The KEY field (keyField) identified in the " + "WITH clause is of a different type to the actual key column."); + expectedException.expectMessage( + "Either change the type of the KEY field to match ROWKEY, or explicitly set ROWKEY " + "to the type of the KEY field by adding 'ROWKEY STRING KEY' in the schema."); + expectedException.expectMessage("KEY field type: STRING"); + expectedException.expectMessage("ROWKEY type: INTEGER"); + + // When: + new TestCommand( + SOURCE_NAME, + schema, + Optional.of(keyField), + Optional.empty(), + TOPIC_NAME, + FORMATS, + Optional.empty() + ); + } + + @Test + public void shouldNotThrowIfKeyFieldMatchesRowKeyType() { + // Given: + final LogicalSchema schema = LogicalSchema.builder() + .keyColumn(ColumnName.of("k0"), SqlTypes.INTEGER) + .valueColumn(KEY_FIELD, SqlTypes.INTEGER) + .build(); + + // When: + new TestCommand( + SOURCE_NAME, + schema, + Optional.of(KEY_FIELD), + Optional.empty(), + TOPIC_NAME, + FORMATS, + Optional.empty() + ); + + // Then: builds without error + } + + private static final class TestCommand extends CreateSourceCommand { + + TestCommand( + final SourceName sourceName, + final LogicalSchema schema, + final Optional keyField, + final Optional timestampColumn, + final String topicName, + final Formats formats, + final Optional windowInfo + ) { + super(sourceName, schema, keyField, timestampColumn, topicName, formats, windowInfo); + } + + @Override + public DdlCommandResult execute(final Executor executor) { + return null; + } + } +} \ No newline at end of file
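The effect of the new validation is easiest to see as KSQL DDL. The statements below are an editorial sketch, not part of the diff itself; they reuse the T1 schema updated earlier in this patch. With no explicit key column, ROWKEY defaults to STRING, so naming the BIGINT column f0 as the KEY field is now rejected, while declaring ROWKEY with the matching type is accepted:

-- Rejected: KEY field f0 is BIGINT, but ROWKEY defaults to STRING
CREATE TABLE T1 (f0 BIGINT, f1 DOUBLE) WITH (kafka_topic='t1_topic', value_format='JSON', key='f0');

-- Accepted: ROWKEY is explicitly declared with the KEY field's type
CREATE TABLE T1 (ROWKEY BIGINT KEY, f0 BIGINT, f1 DOUBLE) WITH (kafka_topic='t1_topic', value_format='JSON', key='f0');

diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Record.java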
b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Record.java index 2dd644ffcbab..711babda4711 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Record.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Record.java @@ -19,7 +19,6 @@ import io.confluent.ksql.test.model.WindowData; import java.util.Objects; import java.util.Optional; -import java.util.function.Function; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.Serializer; @@ -91,6 +90,10 @@ Deserializer keyDeserializer() { : new TimeWindowedDeserializer<>(inner, window.size()); } + public Object rawKey() { + return key; + } + public Object key() { if (window == null) { return key; @@ -125,20 +128,10 @@ public Optional getJsonValue() { return jsonValue; } - /** - * Coerce the key value to the correct type. - * - *
The type of the key loaded from the JSON test case file may not be the exact match on type, - * e.g. JSON will load a small number as an integer, but the key type of the source might be a - * long. - * - * @param keyCoercer function to coerce the key to the right type - * @return a new Record with the correct key type. - */ - public Record coerceKey(final Function keyCoercer) { + public Record withKey(final Object key) { return new Record( topic, - keyCoercer.apply(key), + key, value, jsonValue.orElse(null), timestamp, diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java index bdc8f0c866ca..fdc02b704295 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java @@ -37,8 +37,7 @@ import io.confluent.ksql.logging.processing.ProcessingLogContext; import io.confluent.ksql.metastore.MetaStoreImpl; import io.confluent.ksql.metastore.MutableMetaStore; -import io.confluent.ksql.metastore.model.DataSource; -import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.parser.DurationParser; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.query.id.SequentialQueryIdGenerator; import io.confluent.ksql.schema.ksql.DefaultSqlValueCoercer; @@ -71,6 +70,7 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.OptionalLong; import java.util.Set; import java.util.function.Function; import java.util.regex.Pattern; @@ -82,6 +82,7 @@ import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.TopologyTestDriver; +import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; import org.hamcrest.Matcher; import org.hamcrest.StringDescription; @@ -102,6 +103,12 @@ public class TestExecutor implements Closeable { private static final Pattern INTERNAL_TOPIC_PATTERN = Pattern .compile("_confluent.*query_(.*_\\d+)-.*-(changelog|repartition)"); + private static final Pattern WINDOWED_JOIN_PATTERN = Pattern + .compile( + "CREATE .* JOIN .* WITHIN (\\d+ \\w+) ON .*", + Pattern.CASE_INSENSITIVE | Pattern.DOTALL + ); + private final ServiceContext serviceContext; private final KsqlEngine ksqlEngine; private final Map config = baseConfig(); @@ -250,14 +257,7 @@ private void validateTopicData( final Function keyCoercer = keyCoercerForTopic(topicName); for (int i = 0; i < expected.size(); i++) { - final Record expectedRecord; - try { - expectedRecord = expected.get(i).coerceKey(keyCoercer); - } catch (final Exception e) { - throw new AssertionError( - "Topic '" + topicName + "', message " + i - + ": Could not coerce key in test case to required type. " + e.getMessage(), e); - } + final Record expectedRecord = coerceRecordKey(expected.get(i), i, keyCoercer); final ProducerRecord actualProducerRecord = actual.get(i).getProducerRecord(); validateCreatedMessage( @@ -270,8 +270,39 @@ private void validateTopicData( } } + /** + * Coerce the key value to the correct type. + * + *
The type of the key loaded from the JSON test case file may not be the exact match on type, + * e.g. JSON will load a small number as an integer, but the key type of the source might be a + * long. + * + * @param record the record to coerce + * @param msgIndex the index of the message, displayed in the error message + * @param keyCoercer keyCoercer to use + * @return a new Record with the correct key type. + */ + private static Record coerceRecordKey( + final Record record, + final int msgIndex, + final Function keyCoercer + ) { + try { + final Object coerced = keyCoercer.apply(record.rawKey()); + return record.withKey(coerced); + } catch (final Exception e) { + throw new AssertionError( + "Topic '" + record.topic.getName() + "', message " + msgIndex + + ": Invalid test-case: could not coerce key in test case to required type. " + + e.getMessage(), + e); + } + } + private Function keyCoercerForTopic(final String topicName) { - final SqlType keyType = getTopicInfo(topicName) + final TopicInfo topicInfo = getTopicInfo(topicName); + + final SqlType keyType = topicInfo .getSchema() .key() .get(0) @@ -281,6 +312,7 @@ private Function keyCoercerForTopic(final String topicName) { if (key == null) { return null; } + return DefaultSqlValueCoercer.INSTANCE .coerce(key, keyType) .orElseThrow(() -> new AssertionError("Invalid key value for topic " + topicName + "." @@ -305,16 +337,30 @@ private TopicInfo getTopicInfo(final String topicName) { .orElseThrow(() -> new TestFrameworkException("Unknown queryId for internal topic: " + queryId)); - final SourceName sinkName = query.getSinkName(); - final DataSource source = ksqlEngine.getMetaStore().getSource(sinkName); - return new TopicInfo(source.getSchema(), source.getKsqlTopic().getKeyFormat()); + final java.util.regex.Matcher windowedJoinMatcher = WINDOWED_JOIN_PATTERN + .matcher(query.getStatementString()); + + final OptionalLong changeLogWindowSize = topicName.endsWith("-changelog") + && windowedJoinMatcher.matches() + ? 
OptionalLong.of(DurationParser.parse(windowedJoinMatcher.group(1)).toMillis()) + : OptionalLong.empty(); + + return new TopicInfo( + query.getLogicalSchema(), + query.getResultTopic().getKeyFormat(), + changeLogWindowSize + ); } // Source / sink topic: final Set keyTypes = ksqlEngine.getMetaStore().getAllDataSources().values() .stream() .filter(source -> source.getKafkaTopicName().equals(topicName)) - .map(source -> new TopicInfo(source.getSchema(), source.getKsqlTopic().getKeyFormat())) + .map(source -> new TopicInfo( + source.getSchema(), + source.getKsqlTopic().getKeyFormat(), + OptionalLong.empty() + )) .collect(Collectors.toSet()); if (keyTypes.isEmpty()) { @@ -353,13 +399,22 @@ private Deserializer getKeyDeserializer(final String topicName) { final SerdeSupplier keySerdeSupplier = SerdeUtil .getKeySerdeSupplier(topicInfo.getKeyFormat(), topicInfo::getSchema); - final Deserializer deserializer = keySerdeSupplier.getDeserializer( + Deserializer deserializer = keySerdeSupplier.getDeserializer( serviceContext.getSchemaRegistryClient() ); deserializer.configure(ImmutableMap.of(), true); - return deserializer; + if (!topicInfo.getChangeLogWindowSize().isPresent()) { + return deserializer; + } + + final TimeWindowedDeserializer changeLogDeserializer = new TimeWindowedDeserializer<>( + deserializer, topicInfo.getChangeLogWindowSize().getAsLong()); + + changeLogDeserializer.setIsChangelogTopic(true); + + return changeLogDeserializer; } private static String getActualsForErrorMessage(final List actual) { @@ -408,10 +463,15 @@ private void pipeRecordsFromProvidedInput( final TopologyTestDriverContainer topologyTestDriverContainer ) { + int inputRecordIndex = 0; for (final Record record : testCase.getInputRecords()) { if (topologyTestDriverContainer.getSourceTopicNames().contains(record.topic.getName())) { - final Record coerced = record.coerceKey(keyCoercerForTopic(record.topic.getName())); + final Record coerced = coerceRecordKey( + record, + inputRecordIndex, + keyCoercerForTopic(record.topic.getName()) + ); processSingleRecord( StubKafkaRecord.of(coerced, null), @@ -419,6 +479,7 @@ private void pipeRecordsFromProvidedInput( ImmutableSet.copyOf(stubKafkaService.getAllTopics()) ); } + ++inputRecordIndex; } } @@ -631,10 +692,16 @@ private static final class TopicInfo { private final LogicalSchema schema; private final KeyFormat keyFormat; + private final OptionalLong changeLogWindowSize; - TopicInfo(final LogicalSchema schema, final KeyFormat keyFormat) { + TopicInfo( + final LogicalSchema schema, + final KeyFormat keyFormat, + final OptionalLong changeLogWindowSize + ) { this.schema = requireNonNull(schema, "schema"); this.keyFormat = requireNonNull(keyFormat, "keyFormat"); + this.changeLogWindowSize = requireNonNull(changeLogWindowSize, "changeLogWindowSize"); } public KeyFormat getKeyFormat() { @@ -645,6 +712,10 @@ public LogicalSchema getSchema() { return schema; } + public OptionalLong getChangeLogWindowSize() { + return changeLogWindowSize; + } + @Override public boolean equals(final Object o) { if (this == o) { diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java index 51a72f35a763..dadb063c7cc7 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java @@ -191,6 +191,24 @@ private static String grabContent( return 
contents.substring(start, end); } + private static Map parseConfigs(final String configs) { + try { + final ObjectReader objectReader = OBJECT_MAPPER.readerFor(Map.class); + final Map parsed = objectReader.readValue(configs); + + final Set toRemove = parsed.entrySet().stream() + .filter(e -> e.getValue() == null) + .map(Entry::getKey) + .collect(Collectors.toSet()); + + parsed.remove("ksql.streams.state.dir"); + parsed.keySet().removeAll(toRemove); + return parsed; + } catch (final Exception e) { + throw new RuntimeException("Failed to parse configs: " + configs, e); + } + } + private interface Rewriter { String rewrite(final TestCase testCase, final Path path) throws Exception; @@ -321,7 +339,7 @@ private static int findCloseTagFor(final String contents, final int startIdx) { } } - private static final class RewriteSchemasOnly implements StructuredRewriter { + private static final class CustomRewriter implements StructuredRewriter { @Override public String rewriteSchemas(final TestCase testCase, final Path path, final String schemas) { @@ -332,6 +350,10 @@ public String rewriteSchemas(final TestCase testCase, final Path path, final Str } } + /** + * Uses the standard topology generation code to rewrite expected topology, i.e. it updates + * the topology to match what the current code would output, taking into account any config + */ private static final class RewriteTopologyOnly implements StructuredRewriter { private Map configs; @@ -363,23 +385,40 @@ public String rewriteTopologies( return grabContent(newContent, topologyStart, Optional.empty()); } + } - private static Map parseConfigs(final String configs) { - try { - final ObjectReader objectReader = OBJECT_MAPPER.readerFor(Map.class); - final Map parsed = objectReader.readValue(configs); - - final Set toRemove = parsed.entrySet().stream() - .filter(e -> e.getValue() == null) - .map(Entry::getKey) - .collect(Collectors.toSet()); - - parsed.remove("ksql.streams.state.dir"); - parsed.keySet().removeAll(toRemove); - return parsed; - } catch (final Exception e) { - throw new RuntimeException("Failed to parse configs: " + configs, e); - } + /** + * Uses the standard topology generation code to rewrite expected schemas, i.e. 
it updates + * the schemas to match what the current code would output, taking into account any config + */ + private static final class RewriteSchemasOnly implements StructuredRewriter { + + private Map configs; + + @Override + public String rewriteConfig( + final TestCase testCase, + final Path path, + final String configs + ) { + this.configs = parseConfigs(configs); + return configs; + } + + @Override + public String rewriteSchemas( + final TestCase testCase, + final Path path, + final String schemas + ) { + final String newContent = TopologyFileGenerator + .buildExpectedTopologyContent(testCase, Optional.of(configs)); + + return grabContent( + newContent, + Optional.of(CONFIG_END_MARKER), + Optional.of(SCHEMAS_END_MARKER) + ); } } } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java index a36f1b24bf70..f8c740512986 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java @@ -48,6 +48,7 @@ import io.confluent.ksql.util.KsqlServerException; import io.confluent.ksql.util.RetryUtil; import java.io.Closeable; +import java.math.BigDecimal; import java.net.URL; import java.time.Duration; import java.util.List; @@ -361,7 +362,7 @@ private static void compareKeyValueTimestamp( final Object actualKey = actual.key(); final Object actualValue = actual.value(); - final Object expectedKey = expected.key(); + final Object expectedKey = coerceExpectedKey(expected.key(), actualKey); final JsonNode expectedValue = expected.getJsonValue() .orElseThrow(() -> new KsqlServerException( "could not get expected value from test record: " + expected)); @@ -386,6 +387,32 @@ } } + /** + * The expected key loaded from the JSON file may need a little coercing to the right type, e.g. + * a double value of {@code 1.23} will be deserialized as a {@code BigDecimal}. + * @param expectedKey the key to coerce + * @param actualKey the value whose type to coerce to. + * @return the coerced key.
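+ * + * For example, a minimal editorial sketch (hypothetical values; assumes Jackson's default mapping of whole JSON numbers to {@code Integer}): + * {@code Object expectedKey = 10; // loaded from the JSON test file: Integer} + * {@code Object actualKey = 10L; // returned by the server for a BIGINT key: Long} + * {@code expectedKey.equals(actualKey); // false until the expected key is widened} + * {@code expectedKey = ((Integer) expectedKey).longValue(); // now the comparison succeeds}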
+ */ + private static Object coerceExpectedKey( + final Object expectedKey, + final Object actualKey + ) { + if (actualKey == null || expectedKey == null) { + return expectedKey; + } + + if (actualKey instanceof Double && expectedKey instanceof BigDecimal) { + return ((BigDecimal) expectedKey).doubleValue(); + } + + if (actualKey instanceof Long && expectedKey instanceof Integer) { + return ((Integer)expectedKey).longValue(); + } + + return expectedKey; + } + private static T asJson(final Object response, final TypeReference type) { try { final String text = JsonMapper.INSTANCE.mapper.writeValueAsString(response); diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source index a03ebb608392..d1b3a6b85364 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source @@ -38,8 +38,8 @@ CONFIGS_END CSAS_OUTPUT_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_OUTPUT_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Right = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Right = STRUCT NOT NULL CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source_with_AS b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source_with_AS index a03ebb608392..d1b3a6b85364 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source_with_AS +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_join_source_with_AS @@ -38,8 +38,8 @@ CONFIGS_END CSAS_OUTPUT_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_OUTPUT_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Right = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Right = STRUCT NOT NULL CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_left_unaliased_right b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_left_unaliased_right index 075ff3076b05..39efc18a97e6 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_left_unaliased_right +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_aliased_left_unaliased_right @@ -38,8 +38,8 @@ CONFIGS_END CSAS_OUTPUT_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_OUTPUT_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Right = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Right = STRUCT NOT NULL CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_unaliased_left_aliased_right b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_unaliased_left_aliased_right index e5da6585faad..9dcb2db47122 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_unaliased_left_aliased_right +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/identifiers_-_unaliased_left_aliased_right @@ -38,8 +38,8 @@ CONFIGS_END CSAS_OUTPUT_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_OUTPUT_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Right = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Right = STRUCT NOT NULL CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts index 87736e88a55a..9437cc38e7ed 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts @@ -38,8 +38,8 @@ CONFIGS_END CSAS_S1_JOIN_S2_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_S1_JOIN_S2_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_S1_JOIN_S2_0.Join.Left = STRUCT NOT NULL -CSAS_S1_JOIN_S2_0.Join.Right = STRUCT NOT NULL +CSAS_S1_JOIN_S2_0.Join.Left = STRUCT NOT NULL +CSAS_S1_JOIN_S2_0.Join.Right = STRUCT NOT NULL CSAS_S1_JOIN_S2_0.S1_JOIN_S2 = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts_extractor_both_sides b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts_extractor_both_sides index 99366f9414d5..98e48420f0b8 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts_extractor_both_sides +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_stream_inner_join_with_ts_extractor_both_sides @@ -38,8 +38,8 @@ CONFIGS_END CSAS_S1_JOIN_S2_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_S1_JOIN_S2_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_S1_JOIN_S2_0.Join.Left = STRUCT NOT NULL -CSAS_S1_JOIN_S2_0.Join.Right = STRUCT NOT NULL +CSAS_S1_JOIN_S2_0.Join.Left = STRUCT NOT NULL +CSAS_S1_JOIN_S2_0.Join.Right = STRUCT NOT NULL CSAS_S1_JOIN_S2_0.S1_JOIN_S2 = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_table_join_with_ts_extractor_both_sides b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_table_join_with_ts_extractor_both_sides index 975b69045c1b..63409b3a007e 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_table_join_with_ts_extractor_both_sides +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/join-with-custom-timestamp_-_stream_table_join_with_ts_extractor_both_sides @@ -38,7 +38,7 @@ CONFIGS_END CSAS_S1_JOIN_T1_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_S1_JOIN_T1_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_S1_JOIN_T1_0.Join.Left = STRUCT NOT NULL 
+CSAS_S1_JOIN_T1_0.Join.Left = STRUCT NOT NULL CSAS_S1_JOIN_T1_0.S1_JOIN_T1 = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria index 2926b75846cb..21503a0b001c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria @@ -38,7 +38,7 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_join_key_not_in_projection index d02a374b26e5..8a13c0f5de53 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_join_key_not_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_join_key_not_in_projection @@ -38,7 +38,7 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection index 7d3efc72e749..cc511c40b8e5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_left_rowkey_in_projection @@ -38,8 +38,8 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: Sub-topology: 0 diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_join_key_in_projection index f18d2ed6e9d8..c30be53fb4b1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_join_key_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_join_key_in_projection @@ -38,7 +38,7 @@ CONFIGS_END 
CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection index 7d3efc72e749..cc511c40b8e5 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_join_using_ROWKEY_in_the_criteria_-_right_rowkey_in_projection @@ -38,8 +38,8 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: Sub-topology: 0 diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_multiple_join_keys_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_multiple_join_keys_in_projection index 5eb6633fb917..8a5d2b9e629d 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_multiple_join_keys_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_multiple_join_keys_in_projection @@ -38,8 +38,8 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: Sub-topology: 0 diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join index 93fd5415057f..aa86da884e92 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join @@ -38,8 +38,8 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_join_key_not_in_projection index 6e2332e1bd86..375b78226952 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_join_key_not_in_projection +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_join_key_not_in_projection @@ -38,8 +38,8 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_right_join_key_in_projection index a594dfc27f4c..3254d32bb358 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_right_join_key_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_-_right_join_key_in_projection @@ -38,8 +38,8 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_fields b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_fields index d5b2afcd310b..0668c442f9f2 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_fields +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_fields @@ -38,9 +38,9 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL -CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: Sub-topology: 0 diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_left_fields_some_right b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_left_fields_some_right index 3a8c5d7dd970..2026613102cb 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_left_fields_some_right +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_left_fields_some_right @@ -38,9 +38,9 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL -CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT 
NOT NULL SCHEMAS_END Topologies: Sub-topology: 0 diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_right_fields_some_left b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_right_fields_some_left index 70b894706142..939b95e3cbec 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_right_fields_some_left +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_all_right_fields_some_left @@ -38,9 +38,9 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL -CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: Sub-topology: 0 diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_different_before_and_after_windows b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_different_before_and_after_windows index 93fd5415057f..aa86da884e92 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_different_before_and_after_windows +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_different_before_and_after_windows @@ -38,8 +38,8 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_out_of_order_messages b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_out_of_order_messages index 93fd5415057f..aa86da884e92 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_out_of_order_messages +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_inner_join_with_out_of_order_messages @@ -38,8 +38,8 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join index fdabb61566d7..47fe13d94985 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join +++ 
b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join @@ -38,8 +38,8 @@ CONFIGS_END CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.LEFT_OUTER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_both_join_keys_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_both_join_keys_in_projection deleted file mode 100644 index 29c84ce6ae2f..000000000000 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_both_join_keys_in_projection +++ /dev/null @@ -1,107 +0,0 @@ -{ - "ksql.extension.dir" : "ext", - "ksql.streams.cache.max.bytes.buffering" : "0", - "ksql.security.extension.class" : null, - "ksql.transient.prefix" : "transient_", - "ksql.persistence.wrap.single.values" : "true", - "ksql.schema.registry.url" : "http://localhost:8081", - "ksql.streams.default.deserialization.exception.handler" : "io.confluent.ksql.errors.LogMetricAndContinueExceptionHandler", - "ksql.output.topic.name.prefix" : "", - "ksql.streams.auto.offset.reset" : "earliest", - "ksql.connect.url" : "http://localhost:8083", - "ksql.service.id" : "some.ksql.service.id", - "ksql.internal.topic.min.insync.replicas" : "1", - "ksql.internal.topic.replicas" : "1", - "ksql.insert.into.values.enabled" : "true", - "ksql.pull.queries.enable" : "true", - "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", - "ksql.access.validator.enable" : "auto", - "ksql.streams.bootstrap.servers" : "localhost:0", - "ksql.streams.commit.interval.ms" : "2000", - "ksql.metric.reporters" : "", - "ksql.streams.auto.commit.interval.ms" : "0", - "ksql.metrics.extension" : null, - "ksql.streams.topology.optimization" : "all", - "ksql.query.pull.streamsstore.rebalancing.timeout.ms" : "10000", - "ksql.streams.num.stream.threads" : "4", - "ksql.metrics.tags.custom" : "", - "ksql.udfs.enabled" : "true", - "ksql.udf.enable.security.manager" : "true", - "ksql.query.pull.skip.access.validator" : "false", - "ksql.connect.worker.config" : "", - "ksql.query.pull.routing.timeout.ms" : "30000", - "ksql.sink.window.change.log.additional.retention" : "1000000", - "ksql.udf.collect.metrics" : "false", - "ksql.persistent.prefix" : "query_", - "ksql.query.persistent.active.limit" : "2147483647" -} -CONFIGS_END -CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.LEFT_OUTER_JOIN = STRUCT NOT NULL -SCHEMAS_END -Topologies: - Sub-topology: 0 - Source: KSTREAM-SOURCE-0000000000 (topics: [left_topic]) - --> KSTREAM-TRANSFORMVALUES-0000000001 - Processor: KSTREAM-TRANSFORMVALUES-0000000001 (stores: []) - --> KSTREAM-FILTER-0000000002 - <-- KSTREAM-SOURCE-0000000000 - Processor: KSTREAM-FILTER-0000000002 (stores: []) - --> KSTREAM-KEY-SELECT-0000000003 - <-- KSTREAM-TRANSFORMVALUES-0000000001 - 
Processor: KSTREAM-KEY-SELECT-0000000003 (stores: []) - --> Join-left-repartition-filter - <-- KSTREAM-FILTER-0000000002 - Processor: Join-left-repartition-filter (stores: []) - --> Join-left-repartition-sink - <-- KSTREAM-KEY-SELECT-0000000003 - Sink: Join-left-repartition-sink (topic: Join-left-repartition) - <-- Join-left-repartition-filter - - Sub-topology: 1 - Source: Join-left-repartition-source (topics: [Join-left-repartition]) - --> Join-this-windowed - Source: Join-right-repartition-source (topics: [Join-right-repartition]) - --> Join-other-windowed - Processor: Join-other-windowed (stores: [KSTREAM-OUTEROTHER-0000000017-store]) - --> Join-outer-other-join - <-- Join-right-repartition-source - Processor: Join-this-windowed (stores: [KSTREAM-JOINTHIS-0000000016-store]) - --> Join-this-join - <-- Join-left-repartition-source - Processor: Join-outer-other-join (stores: [KSTREAM-JOINTHIS-0000000016-store]) - --> Join-merge - <-- Join-other-windowed - Processor: Join-this-join (stores: [KSTREAM-OUTEROTHER-0000000017-store]) - --> Join-merge - <-- Join-this-windowed - Processor: Join-merge (stores: []) - --> Project - <-- Join-this-join, Join-outer-other-join - Processor: Project (stores: []) - --> KSTREAM-SINK-0000000020 - <-- Join-merge - Sink: KSTREAM-SINK-0000000020 (topic: LEFT_OUTER_JOIN) - <-- Project - - Sub-topology: 2 - Source: KSTREAM-SOURCE-0000000004 (topics: [right_topic]) - --> KSTREAM-TRANSFORMVALUES-0000000005 - Processor: KSTREAM-TRANSFORMVALUES-0000000005 (stores: []) - --> KSTREAM-FILTER-0000000006 - <-- KSTREAM-SOURCE-0000000004 - Processor: KSTREAM-FILTER-0000000006 (stores: []) - --> KSTREAM-KEY-SELECT-0000000007 - <-- KSTREAM-TRANSFORMVALUES-0000000005 - Processor: KSTREAM-KEY-SELECT-0000000007 (stores: []) - --> Join-right-repartition-filter - <-- KSTREAM-FILTER-0000000006 - Processor: Join-right-repartition-filter (stores: []) - --> Join-right-repartition-sink - <-- KSTREAM-KEY-SELECT-0000000007 - Sink: Join-right-repartition-sink (topic: Join-right-repartition) - <-- Join-right-repartition-filter - diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection index 8d3da68c7e0f..d1049ad74346 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_join_key_not_in_projection @@ -38,8 +38,8 @@ CONFIGS_END CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.LEFT_OUTER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_rekey b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_rekey deleted file mode 100644 index c6ff88c986fd..000000000000 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_rekey +++ /dev/null @@ -1,107 +0,0 @@ -{ - 
"ksql.extension.dir" : "ext", - "ksql.streams.cache.max.bytes.buffering" : "0", - "ksql.security.extension.class" : null, - "ksql.transient.prefix" : "transient_", - "ksql.persistence.wrap.single.values" : "true", - "ksql.schema.registry.url" : "http://localhost:8081", - "ksql.streams.default.deserialization.exception.handler" : "io.confluent.ksql.errors.LogMetricAndContinueExceptionHandler", - "ksql.output.topic.name.prefix" : "", - "ksql.streams.auto.offset.reset" : "earliest", - "ksql.connect.url" : "http://localhost:8083", - "ksql.service.id" : "some.ksql.service.id", - "ksql.internal.topic.min.insync.replicas" : "1", - "ksql.internal.topic.replicas" : "1", - "ksql.insert.into.values.enabled" : "true", - "ksql.pull.queries.enable" : "true", - "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", - "ksql.access.validator.enable" : "auto", - "ksql.streams.bootstrap.servers" : "localhost:0", - "ksql.streams.commit.interval.ms" : "2000", - "ksql.metric.reporters" : "", - "ksql.streams.auto.commit.interval.ms" : "0", - "ksql.metrics.extension" : null, - "ksql.streams.topology.optimization" : "all", - "ksql.query.pull.streamsstore.rebalancing.timeout.ms" : "10000", - "ksql.streams.num.stream.threads" : "4", - "ksql.metrics.tags.custom" : "", - "ksql.udfs.enabled" : "true", - "ksql.udf.enable.security.manager" : "true", - "ksql.query.pull.skip.access.validator" : "false", - "ksql.connect.worker.config" : "", - "ksql.query.pull.routing.timeout.ms" : "30000", - "ksql.sink.window.change.log.additional.retention" : "1000000", - "ksql.udf.collect.metrics" : "false", - "ksql.persistent.prefix" : "query_", - "ksql.query.persistent.active.limit" : "2147483647" -} -CONFIGS_END -CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.LEFT_OUTER_JOIN = STRUCT NOT NULL -SCHEMAS_END -Topologies: - Sub-topology: 0 - Source: KSTREAM-SOURCE-0000000000 (topics: [left_topic]) - --> KSTREAM-TRANSFORMVALUES-0000000001 - Processor: KSTREAM-TRANSFORMVALUES-0000000001 (stores: []) - --> KSTREAM-FILTER-0000000002 - <-- KSTREAM-SOURCE-0000000000 - Processor: KSTREAM-FILTER-0000000002 (stores: []) - --> KSTREAM-KEY-SELECT-0000000003 - <-- KSTREAM-TRANSFORMVALUES-0000000001 - Processor: KSTREAM-KEY-SELECT-0000000003 (stores: []) - --> Join-left-repartition-filter - <-- KSTREAM-FILTER-0000000002 - Processor: Join-left-repartition-filter (stores: []) - --> Join-left-repartition-sink - <-- KSTREAM-KEY-SELECT-0000000003 - Sink: Join-left-repartition-sink (topic: Join-left-repartition) - <-- Join-left-repartition-filter - - Sub-topology: 1 - Source: Join-left-repartition-source (topics: [Join-left-repartition]) - --> Join-this-windowed - Source: Join-right-repartition-source (topics: [Join-right-repartition]) - --> Join-other-windowed - Processor: Join-other-windowed (stores: [KSTREAM-OUTEROTHER-0000000017-store]) - --> Join-outer-other-join - <-- Join-right-repartition-source - Processor: Join-this-windowed (stores: [KSTREAM-JOINTHIS-0000000016-store]) - --> Join-this-join - <-- Join-left-repartition-source - Processor: Join-outer-other-join (stores: [KSTREAM-JOINTHIS-0000000016-store]) - --> Join-merge - <-- Join-other-windowed - Processor: Join-this-join (stores: [KSTREAM-OUTEROTHER-0000000017-store]) - --> Join-merge - <-- 
Join-this-windowed - Processor: Join-merge (stores: []) - --> Project - <-- Join-this-join, Join-outer-other-join - Processor: Project (stores: []) - --> KSTREAM-SINK-0000000020 - <-- Join-merge - Sink: KSTREAM-SINK-0000000020 (topic: LEFT_OUTER_JOIN) - <-- Project - - Sub-topology: 2 - Source: KSTREAM-SOURCE-0000000004 (topics: [right_topic]) - --> KSTREAM-TRANSFORMVALUES-0000000005 - Processor: KSTREAM-TRANSFORMVALUES-0000000005 (stores: []) - --> KSTREAM-FILTER-0000000006 - <-- KSTREAM-SOURCE-0000000004 - Processor: KSTREAM-FILTER-0000000006 (stores: []) - --> KSTREAM-KEY-SELECT-0000000007 - <-- KSTREAM-TRANSFORMVALUES-0000000005 - Processor: KSTREAM-KEY-SELECT-0000000007 (stores: []) - --> Join-right-repartition-filter - <-- KSTREAM-FILTER-0000000006 - Processor: Join-right-repartition-filter (stores: []) - --> Join-right-repartition-sink - <-- KSTREAM-KEY-SELECT-0000000007 - Sink: Join-right-repartition-sink (topic: Join-right-repartition) - <-- Join-right-repartition-filter - diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_right_join_key_in_projection deleted file mode 100644 index 8cb1a944717a..000000000000 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_left_join_-_right_join_key_in_projection +++ /dev/null @@ -1,107 +0,0 @@ -{ - "ksql.extension.dir" : "ext", - "ksql.streams.cache.max.bytes.buffering" : "0", - "ksql.security.extension.class" : null, - "ksql.transient.prefix" : "transient_", - "ksql.persistence.wrap.single.values" : "true", - "ksql.schema.registry.url" : "http://localhost:8081", - "ksql.streams.default.deserialization.exception.handler" : "io.confluent.ksql.errors.LogMetricAndContinueExceptionHandler", - "ksql.output.topic.name.prefix" : "", - "ksql.streams.auto.offset.reset" : "earliest", - "ksql.connect.url" : "http://localhost:8083", - "ksql.service.id" : "some.ksql.service.id", - "ksql.internal.topic.min.insync.replicas" : "1", - "ksql.internal.topic.replicas" : "1", - "ksql.insert.into.values.enabled" : "true", - "ksql.pull.queries.enable" : "true", - "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", - "ksql.access.validator.enable" : "auto", - "ksql.streams.bootstrap.servers" : "localhost:0", - "ksql.streams.commit.interval.ms" : "2000", - "ksql.metric.reporters" : "", - "ksql.streams.auto.commit.interval.ms" : "0", - "ksql.metrics.extension" : null, - "ksql.streams.topology.optimization" : "all", - "ksql.query.pull.streamsstore.rebalancing.timeout.ms" : "10000", - "ksql.streams.num.stream.threads" : "4", - "ksql.metrics.tags.custom" : "", - "ksql.udfs.enabled" : "true", - "ksql.udf.enable.security.manager" : "true", - "ksql.query.pull.skip.access.validator" : "false", - "ksql.connect.worker.config" : "", - "ksql.query.pull.routing.timeout.ms" : "30000", - "ksql.sink.window.change.log.additional.retention" : "1000000", - "ksql.udf.collect.metrics" : "false", - "ksql.persistent.prefix" : "query_", - "ksql.query.persistent.active.limit" : "2147483647" -} -CONFIGS_END -CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL 
-CSAS_LEFT_OUTER_JOIN_0.LEFT_OUTER_JOIN = STRUCT NOT NULL -SCHEMAS_END -Topologies: - Sub-topology: 0 - Source: KSTREAM-SOURCE-0000000000 (topics: [left_topic]) - --> KSTREAM-TRANSFORMVALUES-0000000001 - Processor: KSTREAM-TRANSFORMVALUES-0000000001 (stores: []) - --> KSTREAM-FILTER-0000000002 - <-- KSTREAM-SOURCE-0000000000 - Processor: KSTREAM-FILTER-0000000002 (stores: []) - --> KSTREAM-KEY-SELECT-0000000003 - <-- KSTREAM-TRANSFORMVALUES-0000000001 - Processor: KSTREAM-KEY-SELECT-0000000003 (stores: []) - --> Join-left-repartition-filter - <-- KSTREAM-FILTER-0000000002 - Processor: Join-left-repartition-filter (stores: []) - --> Join-left-repartition-sink - <-- KSTREAM-KEY-SELECT-0000000003 - Sink: Join-left-repartition-sink (topic: Join-left-repartition) - <-- Join-left-repartition-filter - - Sub-topology: 1 - Source: Join-left-repartition-source (topics: [Join-left-repartition]) - --> Join-this-windowed - Source: Join-right-repartition-source (topics: [Join-right-repartition]) - --> Join-other-windowed - Processor: Join-other-windowed (stores: [KSTREAM-OUTEROTHER-0000000017-store]) - --> Join-outer-other-join - <-- Join-right-repartition-source - Processor: Join-this-windowed (stores: [KSTREAM-JOINTHIS-0000000016-store]) - --> Join-this-join - <-- Join-left-repartition-source - Processor: Join-outer-other-join (stores: [KSTREAM-JOINTHIS-0000000016-store]) - --> Join-merge - <-- Join-other-windowed - Processor: Join-this-join (stores: [KSTREAM-OUTEROTHER-0000000017-store]) - --> Join-merge - <-- Join-this-windowed - Processor: Join-merge (stores: []) - --> Project - <-- Join-this-join, Join-outer-other-join - Processor: Project (stores: []) - --> KSTREAM-SINK-0000000020 - <-- Join-merge - Sink: KSTREAM-SINK-0000000020 (topic: LEFT_OUTER_JOIN) - <-- Project - - Sub-topology: 2 - Source: KSTREAM-SOURCE-0000000004 (topics: [right_topic]) - --> KSTREAM-TRANSFORMVALUES-0000000005 - Processor: KSTREAM-TRANSFORMVALUES-0000000005 (stores: []) - --> KSTREAM-FILTER-0000000006 - <-- KSTREAM-SOURCE-0000000004 - Processor: KSTREAM-FILTER-0000000006 (stores: []) - --> KSTREAM-KEY-SELECT-0000000007 - <-- KSTREAM-TRANSFORMVALUES-0000000005 - Processor: KSTREAM-KEY-SELECT-0000000007 (stores: []) - --> Join-right-repartition-filter - <-- KSTREAM-FILTER-0000000006 - Processor: Join-right-repartition-filter (stores: []) - --> Join-right-repartition-sink - <-- KSTREAM-KEY-SELECT-0000000007 - Sink: Join-right-repartition-sink (topic: Join-right-repartition) - <-- Join-right-repartition-filter - diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join index 90ada71ec074..f6d0b593e749 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join @@ -38,8 +38,8 @@ CONFIGS_END CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.LEFT_OUTER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join_-_right_join_key_in_projection index 26e95ddea077..6e7f67f100cd 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join_-_right_join_key_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_stream_outer_join_-_right_join_key_in_projection @@ -38,8 +38,8 @@ CONFIGS_END CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.LEFT_OUTER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join index 2926b75846cb..21503a0b001c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join @@ -38,7 +38,7 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_join_key_not_in_projection index d02a374b26e5..8a13c0f5de53 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_join_key_not_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_join_key_not_in_projection @@ -38,7 +38,7 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection index f18d2ed6e9d8..c30be53fb4b1 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_inner_join_-_right_join_key_in_projection @@ -38,7 +38,7 @@ CONFIGS_END CSAS_INNER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_INNER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_INNER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_INNER_JOIN_0.Join.Left = STRUCT 
NOT NULL CSAS_INNER_JOIN_0.INNER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join index 804d772620e9..980b16736acc 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join @@ -38,7 +38,7 @@ CONFIGS_END CSAS_LEFT_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_LEFT_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_LEFT_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_LEFT_JOIN_0.Join.Left = STRUCT NOT NULL CSAS_LEFT_JOIN_0.LEFT_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_join_key_not_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_join_key_not_in_projection index 608b6d192db9..18bb24c9796c 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_join_key_not_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_join_key_not_in_projection @@ -38,7 +38,7 @@ CONFIGS_END CSAS_LEFT_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_LEFT_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_LEFT_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_LEFT_JOIN_0.Join.Left = STRUCT NOT NULL CSAS_LEFT_JOIN_0.LEFT_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_right_join_key_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_right_join_key_in_projection index 9bab9545cfb6..4f730687c69b 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_right_join_key_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_table_left_join_-_right_join_key_in_projection @@ -38,7 +38,7 @@ CONFIGS_END CSAS_LEFT_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_LEFT_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_LEFT_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_LEFT_JOIN_0.Join.Left = STRUCT NOT NULL CSAS_LEFT_JOIN_0.LEFT_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_neither_have_key_field_and_joining_by_table_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_neither_have_key_field_and_joining_by_table_ROWKEY deleted file mode 100644 index a6aae02ab8c2..000000000000 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_neither_have_key_field_and_joining_by_table_ROWKEY +++ /dev/null @@ -1,82 +0,0 @@ -{ - "ksql.extension.dir" : "ext", - "ksql.streams.cache.max.bytes.buffering" : "0", - "ksql.security.extension.class" : null, - "ksql.transient.prefix" : "transient_", - "ksql.persistence.wrap.single.values" : "true", - "ksql.schema.registry.url" : "http://localhost:8081", - "ksql.streams.default.deserialization.exception.handler" : 
"io.confluent.ksql.errors.LogMetricAndContinueExceptionHandler", - "ksql.output.topic.name.prefix" : "", - "ksql.streams.auto.offset.reset" : "earliest", - "ksql.connect.url" : "http://localhost:8083", - "ksql.service.id" : "some.ksql.service.id", - "ksql.internal.topic.min.insync.replicas" : "1", - "ksql.internal.topic.replicas" : "1", - "ksql.insert.into.values.enabled" : "true", - "ksql.pull.queries.enable" : "true", - "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", - "ksql.access.validator.enable" : "auto", - "ksql.streams.bootstrap.servers" : "localhost:0", - "ksql.streams.commit.interval.ms" : "2000", - "ksql.metric.reporters" : "", - "ksql.streams.auto.commit.interval.ms" : "0", - "ksql.metrics.extension" : null, - "ksql.streams.topology.optimization" : "all", - "ksql.query.pull.streamsstore.rebalancing.timeout.ms" : "10000", - "ksql.streams.num.stream.threads" : "4", - "ksql.metrics.tags.custom" : "", - "ksql.udfs.enabled" : "true", - "ksql.udf.enable.security.manager" : "true", - "ksql.query.pull.skip.access.validator" : "false", - "ksql.connect.worker.config" : "", - "ksql.query.pull.routing.timeout.ms" : "30000", - "ksql.sink.window.change.log.additional.retention" : "1000000", - "ksql.udf.collect.metrics" : "false", - "ksql.persistent.prefix" : "query_", - "ksql.query.persistent.active.limit" : "2147483647" -} -CONFIGS_END -CSAS_OUTPUT_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL -CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL -SCHEMAS_END -Topologies: - Sub-topology: 0 - Source: Join-repartition-source (topics: [Join-repartition]) - --> Join - Processor: Join (stores: [KafkaTopic_Right-Reduce]) - --> Project - <-- Join-repartition-source - Source: KSTREAM-SOURCE-0000000000 (topics: [NO_KEY]) - --> KTABLE-SOURCE-0000000001 - Processor: KTABLE-SOURCE-0000000001 (stores: [KafkaTopic_Right-Reduce]) - --> KTABLE-TRANSFORMVALUES-0000000002 - <-- KSTREAM-SOURCE-0000000000 - Processor: Project (stores: []) - --> KSTREAM-SINK-0000000012 - <-- Join - Sink: KSTREAM-SINK-0000000012 (topic: OUTPUT) - <-- Project - Processor: KTABLE-TRANSFORMVALUES-0000000002 (stores: []) - --> none - <-- KTABLE-SOURCE-0000000001 - - Sub-topology: 1 - Source: KSTREAM-SOURCE-0000000003 (topics: [S]) - --> KSTREAM-TRANSFORMVALUES-0000000004 - Processor: KSTREAM-TRANSFORMVALUES-0000000004 (stores: []) - --> KSTREAM-FILTER-0000000005 - <-- KSTREAM-SOURCE-0000000003 - Processor: KSTREAM-FILTER-0000000005 (stores: []) - --> KSTREAM-KEY-SELECT-0000000006 - <-- KSTREAM-TRANSFORMVALUES-0000000004 - Processor: KSTREAM-KEY-SELECT-0000000006 (stores: []) - --> Join-repartition-filter - <-- KSTREAM-FILTER-0000000005 - Processor: Join-repartition-filter (stores: []) - --> Join-repartition-sink - <-- KSTREAM-KEY-SELECT-0000000006 - Sink: Join-repartition-sink (topic: Join-repartition) - <-- Join-repartition-filter - diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_table_does_not_have_key_field_and_joining_by_table_ROWKEY b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_table_does_not_have_key_field_and_joining_by_table_ROWKEY index 445292d13f79..0ad1721bee7f 100644 --- 
a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_table_does_not_have_key_field_and_joining_by_table_ROWKEY +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_stream_to_table_when_table_does_not_have_key_field_and_joining_by_table_ROWKEY @@ -38,7 +38,7 @@ CONFIGS_END CSAS_OUTPUT_0.KafkaTopic_Right.Source = STRUCT NOT NULL CSAS_OUTPUT_0.KafkaTopic_Left.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL +CSAS_OUTPUT_0.Join.Left = STRUCT NOT NULL CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_unqualified_join_criteria b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_unqualified_join_criteria index 4b1e97a64a96..4b3c095e2c0a 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_unqualified_join_criteria +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/joins_-_unqualified_join_criteria @@ -38,8 +38,8 @@ CONFIGS_END CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Left.Source = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.KafkaTopic_Right.Source = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL -CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Left = STRUCT NOT NULL +CSAS_LEFT_OUTER_JOIN_0.Join.Right = STRUCT NOT NULL CSAS_LEFT_OUTER_JOIN_0.LEFT_OUTER_JOIN = STRUCT NOT NULL SCHEMAS_END Topologies: diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_multiple_copies_of_key_field_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_multiple_copies_of_key_field_in_projection index cd02078d2301..63fcf33e6629 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_multiple_copies_of_key_field_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_multiple_copies_of_key_field_in_projection @@ -37,7 +37,7 @@ } CONFIGS_END CSAS_OUTPUT_0.KsqlTopic.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL +CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL SCHEMAS_END Topologies: Sub-topology: 0 diff --git a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection index eada382bc1f6..cdb00a218d76 100644 --- a/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection +++ b/ksql-functional-tests/src/test/resources/expected_topology/0_6_0-pre/key-field_-_where_only_rowkey_is_in_projection @@ -37,7 +37,7 @@ } CONFIGS_END CSAS_OUTPUT_0.KsqlTopic.Source = STRUCT NOT NULL -CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL +CSAS_OUTPUT_0.OUTPUT = STRUCT NOT NULL SCHEMAS_END Topologies: Sub-topology: 0 diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json index e83d899c6931..ffb6cb89b766 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json @@ -111,15 +111,15 @@ { "name": "fields (stream->table)", "statements": [ - "CREATE STREAM TEST (f1 INT, f2 VARCHAR) WITH 
(kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", + "CREATE STREAM TEST (ROWKEY INT KEY, f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1, f2, COUNT(*) FROM TEST GROUP BY f2, f1;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1,a"}, - {"topic": "test_topic", "key": "2", "value": "2,b"}, - {"topic": "test_topic", "key": "1", "value": "1,a"}, - {"topic": "test_topic", "key": "2", "value": "2,b"}, - {"topic": "test_topic", "key": "3", "value": "3,a"} + {"topic": "test_topic", "key": 1, "value": "1,a"}, + {"topic": "test_topic", "key": 2, "value": "2,b"}, + {"topic": "test_topic", "key": 1, "value": "1,a"}, + {"topic": "test_topic", "key": 2, "value": "2,b"}, + {"topic": "test_topic", "key": 3, "value": "3,a"} ], "outputs": [ {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": "a|+|1", "value": "1,a,0,1"}, @@ -142,16 +142,16 @@ { "name": "fields (stream->table) - format", "statements": [ - "CREATE STREAM TEST (f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='{FORMAT}');", + "CREATE STREAM TEST (ROWKEY INT KEY, f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='{FORMAT}');", "CREATE TABLE OUTPUT AS SELECT f1, f2, COUNT(*) FROM TEST GROUP BY f2, f1;" ], "format": ["AVRO", "JSON"], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"F1": 1, "F2": "a"}}, - {"topic": "test_topic", "key": "2", "value": {"F1": 2, "F2": "b"}}, - {"topic": "test_topic", "key": "1", "value": {"F1": 1, "F2": "a"}}, - {"topic": "test_topic", "key": "2", "value": {"F1": 2, "F2": "b"}}, - {"topic": "test_topic", "key": "3", "value": {"F1": 3, "F2": "a"}} + {"topic": "test_topic", "key": 1, "value": {"F1": 1, "F2": "a"}}, + {"topic": "test_topic", "key": 2, "value": {"F1": 2, "F2": "b"}}, + {"topic": "test_topic", "key": 1, "value": {"F1": 1, "F2": "a"}}, + {"topic": "test_topic", "key": 2, "value": {"F1": 2, "F2": "b"}}, + {"topic": "test_topic", "key": 3, "value": {"F1": 3, "F2": "a"}} ], "outputs": [ {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": "a|+|1", "value": {"KSQL_INTERNAL_COL_0": 1, "KSQL_INTERNAL_COL_1": "a", "KSQL_INTERNAL_COL_2": 0, "KSQL_AGG_VARIABLE_0": 1}}, @@ -174,15 +174,15 @@ { "name": "with groupings (stream->table)", "statements": [ - "CREATE STREAM TEST (f1 INT, f2 VARCHAR, f3 INT) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", + "CREATE STREAM TEST (ROWKEY INT KEY, f1 INT, f2 VARCHAR, f3 INT) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1, f2, COUNT(*) FROM TEST GROUP BY f3, (f2, f1);" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1,a,-1"}, - {"topic": "test_topic", "key": "2", "value": "2,b,-2"}, - {"topic": "test_topic", "key": "1", "value": "1,a,-1"}, - {"topic": "test_topic", "key": "2", "value": "2,b,-2"}, - {"topic": "test_topic", "key": "3", "value": "3,a,-3"} + {"topic": "test_topic", "key": 1, "value": "1,a,-1"}, + {"topic": "test_topic", "key": 2, "value": "2,b,-2"}, + {"topic": "test_topic", "key": 1, "value": "1,a,-1"}, + {"topic": "test_topic", "key": 2, "value": "2,b,-2"}, + {"topic": "test_topic", "key": 3, "value": "3,a,-3"} ], "outputs": [ {"topic": "OUTPUT", "key": "-1|+|a|+|1", "value": "1,a,1"}, @@ -200,15 +200,15 @@ { "name": "fields (table->table)", "statements": [ - "CREATE TABLE 
TEST (f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1, f2, COUNT(*) FROM TEST GROUP BY f2, f1;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1,a"}, - {"topic": "test_topic", "key": "2", "value": "2,b"}, - {"topic": "test_topic", "key": "1", "value": "1,b"}, - {"topic": "test_topic", "key": "2", "value": null}, - {"topic": "test_topic", "key": "1", "value": "1,a"} + {"topic": "test_topic", "key": 1, "value": "1,a"}, + {"topic": "test_topic", "key": 2, "value": "2,b"}, + {"topic": "test_topic", "key": 1, "value": "1,b"}, + {"topic": "test_topic", "key": 2, "value": null}, + {"topic": "test_topic", "key": 1, "value": "1,a"} ], "outputs": [ {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": "a|+|1", "value": "1,a,0,1"}, @@ -235,16 +235,16 @@ { "name": "fields (table->table) - format", "statements": [ - "CREATE TABLE TEST (f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='{FORMAT}');", + "CREATE TABLE TEST (ROWKEY INT KEY, f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='{FORMAT}');", "CREATE TABLE OUTPUT AS SELECT f1, f2, COUNT(*) FROM TEST GROUP BY f2, f1;" ], "format": ["AVRO", "JSON"], "inputs": [ - {"topic": "test_topic", "key": "1", "value": {"F1": 1, "F2": "a"}}, - {"topic": "test_topic", "key": "2", "value": {"F1": 2, "F2": "b"}}, - {"topic": "test_topic", "key": "1", "value": {"F1": 1, "F2": "b"}}, - {"topic": "test_topic", "key": "2", "value": null}, - {"topic": "test_topic", "key": "1", "value": {"F1": 1, "F2": "a"}} + {"topic": "test_topic", "key": 1, "value": {"F1": 1, "F2": "a"}}, + {"topic": "test_topic", "key": 2, "value": {"F1": 2, "F2": "b"}}, + {"topic": "test_topic", "key": 1, "value": {"F1": 1, "F2": "b"}}, + {"topic": "test_topic", "key": 2, "value": null}, + {"topic": "test_topic", "key": 1, "value": {"F1": 1, "F2": "a"}} ], "outputs": [ {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": "a|+|1", "value": {"KSQL_INTERNAL_COL_0": 1, "KSQL_INTERNAL_COL_1": "a", "KSQL_INTERNAL_COL_2": 0, "KSQL_AGG_VARIABLE_0": 1}}, @@ -380,15 +380,15 @@ { "name": "field with re-key (table->table)", "statements": [ - "CREATE TABLE TEST (user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM TEST GROUP BY region;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1,r0"}, - {"topic": "test_topic", "key": "2", "value": "2,r1"}, - {"topic": "test_topic", "key": "3", "value": "3,r0"}, - {"topic": "test_topic", "key": "1", "value": null}, - {"topic": "test_topic", "key": "2", "value": "2,r0"} + {"topic": "test_topic", "key": 1, "value": "1,r0"}, + {"topic": "test_topic", "key": 2, "value": "2,r1"}, + {"topic": "test_topic", "key": 3, "value": "3,r0"}, + {"topic": "test_topic", "key": 1, "value": null}, + {"topic": "test_topic", "key": 2, "value": "2,r0"} ], "outputs": [ {"topic": "OUTPUT", "key": "r0", "value": "1"}, @@ -429,15 +429,15 @@ { "name": "with aggregate arithmetic (table->table)", "statements": [ - "CREATE TABLE TEST (user INT, region VARCHAR) WITH 
(kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) * 2 FROM TEST GROUP BY region;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1,r0"}, - {"topic": "test_topic", "key": "2", "value": "2,r1"}, - {"topic": "test_topic", "key": "3", "value": "3,r0"}, - {"topic": "test_topic", "key": "1", "value": null}, - {"topic": "test_topic", "key": "2", "value": "2,r0"} + {"topic": "test_topic", "key": 1, "value": "1,r0"}, + {"topic": "test_topic", "key": 2, "value": "2,r1"}, + {"topic": "test_topic", "key": 3, "value": "3,r0"}, + {"topic": "test_topic", "key": 1, "value": null}, + {"topic": "test_topic", "key": 2, "value": "2,r0"} ], "outputs": [ {"topic": "OUTPUT", "key": "r0", "value": "2"}, @@ -475,14 +475,14 @@ { "name": "with aggregate arithmetic involving source field (table->table)", "statements": [ - "CREATE TABLE TEST (f0 INT, f1 INT) WITH (kafka_topic='test_topic', key='f0', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, f0 INT, f1 INT) WITH (kafka_topic='test_topic', key='f0', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f0 * SUM(f1) FROM TEST GROUP BY f0;" ], "inputs": [ - {"topic": "test_topic", "key": "2", "value": "2,10"}, - {"topic": "test_topic", "key": "2", "value": "2,20"}, - {"topic": "test_topic", "key": "2", "value": "2,30"}, - {"topic": "test_topic", "key": "2", "value": null} + {"topic": "test_topic", "key": 2, "value": "2,10"}, + {"topic": "test_topic", "key": 2, "value": "2,20"}, + {"topic": "test_topic", "key": 2, "value": "2,30"}, + {"topic": "test_topic", "key": 2, "value": null} ], "outputs": [ {"topic": "OUTPUT", "key": 2, "value": "20"}, @@ -496,7 +496,7 @@ { "name": "with aggregate arithmetic involving source field not in group by (table->table)", "statements": [ - "CREATE TABLE TEST (f0 INT, f1 INT, f2 INT) WITH (kafka_topic='test_topic', key='f0', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, f0 INT, f1 INT, f2 INT) WITH (kafka_topic='test_topic', key='f0', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1 * SUM(f2) FROM TEST GROUP BY f0;" ], "expectedException": { @@ -535,15 +535,15 @@ { "name": "function (table->table)", "statements": [ - "CREATE TABLE TEST (user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT SUBSTRING(region, 7, 2), COUNT(*) FROM TEST GROUP BY SUBSTRING(region, 7, 2);" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1,prefixr0"}, - {"topic": "test_topic", "key": "2", "value": "2,prefixr1"}, - {"topic": "test_topic", "key": "3", "value": "3,prefixr0"}, - {"topic": "test_topic", "key": "1", "value": null}, - {"topic": "test_topic", "key": "2", "value": "2,prefixr0"} + {"topic": "test_topic", "key": 1, "value": "1,prefixr0"}, + {"topic": "test_topic", "key": 2, "value": "2,prefixr1"}, + {"topic": "test_topic", "key": 3, "value": "3,prefixr0"}, + {"topic": "test_topic", "key": 1, "value": null}, + {"topic": "test_topic", "key": 2, "value": "2,prefixr0"} ], "outputs": [ {"topic": "OUTPUT", "key": "r0", "value": "r0,1"}, @@ -600,7 +600,7 @@ { "name": "function with select field that is a subset of group by (table->table)", "statements": [ - "CREATE TABLE 
TEST (user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT SUBSTRING(region, 7, 1), COUNT(*) FROM TEST GROUP BY SUBSTRING(region, 7, 2);" ], "expectedException": { @@ -622,7 +622,7 @@ { "name": "function with select field that is a superset of group by (table->table)", "statements": [ - "CREATE TABLE TEST (user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT SUBSTRING(region, 7, 3), COUNT(*) FROM TEST GROUP BY SUBSTRING(region, 7, 2);" ], "expectedException": { @@ -751,15 +751,15 @@ { "name": "constant (table->table)", "statements": [ - "CREATE TABLE TEST (user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM TEST GROUP BY 1;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1,r0"}, - {"topic": "test_topic", "key": "2", "value": "2,r1"}, - {"topic": "test_topic", "key": "3", "value": "3,r0"}, - {"topic": "test_topic", "key": "1", "value": null}, - {"topic": "test_topic", "key": "2", "value": "2,r0"} + {"topic": "test_topic", "key": 1, "value": "1,r0"}, + {"topic": "test_topic", "key": 2, "value": "2,r1"}, + {"topic": "test_topic", "key": 3, "value": "3,r0"}, + {"topic": "test_topic", "key": 1, "value": null}, + {"topic": "test_topic", "key": 2, "value": "2,r0"} ], "outputs": [ {"topic": "OUTPUT", "key": 1, "value": "1"}, @@ -794,15 +794,15 @@ { "name": "field with field used in function in projection (table->table)", "statements": [ - "CREATE TABLE TEST (user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (ROWKEY INT KEY, user INT, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT SUBSTRING(region, 2, 1), COUNT(*) FROM TEST GROUP BY region;" ], "inputs": [ - {"topic": "test_topic", "key": "1", "value": "1,r0"}, - {"topic": "test_topic", "key": "2", "value": "2,r1"}, - {"topic": "test_topic", "key": "3", "value": "3,r0"}, - {"topic": "test_topic", "key": "1", "value": null}, - {"topic": "test_topic", "key": "2", "value": "2,r0"} + {"topic": "test_topic", "key": 1, "value": "1,r0"}, + {"topic": "test_topic", "key": 2, "value": "2,r1"}, + {"topic": "test_topic", "key": 3, "value": "3,r0"}, + {"topic": "test_topic", "key": 1, "value": null}, + {"topic": "test_topic", "key": 2, "value": "2,r0"} ], "outputs": [ {"topic": "OUTPUT", "key": "r0", "value": "0,1"}, @@ -837,7 +837,7 @@ { "name": "string concat using + op (table->table)", "statements": [ - "CREATE TABLE TEST (user INT, subregion VARCHAR, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (user INT, subregion VARCHAR, region VARCHAR) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM TEST GROUP BY region + subregion;" ], "inputs": [ @@ -875,7 +875,7 @@ { "name": "string concat using + op with projection field in wrong order 
(table->table)", "statements": [ - "CREATE TABLE TEST (user INT, subregion VARCHAR, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (user INT, subregion VARCHAR, region VARCHAR) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT subregion + region, COUNT(*) FROM TEST GROUP BY region + subregion;" ], "expectedException": { @@ -886,7 +886,7 @@ { "name": "string concat with separate fields in projection (stream->table)", "statements": [ - "CREATE STREAM TEST (f1 VARCHAR, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", + "CREATE STREAM TEST (f1 VARCHAR, f2 VARCHAR) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1, f2, COUNT(*) FROM TEST GROUP BY f2 + f1;" ], "expectedException": { @@ -897,7 +897,7 @@ { "name": "string concat with separate fields in projection (table->table)", "statements": [ - "CREATE TABLE TEST (user INT, subregion VARCHAR, region VARCHAR) WITH (kafka_topic='test_topic', KEY='user', value_format='DELIMITED');", + "CREATE TABLE TEST (user INT, subregion VARCHAR, region VARCHAR) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT subregion, region, COUNT(*) FROM TEST GROUP BY region + subregion;" ], "expectedException": { @@ -927,7 +927,7 @@ { "name": "arithmetic binary expression with projection in-order & non-commutative group by (table->table)", "statements": [ - "CREATE TABLE TEST (f0 INT, f1 INT) WITH (kafka_topic='test_topic', KEY='f0', value_format='DELIMITED');", + "CREATE TABLE TEST (f0 INT, f1 INT) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f0 - f1, COUNT(*) FROM TEST GROUP BY f0 - f1;" ], "inputs": [ @@ -960,7 +960,7 @@ { "name": "arithmetic binary expression with projection out-of-order & non-commutative group by (table->table)", "statements": [ - "CREATE TABLE TEST (f0 INT, f1 INT) WITH (kafka_topic='test_topic', KEY='f0', value_format='DELIMITED');", + "CREATE TABLE TEST (f0 INT, f1 INT) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1 - f0, COUNT(*) FROM TEST GROUP BY f0 - f1;" ], "expectedException": { @@ -992,7 +992,7 @@ { "name": "with having expression (table->table)", "statements": [ - "CREATE TABLE TEST (f0 INT, f1 INT) WITH (kafka_topic='test_topic', KEY='f0', value_format='DELIMITED');", + "CREATE TABLE TEST (f0 INT, f1 INT) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT f1, SUM(f0) FROM TEST GROUP BY f1 HAVING COUNT(f1) > 0;" ], "inputs": [ @@ -1122,7 +1122,7 @@ { "name": "missing matching projection field (table->table)", "statements": [ - "CREATE TABLE TEST (f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', KEY='f1', value_format='DELIMITED');", + "CREATE TABLE TEST (f1 INT, f2 VARCHAR) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT COUNT(*) FROM TEST GROUP BY f2;" ], "inputs": [ @@ -1176,7 +1176,7 @@ { "name": "with non-aggregate projection field not in group by (table->table)", "statements": [ - "CREATE TABLE TEST (d1 VARCHAR, d2 VARCHAR) WITH (kafka_topic='test_topic', key='d1', value_format='DELIMITED');", + "CREATE TABLE TEST (d1 VARCHAR, d2 VARCHAR) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT d1, COUNT(*) FROM TEST GROUP BY d2;" ], "expectedException": { @@ -1263,7 +1263,7 @@ { "name": "UDAF 
nested in UDF in select expression (table->table)", "statements": [ - "CREATE TABLE TEST (d0 INT, d1 VARCHAR) WITH (kafka_topic='test_topic', key='d0', value_format='DELIMITED');", + "CREATE TABLE TEST (d0 INT, d1 VARCHAR) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE TABLE OUTPUT AS SELECT SUBSTRING('Mr Bugalicious', CAST(COUNT(*) AS INT), 1) FROM TEST GROUP BY d1;" ], "inputs": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/histogram.json b/ksql-functional-tests/src/test/resources/query-validation-tests/histogram.json index 88cc78561416..d5bb57adfe35 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/histogram.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/histogram.json @@ -34,7 +34,7 @@ "name": "histogram on a table", "format": ["AVRO","JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, REGION string) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ID bigint, NAME varchar, REGION string) WITH (kafka_topic='test_topic', value_format='{FORMAT}');", "CREATE TABLE COUNT_BY_REGION AS SELECT region, histogram(name) AS COUNTS FROM TEST GROUP BY region;" ], "inputs": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/hopping-windows.json b/ksql-functional-tests/src/test/resources/query-validation-tests/hopping-windows.json index d3797e2dc521..ff4737c2b029 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/hopping-windows.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/hopping-windows.json @@ -157,7 +157,7 @@ { "name": "import table with invalid window size", "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID', WINDOW_TYPE='Hopping', WINDOW_SIZE='30 bobs');" + "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', WINDOW_TYPE='Hopping', WINDOW_SIZE='30 bobs');" ], "expectedException": { "type": "io.confluent.ksql.parser.exception.ParseFailedException", diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/identifiers.json b/ksql-functional-tests/src/test/resources/query-validation-tests/identifiers.json index 6242f7412d6b..31a79b56daff 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/identifiers.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/identifiers.json @@ -32,61 +32,61 @@ { "name": "aliased join source", "statements": [ - "CREATE STREAM INPUT_1 (foo INT, bar INT) WITH (kafka_topic='t1', value_format='JSON', KEY='FOO');", - "CREATE STREAM INPUT_2 (foo INT, bar INT) WITH (kafka_topic='t2', value_format='JSON', KEY='FOO');", + "CREATE STREAM INPUT_1 (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='t1', value_format='JSON', KEY='FOO');", + "CREATE STREAM INPUT_2 (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='t2', value_format='JSON', KEY='FOO');", "CREATE STREAM OUTPUT AS SELECT I1.BAR, I2.BAR FROM INPUT_1 I1 JOIN INPUT_2 I2 WITHIN 1 MINUTE ON I1.FOO = I2.FOO;" ], "inputs": [ - {"topic": "t1", "value": {"foo": 1, "bar": 2}}, - {"topic": "t2", "value": {"foo": 1, "bar": 2}} + {"topic": "t1", "key": 1, "value": {"foo": 1, "bar": 2}}, + {"topic": "t2", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"I1_BAR": 2, "I2_BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"I1_BAR": 2, "I2_BAR": 2}} 
] }, { "name": "aliased join source with AS", "statements": [ - "CREATE STREAM INPUT_1 (foo INT, bar INT) WITH (kafka_topic='t1', value_format='JSON', KEY='FOO');", - "CREATE STREAM INPUT_2 (foo INT, bar INT) WITH (kafka_topic='t2', value_format='JSON', KEY='FOO');", + "CREATE STREAM INPUT_1 (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='t1', value_format='JSON', KEY='FOO');", + "CREATE STREAM INPUT_2 (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='t2', value_format='JSON', KEY='FOO');", "CREATE STREAM OUTPUT AS SELECT I1.BAR, I2.BAR FROM INPUT_1 AS I1 JOIN INPUT_2 AS I2 WITHIN 1 MINUTE ON I1.FOO = I2.FOO;" ], "inputs": [ - {"topic": "t1", "value": {"foo": 1, "bar": 2}}, - {"topic": "t2", "value": {"foo": 1, "bar": 2}} + {"topic": "t1", "key": 1, "value": {"foo": 1, "bar": 2}}, + {"topic": "t2", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"I1_BAR": 2, "I2_BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"I1_BAR": 2, "I2_BAR": 2}} ] }, { "name": "aliased left unaliased right", "statements": [ - "CREATE STREAM INPUT_1 (foo INT, bar INT) WITH (kafka_topic='t1', value_format='JSON', KEY='FOO');", - "CREATE STREAM INPUT_2 (foo INT, bar INT) WITH (kafka_topic='t2', value_format='JSON', KEY='FOO');", + "CREATE STREAM INPUT_1 (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='t1', value_format='JSON', KEY='FOO');", + "CREATE STREAM INPUT_2 (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='t2', value_format='JSON', KEY='FOO');", "CREATE STREAM OUTPUT AS SELECT I1.BAR, INPUT_2.BAR FROM INPUT_1 AS I1 JOIN INPUT_2 WITHIN 1 MINUTE ON I1.FOO = INPUT_2.FOO;" ], "inputs": [ - {"topic": "t1", "value": {"foo": 1, "bar": 2}}, - {"topic": "t2", "value": {"foo": 1, "bar": 2}} + {"topic": "t1", "key": 1, "value": {"foo": 1, "bar": 2}}, + {"topic": "t2", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"I1_BAR": 2, "INPUT_2_BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"I1_BAR": 2, "INPUT_2_BAR": 2}} ] }, { "name": "unaliased left aliased right", "statements": [ - "CREATE STREAM INPUT_1 (foo INT, bar INT) WITH (kafka_topic='t1', value_format='JSON', KEY='FOO');", - "CREATE STREAM INPUT_2 (foo INT, bar INT) WITH (kafka_topic='t2', value_format='JSON', KEY='FOO');", + "CREATE STREAM INPUT_1 (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='t1', value_format='JSON', KEY='FOO');", + "CREATE STREAM INPUT_2 (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='t2', value_format='JSON', KEY='FOO');", "CREATE STREAM OUTPUT AS SELECT INPUT_1.BAR, I2.BAR FROM INPUT_1 JOIN INPUT_2 AS I2 WITHIN 1 MINUTE ON INPUT_1.FOO = I2.FOO;" ], "inputs": [ - {"topic": "t1", "value": {"foo": 1, "bar": 2}}, - {"topic": "t2", "value": {"foo": 1, "bar": 2}} + {"topic": "t1", "key": 1, "value": {"foo": 1, "bar": 2}}, + {"topic": "t2", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"INPUT_1_BAR": 2, "I2_BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"INPUT_1_BAR": 2, "I2_BAR": 2}} ] }, { diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/join-with-custom-timestamp.json b/ksql-functional-tests/src/test/resources/query-validation-tests/join-with-custom-timestamp.json index e7995da7911a..1c5f237ef871 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/join-with-custom-timestamp.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/join-with-custom-timestamp.json @@ -12,105 +12,105 @@ "name": "stream stream inner 
join with ts", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM S1 (ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", - "CREATE STREAM S2 (ID bigint, F1 varchar, F2 varchar) WITH (kafka_topic='s2', value_format='{FORMAT}', key='ID');", + "CREATE STREAM S1 (ROWKEY BIGINT KEY, ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", + "CREATE STREAM S2 (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 varchar) WITH (kafka_topic='s2', value_format='{FORMAT}', key='ID');", "CREATE STREAM S1_JOIN_S2 WITH(timestamp='TS') as SELECT S1.id as id, S1.name as name, S1.ts as ts, s2.f1, s2.f2 from S1 join S2 WITHIN 11 SECONDS ON s1.id = s2.id;" ], "inputs": [ - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 0}, - {"topic": "s2", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, - {"topic": "s2", "key": "10", "value": {"ID": 10, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, - {"topic": "s1", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 11000}, "timestamp": 22000}, - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 8000}, "timestamp": 33000} + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 0}, + {"topic": "s2", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, + {"topic": "s2", "key": 10, "value": {"ID": 10, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, + {"topic": "s1", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 11000}, "timestamp": 22000}, + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 8000}, "timestamp": 33000} ], "outputs": [ - {"topic": "S1_JOIN_S2", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, - {"topic": "S1_JOIN_S2", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 11000, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, - {"topic": "S1_JOIN_S2", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 8000, "F1": "blah", "F2": "foo"}, "timestamp": 10000} + {"topic": "S1_JOIN_S2", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, + {"topic": "S1_JOIN_S2", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 11000, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, + {"topic": "S1_JOIN_S2", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 8000, "F1": "blah", "F2": "foo"}, "timestamp": 10000} ] }, { "name": "stream stream inner join with ts extractor both sides", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM S1 (ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", - "CREATE STREAM S2 (ID bigint, F1 varchar, F2 varchar, RTS bigint) WITH (timestamp='RTS', kafka_topic='s2', value_format='{FORMAT}', key='ID');", + "CREATE STREAM S1 (ROWKEY BIGINT KEY, ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", + "CREATE STREAM S2 (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 varchar, RTS bigint) WITH (timestamp='RTS', kafka_topic='s2', value_format='{FORMAT}', key='ID');", "CREATE STREAM S1_JOIN_S2 WITH(timestamp='TS') as SELECT S1.id as id, S1.name as name, S1.ts as ts, s2.f1, s2.f2 from S1 join S2 WITHIN 11 SECONDS ON s1.id = s2.id;" ], "inputs": [ - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 0}, - 
{"topic": "s2", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": "foo", "RTS": 10000}, "timestamp": 0}, - {"topic": "s2", "key": "10", "value": {"ID": 10, "F1": "foo", "F2": "bar", "RTS": 13000}, "timestamp": 0}, - {"topic": "s1", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 11000}, "timestamp": 0}, - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 8000}, "timestamp": 0} + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 0}, + {"topic": "s2", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": "foo", "RTS": 10000}, "timestamp": 0}, + {"topic": "s2", "key": 10, "value": {"ID": 10, "F1": "foo", "F2": "bar", "RTS": 13000}, "timestamp": 0}, + {"topic": "s1", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 11000}, "timestamp": 0}, + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 8000}, "timestamp": 0} ], "outputs": [ - {"topic": "S1_JOIN_S2", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, - {"topic": "S1_JOIN_S2", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 11000, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, - {"topic": "S1_JOIN_S2", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 8000, "F1": "blah", "F2": "foo"}, "timestamp": 10000} + {"topic": "S1_JOIN_S2", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, + {"topic": "S1_JOIN_S2", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 11000, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, + {"topic": "S1_JOIN_S2", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 8000, "F1": "blah", "F2": "foo"}, "timestamp": 10000} ] }, { "name": "stream table join with ts extractor both sides", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM S1 (ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", - "CREATE TABLE T1 (ID bigint, F1 varchar, F2 varchar, RTS bigint) WITH (timestamp='RTS', kafka_topic='t1', value_format='{FORMAT}', key='ID');", + "CREATE STREAM S1 (ROWKEY BIGINT KEY, ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", + "CREATE TABLE T1 (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 varchar, RTS bigint) WITH (timestamp='RTS', kafka_topic='t1', value_format='{FORMAT}', key='ID');", "CREATE STREAM S1_JOIN_T1 WITH(timestamp='TS') as SELECT S1.id as id, S1.name as name, S1.ts as ts, T1.f1, T1.f2 from S1 inner join T1 ON s1.id = t1.id;" ], "inputs": [ - {"topic": "t1", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": "foo", "RTS": 10000}, "timestamp": 10000}, - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 10000}, - {"topic": "t1", "key": "10", "value": {"ID": 10, "F1": "foo", "F2": "bar", "RTS": 13000}, "timestamp": 90000}, - {"topic": "s1", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 11000}, "timestamp": 800000}, - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 8000}, "timestamp": 0} + {"topic": "t1", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": "foo", "RTS": 10000}, "timestamp": 10000}, + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 10000}, + {"topic": "t1", "key": 10, "value": {"ID": 10, "F1": "foo", "F2": "bar", "RTS": 13000}, "timestamp": 90000}, + {"topic": "s1", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 11000}, "timestamp": 800000}, + {"topic": "s1", "key": 0, "value": {"ID": 
0, "NAME": "jan", "TS": 8000}, "timestamp": 0} ], "outputs": [ - {"topic": "S1_JOIN_T1", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 0}, - {"topic": "S1_JOIN_T1", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 11000, "F1": "foo", "F2": "bar"}, "timestamp": 11000}, - {"topic": "S1_JOIN_T1", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 8000, "F1": "blah", "F2": "foo"}, "timestamp": 8000} + {"topic": "S1_JOIN_T1", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 0}, + {"topic": "S1_JOIN_T1", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 11000, "F1": "foo", "F2": "bar"}, "timestamp": 11000}, + {"topic": "S1_JOIN_T1", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 8000, "F1": "blah", "F2": "foo"}, "timestamp": 8000} ] }, { "name": "table table inner join with ts", "format": ["AVRO", "JSON"], "statements": [ - "CREATE TABLE S1 (ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", - "CREATE TABLE S2 (ID bigint, F1 varchar, F2 varchar) WITH (kafka_topic='s2', value_format='{FORMAT}', key='ID');", + "CREATE TABLE S1 (ROWKEY BIGINT KEY, ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", + "CREATE TABLE S2 (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 varchar) WITH (kafka_topic='s2', value_format='{FORMAT}', key='ID');", "CREATE TABLE S1_JOIN_S2 WITH(timestamp='TS') as SELECT S1.id as id, S1.name as name, S1.ts as ts, s2.f1, s2.f2 from S1 join S2 ON s1.id = s2.id;" ], "inputs": [ - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 0}, - {"topic": "s2", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, - {"topic": "s2", "key": "10", "value": {"ID": 10, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, - {"topic": "s1", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 19000}, "timestamp": 22000}, - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 18000}, "timestamp": 33000} + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 0}, + {"topic": "s2", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, + {"topic": "s2", "key": 10, "value": {"ID": 10, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, + {"topic": "s1", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 19000}, "timestamp": 22000}, + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 18000}, "timestamp": 33000} ], "outputs": [ - {"topic": "S1_JOIN_S2", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, - {"topic": "S1_JOIN_S2", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 19000, "F1": "foo", "F2": "bar"}, "timestamp": 19000}, - {"topic": "S1_JOIN_S2", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 18000, "F1": "blah", "F2": "foo"}, "timestamp": 18000} + {"topic": "S1_JOIN_S2", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, + {"topic": "S1_JOIN_S2", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 19000, "F1": "foo", "F2": "bar"}, "timestamp": 19000}, + {"topic": "S1_JOIN_S2", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 18000, "F1": "blah", "F2": "foo"}, "timestamp": 18000} ] }, { "name": "table table inner join with ts extractor both sides", "format": ["AVRO", "JSON"], "statements": [ - "CREATE TABLE S1 (ID 
bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", - "CREATE TABLE S2 (ID bigint, F1 varchar, F2 varchar, RTS bigint) WITH (timestamp='RTS', kafka_topic='s2', value_format='{FORMAT}', key='ID');", + "CREATE TABLE S1 (ROWKEY BIGINT KEY, ID bigint, NAME varchar, TS bigint) WITH (timestamp='TS', kafka_topic='s1', value_format='{FORMAT}', key='ID');", + "CREATE TABLE S2 (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 varchar, RTS bigint) WITH (timestamp='RTS', kafka_topic='s2', value_format='{FORMAT}', key='ID');", "CREATE TABLE S1_JOIN_S2 WITH(timestamp='TS') as SELECT S1.id as id, S1.name as name, S1.ts as ts, s2.f1, s2.f2 from S1 join S2 ON s1.id = s2.id;" ], "inputs": [ - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 0}, - {"topic": "s2", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": "foo", "RTS": 10000}, "timestamp": 0}, - {"topic": "s2", "key": "10", "value": {"ID": 10, "F1": "foo", "F2": "bar", "RTS": 13000}, "timestamp": 0}, - {"topic": "s1", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 11000}, "timestamp": 0}, - {"topic": "s1", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 8000}, "timestamp": 0} + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0}, "timestamp": 0}, + {"topic": "s2", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": "foo", "RTS": 10000}, "timestamp": 0}, + {"topic": "s2", "key": 10, "value": {"ID": 10, "F1": "foo", "F2": "bar", "RTS": 13000}, "timestamp": 0}, + {"topic": "s1", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 11000}, "timestamp": 0}, + {"topic": "s1", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 8000}, "timestamp": 0} ], "outputs": [ - {"topic": "S1_JOIN_S2", "key": "0", "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, - {"topic": "S1_JOIN_S2", "key": "10", "value": {"ID": 10, "NAME": "100", "TS": 11000, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, - {"topic": "S1_JOIN_S2", "key": "0", "value": {"ID": 0, "NAME": "jan", "TS": 8000, "F1": "blah", "F2": "foo"}, "timestamp": 10000} + {"topic": "S1_JOIN_S2", "key": 0, "value": {"ID": 0, "NAME": "zero", "TS": 0, "F1": "blah", "F2": "foo"}, "timestamp": 10000}, + {"topic": "S1_JOIN_S2", "key": 10, "value": {"ID": 10, "NAME": "100", "TS": 11000, "F1": "foo", "F2": "bar"}, "timestamp": 13000}, + {"topic": "S1_JOIN_S2", "key": 0, "value": {"ID": 0, "NAME": "jan", "TS": 8000, "F1": "blah", "F2": "foo"}, "timestamp": 10000} ] } ] diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json index 6e99039d73ee..7f10d5d5a06d 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json @@ -4,36 +4,36 @@ "name": "stream stream left join", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', 
value_format='{FORMAT}', key='ID');", "CREATE STREAM LEFT_OUTER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t left join TEST_STREAM tt WITHIN 11 seconds ON t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001", "value": {"ROWTIME": 0, "ROWKEY": "0", "ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000007-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000'\u0010\u0000\u0000\u0000\u0001", "value": {"ROWTIME": 10000, "ROWKEY": "0", "ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "10\u0000\u0000\u0000\u0000\u0000\u0000*�\u0000\u0000\u0000\u0002", "value": {"ROWTIME": 11000, "ROWKEY": "10", "ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u00002�\u0000\u0000\u0000\u0003", "value": {"ROWTIME": 13000, "ROWKEY": "0", "ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000007-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000:�\u0000\u0000\u0000\u0002", "value": {"ROWTIME": 15000, "ROWKEY": "0", "ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000007-store-changelog", "key": 
"100\u0000\u0000\u0000\u0000\u0000\u0000>�\u0000\u0000\u0000\u0003", "value": {"ROWTIME": 16000, "ROWKEY": "100", "ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "90\u0000\u0000\u0000\u0000\u0000\u0000Bh\u0000\u0000\u0000\u0004", "value": {"ROWTIME": 17000, "ROWKEY": "90", "ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000u0\u0000\u0000\u0000\u0005", "value": {"ROWTIME": 30000, "ROWKEY": "0", "ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000} + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 0, "end": 11000, "type": "time"}, "key": 0, "value": {"ROWTIME": 0, "ROWKEY": 0, "ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000007-store-changelog", "window": {"start": 10000, "end": 21000, "type": "time"}, "key": 0, "value": {"ROWTIME": 10000, "ROWKEY": 0, "ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 11000, "end": 22000, "type": "time"}, "key": 10, "value": {"ROWTIME": 11000, "ROWKEY": 10, "ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 13000, "end": 24000, "type": "time"}, "key": 0, "value": {"ROWTIME": 13000, "ROWKEY": 0, "ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000007-store-changelog", "window": {"start": 15000, "end": 26000, "type": "time"}, "key": 0, "value": {"ROWTIME": 15000, "ROWKEY": 0, "ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000007-store-changelog", "window": {"start": 16000, "end": 27000, "type": "time"}, "key": 100, "value": {"ROWTIME": 16000, "ROWKEY": 100, "ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": 
"_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 17000, "end": 28000, "type": "time"}, "key": 90, "value": {"ROWTIME": 17000, "ROWKEY": 90, "ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 30000, "end": 41000, "type": "time"}, "key": 0, "value": {"ROWTIME": 30000, "ROWKEY": 0, "ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000} ], "post": { "sources": [ @@ -44,8 +44,8 @@ { "name": "stream stream left join - KAFKA", "statements": [ - "CREATE STREAM S_LEFT (ID bigint) WITH (kafka_topic='left_topic', value_format='KAFKA', key='ID');", - "CREATE STREAM S_RIGHT (ID bigint) WITH (kafka_topic='right_topic', value_format='KAFKA', key='ID');", + "CREATE STREAM S_LEFT (ID bigint) WITH (kafka_topic='left_topic', value_format='KAFKA');", + "CREATE STREAM S_RIGHT (ID bigint) WITH (kafka_topic='right_topic', value_format='KAFKA');", "CREATE STREAM OUTPUT WITH(value_format='delimited') as SELECT * FROM s_left join s_right WITHIN 1 second ON s_left.id = s_right.id;" ], "expectedException": { @@ -117,14 +117,14 @@ {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-right-repartition", "key": 100, "value": {"ROWTIME": 16000, "ROWKEY": "", "ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": 90, "value": {"ROWTIME": 17000, "ROWKEY": "", "ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-Join-left-repartition", "key": 0, "value": {"ROWTIME": 30000, "ROWKEY": "", "ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001", "value": {"ROWTIME": 0, "ROWKEY": "", "ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000017-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000'\u0010\u0000\u0000\u0000\u0001", "value": {"ROWTIME": 10000, "ROWKEY": "", "ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": 
"_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "10\u0000\u0000\u0000\u0000\u0000\u0000*�\u0000\u0000\u0000\u0002", "value": {"ROWTIME": 11000, "ROWKEY": "", "ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u00002�\u0000\u0000\u0000\u0003", "value": {"ROWTIME": 13000, "ROWKEY": "", "ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000017-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000:�\u0000\u0000\u0000\u0002", "value": {"ROWTIME": 15000, "ROWKEY": "", "ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000017-store-changelog", "key": "100\u0000\u0000\u0000\u0000\u0000\u0000>�\u0000\u0000\u0000\u0003", "value": {"ROWTIME": 16000, "ROWKEY": "", "ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "90\u0000\u0000\u0000\u0000\u0000\u0000Bh\u0000\u0000\u0000\u0004", "value": {"ROWTIME": 17000, "ROWKEY": "", "ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000u0\u0000\u0000\u0000\u0005", "value": {"ROWTIME": 30000, "ROWKEY": "", "ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "window": {"start": 0, "end": 11000, "type": "time"}, "key": 0, "value": {"ROWTIME": 0, "ROWKEY": "", "ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000017-store-changelog", "window": {"start": 10000, "end": 21000, "type": "time"}, "key": 0, "value": {"ROWTIME": 10000, "ROWKEY": "", "ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "window": {"start": 11000, "end": 22000, "type": "time"}, "key": 10, "value": {"ROWTIME": 11000, "ROWKEY": "", "ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "window": {"start": 13000, "end": 24000, "type": "time"}, "key": 0, "value": {"ROWTIME": 13000, "ROWKEY": "", "ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000017-store-changelog", "window": {"start": 140000, "end": 25000, "type": "time"}, "key": 0, "value": {"ROWTIME": 15000, "ROWKEY": "", "ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-OUTEROTHER-0000000017-store-changelog", "window": {"start": 16000, "end": 27000, "type": "time"}, "key": 100, "value": {"ROWTIME": 16000, "ROWKEY": "", "ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": 
"_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "window": {"start": 17000, "end": 28000, "type": "time"}, "key": 90, "value": {"ROWTIME": 17000, "ROWKEY": "", "ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_LEFT_OUTER_JOIN_0-KSTREAM-JOINTHIS-0000000016-store-changelog", "window": {"start": 30000, "end": 41000, "type": "time"}, "key": 0, "value": {"ROWTIME": 30000, "ROWKEY": "", "ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, @@ -142,28 +142,28 @@ { "name": "stream stream left join - join key not in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE STREAM LEFT_OUTER_JOIN as SELECT name, value, f1, f2 FROM test t left join TEST_STREAM tt WITHIN 11 seconds ON t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": 
{"NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000} ], "post": { "sources": [ @@ -246,24 +246,24 @@ "name": "stream stream inner join", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t join TEST_STREAM tt WITHIN 11 SECONDS ON t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": 
{"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "sources": [ @@ -275,48 +275,48 @@ "name": "stream stream inner join all left fields some right", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.*, tt.f1 FROM test t inner join TEST_STREAM tt WITHIN 11 SECONDS ON t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 
0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_NAME": "zero", "T_VALUE": 0, "F1": "blah", "T_ROWKEY": "0", "T_ROWTIME": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_NAME": "foo", "T_VALUE": 100, "F1": "blah", "T_ROWKEY": "0", "T_ROWTIME": 13000}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_NAME": "foo", "T_VALUE": 100, "F1": "a", "T_ROWKEY": "0", "T_ROWTIME": 13000}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_NAME": "zero", "T_VALUE": 0, "F1": "blah", "T_ROWKEY": 0, "T_ROWTIME": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_NAME": "foo", "T_VALUE": 100, "F1": "blah", "T_ROWKEY": 0, "T_ROWTIME": 13000}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_NAME": "foo", "T_VALUE": 100, "F1": "a", "T_ROWKEY": 0, "T_ROWTIME": 13000}, "timestamp": 15000} ] }, { "name": "stream stream inner join all right fields some left", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.*, tt.name FROM test tt inner join TEST_STREAM t WITHIN 11 SECONDS ON t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, 
"timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_F1": "blah", "T_F2": 50, "T_ROWKEY": "0", "T_ROWTIME": 10000, "NAME": "zero"}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_F1": "blah", "T_F2": 50, "T_ROWKEY": "0", "T_ROWTIME": 10000, "NAME": "foo"}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_F1": "a", "T_F2": 10, "T_ROWKEY": "0", "T_ROWTIME": 15000, "NAME": "foo"}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_F1": "blah", "T_F2": 50, "T_ROWKEY": 0, "T_ROWTIME": 10000, "NAME": "zero"}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_F1": "blah", "T_F2": 50, "T_ROWKEY": 0, "T_ROWTIME": 10000, "NAME": "foo"}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_F1": "a", "T_F2": 10, "T_ROWKEY": 0, "T_ROWTIME": 15000, "NAME": "foo"}, "timestamp": 15000} ], "post": { "issues": [ @@ -331,32 +331,32 @@ "name": "stream stream inner join all fields", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE STREAM INNER_JOIN as SELECT * FROM test tt inner join TEST_STREAM t WITHIN 11 SECONDS ON t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero"}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah"}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100"}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo"}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a"}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah"}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety"}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar"}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero"}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah"}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100"}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo"}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a"}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah"}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": 
{"ID": 90, "NAME": "ninety"}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar"}, "timestamp": 30000} ], "outputs": [ - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001", "value": {"ROWTIME": 0, "ROWKEY": "0", "ID": 0, "NAME": "zero"}, "timestamp": 0}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINOTHER-0000000007-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000'\u0010\u0000\u0000\u0000\u0001", "value": {"ROWTIME": 10000, "ROWKEY": "0", "ID": 0, "F1": "blah"}, "timestamp": 10000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "10\u0000\u0000\u0000\u0000\u0000\u0000*�\u0000\u0000\u0000\u0002", "value": {"ROWTIME": 11000, "ROWKEY": "10", "ID": 10, "NAME": "100"}, "timestamp": 11000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u00002�\u0000\u0000\u0000\u0003", "value": {"ROWTIME": 13000, "ROWKEY": "0", "ID": 0, "NAME": "foo"}, "timestamp": 13000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINOTHER-0000000007-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000:�\u0000\u0000\u0000\u0002", "value": {"ROWTIME": 15000, "ROWKEY": "0", "ID": 0, "F1": "a"}, "timestamp": 15000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINOTHER-0000000007-store-changelog", "key": "100\u0000\u0000\u0000\u0000\u0000\u0000>�\u0000\u0000\u0000\u0003", "value": {"ROWTIME": 16000, "ROWKEY": "100", "ID": 100, "F1": "newblah"}, "timestamp": 16000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "90\u0000\u0000\u0000\u0000\u0000\u0000Bh\u0000\u0000\u0000\u0004", "value": {"ROWTIME": 17000, "ROWKEY": "90", "ID": 90, "NAME": "ninety"}, "timestamp": 17000}, - {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "key": "0\u0000\u0000\u0000\u0000\u0000\u0000u0\u0000\u0000\u0000\u0005", "value": {"ROWTIME": 30000, "ROWKEY": "0", "ID": 0, "NAME": "bar"}, "timestamp": 30000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_F1": "blah", "T_ROWKEY": "0", "T_ROWTIME": 10000, "TT_ID": 0, "TT_NAME": "zero", "TT_ROWKEY": "0", "TT_ROWTIME": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_F1": "blah", "T_ROWKEY": "0", "T_ROWTIME": 10000, "TT_ID": 0, "TT_NAME": "foo", "TT_ROWKEY": "0", "TT_ROWTIME": 13000}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "T_F1": "a", "T_ROWKEY": "0", "T_ROWTIME": 15000, "TT_ID": 0, "TT_NAME": "foo", "TT_ROWKEY": "0", "TT_ROWTIME": 13000}, "timestamp": 15000} + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 0, "end": 11000, "type": "time"}, "key": 0, "value": {"ROWTIME": 0, "ROWKEY": 0, "ID": 0, "NAME": "zero"}, "timestamp": 0}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINOTHER-0000000007-store-changelog", "window": {"start": 10000, "end": 21000, "type": "time"}, "key": 0, "value": {"ROWTIME": 10000, "ROWKEY": 0, "ID": 0, "F1": "blah"}, 
"timestamp": 10000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 11000, "end": 22000, "type": "time"}, "key": 10, "value": {"ROWTIME": 11000, "ROWKEY": 10, "ID": 10, "NAME": "100"}, "timestamp": 11000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 13000, "end": 24000, "type": "time"}, "key": 0, "value": {"ROWTIME": 13000, "ROWKEY": 0, "ID": 0, "NAME": "foo"}, "timestamp": 13000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINOTHER-0000000007-store-changelog", "window": {"start": 15000, "end": 26000, "type": "time"}, "key": 0, "value": {"ROWTIME": 15000, "ROWKEY": 0, "ID": 0, "F1": "a"}, "timestamp": 15000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINOTHER-0000000007-store-changelog", "window": {"start": 16000, "end": 27000, "type": "time"}, "key": 100, "value": {"ROWTIME": 16000, "ROWKEY": 100, "ID": 100, "F1": "newblah"}, "timestamp": 16000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 17000, "end": 28000, "type": "time"}, "key": 90, "value": {"ROWTIME": 17000, "ROWKEY": 90, "ID": 90, "NAME": "ninety"}, "timestamp": 17000}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CSAS_INNER_JOIN_0-KSTREAM-JOINTHIS-0000000006-store-changelog", "window": {"start": 30000, "end": 41000, "type": "time"}, "key": 0, "value": {"ROWTIME": 30000, "ROWKEY": 0, "ID": 0, "NAME": "bar"}, "timestamp": 30000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_F1": "blah", "T_ROWKEY": 0, "T_ROWTIME": 10000, "TT_ID": 0, "TT_NAME": "zero", "TT_ROWKEY": 0, "TT_ROWTIME": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_F1": "blah", "T_ROWKEY": 0, "T_ROWTIME": 10000, "TT_ID": 0, "TT_NAME": "foo", "TT_ROWKEY": 0, "TT_ROWTIME": 13000}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "T_F1": "a", "T_ROWKEY": 0, "T_ROWTIME": 15000, "TT_ID": 0, "TT_NAME": "foo", "TT_ROWKEY": 0, "TT_ROWTIME": 13000}, "timestamp": 15000} ], "post": { "sources": [ @@ -368,73 +368,73 @@ "name": "stream stream inner join with different before and after windows", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t join TEST_STREAM tt WITHIN (11 seconds, 10 seconds) on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 11000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 12000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", 
"VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 11000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 12000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000} ] }, { "name": "stream stream inner join with out of order messages", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t join TEST_STREAM tt WITHIN 10 seconds on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 9999}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, - {"topic": 
"left_topic", "key": "0", "value": {"ID": 0, "NAME": "late-message", "VALUE": 10000}, "timestamp": 6000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 9999}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "late-message", "VALUE": 10000}, "timestamp": 6000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 9999}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "late-message", "VALUE": 10000, "F1": "blah", "F2": 50}, "timestamp": 9999}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "late-message", "VALUE": 10000, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 9999}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "late-message", "VALUE": 10000, "F1": "blah", "F2": 50}, "timestamp": 9999}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "late-message", "VALUE": 10000, "F1": "a", "F2": 10}, "timestamp": 15000} ] }, { "name": "stream stream inner join - join key not in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT name, value, f1, f2 FROM test t join TEST_STREAM tt WITHIN 11 SECONDS ON t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", 
"key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "sources": [ @@ -445,24 +445,24 @@ { "name": "stream stream inner join - right join key in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT tt.id, name, value, f1, f2 FROM test t join TEST_STREAM tt WITHIN 11 SECONDS ON t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 
16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "issues": [ @@ -477,30 +477,30 @@ "name": "stream stream outer join", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE STREAM LEFT_OUTER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t FULL OUTER join TEST_STREAM tt WITHIN 11 seconds on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": 
"newblah", "F2": 150}, "timestamp": 20000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 20000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, - {"topic": "LEFT_OUTER_JOIN", "key": "100", "value": {"T_ID": null, "NAME": null, "VALUE": null, "F1": "newblah", "F2": 150}, "timestamp": 20000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, + {"topic": "LEFT_OUTER_JOIN", "key": 100, "value": {"T_ID": null, "NAME": null, "VALUE": null, "F1": "newblah", "F2": 150}, "timestamp": 20000} ], "post": { "comments": [ @@ -514,30 +514,30 @@ { "name": "stream stream outer join - right join key in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, 
NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE STREAM LEFT_OUTER_JOIN as SELECT tt.id, name, value, f1, f2 FROM test t FULL OUTER join TEST_STREAM tt WITHIN 11 seconds on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 20000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 20000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": null, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"TT_ID": null, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": null, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"TT_ID": null, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, - {"topic": "LEFT_OUTER_JOIN", "key": "100", "value": {"TT_ID": 100, "NAME": null, "VALUE": null, "F1": "newblah", "F2": 150}, "timestamp": 20000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": null, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", 
"key": 10, "value": {"TT_ID": null, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": null, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"TT_ID": null, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, + {"topic": "LEFT_OUTER_JOIN", "key": 100, "value": {"TT_ID": 100, "NAME": null, "VALUE": null, "F1": "newblah", "F2": 150}, "timestamp": 20000} ], "post": { "comments": [ @@ -552,27 +552,27 @@ "name": "table table left join", "format": ["AVRO", "JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE LEFT_OUTER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t left join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": 
"LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000} ], "post": { "sources": [ @@ -583,27 +583,27 @@ { "name": "table table left join - join key not in projection", "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE TABLE LEFT_OUTER_JOIN as SELECT name, value, f1, f2 FROM test t left join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + 
{"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000} ], "post": { "sources": [ @@ -614,27 +614,27 @@ { "name": "table table left join - right join key in projection", "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE TABLE LEFT_OUTER_JOIN as SELECT tt.id, name, value, f1, f2 FROM test t left join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} + {"topic": 
"left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": null, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"TT_ID": null, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"TT_ID": null, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": null, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"TT_ID": null, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"TT_ID": null, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000} ], "post": { "comments": [ @@ -648,27 +648,27 @@ { "name": "table table left join - both join keys in projection", "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE TABLE LEFT_OUTER_JOIN as SELECT t.id, tt.id, name, value, f1, f2 FROM test t left join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, 
"NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "TT_ID": null, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"T_ID": 10, "TT_ID": null, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"T_ID": 90, "TT_ID": null, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "TT_ID": null, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "TT_ID": null, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"T_ID": 90, "TT_ID": null, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000} ], "post": { "sources": [ 
@@ -680,25 +680,25 @@ "name": "table table inner join", "format": ["AVRO", "JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE INNER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "15", "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 15, "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} ], "post": { "sources": [ 
@@ -709,25 +709,25 @@ { "name": "table table inner join - join key not in projection", "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE TABLE INNER_JOIN as SELECT name, value, f1, f2 FROM test t join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "15", "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 15, "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} ], "post": { "sources": [ @@ -738,25 +738,25 @@ { "name": "table table inner join - right join key in projection", "statements": [ - 
"CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE TABLE INNER_JOIN as SELECT tt.id, name, value, f1, f2 FROM test t join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "15", "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 15, "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} ], "post": { "issues": [ @@ -771,27 +771,27 @@ "name": "table table outer join", "format": ["AVRO", "JSON"], "statements": [ - "CREATE 
TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", "CREATE TABLE OUTER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t FULL OUTER join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "15", "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 15, "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000} ], "outputs": [ - {"topic": "OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "OUTER_JOIN", "key": "10", "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "OUTER_JOIN", "key": "15", "value": {"T_ID": null, "NAME": null, "VALUE": null, "F1": "c", "F2": 20}, "timestamp": 15500}, - {"topic": "OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} + {"topic": "OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", 
"VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "OUTER_JOIN", "key": 15, "value": {"T_ID": null, "NAME": null, "VALUE": null, "F1": "c", "F2": 20}, "timestamp": 15500}, + {"topic": "OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} ], "post": { "comments": [ @@ -805,27 +805,27 @@ { "name": "table table outer join - right join key in projection", "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE TABLE OUTER_JOIN as SELECT tt.id, name, value, f1, f2 FROM test t FULL OUTER join TEST_TABLE tt on t.id = tt.id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "15", "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 15, "value": {"ID": 15, "F1": "c", "F2": 20}, "timestamp": 15500}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000} ], "outputs": [ - {"topic": "OUTER_JOIN", "key": "0", "value": {"TT_ID": null, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "OUTER_JOIN", "key": "10", "value": {"TT_ID": null, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "OUTER_JOIN", "key": "15", "value": {"TT_ID": 15, "NAME": null, "VALUE": null, "F1": "c", "F2": 20}, "timestamp": 15500}, - {"topic": 
"OUTER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} + {"topic": "OUTER_JOIN", "key": 0, "value": {"TT_ID": null, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "OUTER_JOIN", "key": 10, "value": {"TT_ID": null, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "OUTER_JOIN", "key": 15, "value": {"TT_ID": 15, "NAME": null, "VALUE": null, "F1": "c", "F2": 20}, "timestamp": 15500}, + {"topic": "OUTER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 16000} ], "post": { "comments": [ @@ -840,24 +840,24 @@ "name": "stream table left join", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='{FORMAT}', key='ID');", "CREATE STREAM LEFT_JOIN as SELECT t.id, name, value, f1, f2 FROM test t left join test_table tt on t.id = tt.id;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "LEFT_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "LEFT_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, 
"F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "LEFT_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_JOIN", "key": "90", "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 15000} + {"topic": "LEFT_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "LEFT_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "LEFT_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_JOIN", "key": 90, "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 15000} ], "post": { "sources": [ @@ -868,24 +868,24 @@ { "name": "stream table left join - join key not in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM LEFT_JOIN as SELECT name, value, f1, f2 FROM test t left join test_table tt on t.id = tt.id;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "LEFT_JOIN", "key": "0", "value": {"NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "LEFT_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "LEFT_JOIN", "key": "0", "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_JOIN", "key": "90", "value": {"NAME": "ninety", "VALUE": 90, "F1": 
null, "F2": null}, "timestamp": 15000} + {"topic": "LEFT_JOIN", "key": 0, "value": {"NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "LEFT_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "LEFT_JOIN", "key": 0, "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_JOIN", "key": 90, "value": {"NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 15000} ], "post": { "sources": [ @@ -896,24 +896,24 @@ { "name": "stream table left join - right join key in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM LEFT_JOIN as SELECT tt.id, name, value, f1, f2 FROM test t left join test_table tt on t.id = tt.id;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "LEFT_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "LEFT_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "LEFT_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_JOIN", "key": "90", "value": {"TT_ID": null, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 15000} + {"topic": "LEFT_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "LEFT_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, 
"F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "LEFT_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_JOIN", "key": 90, "value": {"TT_ID": null, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 15000} ], "post": { "comments": [ @@ -928,23 +928,23 @@ "name": "stream table inner join", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='{FORMAT}', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t join test_table tt on t.id = tt.id;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "sources": [ @@ -955,23 +955,23 @@ { "name": "stream table inner join - join key not in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME 
varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT name, value, f1, f2 FROM test t join test_table tt on t.id = tt.id;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "sources": [ @@ -982,23 +982,23 @@ { "name": "stream table inner join - right join key in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT tt.id, name, 
value, f1, f2 FROM test t join test_table tt on t.id = tt.id;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "issues": [ @@ -1013,23 +1013,23 @@ "name": "join using ROWKEY in the criteria", "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='{FORMAT}', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='{FORMAT}', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t join test_table tt on t.ROWKEY = tt.ROWKEY;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": 
"foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "sources": [ @@ -1072,23 +1072,23 @@ { "name": "join using ROWKEY in the criteria - join key not in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT name, value, f1, f2 FROM test t join test_table tt on t.ROWKEY = tt.ROWKEY;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": 
"test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "sources": [ @@ -1099,23 +1099,23 @@ { "name": "join using ROWKEY in the criteria - right join key in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT tt.id, name, value, f1, f2 FROM test t join test_table tt on t.ROWKEY = tt.ROWKEY;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", 
"key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"TT_ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "issues": [ @@ -1129,23 +1129,23 @@ { "name": "join using ROWKEY in the criteria - left rowkey in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.rowkey AS ID, name, value, f1, f2 FROM test t join test_table tt on t.ROWKEY = tt.ROWKEY;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"ID": "0", "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"ID": "0", "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"ID": "0", "NAME": "bar", "VALUE": 99, "F1": 
"a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "issues": [ @@ -1159,23 +1159,23 @@ { "name": "join using ROWKEY in the criteria - right rowkey in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT tt.rowkey AS ID, name, value, f1, f2 FROM test t join test_table tt on t.ROWKEY = tt.ROWKEY;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"ID": "0", "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"ID": "0", "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"ID": "0", "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100, "F1": "zero", "F2": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99, "F1": "a", "F2": 10}, "timestamp": 15000} ], "post": { "issues": [ @@ -1189,23 +1189,23 @@ 
{ "name": "multiple join keys in projection", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');", + "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='test_table', value_format='JSON', key='ID');", "CREATE STREAM INNER_JOIN as SELECT t.ID AS ID1, t.ID AS ID2, t.rowkey AS ID3, tt.ID AS ID4, tt.ROWKEY AS ID5 FROM test t join test_table tt on t.ROWKEY = tt.ROWKEY;" ], "inputs": [ - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, - {"topic": "test_table", "key": "10", "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, - {"topic": "test_table", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "test_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, - {"topic": "test_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "zero", "F2": 0}, "timestamp": 0}, + {"topic": "test_table", "key": 10, "value": {"ID": 10, "F1": "100", "F2": 5}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "blah", "VALUE": 50}, "timestamp": 10000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 10000}, + {"topic": "test_table", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "test_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 15000}, + {"topic": "test_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 15000} ], "outputs": [ - {"topic": "INNER_JOIN", "key": "0", "value": {"ID1": 0, "ID2": 0, "ID3": "0", "ID4": 0, "ID5": "0"}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"ID1": 0, "ID2": 0, "ID3": "0", "ID4": 0, "ID5": "0"}, "timestamp": 10000}, - {"topic": "INNER_JOIN", "key": "0", "value": {"ID1": 0, "ID2": 0, "ID3": "0", "ID4": 0, "ID5": "0"}, "timestamp": 15000} + {"topic": "INNER_JOIN", "key": 0, "value": {"ID1": 0, "ID2": 0, "ID3": 0, "ID4": 0, "ID5": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"ID1": 0, "ID2": 0, "ID3": 0, "ID4": 0, "ID5": 0}, "timestamp": 10000}, + {"topic": "INNER_JOIN", "key": 0, "value": {"ID1": 0, "ID2": 0, "ID3": 0, "ID4": 0, "ID5": 0}, "timestamp": 15000} ], "post": { "issues": [ @@ -1222,9 +1222,9 @@ "name": "table join pipeline", "format": ["JSON"], "statements": [ - "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='{FORMAT}', key='ID');", - "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');", - "CREATE TABLE TEST_TABLE_2 (ID bigint, F3 varchar) WITH (kafka_topic='right_topic_2', value_format='{FORMAT}', key='ID');", + "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME 
+ "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='{FORMAT}', key='ID');",
+ "CREATE TABLE TEST_TABLE_2 (ROWKEY BIGINT KEY, ID bigint, F3 varchar) WITH (kafka_topic='right_topic_2', value_format='{FORMAT}', key='ID');",
"CREATE TABLE INNER_JOIN WITH (PARTITIONS=4) as SELECT t.id, name, value, f1, f2 FROM test t join TEST_TABLE tt on t.id = tt.id;",
"CREATE TABLE INNER_JOIN_2 AS SELECT t_id, name, f1, f3 FROM inner_join tt join TEST_TABLE_2 t ON t.id = tt.t_id;"
],
@@ -1236,40 +1236,40 @@
}
],
"inputs": [
- {"topic": "INNER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "X", "VALUE": 0, "F1": "yo dawg", "F2": 50}, "timestamp": 0},
- {"topic": "right_topic_2", "key": "0", "value": {"ID": 0, "F3": "I heard you like joins"}, "timestamp": 10000},
- {"topic": "INNER_JOIN", "key": "100", "value": {"T_ID": 100, "NAME": "X", "VALUE": 0, "F1": "KSQL has table-table joins", "F2": 50}, "timestamp": 15000},
- {"topic": "right_topic_2", "key": "100", "value": {"ID": 100, "F3": "so now you can join your join"}, "timestamp": 20000}
+ {"topic": "INNER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "X", "VALUE": 0, "F1": "yo dawg", "F2": 50}, "timestamp": 0},
+ {"topic": "right_topic_2", "key": 0, "value": {"ID": 0, "F3": "I heard you like joins"}, "timestamp": 10000},
+ {"topic": "INNER_JOIN", "key": 100, "value": {"T_ID": 100, "NAME": "X", "VALUE": 0, "F1": "KSQL has table-table joins", "F2": 50}, "timestamp": 15000},
+ {"topic": "right_topic_2", "key": 100, "value": {"ID": 100, "F3": "so now you can join your join"}, "timestamp": 20000}
],
"outputs": [
- {"topic": "INNER_JOIN_2", "key": "0", "value": {"T_ID": 0, "NAME": "X", "F1": "yo dawg", "F3": "I heard you like joins"}, "timestamp": 10000},
- {"topic": "INNER_JOIN_2", "key": "100", "value": {"T_ID": 100, "NAME": "X", "F1": "KSQL has table-table joins", "F3": "so now you can join your join"}, "timestamp": 20000}
+ {"topic": "INNER_JOIN_2", "key": 0, "value": {"T_ID": 0, "NAME": "X", "F1": "yo dawg", "F3": "I heard you like joins"}, "timestamp": 10000},
+ {"topic": "INNER_JOIN_2", "key": 100, "value": {"T_ID": 100, "NAME": "X", "F1": "KSQL has table-table joins", "F3": "so now you can join your join"}, "timestamp": 20000}
]
},
{
"name": "table table join with where clause",
"statements": [
- "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');",
- "CREATE TABLE TEST_TABLE (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');",
+ "CREATE TABLE TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');",
+ "CREATE TABLE TEST_TABLE (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');",
"CREATE TABLE OUTPUT as SELECT t.id, name, tt.f1, f2 FROM test t JOIN test_table tt ON t.id = tt.id WHERE t.value > 10 AND tt.f2 > 5;"
],
"inputs": [
- {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0},
- {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 4}, "timestamp": 10000},
- {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000},
- {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000},
- {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000},
{"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "right_topic", "key": "90", "value": {"ID": 0, "F1": "b", "F2": 10}, "timestamp": 18000}, - {"topic": "right_topic", "key": "90", "value": null, "timestamp": 19000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 4}, "timestamp": 10000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "right_topic", "key": 90, "value": {"ID": 0, "F1": "b", "F2": 10}, "timestamp": 18000}, + {"topic": "right_topic", "key": 90, "value": null, "timestamp": 19000} ], "outputs": [ - {"topic": "OUTPUT", "key": "0", "value": null, "timestamp": 10000}, - {"topic": "OUTPUT", "key": "0", "value": null, "timestamp": 13000}, - {"topic": "OUTPUT", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "OUTPUT", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "F1": "a", "F2": 10}, "timestamp": 16000}, - {"topic": "OUTPUT", "key": "90", "value": {"T_ID": 90, "NAME": "ninety", "F1": "b", "F2": 10}, "timestamp": 18000}, - {"topic": "OUTPUT", "key": "90", "value": null, "timestamp": 19000} + {"topic": "OUTPUT", "key": 0, "value": null, "timestamp": 10000}, + {"topic": "OUTPUT", "key": 0, "value": null, "timestamp": 13000}, + {"topic": "OUTPUT", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "OUTPUT", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "F1": "a", "F2": 10}, "timestamp": 16000}, + {"topic": "OUTPUT", "key": 90, "value": {"T_ID": 90, "NAME": "ninety", "F1": "b", "F2": 10}, "timestamp": 18000}, + {"topic": "OUTPUT", "key": 90, "value": null, "timestamp": 19000} ], "post": { "sources": [ @@ -1314,16 +1314,16 @@ { "name": "stream to table when table does not have key field and joining by table ROWKEY", "statements": [ - "CREATE STREAM S (ID bigint) WITH (kafka_topic='S', value_format='JSON', key='ID');", - "CREATE TABLE NO_KEY (ID bigint, NAME string) WITH (kafka_topic='NO_KEY', value_format='JSON');", + "CREATE STREAM S (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='S', value_format='JSON', key='ID');", + "CREATE TABLE NO_KEY (ROWKEY BIGINT KEY, ID bigint, NAME string) WITH (kafka_topic='NO_KEY', value_format='JSON');", "CREATE STREAM OUTPUT as SELECT s.id, name FROM S JOIN NO_KEY t ON s.id = t.ROWKEY;" ], "inputs": [ - {"topic": "NO_KEY", "key": "0", "value": {"ID": 0, "name": "bob"}, "timestamp": 0}, - {"topic": "S", "key": "0", "value": {"ID": 0}, "timestamp": 10} + {"topic": "NO_KEY", "key": 0, "value": {"ID": 0, "name": "bob"}, "timestamp": 0}, + {"topic": "S", "key": 0, "value": {"ID": 0}, "timestamp": 10} ], "outputs": [ - {"topic": "OUTPUT", "key": "0", "value": {"S_ID": 0, "NAME": "bob"}, "timestamp": 10} + {"topic": "OUTPUT", "key": 0, "value": {"S_ID": 0, "NAME": "bob"}, "timestamp": 10} ], "post": { "sources": [ @@ -1744,28 +1744,28 @@ { "name": "unqualified join criteria", 
"statements": [ - "CREATE STREAM TEST (LEFT_ID bigint, NAME varchar) WITH (kafka_topic='left_topic', value_format='JSON', key='LEFT_ID');", - "CREATE STREAM TEST_STREAM (RIGHT_ID bigint, F1 varchar) WITH (kafka_topic='right_topic', value_format='JSON', key='RIGHT_ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, LEFT_ID bigint, NAME varchar) WITH (kafka_topic='left_topic', value_format='JSON', key='LEFT_ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, RIGHT_ID bigint, F1 varchar) WITH (kafka_topic='right_topic', value_format='JSON', key='RIGHT_ID');", "CREATE STREAM LEFT_OUTER_JOIN as SELECT t.left_id, name, f1 FROM test t left join TEST_STREAM tt WITHIN 11 seconds ON left_id = right_id;" ], "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"LEFT_ID": 0, "NAME": "zero"}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"RIGHT_ID": 0, "F1": "blah"}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"LEFT_ID": 10, "NAME": "100"}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"LEFT_ID": 0, "NAME": "foo"}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"RIGHT_ID": 0, "F1": "a"}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"RIGHT_ID": 100, "F1": "newblah"}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"LEFT_ID": 90, "NAME": "ninety"}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"LEFT_ID": 0, "NAME": "bar"}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"LEFT_ID": 0, "NAME": "zero"}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"RIGHT_ID": 0, "F1": "blah"}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"LEFT_ID": 10, "NAME": "100"}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"LEFT_ID": 0, "NAME": "foo"}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"RIGHT_ID": 0, "F1": "a"}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"RIGHT_ID": 100, "F1": "newblah"}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"LEFT_ID": 90, "NAME": "ninety"}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"LEFT_ID": 0, "NAME": "bar"}, "timestamp": 30000} ], "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"LEFT_ID": 0, "NAME": "zero", "F1": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"LEFT_ID": 0, "NAME": "zero", "F1": "blah"}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"LEFT_ID": 10, "NAME": "100", "F1": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"LEFT_ID": 0, "NAME": "foo", "F1": "blah"}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"LEFT_ID": 0, "NAME": "foo", "F1": "a"}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"LEFT_ID": 90, "NAME": "ninety", "F1": null}, "timestamp": 17000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"LEFT_ID": 0, "NAME": "bar", "F1": null}, "timestamp": 30000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"LEFT_ID": 0, "NAME": "zero", "F1": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"LEFT_ID": 0, "NAME": "zero", "F1": "blah"}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"LEFT_ID": 10, "NAME": "100", "F1": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"LEFT_ID": 0, 
"NAME": "foo", "F1": "blah"}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"LEFT_ID": 0, "NAME": "foo", "F1": "a"}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"LEFT_ID": 90, "NAME": "ninety", "F1": null}, "timestamp": 17000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"LEFT_ID": 0, "NAME": "bar", "F1": null}, "timestamp": 30000} ], "post": { "sources": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json b/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json index ba4bdb68b4b1..c631d12c8b3e 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/key-field.json @@ -167,7 +167,7 @@ {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "key": 1,"value": {"KSQL_COL_0": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"KSQL_COL_0": 1}} ], "post": { "sources": [ @@ -179,14 +179,14 @@ { "name": "stream | initially set | no key change | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT * FROM INPUT;" ], "inputs": [ - {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"FOO":1, "BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"FOO":1, "BAR": 2}} ], "post": { "sources": [ @@ -198,14 +198,14 @@ { "name": "stream | initially set | no key change | key in value | aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT foo as aliased, bar FROM INPUT;" ], "inputs": [ - {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"ALIASED":1, "BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"ALIASED":1, "BAR": 2}} ], "post": { "sources": [ @@ -217,14 +217,14 @@ { "name": "stream | initially set | no key change | key in value | aliasing + duplicate", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT foo as aliased, bar as foo FROM INPUT;" ], "inputs": [ - {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"ALIASED":1, "FOO": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"ALIASED":1, "FOO": 2}} ], "post": { "sources": [ @@ -236,14 +236,14 @@ { "name": "stream | initially set | no key change | key not in value | -", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", 
"CREATE STREAM OUTPUT AS SELECT bar FROM INPUT;" ], "inputs": [ - {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"BAR": 2}} + {"topic": "OUTPUT", "key": 1, "value": {"BAR": 2}} ], "post": { "sources": [ @@ -310,11 +310,11 @@ { "name": "stream | initially set | partition by (different) | key in value | no aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT * FROM INPUT PARTITION BY bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ {"topic": "OUTPUT", "key": 2, "value": {"FOO": 1, "BAR": 2}} @@ -329,7 +329,7 @@ { "name": "stream | initially set | partition by (different) | key in value | aliasing", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT foo, bar AS aliased FROM INPUT PARTITION BY aliased;" ], "expectedException": { @@ -340,11 +340,11 @@ { "name": "stream | initially set | partition by (different) | key not in value | -", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT foo FROM INPUT PARTITION BY bar;" ], "inputs": [ - {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ {"topic": "OUTPUT", "key": 2, "value": {"FOO":1}} @@ -773,14 +773,14 @@ { "name": "where clause", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT foo FROM INPUT WHERE bar < 10;" ], "inputs": [ - {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"FOO": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"FOO": 1}} ], "post": { "sources": [ @@ -791,14 +791,14 @@ { "name": "where clause with alias", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", + "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');", "CREATE STREAM OUTPUT AS SELECT foo as boo FROM INPUT WHERE bar < 10;" ], "inputs": [ - {"topic": "input_topic", "value": {"foo": 1, "bar": 2}} + {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}} ], "outputs": [ - {"topic": "OUTPUT", "value": {"BOO": 1}} + {"topic": "OUTPUT", "key": 1, "value": {"BOO": 1}} ], "post": { "sources": [ @@ -809,14 +809,14 @@ { "name": "using source alias in projection", "statements": [ - "CREATE STREAM INPUT (foo INT, bar INT) WITH 
+ "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT i.foo FROM INPUT i WHERE bar < 10;"
],
"inputs": [
- {"topic": "input_topic", "value": {"foo": 1, "bar": 2}}
+ {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}}
],
"outputs": [
- {"topic": "OUTPUT", "value": {"FOO": 1}}
+ {"topic": "OUTPUT", "key": 1, "value": {"FOO": 1}}
],
"post": {
"sources": [
@@ -824,17 +824,27 @@
]
}
},
+ {
+ "name": "different type to ROWKEY",
+ "statements": [
+ "CREATE STREAM INPUT (ROWKEY STRING KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', value_format='JSON', KEY='Foo');"
+ ],
+ "expectedException": {
+ "type": "io.confluent.ksql.util.KsqlStatementException",
+ "message": "The KEY field (FOO) identified in the WITH clause is of a different type to the actual key column.\nEither change the type of the KEY field to match ROWKEY, or explicitly set ROWKEY to the type of the KEY field by adding 'ROWKEY INTEGER KEY' in the schema.\nKEY field type: INTEGER\nROWKEY type: STRING"
+ }
+ },
{
"name": "using full source name in projection",
"statements": [
- "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');",
+ "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT input.foo FROM INPUT WHERE bar < 10;"
],
"inputs": [
- {"topic": "input_topic", "value": {"foo": 1, "bar": 2}}
+ {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}}
],
"outputs": [
- {"topic": "OUTPUT", "value": {"FOO": 1}}
+ {"topic": "OUTPUT", "key": 1, "value": {"FOO": 1}}
],
"post": {
"sources": [
@@ -845,14 +855,14 @@
{
"name": "where only rowkey is in projection",
"statements": [
- "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');",
+ "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT ROWKEY AS ID FROM INPUT;"
],
"inputs": [
- {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}}
+ {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}}
],
"outputs": [
- {"topic": "OUTPUT", "key": "1", "value": {"ID": "1"}}
+ {"topic": "OUTPUT", "key": 1, "value": {"ID": 1}}
],
"post": {
"issues": [
@@ -866,14 +876,14 @@
{
"name": "multiple copies of key field in projection",
"statements": [
- "CREATE STREAM INPUT (foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');",
+ "CREATE STREAM INPUT (ROWKEY INT KEY, foo INT, bar INT) WITH (kafka_topic='input_topic', key='foo', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT foo as foo0, foo as foo1, rowkey as foo2, rowkey as foo3 FROM INPUT;"
],
"inputs": [
- {"topic": "input_topic", "key": "1", "value": {"foo": 1, "bar": 2}}
+ {"topic": "input_topic", "key": 1, "value": {"foo": 1, "bar": 2}}
],
"outputs": [
- {"topic": "OUTPUT", "key": "1", "value": {"FOO0": 1, "FOO1": 1, "FOO2": "1", "FOO3": "1"}}
+ {"topic": "OUTPUT", "key": 1, "value": {"FOO0": 1, "FOO1": 1, "FOO2": 1, "FOO3": 1}}
],
"post": {
"issues": [
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json
index 6d0b16be480b..1fbf48f9d1f4 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json
@@ -3,7 +3,7 @@
{
"name": "partition by with projection select some",
"statements": [
- "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) with (kafka_topic='test_topic', value_format = 'delimited', key='ID');",
+ "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) with (kafka_topic='test_topic', value_format = 'delimited');",
"CREATE STREAM REPARTITIONED AS select name,id from TEST partition by name;"
],
"inputs": [
@@ -85,7 +85,7 @@
{
"name": "partition by with projection select all",
"statements": [
- "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) with (kafka_topic='test_topic', value_format = 'delimited', key='ID');",
+ "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) with (kafka_topic='test_topic', value_format = 'delimited');",
"CREATE STREAM REPARTITIONED AS select * from TEST partition by name;"
],
"inputs": [
@@ -98,12 +98,12 @@
{
"name": "partition by with null value",
"statements": [
- "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) with (kafka_topic='test_topic', value_format = 'delimited', key='ID');",
+ "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) with (kafka_topic='test_topic', value_format = 'delimited', key='ID');",
"CREATE STREAM REPARTITIONED AS select name,id from TEST partition by name;"
],
"inputs": [
- {"topic": "test_topic", "key": "0", "value": null},
- {"topic": "test_topic", "key": "0", "value": "0,zero,50"}
+ {"topic": "test_topic", "key": 0, "value": null},
+ {"topic": "test_topic", "key": 0, "value": "0,zero,50"}
],
"outputs": [
{"topic": "REPARTITIONED", "key": "zero", "value": "zero,0"}
@@ -112,7 +112,7 @@
{
"name": "partition by with null partition by value",
"statements": [
- "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) with (kafka_topic='test_topic', value_format = 'delimited', key='ID');",
+ "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) with (kafka_topic='test_topic', value_format = 'delimited');",
"CREATE STREAM REPARTITIONED AS select name,id from TEST partition by name;"
],
"inputs": [
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/project-filter.json b/ksql-functional-tests/src/test/resources/query-validation-tests/project-filter.json
index cb2e0de9d9ff..026e723b5a8e 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/project-filter.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/project-filter.json
@@ -11,7 +11,7 @@
{
"name": "project and filter",
"statements": [
- "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');",
+ "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED');",
"CREATE STREAM S1 as SELECT name FROM test where id > 100;"
],
"inputs": [
@@ -26,7 +26,7 @@
{
"name": "project string with embedded code",
"statements": [
- "CREATE STREAM TEST (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');",
+ "CREATE STREAM TEST (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED');",
"CREATE STREAM S1 as SELECT '\" + new java.util.function.Supplier(){public String get() {return \"boom\";}}.get() + \"' as x FROM test;"
],
"inputs": [
@@ -69,7 +69,7 @@
{
"name": "project and negative filter",
"statements": [
- "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED');", "CREATE STREAM S2 as SELECT name, id FROM test where id < -100;" ], "inputs": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/session-windows.json b/ksql-functional-tests/src/test/resources/query-validation-tests/session-windows.json index 056a944334da..74e69372c76e 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/session-windows.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/session-windows.json @@ -36,7 +36,7 @@ { "name": "import session table", "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', KEY='ID', WINDOW_TYPE='SeSSion');", + "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', WINDOW_TYPE='SeSSion');", "CREATE TABLE S2 as SELECT *, ROWKEY as KEY FROM test;" ], "inputs": [ @@ -103,7 +103,7 @@ { "name": "import table with invalid window size", "statements": [ - "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID', WINDOW_TYPE='Session', WINDOW_SIZE='30 seconds');" + "CREATE TABLE TEST (ID bigint, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', WINDOW_TYPE='Session', WINDOW_SIZE='30 seconds');" ], "expectedException": { "type": "io.confluent.ksql.parser.exception.ParseFailedException", diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json b/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json index bd38e6754f33..5c3e7dce6068 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json @@ -12,7 +12,7 @@ { "format": ["AVRO", "JSON"], "statements": [ - "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array, MAPCOL map, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', key='orderid', value_format='{FORMAT}');", + "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array, MAPCOL map, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', value_format='{FORMAT}');", "CREATE STREAM s1 AS SELECT * FROM orders;" ], "name": "simple struct select star", @@ -392,12 +392,12 @@ ] }, { + "name": "simple struct read struct as json string", "format": ["JSON"], "statements": [ - "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid VARCHAR, ORDERUNITS double, ARRAYCOL array, MAPCOL map, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', key='orderid', value_format='{FORMAT}');", + "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid VARCHAR, ORDERUNITS double, ARRAYCOL array, MAPCOL map, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH 
"CREATE STREAM s1 AS SELECT itemid, EXTRACTJSONFIELD(itemid, '$.ITEMID') FROM orders;"
],
- "name": "simple struct read struct as json string",
"inputs": [
{
"topic": "test_topic",
@@ -637,7 +637,7 @@
"name": "simple struct select filter",
"format": ["AVRO", "JSON"],
"statements": [
- "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', key='orderid', value_format='{FORMAT}');",
+ "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', value_format='{FORMAT}');",
"CREATE STREAM S2 AS SELECT itemid->name FROM orders where itemid->name = 'Item_6';"
],
"inputs": [
@@ -858,7 +858,7 @@
"name": "simple struct select filter 2",
"format": ["AVRO", "JSON"],
"statements": [
- "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', key='orderid', value_format='{FORMAT}');",
+ "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', value_format='{FORMAT}');",
"CREATE STREAM S3 AS SELECT itemid->category->id, address->street , address->zipcode as zipcode, address->state as state FROM orders WHERE address->state LIKE '%_9';"
],
"inputs": [
@@ -1253,7 +1253,7 @@
"name": "simples struct select filter 3",
"format": ["AVRO", "JSON"],
"statements": [
- "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', key='orderid', value_format='{FORMAT}');",
+ "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', value_format='{FORMAT}');",
"CREATE STREAM S4 AS SELECT itemid->itemid, itemid as iid, itemid->category->name as catname FROM orders WHERE itemid->itemid = 6 OR itemid->category->name = 'Food';"
],
"outputs": [
@@ -1518,7 +1518,7 @@
"name": "simple struct select filter 4",
"format": ["AVRO", "JSON"],
"statements": [
- "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', key='orderid', value_format='{FORMAT}');",
+ "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', value_format='{FORMAT}');",
"CREATE STREAM S5 as SELECT itemid->itemid * 10 as itemid, concat(itemid->category->name, '_HELLO') as cname, len(address->state) as state_length FROM orders WHERE address->state LIKE '%1' OR address->state LIKE '%9';"
],
"outputs": [
@@ -1721,7 +1721,7 @@
"name": "simple struct select with nulls",
"format": ["AVRO", "JSON"],
"statements": [
- "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', key='orderid', value_format='{FORMAT}');",
+ "CREATE STREAM orders (ordertime bigint, orderid bigint, itemid STRUCT< ITEMID BIGINT, NAME VARCHAR, CATEGORY STRUCT< ID BIGINT, NAME VARCHAR>>, ORDERUNITS double, ARRAYCOL array<double>, MAPCOL map<varchar, double>, address STRUCT < number bigint, street varchar, city varchar, state varchar, zipcode bigint>) WITH (kafka_topic='test_topic', value_format='{FORMAT}');",
"CREATE STREAM S6 as SELECT itemid->itemid * 10 as itemid, itemid->category->id as catid, itemid->category as cat FROM orders;"
],
"outputs": [
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/stringdate.json b/ksql-functional-tests/src/test/resources/query-validation-tests/stringdate.json
index 226d68a48c2c..cf330c548b02 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/stringdate.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/stringdate.json
@@ -3,7 +3,7 @@
{
"name": "string to date",
"statements": [
- "CREATE STREAM TEST (ID bigint, NAME varchar, date varchar, format varchar) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');",
+ "CREATE STREAM TEST (ID bigint, NAME varchar, date varchar, format varchar) WITH (kafka_topic='test_topic', value_format='DELIMITED');",
"CREATE STREAM TS AS select id, stringtodate(date, format) as ts from test;"
],
"inputs": [
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/stringtimestamp.json b/ksql-functional-tests/src/test/resources/query-validation-tests/stringtimestamp.json
index 011c203ec3e4..1e68f6f0187c 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/stringtimestamp.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/stringtimestamp.json
@@ -11,7 +11,7 @@
{
"name": "string to timestamp",
"statements": [
- "CREATE STREAM TEST (ID bigint, NAME varchar, timestamp varchar) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');",
+ "CREATE STREAM TEST (ID bigint, NAME varchar, timestamp varchar) WITH (kafka_topic='test_topic', value_format='DELIMITED');",
"CREATE STREAM TS AS select id, stringtotimestamp(timestamp, 'yyyy-MM-dd''T''HH:mm:ssX') as ts from test;"
],
"inputs": [
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/sum.json b/ksql-functional-tests/src/test/resources/query-validation-tests/sum.json
index dc40f2cfa97c..43a4a132f3ea 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/sum.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/sum.json
@@ -26,17 +26,17 @@
"name": "sum int left join of table",
"comment": "from https://github.com/confluentinc/ksql/issues/2490",
"statements": [
- "CREATE TABLE t1 (ID bigint, TOTAL integer) WITH (kafka_topic='T1', value_format='AVRO', key='ID');",
- "CREATE TABLE t2 (ID bigint, TOTAL integer) WITH (kafka_topic='T2', value_format='AVRO', key='ID');",
+ "CREATE TABLE t1 (ROWKEY BIGINT KEY, ID bigint, TOTAL integer) WITH (kafka_topic='T1', value_format='AVRO', key='ID');",
+ "CREATE TABLE t2 (ROWKEY BIGINT KEY, ID bigint, TOTAL integer) WITH (kafka_topic='T2', value_format='AVRO', key='ID');",
"CREATE TABLE OUTPUT AS SELECT t1.id as ID, SUM(t2.total) as SUM FROM T1 LEFT JOIN T2 ON (t1.id = t2.id) GROUP BY t1.id;"
],
"inputs": [
- {"topic": "T1", "key": "0", "value": {"id": 0, "total": 100}},
- {"topic": "T1", "key": "1", "value": {"id": 1, "total": 101}},
- {"topic": "T2", "key": "0", "value": {"id": 0, "total": 5}},
- {"topic": "T2", "key": "1", "value": {"id": 1, "total": 10}},
- {"topic": "T2", "key": "0", "value": {"id": 0, "total": 20}},
- {"topic": "T2", "key": "0", "value": null}
+ {"topic": "T1", "key": 0, "value": {"id": 0, "total": 100}},
+ {"topic": "T1", "key": 1, "value": {"id": 1, "total": 101}},
+ {"topic": "T2", "key": 0, "value": {"id": 0, "total": 5}},
+ {"topic": "T2", "key": 1, "value": {"id": 1, "total": 10}},
+ {"topic": "T2", "key": 0, "value": {"id": 0, "total": 20}},
+ {"topic": "T2", "key": 0, "value": null}
],
"outputs": [
{"topic": "OUTPUT", "key": 0,"value": {"ID": 0, "SUM": 0}},
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json b/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json
index 7672e5b23905..bab92b332956 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/table-functions.json
@@ -6,7 +6,7 @@
{
"name": "table function as last select",
"statements": [
- "CREATE STREAM TEST (ID BIGINT, MY_ARR ARRAY<BIGINT>) WITH (kafka_topic='test_topic', KEY='ID', value_format='JSON');",
+ "CREATE STREAM TEST (ID BIGINT, MY_ARR ARRAY<BIGINT>) WITH (kafka_topic='test_topic', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT ID, EXPLODE(MY_ARR) VAL FROM TEST;"
],
"inputs": [
@@ -23,7 +23,7 @@
{
"name": "table function as first select",
"statements": [
- "CREATE STREAM TEST (ID BIGINT, MY_ARR ARRAY<BIGINT>) WITH (kafka_topic='test_topic', KEY='ID', value_format='JSON');",
+ "CREATE STREAM TEST (ID BIGINT, MY_ARR ARRAY<BIGINT>) WITH (kafka_topic='test_topic', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT EXPLODE(MY_ARR) AS VAL, ID FROM TEST;"
],
"inputs": [
@@ -40,7 +40,7 @@
{
"name": "table function with non selected columns",
"statements": [
- "CREATE STREAM TEST (FOO BIGINT, ID BIGINT, MY_ARR ARRAY<BIGINT>, BAR BIGINT) WITH (kafka_topic='test_topic', KEY='ID', value_format='JSON');",
+ "CREATE STREAM TEST (FOO BIGINT, ID BIGINT, MY_ARR ARRAY<BIGINT>, BAR BIGINT) WITH (kafka_topic='test_topic', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT EXPLODE(MY_ARR) AS VAL, ID FROM TEST;"
],
"inputs": [
@@ -57,7 +57,7 @@
{
"name": "table function with no other selected columns",
"statements": [
- "CREATE STREAM TEST (FOO BIGINT, ID BIGINT, MY_ARR ARRAY<BIGINT>, BAR BIGINT) WITH (kafka_topic='test_topic', KEY='ID', value_format='JSON');",
+ "CREATE STREAM TEST (FOO BIGINT, ID BIGINT, MY_ARR ARRAY<BIGINT>, BAR BIGINT) WITH (kafka_topic='test_topic', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT EXPLODE(MY_ARR) AS VAL FROM TEST;"
],
"inputs": [
@@ -74,7 +74,7 @@
{
"name": "table function with no alias",
"statements": [
- "CREATE STREAM TEST (FOO BIGINT, ID BIGINT, MY_ARR ARRAY<BIGINT>, BAR BIGINT) WITH (kafka_topic='test_topic', KEY='ID', value_format='JSON');",
+ "CREATE STREAM TEST (FOO BIGINT, ID BIGINT, MY_ARR ARRAY<BIGINT>, BAR BIGINT) WITH (kafka_topic='test_topic', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT EXPLODE(MY_ARR) FROM TEST;"
],
"inputs": [
@@ -91,7 +91,7 @@
{
"name": "table function shouldn't be in FROM clause",
"statements": [
- "CREATE STREAM TEST (ID BIGINT, MY_ARR ARRAY<BIGINT>) WITH (kafka_topic='test_topic', KEY='ID', value_format='JSON');",
+ "CREATE STREAM TEST (ID BIGINT, MY_ARR ARRAY<BIGINT>) WITH (kafka_topic='test_topic', value_format='JSON');",
"CREATE STREAM OUTPUT AS SELECT * FROM EXPLODE(MY_ARR);"
],
"expectedException": {
@@ -241,7 +241,7 @@
{
"name": "table functions don't support table sources",
"statements": [
- "CREATE TABLE TEST (ID BIGINT, MY_ARR ARRAY<BIGINT>) WITH (kafka_topic='test_topic', KEY='ID', value_format='JSON');",
+ "CREATE TABLE TEST (ID BIGINT, MY_ARR ARRAY<BIGINT>) WITH (kafka_topic='test_topic', value_format='JSON');",
"CREATE TABLE OUTPUT AS SELECT ID, EXPLODE(MY_ARR) VAL FROM TEST;"
],
"expectedException": {
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/table.json b/ksql-functional-tests/src/test/resources/query-validation-tests/table.json
index 3dbeab6cbc80..f0b224cb9918 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/table.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/table.json
@@ -3,7 +3,7 @@
{
"name": "update-delete",
"statements": [
- "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE int) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');",
+ "CREATE TABLE TEST (ID bigint, NAME varchar, VALUE int) WITH (kafka_topic='test_topic', value_format='DELIMITED');",
"CREATE TABLE T1 as SELECT NAME, VALUE FROM test;"
],
"inputs": [
diff --git a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/insert-values.json b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/insert-values.json
index 3cdf825a9d26..908c4819cace 100644
--- a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/insert-values.json
+++ b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/insert-values.json
@@ -81,13 +81,13 @@
{
"name": "rowkey should be set when stream has int key and only key specified in insert",
"statements": [
- "CREATE STREAM TEST (ID INT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
+ "CREATE STREAM TEST (ROWKEY INT KEY, ID INT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
"INSERT INTO TEST (ID) VALUES (10);"
],
"inputs": [
],
"outputs": [
- {"topic": "test_topic", "key": "10", "value": {"ID": 10}}
+ {"topic": "test_topic", "key": 10, "value": {"ID": 10}}
]
},
{
@@ -105,37 +105,25 @@
{
"name": "rowkey should be set when stream has double key and only key specified in insert",
"statements": [
- "CREATE STREAM TEST (ID DOUBLE) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
+ "CREATE STREAM TEST (ROWKEY DOUBLE KEY, ID DOUBLE) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
"INSERT INTO TEST (ID) VALUES (1.23);"
],
"inputs": [
],
"outputs": [
- {"topic": "test_topic", "key": "1.23", "value": {"ID": 1.23}}
+ {"topic": "test_topic", "key": 1.23, "value": {"ID": 1.23}}
]
},
{
"name": "rowkey should be set when stream has bigint key and only key specified in insert",
"statements": [
- "CREATE STREAM TEST (ID BIGINT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
+ "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID BIGINT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
"INSERT INTO TEST (ID) VALUES (10);"
],
"inputs": [
],
"outputs": [
- {"topic": "test_topic", "key": "10", "value": {"ID": 10}}
- ]
- },
- {
- "name": "rowkey should be set when stream has boolean key and only key specified in insert",
- "statements": [
- "CREATE STREAM TEST (ID BOOLEAN) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
- "INSERT INTO TEST (ID) VALUES (TRUE);"
- ],
- "inputs": [
- ],
- "outputs": [
- {"topic": "test_topic", "key": "true", "value": {"ID": true}}
+ {"topic": "test_topic", "key": 10, "value": {"ID": 10}}
]
},
{
@@ -153,13 +141,13 @@
{
"name": "rowkey and key should match when stream has int key",
"statements": [
- "CREATE STREAM TEST (ID INT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
- "INSERT INTO TEST (ROWKEY, ID) VALUES ('10', 10);"
+ "CREATE STREAM TEST (ROWKEY INT KEY, ID INT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
+ "INSERT INTO TEST (ROWKEY, ID) VALUES (10, 10);"
],
"inputs": [
],
"outputs": [
- {"topic": "test_topic", "key": "10", "value": {"ID": 10}}
+ {"topic": "test_topic", "key": 10, "value": {"ID": 10}}
]
},
{
@@ -177,37 +165,25 @@
{
"name": "rowkey and key should match when stream has double key",
"statements": [
- "CREATE STREAM TEST (ID DOUBLE) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
- "INSERT INTO TEST (ROWKEY, ID) VALUES ('1.23', 1.23);"
+ "CREATE STREAM TEST (ROWKEY DOUBLE KEY, ID DOUBLE) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
+ "INSERT INTO TEST (ROWKEY, ID) VALUES (1.23, 1.23);"
],
"inputs": [
],
"outputs": [
- {"topic": "test_topic", "key": "1.23", "value": {"ID": 1.23}}
+ {"topic": "test_topic", "key": 1.23, "value": {"ID": 1.23}}
]
},
{
"name": "rowkey and key should match when stream has bigint key",
"statements": [
- "CREATE STREAM TEST (ID BIGINT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
- "INSERT INTO TEST (ROWKEY, ID) VALUES ('10', 10);"
- ],
- "inputs": [
- ],
- "outputs": [
- {"topic": "test_topic", "key": "10", "value": {"ID": 10}}
- ]
- },
- {
- "name": "rowkey and key should match when stream has boolean key",
- "statements": [
- "CREATE STREAM TEST (ID BOOLEAN) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
- "INSERT INTO TEST (ROWKEY, ID) VALUES ('true', true);"
+ "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID BIGINT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
+ "INSERT INTO TEST (ROWKEY, ID) VALUES (10, 10);"
],
"inputs": [
],
"outputs": [
- {"topic": "test_topic", "key": "true", "value": {"ID": true}}
+ {"topic": "test_topic", "key": 10, "value": {"ID": 10}}
]
},
{
@@ -225,8 +201,8 @@
{
"name": "should fail on mismatch between rowkey and key values when stream has key",
"statements": [
- "CREATE STREAM TEST (ID INT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
- "INSERT INTO TEST (ROWKEY, ID) VALUES ('10', 5);"
+ "CREATE STREAM TEST (ROWKEY INT KEY, ID INT) WITH (kafka_topic='test_topic', value_format='JSON', key='ID');",
+ "INSERT INTO TEST (ROWKEY, ID) VALUES (10, 5);"
],
"expectedError": {
"type": "io.confluent.ksql.rest.entity.KsqlStatementErrorMessage",
"io.confluent.ksql.rest.entity.KsqlStatementErrorMessage", diff --git a/ksql-functional-tests/src/test/resources/test-runner/correct/join/input.json b/ksql-functional-tests/src/test/resources/test-runner/correct/join/input.json index b251e0df3bda..f7a7cd2bc660 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/join/input.json +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/join/input.json @@ -1,12 +1,12 @@ { "inputs": [ - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "left_topic", "key": "10", "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, - {"topic": "right_topic", "key": "0", "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "right_topic", "key": "100", "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, - {"topic": "left_topic", "key": "90", "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, - {"topic": "left_topic", "key": "0", "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "zero", "VALUE": 0}, "timestamp": 0}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "left_topic", "key": 10, "value": {"ID": 10, "NAME": "100", "VALUE": 5}, "timestamp": 11000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "foo", "VALUE": 100}, "timestamp": 13000}, + {"topic": "right_topic", "key": 0, "value": {"ID": 0, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "right_topic", "key": 100, "value": {"ID": 100, "F1": "newblah", "F2": 150}, "timestamp": 16000}, + {"topic": "left_topic", "key": 90, "value": {"ID": 90, "NAME": "ninety", "VALUE": 90}, "timestamp": 17000}, + {"topic": "left_topic", "key": 0, "value": {"ID": 0, "NAME": "bar", "VALUE": 99}, "timestamp": 30000} ] } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/test-runner/correct/join/output.json b/ksql-functional-tests/src/test/resources/test-runner/correct/join/output.json index bafa77d8f5dc..0015dc0ac627 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/join/output.json +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/join/output.json @@ -1,13 +1,13 @@ { "outputs": [ - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, - {"topic": "LEFT_OUTER_JOIN", "key": "10", "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, - {"topic": "LEFT_OUTER_JOIN", "key": "90", "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, - {"topic": "LEFT_OUTER_JOIN", "key": "0", "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, 
"timestamp": 30000}, - {"topic": "FOO", "key": "90", "value": {"T_ID": 90, "NAME": "ninety"}, "timestamp": 17000}, - {"topic": "BAR", "key": "90", "value": {"T_ID": 90, "NAME": "ninety"}, "timestamp": 17000} + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": null, "F2": null}, "timestamp": 0}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "zero", "VALUE": 0, "F1": "blah", "F2": 50}, "timestamp": 10000}, + {"topic": "LEFT_OUTER_JOIN", "key": 10, "value": {"T_ID": 10, "NAME": "100", "VALUE": 5, "F1": null, "F2": null}, "timestamp": 11000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "blah", "F2": 50}, "timestamp": 13000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "foo", "VALUE": 100, "F1": "a", "F2": 10}, "timestamp": 15000}, + {"topic": "LEFT_OUTER_JOIN", "key": 90, "value": {"T_ID": 90, "NAME": "ninety", "VALUE": 90, "F1": null, "F2": null}, "timestamp": 17000}, + {"topic": "LEFT_OUTER_JOIN", "key": 0, "value": {"T_ID": 0, "NAME": "bar", "VALUE": 99, "F1": null, "F2": null}, "timestamp": 30000}, + {"topic": "FOO", "key": 90, "value": {"T_ID": 90, "NAME": "ninety"}, "timestamp": 17000}, + {"topic": "BAR", "key": 90, "value": {"T_ID": 90, "NAME": "ninety"}, "timestamp": 17000} ] } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/test-runner/correct/join/statements.sql b/ksql-functional-tests/src/test/resources/test-runner/correct/join/statements.sql index a8eae55c0b16..1b5bbb102f59 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/join/statements.sql +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/join/statements.sql @@ -1,5 +1,5 @@ -CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID'); -CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID'); +CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID'); +CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID'); CREATE STREAM LEFT_OUTER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t left join TEST_STREAM tt WITHIN 11 seconds ON t.id = tt.id; CREATE STREAM foo AS SELECT t_id, name FROM LEFT_OUTER_JOIN WHERE t_id = 90; CREATE STREAM bar AS SELECT * FROM foo; \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/input.json b/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/input.json index e85a14288d1f..d89062c20df6 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/input.json +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/input.json @@ -1,7 +1,7 @@ { "inputs": [ - {"topic": "test_topic", "key": "0", "value": "0,zero,0.0"}, - {"topic": "test_topic", "key": "100", "value": "100,100,0.0"}, - {"topic": "test_topic", "key": "101", "value": "101,101,0.0"} + {"topic": "test_topic", "key": 0, "value": "0,zero,0.0"}, + {"topic": "test_topic", "key": 100, "value": "100,100,0.0"}, + {"topic": "test_topic", "key": 101, "value": "101,101,0.0"} ] } \ No newline at end of file diff --git 
a/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/output.json b/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/output.json index fed4ce67fa81..e3713a5b6f18 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/output.json +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/output.json @@ -1,5 +1,5 @@ { "outputs": [ - {"topic": "S1", "key": "101", "value": "101"} + {"topic": "S1", "key": 101, "value": "101"} ] } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/statements.sql b/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/statements.sql index 7ad0ab008a4f..7cf880bc6c7a 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/statements.sql +++ b/ksql-functional-tests/src/test/resources/test-runner/correct/simple_project_filter/statements.sql @@ -1,2 +1,2 @@ -CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID'); +CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID'); CREATE STREAM S1 as SELECT name FROM test where id > 100; \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/test-runner/incorrect/expected_mismatch/statements.sql b/ksql-functional-tests/src/test/resources/test-runner/incorrect/expected_mismatch/statements.sql index 7ad0ab008a4f..77dc9d3e24a4 100644 --- a/ksql-functional-tests/src/test/resources/test-runner/incorrect/expected_mismatch/statements.sql +++ b/ksql-functional-tests/src/test/resources/test-runner/incorrect/expected_mismatch/statements.sql @@ -1,2 +1,2 @@ -CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID'); +CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED'); CREATE STREAM S1 as SELECT name FROM test where id > 100; \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/testing_tool_tests.json b/ksql-functional-tests/src/test/resources/testing_tool_tests.json index 1558f499d161..ca042017f2c5 100644 --- a/ksql-functional-tests/src/test/resources/testing_tool_tests.json +++ b/ksql-functional-tests/src/test/resources/testing_tool_tests.json @@ -6,7 +6,7 @@ { "name": "project and filter", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE double) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE STREAM S1 as SELECT name FROM test where id > 100;" ], "inputs": [ @@ -59,8 +59,8 @@ { "name": "stream stream left join", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", - "CREATE STREAM TEST_STREAM (ID bigint, F1 varchar, F2 bigint) WITH (kafka_topic='right_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='left_topic', value_format='JSON', key='ID');", + "CREATE STREAM TEST_STREAM (ROWKEY BIGINT KEY, ID bigint, F1 varchar, F2 bigint) WITH 
(kafka_topic='right_topic', value_format='JSON', key='ID');", "CREATE STREAM LEFT_OUTER_JOIN as SELECT t.id, name, value, f1, f2 FROM test t left join TEST_STREAM tt WITHIN 11 seconds ON t.id = tt.id;", "CREATE STREAM foo AS SELECT t_id, name FROM LEFT_OUTER_JOIN WHERE t_id = 90;", "CREATE STREAM bar AS SELECT * FROM foo;" @@ -96,7 +96,7 @@ { "name": "max tumbling", "statements": [ - "CREATE STREAM TEST (ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", + "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID bigint, NAME varchar, VALUE bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED', key='ID');", "CREATE TABLE S2 as SELECT id, max(value) FROM test WINDOW TUMBLING (SIZE 30 SECONDS) group by id;", "CREATE TABLE foo AS SELECT id from s2 where id = 100;" ], diff --git a/ksql-metastore/src/test/java/io/confluent/ksql/util/MetaStoreFixture.java b/ksql-metastore/src/test/java/io/confluent/ksql/util/MetaStoreFixture.java index fb4e19b504d4..c5504080ad66 100644 --- a/ksql-metastore/src/test/java/io/confluent/ksql/util/MetaStoreFixture.java +++ b/ksql-metastore/src/test/java/io/confluent/ksql/util/MetaStoreFixture.java @@ -149,6 +149,7 @@ public static MutableMetaStore getNewMetaStore( .build(); final LogicalSchema ordersSchema = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("ORDERTIME"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ORDERID"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING) @@ -265,6 +266,7 @@ public static MutableMetaStore getNewMetaStore( final LogicalSchema sensorReadingsSchema = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("ID"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("SENSOR_NAME"), SqlTypes.STRING) .valueColumn(ColumnName.of("ARR1"), SqlTypes.array(SqlTypes.BIGINT)) diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java index be70d4963140..e02baf1ffb6c 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java @@ -24,7 +24,6 @@ import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlConfig; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -34,7 +33,6 @@ import java.util.Optional; import java.util.Properties; import java.util.stream.Collectors; - import org.apache.kafka.common.utils.Utils; public final class ListPropertiesExecutor { diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java index 8864b90c6342..4574f2f2bca4 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java @@ -138,7 +138,7 @@ public void shouldHandleJsonWithSchemas() { + " WITH (kafka_topic='" + JSON_TOPIC + "', value_format='json');\n" + "\n" + "CREATE TABLE T (ORDERTIME BIGINT) " - + " WITH (kafka_topic='" + JSON_TOPIC + "', value_format='json', key='ORDERTIME');\n" + + " 
WITH (kafka_topic='" + JSON_TOPIC + "', value_format='json');\n" + "\n" + "SET 'auto.offset.reset' = 'earliest';" + "\n" @@ -179,7 +179,7 @@ public void shouldHandleAvroWithSchemas() { + " WITH (kafka_topic='" + AVRO_TOPIC + "', value_format='avro');\n" + "\n" + "CREATE TABLE T (ORDERTIME BIGINT) " - + " WITH (kafka_topic='" + AVRO_TOPIC + "', value_format='avro', key='ORDERTIME');\n" + + " WITH (kafka_topic='" + AVRO_TOPIC + "', value_format='avro');\n" + "\n" + "SET 'auto.offset.reset' = 'earliest';" + "\n" diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java index facc873f2875..9876553979c7 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java @@ -51,7 +51,6 @@ import io.confluent.ksql.services.FakeKafkaTopicClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.services.TestServiceContext; -import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.PersistentQueryMetadata; import java.time.Duration; diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java index 208c97191940..d6c517d519ad 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java @@ -29,14 +29,13 @@ import io.confluent.ksql.rest.entity.PropertiesList.Property; import io.confluent.ksql.rest.server.TemporaryEngine; import io.confluent.ksql.util.KsqlConfig; +import java.util.HashMap; +import java.util.Map; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.junit.MockitoJUnitRunner; -import java.util.HashMap; -import java.util.Map; - @RunWith(MockitoJUnitRunner.class) public class ListPropertiesExecutorTest { diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java index 10af99d142a6..4b13e92df8df 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -1820,8 +1820,7 @@ public void shouldFailIfCreateExistingSourceTable() { // When: final String createSql = - "CREATE TABLE SOURCE (val int) " - + "WITH (kafka_topic='topic2', value_format='json', key='val');"; + "CREATE TABLE SOURCE (val int) WITH (kafka_topic='topic2', value_format='json');"; makeSingleRequest(createSql, CommandStatusEntity.class); } @@ -2151,7 +2150,7 @@ private void givenSource( SourceName.of(sourceName), schema, SerdeOption.none(), - KeyField.of(schema.value().get(0).ref()), + KeyField.none(), Optional.empty(), false, ksqlTopic @@ -2164,7 +2163,7 @@ private void givenSource( SourceName.of(sourceName), schema, SerdeOption.none(), - KeyField.of(schema.value().get(0).ref()), + KeyField.none(), Optional.empty(), false, ksqlTopic diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java 
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index 8f4ee101ae2b..fb4a23ffe9c4 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -15,9 +15,9 @@ package io.confluent.ksql.rest.server.resources.streaming; +import static io.confluent.ksql.rest.Errors.ERROR_CODE_FORBIDDEN_KAFKA_ACCESS; import static io.confluent.ksql.rest.entity.KsqlErrorMessageMatchers.errorCode; import static io.confluent.ksql.rest.entity.KsqlErrorMessageMatchers.errorMessage; -import static io.confluent.ksql.rest.Errors.ERROR_CODE_FORBIDDEN_KAFKA_ACCESS; import static io.confluent.ksql.rest.server.resources.KsqlRestExceptionMatchers.exceptionErrorMessage; import static io.confluent.ksql.rest.server.resources.KsqlRestExceptionMatchers.exceptionStatusCode; import static javax.ws.rs.core.Response.Status.FORBIDDEN; diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java index 62bbe63a008d..501457dbd05b 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java @@ -18,7 +18,6 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; - import java.util.Collections; import java.util.List; import java.util.Objects; diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java index f5f5ad0ac077..de70d02321ee 100644 --- a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java @@ -16,7 +16,6 @@ package io.confluent.ksql.rest; import static javax.ws.rs.core.Response.Status.FORBIDDEN; - import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -25,6 +24,7 @@ import io.confluent.ksql.rest.entity.KsqlErrorMessage; import io.confluent.ksql.util.KsqlException; +import javax.ws.rs.core.Response; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.junit.Before; import org.junit.Test; @@ -32,8 +32,6 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import javax.ws.rs.core.Response; - @RunWith(MockitoJUnitRunner.class) public class ErrorsTest { diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java index ba4c587c3caa..5027bbd9b229 100644 --- a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java @@ -16,13 +16,12 @@ package io.confluent.ksql.rest.entity; import com.google.common.testing.EqualsTester; +import java.util.Collections; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import java.util.Collections; - @RunWith(MockitoJUnitRunner.class) public class SourceDescriptionTest { From 
6e558dafa2ce277f4ab0234a5a38cffd6416c184 Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Thu, 19 Dec 2019 12:04:51 -0800 Subject: [PATCH 051/123] feat: add support for inline struct creation (#4120) --- docs/developer-guide/syntax-reference.rst | 7 ++ .../rewrite/ExpressionTreeRewriter.java | 12 ++ .../rewrite/ExpressionTreeRewriterTest.java | 24 +++- .../ksql/execution/codegen/CodeGenRunner.java | 14 +++ .../ksql/execution/codegen/CodeGenSpec.java | 61 +++++++++- .../ksql/execution/codegen/CodeGenUtil.java | 5 + .../execution/codegen/SqlToJavaVisitor.java | 37 +++++- .../formatter/ExpressionFormatter.java | 14 +++ .../tree/CreateStructExpression.java | 114 ++++++++++++++++++ .../expression/tree/ExpressionVisitor.java | 2 + .../tree/TraversalExpressionVisitor.java | 6 + .../tree/VisitParentExpressionVisitor.java | 5 + .../execution/util/ExpressionTypeManager.java | 15 +++ .../codegen/SqlToJavaVisitorTest.java | 25 +++- .../formatter/ExpressionFormatterTest.java | 13 ++ .../util/ExpressionTypeManagerTest.java | 28 +++++ .../query-validation-tests/create-struct.json | 90 ++++++++++++++ .../insert-values.json | 36 ++++++ .../io/confluent/ksql/parser/SqlBase.g4 | 28 +++-- .../io/confluent/ksql/parser/AstBuilder.java | 19 +++ .../schema/ksql/DefaultSqlValueCoercer.java | 33 ++++- .../ksql/DefaultSqlValueCoercerTest.java | 45 ++++++- 22 files changed, 602 insertions(+), 31 deletions(-) create mode 100644 ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java create mode 100644 ksql-functional-tests/src/test/resources/query-validation-tests/create-struct.json diff --git a/docs/developer-guide/syntax-reference.rst b/docs/developer-guide/syntax-reference.rst index 8632b971af5e..90a7e6ba9155 100644 --- a/docs/developer-guide/syntax-reference.rst +++ b/docs/developer-guide/syntax-reference.rst @@ -71,6 +71,13 @@ encapsulate a street address and a postal code: orderId BIGINT, address STRUCT<street VARCHAR, zip INTEGER>) WITH (...); + +You can create a struct in a query by specifying the names of the fields +and the expressions that construct their values, separated by ``,`` and wrapped in +parentheses after the ``STRUCT`` keyword. For example: ``SELECT STRUCT(name := col0, ageInDogYears := col1*7) AS dogs FROM animals`` +creates a schema ``DOGS STRUCT<NAME VARCHAR, AGEINDOGYEARS INTEGER>``, assuming ``col0`` was a string and +``col1`` was an integer. + Access the fields in a ``STRUCT`` by using the dereference operator (``->``): ..
code:: sql diff --git a/ksql-engine/src/main/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriter.java b/ksql-engine/src/main/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriter.java index 4d36fef4e267..a28ff3302297 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriter.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriter.java @@ -16,6 +16,7 @@ package io.confluent.ksql.engine.rewrite; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; import io.confluent.ksql.execution.expression.tree.ArithmeticBinaryExpression; import io.confluent.ksql.execution.expression.tree.ArithmeticUnaryExpression; import io.confluent.ksql.execution.expression.tree.BetweenPredicate; @@ -23,6 +24,8 @@ import io.confluent.ksql.execution.expression.tree.Cast; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression.Field; import io.confluent.ksql.execution.expression.tree.DecimalLiteral; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.DoubleLiteral; @@ -185,6 +188,15 @@ public Expression visitSubscriptExpression( return new SubscriptExpression(node.getLocation(), base, index); } + @Override + public Expression visitStructExpression(CreateStructExpression node, C context) { + final Builder fields = ImmutableList.builder(); + for (Field field : node.getFields()) { + fields.add(new Field(field.getName(), rewriter.apply(field.getValue(), context))); + } + return new CreateStructExpression(node.getLocation(), fields.build()); + } + @Override public Expression visitComparisonExpression( final ComparisonExpression node, diff --git a/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriterTest.java b/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriterTest.java index 669362819a50..47e53d7a0ce0 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriterTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriterTest.java @@ -32,7 +32,10 @@ import io.confluent.ksql.execution.expression.tree.BetweenPredicate; import io.confluent.ksql.execution.expression.tree.BooleanLiteral; import io.confluent.ksql.execution.expression.tree.Cast; +import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression.Field; import io.confluent.ksql.execution.expression.tree.DecimalLiteral; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.DoubleLiteral; @@ -48,9 +51,6 @@ import io.confluent.ksql.execution.expression.tree.LongLiteral; import io.confluent.ksql.execution.expression.tree.NotExpression; import io.confluent.ksql.execution.expression.tree.NullLiteral; -import io.confluent.ksql.name.ColumnName; -import io.confluent.ksql.schema.ksql.ColumnRef; -import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import 
io.confluent.ksql.execution.expression.tree.SearchedCaseExpression; import io.confluent.ksql.execution.expression.tree.SimpleCaseExpression; import io.confluent.ksql.execution.expression.tree.StringLiteral; @@ -61,11 +61,13 @@ import io.confluent.ksql.execution.expression.tree.WhenClause; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.parser.KsqlParserTestUtil; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.SelectItem; import io.confluent.ksql.parser.tree.SingleColumn; +import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.types.SqlPrimitiveType; import io.confluent.ksql.util.MetaStoreFixture; import java.util.List; @@ -530,6 +532,22 @@ public void shouldRewriteSubscriptExpression() { assertThat(rewritten, equalTo(new SubscriptExpression(parsed.getLocation(), expr1, expr2))); } + @Test + public void shouldRewriteStructExpression() { + // Given: + final CreateStructExpression parsed = parseExpression("STRUCT(FOO := 'foo', BAR := col4[1])"); + final Expression fooVal = parsed.getFields().stream().filter(f -> f.getName().equals("FOO")).findFirst().get().getValue(); + final Expression barVal = parsed.getFields().stream().filter(f -> f.getName().equals("BAR")).findFirst().get().getValue(); + when(processor.apply(fooVal, context)).thenReturn(expr1); + when(processor.apply(barVal, context)).thenReturn(expr2); + + // When: + final Expression rewritten = expressionRewriter.rewrite(parsed, context); + + // Then: + assertThat(rewritten, equalTo(new CreateStructExpression(ImmutableList.of(new Field("FOO", expr1), new Field("BAR", expr2))))); + } + @Test public void shouldRewriteSubscriptExpressionUsingPlugin() { // Given: diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java index 8ac7265b6672..f912a874c9cc 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java @@ -16,6 +16,7 @@ package io.confluent.ksql.execution.codegen; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.Expression; import io.confluent.ksql.execution.expression.tree.FunctionCall; @@ -40,6 +41,8 @@ import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; +import javax.annotation.Nullable; +import org.apache.kafka.connect.data.Schema; import org.codehaus.commons.compiler.CompileException; import org.codehaus.commons.compiler.CompilerFactoryFactory; import org.codehaus.commons.compiler.IExpressionEvaluator; @@ -175,6 +178,17 @@ public Void visitSubscriptExpression(SubscriptExpression node, Void context) { return null; } + @Override + public Void visitStructExpression(CreateStructExpression exp, @Nullable Void context) { + exp.getFields().forEach(val -> process(val.getValue(), context)); + final Schema schema = SchemaConverters + .sqlToConnectConverter() + .toConnectSchema(expressionTypeManager.getExpressionSqlType(exp)); + + spec.addStructSchema(exp, schema); + return null; + } + 
@Override public Void visitColumnReference(ColumnReferenceExp node, Void context) { addRequiredColumn(node.getReference()); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java index 11047fd38ee3..ee5a5b9bd334 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java @@ -22,6 +22,8 @@ import com.google.common.collect.ImmutableMap; import com.google.errorprone.annotations.Immutable; import io.confluent.ksql.GenericRow; +import io.confluent.ksql.execution.expression.formatter.ExpressionFormatter; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; import io.confluent.ksql.function.udf.Kudf; import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.schema.ksql.ColumnRef; @@ -30,6 +32,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.kafka.connect.data.Schema; @Immutable public final class CodeGenSpec { @@ -37,14 +40,18 @@ public final class CodeGenSpec { private final ImmutableList arguments; private final ImmutableMap columnToCodeName; private final ImmutableListMultimap functionToCodeName; + private final ImmutableMap structToCodeName; private CodeGenSpec( - ImmutableList arguments, ImmutableMap columnToCodeName, - ImmutableListMultimap functionToCodeName + ImmutableList arguments, + ImmutableMap columnToCodeName, + ImmutableListMultimap functionToCodeName, + ImmutableMap structToCodeName ) { this.arguments = arguments; this.columnToCodeName = columnToCodeName; this.functionToCodeName = functionToCodeName; + this.structToCodeName = structToCodeName; } public String[] argumentNames() { @@ -77,14 +84,27 @@ public void resolve(GenericRow row, Object[] parameters) { } } + public String getStructSchemaName(CreateStructExpression createStructExpression) { + final String schemaName = structToCodeName.get(createStructExpression); + if (schemaName == null) { + throw new KsqlException( + "Cannot get name for " + ExpressionFormatter.formatExpression(createStructExpression) + ); + } + return schemaName; + } + static class Builder { private final ImmutableList.Builder argumentBuilder = ImmutableList.builder(); private final Map columnRefToName = new HashMap<>(); private final ImmutableListMultimap.Builder functionNameBuilder = ImmutableListMultimap.builder(); + private final ImmutableMap.Builder structToSchemaName = + ImmutableMap.builder(); private int argumentCount = 0; + private int structSchemaCount = 0; void addParameter( final ColumnRef columnRef, @@ -102,11 +122,18 @@ void addFunction(FunctionName functionName, Kudf function) { argumentBuilder.add(new FunctionArgumentSpec(codeName, function.getClass(), function)); } + void addStructSchema(CreateStructExpression struct, Schema schema) { + final String structSchemaName = CodeGenUtil.schemaName(structSchemaCount++); + structToSchemaName.put(struct, structSchemaName); + argumentBuilder.add(new SchemaArgumentSpec(structSchemaName, schema)); + } + CodeGenSpec build() { return new CodeGenSpec( argumentBuilder.build(), ImmutableMap.copyOf(columnRefToName), - functionNameBuilder.build() + functionNameBuilder.build(), + structToSchemaName.build() ); } } @@ -208,4 +235,32 @@ public String toString() { + '}'; } } + + @Immutable + public static final class SchemaArgumentSpec extends BaseArgumentSpec { + + private final Schema schema; 
+ + SchemaArgumentSpec( + String name, + Schema schema + ) { + super(name, Schema.class); + this.schema = requireNonNull(schema, "schema"); + } + + @Override + public Object resolve(GenericRow value) { + return schema; + } + + @Override + public String toString() { + return "StructSchemaArgumentSpec{" + + "name='" + name() + '\'' + + ", type=" + type() + + ", schema=" + schema + + '}'; + } + } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenUtil.java index 1faeae171ba3..8279121166f1 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenUtil.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenUtil.java @@ -20,6 +20,7 @@ public final class CodeGenUtil { private static final String PARAM_NAME_PREFIX = "var"; + private static final String SCHEMA_NAME_PREFIX = "schema"; private CodeGenUtil() { } @@ -28,6 +29,10 @@ public static String paramName(int index) { return PARAM_NAME_PREFIX + index; } + public static String schemaName(int index) { + return SCHEMA_NAME_PREFIX + index; + } + public static String functionName(FunctionName fun, int index) { return fun.name() + "_" + index; } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java index c233ef1846a5..61821035fe45 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java @@ -31,6 +31,8 @@ import io.confluent.ksql.execution.expression.tree.Cast; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression.Field; import io.confluent.ksql.execution.expression.tree.DecimalLiteral; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.DoubleLiteral; @@ -87,6 +89,8 @@ import org.apache.commons.lang3.StringEscapeUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.text.StrSubstitutor; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.connect.data.Struct; public class SqlToJavaVisitor { @@ -104,7 +108,9 @@ public class SqlToJavaVisitor { DecimalUtil.class.getCanonicalName(), BigDecimal.class.getCanonicalName(), MathContext.class.getCanonicalName(), - RoundingMode.class.getCanonicalName() + RoundingMode.class.getCanonicalName(), + SchemaBuilder.class.getCanonicalName(), + Struct.class.getCanonicalName() ); private static final Map DECIMAL_OPERATOR_NAME = ImmutableMap @@ -133,6 +139,7 @@ public class SqlToJavaVisitor { private final ExpressionTypeManager expressionTypeManager; private final Function funNameToCodeName; private final Function colRefToCodeName; + private final Function structToCodeName; public static SqlToJavaVisitor of( LogicalSchema schema, FunctionRegistry functionRegistry, CodeGenSpec spec @@ -145,14 +152,16 @@ public static SqlToJavaVisitor of( name -> { int index = nameCounts.add(name, 1); return spec.getUniqueNameForFunction(name, index); - } - ); + }, + spec::getStructSchemaName); } @VisibleForTesting SqlToJavaVisitor( 
LogicalSchema schema, FunctionRegistry functionRegistry, - Function colRefToCodeName, Function funNameToCodeName + Function colRefToCodeName, + Function funNameToCodeName, + Function structToCodeName ) { this.expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry); @@ -160,6 +169,7 @@ public static SqlToJavaVisitor of( this.functionRegistry = Objects.requireNonNull(functionRegistry, "functionRegistry"); this.colRefToCodeName = Objects.requireNonNull(colRefToCodeName, "colRefToCodeName"); this.funNameToCodeName = Objects.requireNonNull(funNameToCodeName, "funNameToCodeName"); + this.structToCodeName = Objects.requireNonNull(structToCodeName, "structToCodeName"); } public String process(Expression expression) { @@ -712,6 +722,25 @@ public Pair visitSubscriptExpression(SubscriptExpression node, } } + @Override + public Pair visitStructExpression(CreateStructExpression node, Void context) { + final String schemaName = structToCodeName.apply(node); + final StringBuilder struct = new StringBuilder("new Struct(").append(schemaName).append(")"); + for (Field field : node.getFields()) { + struct.append(".put(") + .append('"') + .append(field.getName()) + .append('"') + .append(",") + .append(process(field.getValue(), context).getLeft()) + .append(")"); + } + return new Pair<>( + "((Struct)" + struct.toString() + ")", + expressionTypeManager.getExpressionSqlType(node) + ); + } + @Override public Pair visitBetweenPredicate(BetweenPredicate node, Void context) { Pair value = process(node.getValue(), context); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatter.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatter.java index 505a50d149d0..95b969512aa3 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatter.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatter.java @@ -24,6 +24,7 @@ import io.confluent.ksql.execution.expression.tree.Cast; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; import io.confluent.ksql.execution.expression.tree.DecimalLiteral; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.DoubleLiteral; @@ -52,6 +53,7 @@ import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.util.KsqlConstants; import java.util.List; +import java.util.stream.Collectors; public final class ExpressionFormatter { @@ -100,6 +102,18 @@ public String visitSubscriptExpression(SubscriptExpression node, Context context + "[" + process(node.getIndex(), context) + "]"; } + @Override + public String visitStructExpression(CreateStructExpression exp, Context context) { + return exp + .getFields() + .stream() + .map(struct -> + context.formatOptions.escape(struct.getName()) + + ":=" + + process(struct.getValue(), context)) + .collect(Collectors.joining(", ", "STRUCT(", ")")); + } + @Override public String visitLongLiteral(LongLiteral node, Context context) { return Long.toString(node.getValue()); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java new file mode 
100644 index 000000000000..925e49638c2b --- /dev/null +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java @@ -0,0 +1,114 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.execution.expression.tree; + +import com.google.common.collect.ImmutableList; +import com.google.errorprone.annotations.Immutable; +import io.confluent.ksql.parser.NodeLocation; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +@Immutable +public class CreateStructExpression extends Expression { + + private final ImmutableList<Field> fields; + + public CreateStructExpression( + final List<Field> fields + ) { + this(Optional.empty(), fields); + } + + public CreateStructExpression( + final Optional<NodeLocation> location, + final List<Field> fields + ) { + super(location); + this.fields = ImmutableList.copyOf(fields); + } + + @Override + protected <R, C> R accept(ExpressionVisitor<R, C> visitor, C context) { + return visitor.visitStructExpression(this, context); + } + + public ImmutableList<Field> getFields() { + return fields; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CreateStructExpression that = (CreateStructExpression) o; + return Objects.equals(fields, that.fields); + } + + @Override + public int hashCode() { + return Objects.hash(fields); + } + + public static class Field { + private final String name; + private final Expression value; + + public Field(String name, Expression value) { + this.name = Objects.requireNonNull(name, "name"); + this.value = Objects.requireNonNull(value, "value"); + } + + public String getName() { + return name; + } + + public Expression getValue() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Field field = (Field) o; + return Objects.equals(name, field.name) + && Objects.equals(value, field.value); + } + + @Override + public int hashCode() { + return Objects.hash(name, value); + } + + @Override + public String toString() { + return "Field{" + + "name='" + name + '\'' + + ", value=" + value + + '}'; + } + } + +} diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ExpressionVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ExpressionVisitor.java index 8d877956be52..d60c7d8dac52 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ExpressionVisitor.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ExpressionVisitor.java @@ -73,6 +73,8 @@ default R process(Expression node, @Nullable C context) { R visitSubscriptExpression(SubscriptExpression exp, @Nullable C context); + R visitStructExpression(CreateStructExpression exp, @Nullable C context); + R visitTimeLiteral(TimeLiteral exp, @Nullable C context); R
visitTimestampLiteral(TimestampLiteral exp, @Nullable C context); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TraversalExpressionVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TraversalExpressionVisitor.java index 9aaea11d54c8..08ceef9e0098 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TraversalExpressionVisitor.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TraversalExpressionVisitor.java @@ -50,6 +50,12 @@ public Void visitSubscriptExpression(SubscriptExpression node, C context) { return null; } + @Override + public Void visitStructExpression(CreateStructExpression node, C context) { + node.getFields().forEach(field -> process(field.getValue(), context)); + return null; + } + @Override public Void visitComparisonExpression(ComparisonExpression node, C context) { process(node.getLeft(), context); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/VisitParentExpressionVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/VisitParentExpressionVisitor.java index 498fe6ed9945..f5c48c28d46a 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/VisitParentExpressionVisitor.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/VisitParentExpressionVisitor.java @@ -166,6 +166,11 @@ public R visitSubscriptExpression(SubscriptExpression node, C context) { return visitExpression(node, context); } + @Override + public R visitStructExpression(CreateStructExpression node, C context) { + return visitExpression(node, context); + } + @Override public R visitLongLiteral(LongLiteral node, C context) { return visitLiteral(node, context); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java index 0e1f91a3beaf..3e2c117266ec 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java @@ -23,6 +23,7 @@ import io.confluent.ksql.execution.expression.tree.Cast; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; import io.confluent.ksql.execution.expression.tree.DecimalLiteral; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.DoubleLiteral; @@ -61,6 +62,7 @@ import io.confluent.ksql.schema.ksql.types.SqlArray; import io.confluent.ksql.schema.ksql.types.SqlMap; import io.confluent.ksql.schema.ksql.types.SqlStruct; +import io.confluent.ksql.schema.ksql.types.SqlStruct.Builder; import io.confluent.ksql.schema.ksql.types.SqlType; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.util.KsqlException; @@ -320,6 +322,19 @@ public Void visitSubscriptExpression( return null; } + @Override + public Void visitStructExpression(CreateStructExpression exp, ExpressionTypeContext context) { + final Builder builder = SqlStruct.builder(); + + for (CreateStructExpression.Field field : exp.getFields()) { + process(field.getValue(), context); + builder.field(field.getName(), context.getSqlType()); + } + + 
context.setSqlType(builder.build()); + return null; + } + @Override public Void visitFunctionCall(FunctionCall node, ExpressionTypeContext expressionTypeContext) { if (functionRegistry.isAggregate(node.getName().name())) { diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java index fc94f7702ffc..d9e552db1455 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java @@ -38,6 +38,8 @@ import io.confluent.ksql.execution.expression.tree.Cast; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression.Field; import io.confluent.ksql.execution.expression.tree.DoubleLiteral; import io.confluent.ksql.execution.expression.tree.Expression; import io.confluent.ksql.execution.expression.tree.FunctionCall; @@ -93,11 +95,13 @@ public class SqlToJavaVisitorTest { @Before public void init() { AtomicInteger funCounter = new AtomicInteger(); + AtomicInteger structCounter = new AtomicInteger(); sqlToJavaVisitor = new SqlToJavaVisitor( SCHEMA, functionRegistry, ref -> ref.aliasedFieldName().replace(".", "_"), - name -> name.name() + "_" + funCounter.getAndIncrement() + name -> name.name() + "_" + funCounter.getAndIncrement(), + struct -> "schema" + structCounter.getAndIncrement() ); } @@ -140,6 +144,25 @@ public void shouldProcessMapExpressionCorrectly() { assertThat(javaExpression, equalTo("((Double) ((java.util.Map)TEST1_COL5).get(\"key1\"))")); } + @Test + public void shouldProcessStructExpressionCorrectly() { + // Given: + Expression expression = new CreateStructExpression( + ImmutableList.of( + new Field("col1", new StringLiteral("foo")), + new Field("col2", new SubscriptExpression(MAPCOL, new StringLiteral("key1"))) + ) + ); + + // When: + String javaExpression = sqlToJavaVisitor.process(expression); + + // Then: + assertThat( + javaExpression, + equalTo("((Struct)new Struct(schema0).put(\"col1\",\"foo\").put(\"col2\",((Double) ((java.util.Map)TEST1_COL5).get(\"key1\"))))")); + } + @Test public void shouldCreateCorrectCastJavaExpression() { // Given: diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatterTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatterTest.java index ff55537ffb35..c44e684d622b 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatterTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatterTest.java @@ -27,12 +27,15 @@ import io.confluent.ksql.execution.expression.tree.Cast; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression.Field; import io.confluent.ksql.execution.expression.tree.DecimalLiteral; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import 
io.confluent.ksql.execution.expression.tree.DoubleLiteral; import io.confluent.ksql.execution.expression.tree.FunctionCall; import io.confluent.ksql.execution.expression.tree.InListExpression; import io.confluent.ksql.execution.expression.tree.InPredicate; +import io.confluent.ksql.execution.expression.tree.IntegerLiteral; import io.confluent.ksql.execution.expression.tree.IsNotNullPredicate; import io.confluent.ksql.execution.expression.tree.IsNullPredicate; import io.confluent.ksql.execution.expression.tree.LikePredicate; @@ -84,6 +87,16 @@ public void shouldFormatSubscriptExpression() { equalTo("'abc'[3.0]")); } + @Test + public void shouldFormatStructExpression() { + assertThat(ExpressionFormatter.formatExpression(new CreateStructExpression( + ImmutableList.of( + new Field("foo", new StringLiteral("abc")), + new Field("bar", new SubscriptExpression(new ColumnReferenceExp(ColumnRef.withoutSource(ColumnName.of("abc"))), new IntegerLiteral(1)))) + ), FormatOptions.of(exp -> exp.equals("foo"))), + equalTo("STRUCT(`foo`:='abc', bar:=abc[1])")); + } + @Test public void shouldFormatLongLiteral() { assertThat(ExpressionFormatter.formatExpression(new LongLiteral(1)), equalTo("1")); diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java index 8b72f633f4d2..52937d157dd6 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java @@ -38,6 +38,8 @@ import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; import io.confluent.ksql.execution.expression.tree.ComparisonExpression.Type; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression.Field; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.Expression; import io.confluent.ksql.execution.expression.tree.FunctionCall; @@ -316,6 +318,32 @@ public void shouldFailIfThereIsInvalidFieldNameInStructCall() { expressionTypeManager.getExpressionSqlType(expression); } + @Test + public void shouldEvaluateTypeForStructExpression() { + // Given: + LogicalSchema schema = LogicalSchema.builder() + .valueColumn(TEST1, COL0, SqlTypes.array(SqlTypes.INTEGER)) + .build(); + expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry); + + Expression exp = new CreateStructExpression(ImmutableList.of( + new Field("field1", new StringLiteral("foo")), + new Field("field2", new ColumnReferenceExp(ColumnRef.of(TEST1, COL0))), + new Field("field3", new CreateStructExpression(ImmutableList.of())) + )); + + // When: + final SqlType sqlType = expressionTypeManager.getExpressionSqlType(exp); + + // Then: + assertThat(sqlType, + is(SqlTypes.struct() + .field("field1", SqlTypes.STRING) + .field("field2", SqlTypes.array(SqlTypes.INTEGER)) + .field("field3", SqlTypes.struct().build()) + .build())); + } + @Test public void shouldEvaluateTypeForStructDereferenceInArray() { // Given: diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/create-struct.json b/ksql-functional-tests/src/test/resources/query-validation-tests/create-struct.json new file mode 100644 index 000000000000..46f1f8b5f8af --- /dev/null +++ 
b/ksql-functional-tests/src/test/resources/query-validation-tests/create-struct.json @@ -0,0 +1,90 @@ +{ + "comments": [ + "Tests covering inline struct creation." + ], + "tests": [ + { + "name": "basic struct creation", + "statements": [ + "CREATE STREAM INPUT (col1 VARCHAR, col2 ARRAY<VARCHAR>) WITH (kafka_topic='test', value_format='JSON');", + "CREATE STREAM BIG_STRUCT AS SELECT STRUCT(F1 := COL1, F2 := COL2, F3 := SUBSTRING(col1, 2)) AS s FROM INPUT;" + ], + "inputs": [ + { "topic": "test", "value": {"col1": "foo", "col2": ["bar"]}} + ], + "outputs": [ + { "topic": "BIG_STRUCT", "value": {"S": {"F1": "foo", "F2": ["bar"], "F3": "oo"}}} + ], + "post": { + "sources": [ + {"name": "BIG_STRUCT", "type": "stream", "schema": "ROWKEY STRING KEY, S STRUCT<F1 STRING, F2 ARRAY<STRING>, F3 STRING>"} + ] + } + }, + { + "name": "nested struct creation", + "statements": [ + "CREATE STREAM INPUT (col1 VARCHAR) WITH (kafka_topic='test', value_format='JSON');", + "CREATE STREAM BIG_STRUCT AS SELECT STRUCT(f1 := STRUCT(c1 := col1)) AS s FROM INPUT;" + ], + "inputs": [ + { "topic": "test", "value": {"col1": "foo"}} + ], + "outputs": [ + { "topic": "BIG_STRUCT", "value": {"S": {"F1": {"C1": "foo"}}}} + ], + "post": { + "sources": [ + {"name": "BIG_STRUCT", "type": "stream","schema": "ROWKEY STRING KEY, S STRUCT<F1 STRUCT<C1 STRING>>"} + ] + } + }, + { + "name": "quoted identifiers", + "statements": [ + "CREATE STREAM INPUT (col1 VARCHAR) WITH (kafka_topic='test', value_format='JSON');", + "CREATE STREAM BIG_STRUCT AS SELECT STRUCT(FOO := col1, `foo` := col1) AS s FROM INPUT;" + ], + "inputs": [ + { "topic": "test", "value": {"col1": "foo"}} + ], + "outputs": [ + { "topic": "BIG_STRUCT", "value": {"S": {"FOO": "foo", "foo": "foo"}}} + ], + "post": { + "sources": [ + {"name": "BIG_STRUCT", "type": "stream","schema": "ROWKEY STRING KEY, S STRUCT<FOO STRING, `foo` STRING>"} + ] + } + }, + { + "name": "empty struct creation", + "statements": [ + "CREATE STREAM INPUT (col1 VARCHAR) WITH (kafka_topic='test', value_format='JSON');", + "CREATE STREAM BIG_STRUCT AS SELECT STRUCT() AS s FROM INPUT;" + ], + "inputs": [ + { "topic": "test", "value": {"col1": "foo"}} + ], + "outputs": [ + { "topic": "BIG_STRUCT", "value": {"S": {}}} + ], + "post": { + "sources": [ + {"name": "BIG_STRUCT", "type": "stream", "schema": "ROWKEY STRING KEY, S STRUCT< >"} + ] + } + }, + { + "name": "duplicate fields", + "statements": [ + "CREATE STREAM INPUT (col1 VARCHAR) WITH (kafka_topic='test', value_format='JSON');", + "CREATE STREAM BIG_STRUCT AS SELECT STRUCT(foo := col1, foo := col1) AS s FROM INPUT;" + ], + "expectedException": { + "type": "io.confluent.ksql.util.KsqlException", + "message": "Duplicate field names found in STRUCT" + } + } + ] +} \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/insert-values.json b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/insert-values.json index 908c4819cace..9f65eb6fa8de 100644 --- a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/insert-values.json +++ b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/insert-values.json @@ -248,6 +248,42 @@ "responses": [ {"admin": {"@type": "currentStatus", "statementText": "{STATEMENT}"}} ] + }, + { + "name": "should handle struct expressions", + "statements": [ + "CREATE STREAM TEST (val STRUCT<FOO DECIMAL(2,1), `bar` ARRAY<VARCHAR>>) WITH (kafka_topic='test_topic', value_format='JSON');", + "INSERT INTO TEST (val) VALUES (STRUCT(FOO := '2.1', `bar` := AS_ARRAY('bar')));" + ], + "inputs": [ + ], + "outputs": [ + {"topic": "test_topic",
"key": null, "value": {"VAL": {"FOO": 2.1, "bar": ["bar"]}}} + ] + }, + { + "name": "should handle struct coercion", + "statements": [ + "CREATE STREAM TEST (val STRUCT, baz DOUBLE>) WITH (kafka_topic='test_topic', value_format='JSON');", + "INSERT INTO TEST (val) VALUES (STRUCT(FOO := 2, BAR := AS_ARRAY(2), BAZ := 2));" + ], + "inputs": [ + ], + "outputs": [ + {"topic": "test_topic", "key": null, "value": {"VAL": {"FOO": 2, "BAR": [2], "BAZ": 2.0}}} + ] + }, + { + "name": "should handle empty struct expressions", + "statements": [ + "CREATE STREAM TEST (val STRUCT>) WITH (kafka_topic='test_topic', value_format='JSON');", + "INSERT INTO TEST (val) VALUES (STRUCT());" + ], + "inputs": [ + ], + "outputs": [ + {"topic": "test_topic", "key": null, "value": {"VAL": {"FOO": null, "bar": null}}} + ] } ] } \ No newline at end of file diff --git a/ksql-parser/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 b/ksql-parser/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 index a87a44af7831..57775858df4e 100644 --- a/ksql-parser/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 +++ b/ksql-parser/src/main/antlr4/io/confluent/ksql/parser/SqlBase.g4 @@ -248,19 +248,20 @@ valueExpression ; primaryExpression - : literal #literalExpression - | identifier STRING #typeConstructor - | identifier '(' ASTERISK ')' #functionCall - | identifier'(' (expression (',' expression)*)? ')' #functionCall - | CASE valueExpression whenClause+ (ELSE elseExpression=expression)? END #simpleCase - | CASE whenClause+ (ELSE elseExpression=expression)? END #searchedCase - | CAST '(' expression AS type ')' #cast - | ARRAY '[' (expression (',' expression)*)? ']' #arrayConstructor - | value=primaryExpression '[' index=valueExpression ']' #subscript - | identifier #columnReference - | identifier '.' identifier #columnReference - | base=primaryExpression STRUCT_FIELD_REF fieldName=identifier #dereference - | '(' expression ')' #parenthesizedExpression + : literal #literalExpression + | identifier STRING #typeConstructor + | CASE valueExpression whenClause+ (ELSE elseExpression=expression)? END #simpleCase + | CASE whenClause+ (ELSE elseExpression=expression)? END #searchedCase + | CAST '(' expression AS type ')' #cast + | ARRAY '[' (expression (',' expression)*)? ']' #arrayConstructor + | STRUCT '(' (identifier ASSIGN expression (',' identifier ASSIGN expression)*)? ')' #structConstructor + | identifier '(' ASTERISK ')' #functionCall + | identifier'(' (expression (',' expression)*)? ')' #functionCall + | value=primaryExpression '[' index=valueExpression ']' #subscript + | identifier #columnReference + | identifier '.' 
identifier #columnReference + | base=primaryExpression STRUCT_FIELD_REF fieldName=identifier #dereference + | '(' expression ')' #parenthesizedExpression ; timeZoneSpecifier @@ -472,6 +473,7 @@ SLASH: '/'; PERCENT: '%'; CONCAT: '||'; +ASSIGN: ':='; STRUCT_FIELD_REF: '->'; STRING diff --git a/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java b/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java index 68775ee6204b..b1eb61a74d3a 100644 --- a/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java +++ b/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java @@ -31,6 +31,8 @@ import io.confluent.ksql.execution.expression.tree.Cast; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression; +import io.confluent.ksql.execution.expression.tree.CreateStructExpression.Field; import io.confluent.ksql.execution.expression.tree.DecimalLiteral; import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.Expression; @@ -925,6 +927,23 @@ public Node visitCast(final SqlBaseParser.CastContext context) { ); } + @Override + public Node visitStructConstructor(SqlBaseParser.StructConstructorContext context) { + ImmutableList.Builder fields = ImmutableList.builder(); + + for (int i = 0; i < context.identifier().size(); i++) { + fields.add(new Field( + ParserUtil.getIdentifierText(context.identifier(i)), + (Expression) visit(context.expression(i)) + )); + } + + return new CreateStructExpression( + getLocation(context), + fields.build() + ); + } + @Override public Node visitSubscript(final SqlBaseParser.SubscriptContext context) { return new SubscriptExpression( diff --git a/ksql-parser/src/main/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercer.java b/ksql-parser/src/main/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercer.java index 788e8d0c707f..37c408fada11 100644 --- a/ksql-parser/src/main/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercer.java +++ b/ksql-parser/src/main/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercer.java @@ -20,6 +20,7 @@ import io.confluent.ksql.schema.ksql.types.SqlArray; import io.confluent.ksql.schema.ksql.types.SqlDecimal; import io.confluent.ksql.schema.ksql.types.SqlMap; +import io.confluent.ksql.schema.ksql.types.SqlStruct; import io.confluent.ksql.schema.ksql.types.SqlType; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.util.KsqlException; @@ -31,6 +32,8 @@ import java.util.Map; import java.util.Optional; import java.util.function.Function; +import org.apache.kafka.connect.data.Field; +import org.apache.kafka.connect.data.Struct; public enum DefaultSqlValueCoercer implements SqlValueCoercer { @@ -57,7 +60,7 @@ private static Optional doCoerce(final Object value, final SqlType targetType case MAP: return coerceMap(value, (SqlMap) targetType); case STRUCT: - throw new KsqlException("Unsupported SQL type: " + targetType.baseType()); + return coerceStruct(value, (SqlStruct) targetType); default: break; } @@ -77,6 +80,34 @@ private static Optional doCoerce(final Object value, final SqlType targetType return Optional.of(result); } + private static Optional coerceStruct(Object value, SqlStruct targetType) { + if (!(value instanceof Struct)) { + return Optional.empty(); + } + + final Struct struct = (Struct) value; + final Struct coerced = new Struct( + 
SchemaConverters.sqlToConnectConverter().toConnectSchema(targetType) + ); + + for (Field field : coerced.schema().fields()) { + Optional sqlField = targetType.field(field.name()); + if (!sqlField.isPresent()) { + // if there was a field in the struct that wasn't in the schema + // we cannot coerce + return Optional.empty(); + } else if (struct.schema().field(field.name()) == null) { + // if we cannot find the field in the struct, we can ignore it + continue; + } + + Optional val = doCoerce(struct.get(field), sqlField.get().type()); + val.ifPresent(v -> coerced.put(field.name(), v)); + } + + return Optional.of(coerced); + } + private static Optional coerceArray(final Object value, final SqlArray targetType) { if (!(value instanceof List)) { return Optional.empty(); diff --git a/ksql-parser/src/test/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercerTest.java b/ksql-parser/src/test/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercerTest.java index fc15c1e12a4f..ba238ed23ea1 100644 --- a/ksql-parser/src/test/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercerTest.java +++ b/ksql-parser/src/test/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercerTest.java @@ -19,6 +19,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -27,6 +28,7 @@ import io.confluent.ksql.schema.ksql.types.SqlMap; import io.confluent.ksql.schema.ksql.types.SqlType; import io.confluent.ksql.schema.ksql.types.SqlTypes; +import io.confluent.ksql.util.DecimalUtil; import io.confluent.ksql.util.KsqlException; import java.math.BigDecimal; import java.util.Arrays; @@ -35,6 +37,7 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; import org.junit.Before; @@ -84,12 +87,6 @@ public void setUp() { coercer = DefaultSqlValueCoercer.INSTANCE; } - @Test(expected = KsqlException.class) - public void shouldThrowOnStruct() { - coercer.coerce(new Struct(SchemaBuilder.struct()), - SqlTypes.struct().field("foo", SqlTypes.STRING).build()); - } - @Test public void shouldCoerceToBoolean() { assertThat(coercer.coerce(true, SqlTypes.BOOLEAN), is(Optional.of(true))); @@ -197,6 +194,42 @@ public void shouldNotCoerceToMap() { assertThat(coercer.coerce(ImmutableList.of("foo"), mapType), is(Optional.empty())); } + + @SuppressWarnings("unchecked") + @Test + public void shouldCoerceToStruct() { + // Given: + final Schema schema = SchemaBuilder.struct().field("foo", Schema.STRING_SCHEMA); + final Struct struct = new Struct(schema).put("foo", "2.1"); + final SqlType structType = SqlTypes.struct().field("foo", SqlTypes.decimal(2, 1)).build(); + + // When: + final Optional coerced = (Optional) coercer.coerce(struct, structType); + + // Then: + assertThat("", coerced.isPresent()); + assertThat(coerced.get().get("foo"), is(new BigDecimal("2.1"))); + } + + @SuppressWarnings("unchecked") + @Test + public void shouldSubsetCoerceToStruct() { + // Given: + final Schema schema = SchemaBuilder.struct().field("foo", Schema.STRING_SCHEMA); + final Struct struct = new Struct(schema).put("foo", "val1"); + final SqlType structType = SqlTypes.struct() + .field("foo", SqlTypes.STRING) + .field("bar", SqlTypes.STRING).build(); + + // When: + final 
Optional coerced = (Optional) coercer.coerce(struct, structType); + + // Then: + assertThat("", coerced.isPresent()); + assertThat(coerced.get().get("foo"), is("val1")); + assertThat(coerced.get().get("bar"), nullValue()); + } + @Test public void shouldCoerceToString() { assertThat(coercer.coerce("foobar", SqlTypes.STRING), is(Optional.of("foobar"))); From 91c421a28624a026cd60427994855f2290c2a5e7 Mon Sep 17 00:00:00 2001 From: Rohan Date: Thu, 19 Dec 2019 13:17:04 -0800 Subject: [PATCH 052/123] fix: change query id generation to work with planned commands (#4149) * fix: change query id generation to work with planned commands This patch changes up how we generate query IDs to play nice with planned commands. Before this change, statements would get the current offset as their query id. However planned commands get their query IDs before being enqueued, so they should really get the _next_ expected offset as their ID. This patch changes up the id generation to work this way. The next ID is set _after_ statemetns/plans are executed, and is set to the next expected offset. Co-Authored-By: Victoria Xia --- .../query/id/SpecificQueryIdGenerator.java | 4 +-- .../id/SpecificQueryIdGeneratorTest.java | 7 +++-- .../InteractiveStatementExecutor.java | 16 ++++++---- .../InteractiveStatementExecutorTest.java | 31 +++++++++++++++++++ .../rest/server/computation/RecoveryTest.java | 16 +++++----- 5 files changed, 57 insertions(+), 17 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/id/SpecificQueryIdGenerator.java b/ksql-engine/src/main/java/io/confluent/ksql/query/id/SpecificQueryIdGenerator.java index 512dec937217..2b99fd97c6ad 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/query/id/SpecificQueryIdGenerator.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/query/id/SpecificQueryIdGenerator.java @@ -30,7 +30,7 @@ public class SpecificQueryIdGenerator implements QueryIdGenerator { public SpecificQueryIdGenerator() { this.nextId = 0L; - this.alreadyUsed = true; + this.alreadyUsed = false; } public void setNextId(final long nextId) { @@ -50,6 +50,6 @@ public String getNext() { @Override public QueryIdGenerator createSandbox() { - return new SequentialQueryIdGenerator(nextId + 1); + return new SequentialQueryIdGenerator(nextId); } } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/query/id/SpecificQueryIdGeneratorTest.java b/ksql-engine/src/test/java/io/confluent/ksql/query/id/SpecificQueryIdGeneratorTest.java index b5115b27e29f..92aa0a3e0c35 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/query/id/SpecificQueryIdGeneratorTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/query/id/SpecificQueryIdGeneratorTest.java @@ -40,7 +40,10 @@ public void shouldGenerateIdBasedOnSetNextId() { assertThat(generator.getNext(), is("5")); } - + @Test + public void shouldReturnZeroIdForFirstQuery() { + assertThat(generator.getNext(), is("0")); + } @Test(expected = KsqlServerException.class) public void shouldThrowWhenGetNextBeforeSet() { @@ -54,6 +57,6 @@ public void shouldReturnSequentialGeneratorFromLastId() { generator.setNextId(3L); final QueryIdGenerator copy = generator.createSandbox(); assertThat(copy, instanceOf(SequentialQueryIdGenerator.class)); - assertThat(copy.getNext(), is("4")); + assertThat(copy.getNext(), is("3")); } } \ No newline at end of file diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java 
b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java index 89ca6eb41a94..7e7761f0b0db 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutor.java @@ -191,7 +191,7 @@ private void handleStatementWithTerminatedQueries( ) { try { if (command.getPlan().isPresent()) { - executePlan(command, commandId, commandStatusFuture, command.getPlan().get(), mode); + executePlan(command, commandId, commandStatusFuture, command.getPlan().get(), mode, offset); return; } final String statementString = command.getStatement(); @@ -224,7 +224,8 @@ private void executePlan( final CommandId commandId, final Optional commandStatusFuture, final KsqlPlan plan, - final Mode mode + final Mode mode, + final long offset ) { final KsqlConfig mergedConfig = buildMergedConfig(command); final ConfiguredKsqlPlan configured = ConfiguredKsqlPlan.of( @@ -238,8 +239,11 @@ private void executePlan( new CommandStatus(CommandStatus.Status.EXECUTING, "Executing statement") ); final ExecuteResult result = ksqlEngine.execute(serviceContext, configured); - if (mode == Mode.EXECUTE) { - result.getQuery().ifPresent(QueryMetadata::start); + if (result.getQuery().isPresent()) { + queryIdGenerator.setNextId(offset + 1); + if (mode == Mode.EXECUTE) { + result.getQuery().get().start(); + } } final String successMessage = getSuccessMessage(result); final CommandStatus successStatus = @@ -317,8 +321,6 @@ private PersistentQueryMetadata startQuery( final ConfiguredStatement configured = ConfiguredStatement.of( statement, command.getOverwriteProperties(), mergedConfig); - queryIdGenerator.setNextId(offset); - final KsqlPlan plan = ksqlEngine.plan(serviceContext, configured); final QueryMetadata queryMetadata = ksqlEngine @@ -328,6 +330,8 @@ private PersistentQueryMetadata startQuery( .getQuery() .orElseThrow(() -> new IllegalStateException("Statement did not return a query")); + queryIdGenerator.setNextId(offset + 1); + if (!(queryMetadata instanceof PersistentQueryMetadata)) { throw new KsqlException(String.format( "Unexpected query metadata type: %s; was expecting %s", diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java index d3ebec489c5b..44bd43a9933e 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java @@ -345,6 +345,18 @@ public void shouldExecutePlannedCommand() { verify(mockEngine).execute(serviceContext, ConfiguredKsqlPlan.of(plan, emptyMap(), ksqlConfig)); } + @Test + public void shouldSetNextQueryIdToNextOffsetWhenExecutingPlannedCommand() { + // Given: + givenMockPlannedQuery(); + + // When: + handleStatement(statementExecutorWithMocks, plannedCommand, COMMAND_ID, Optional.empty(), 2L); + + // Then: + verify(mockQueryIdGenerator).setNextId(3L); + } + @Test public void shouldUpdateStatusOnCompletedPlannedCommand() { // Given: @@ -570,6 +582,25 @@ public void shouldEnforceReferentialIntegrity() { CoreMatchers.equalTo(CommandStatus.Status.SUCCESS)); } + @Test + public void shouldSetNextQueryIdToNextOffsetWhenExecutingRestoreCommand() { + // Given: + mockReplayCSAS(new 
QueryId("csas-query-id")); + + // When: + statementExecutorWithMocks.handleRestore( + new QueuedCommand( + new CommandId(Type.STREAM, "foo", Action.CREATE), + new Command("CSAS", emptyMap(), emptyMap(), Optional.empty()), + Optional.empty(), + 2L + ) + ); + + // Then: + verify(mockQueryIdGenerator).setNextId(3L); + } + @Test public void shouldSkipStartWhenReplayingLog() { // Given: diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java index 9876553979c7..82f90254d469 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java @@ -37,6 +37,7 @@ import io.confluent.ksql.metrics.MetricCollectors; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.query.QueryId; +import io.confluent.ksql.query.id.QueryIdGenerator; import io.confluent.ksql.query.id.SpecificQueryIdGenerator; import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.entity.CommandId; @@ -105,7 +106,7 @@ public void tearDown() { serviceContext.close(); } - private KsqlEngine createKsqlEngine() { + private KsqlEngine createKsqlEngine(final QueryIdGenerator queryIdGenerator) { final KsqlEngineMetrics engineMetrics = mock(KsqlEngineMetrics.class); return KsqlEngineTestUtil.createKsqlEngine( serviceContext, @@ -190,7 +191,8 @@ private class KsqlServer { final ServerState serverState; KsqlServer(final List commandLog) { - this.ksqlEngine = createKsqlEngine(); + final SpecificQueryIdGenerator queryIdGenerator = new SpecificQueryIdGenerator(); + this.ksqlEngine = createKsqlEngine(queryIdGenerator); this.fakeCommandQueue = new FakeCommandQueue(commandLog, transactionalProducer); serverState = new ServerState(); serverState.setReady(); @@ -561,7 +563,7 @@ public void shouldRecoverRecreates() { server1.submitCommands( "CREATE STREAM A (C1 STRING, C2 INT) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", "CREATE STREAM B AS SELECT C1 FROM A;", - "TERMINATE CSAS_B_1;", + "TERMINATE CSAS_B_0;", "DROP STREAM B;", "CREATE STREAM B AS SELECT C2 FROM A;" ); @@ -573,7 +575,7 @@ public void shouldRecoverTerminates() { server1.submitCommands( "CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", "CREATE STREAM B AS SELECT * FROM A;", - "TERMINATE CSAS_B_1;" + "TERMINATE CSAS_B_0;" ); shouldRecover(commands); } @@ -583,7 +585,7 @@ public void shouldRecoverDrop() { server1.submitCommands( "CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", "CREATE STREAM B AS SELECT * FROM A;", - "TERMINATE CSAS_B_1;", + "TERMINATE CSAS_B_0;", "DROP STREAM B;" ); shouldRecover(commands); @@ -595,7 +597,7 @@ public void shouldNotDeleteTopicsOnRecovery() { server1.submitCommands( "CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", "CREATE STREAM B AS SELECT * FROM A;", - "TERMINATE CSAS_B_1;", + "TERMINATE CSAS_B_0;", "DROP STREAM B DELETE TOPIC;" ); @@ -657,7 +659,7 @@ public void shouldRecoverQueryIDs() { final Set queryIdNames = queriesById(server.ksqlEngine.getPersistentQueries()) .keySet(); - assertThat(queryIdNames, contains(new QueryId("CSAS_C_7"))); + assertThat(queryIdNames, contains(new QueryId("CSAS_C_0"))); } } From 0ac71cf69935796af605a84d7f975d4edaa98af3 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Fri, 20 Dec 2019 12:02:40 +0000 
Subject: [PATCH 053/123] fix: pull queries should work across nodes (#4169) * fix: pull queries should work across nodes Fixes: https://github.com/confluentinc/ksql/issues/4142 Fixes: https://github.com/confluentinc/ksql/issues/4151 Fixes: https://github.com/confluentinc/ksql/issues/4152 Introduces a new `inter.node.listener` that can be used to specify a URL that the node can be contacted on by other nodes. This can be different to the listeners defined in `listeners`. This can be required if `listeners` is set to a wildcard address, i.e. IPv4 `0.0.0.0` or IPv6 `[::]`, or if the node sits behind network infrastructure that requires other nodes to reach it using a different URL. If `inter.node.listener` is not set it still defaults to the first listener in `listener` config. However, it now replaces an wildcard address with `localHost`. This means inter-node comms is still possible for nodes running on the same host. Warnings are logged if the inter-node listener resolves to a loopback or local address. --- config/ksql-production-server.properties | 6 + config/ksql-server.properties | 6 + .../ksql/configdef/ConfigValidators.java | 14 + .../ksql/configdef/ConfigValidatorsTest.java | 25 + .../ksql/rest/server/KsqlRestApplication.java | 85 ++- .../ksql/rest/server/KsqlRestConfig.java | 227 +++++++- .../KsqlRestApplicationFunctionalTest.java | 107 ++++ .../rest/server/KsqlRestApplicationTest.java | 113 ++-- .../ksql/rest/server/KsqlRestConfigTest.java | 510 +++++++++++++++++- 9 files changed, 995 insertions(+), 98 deletions(-) create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationFunctionalTest.java diff --git a/config/ksql-production-server.properties b/config/ksql-production-server.properties index 1c3943aa67d1..f4590a5d78e5 100644 --- a/config/ksql-production-server.properties +++ b/config/ksql-production-server.properties @@ -18,17 +18,23 @@ ### HTTP ### # The URL the KSQL server will listen on: # The default is any IPv4 interface on the machine. +# NOTE: If set to wildcard or loopback set 'advertised.listener' to enable pull queries across machines listeners=http://0.0.0.0:8088 # Use the 'listeners' line below for any IPv6 interface on the machine. # listeners=http://[::]:8088 +# If running a multi-node cluster across multiple machines and 'listeners' is set to a wildcard or loopback address +# 'advertised.listener' must be set to the URL other KSQL nodes should use to reach this node. +# advertised.listener=? + ### HTTPS ### # To switch KSQL over to communicating using HTTPS comment out the 'listeners' line above # uncomment and complete the properties below. # See: https://docs.confluent.io/current/ksql/docs/installation/server-config/security.html#configuring-ksql-cli-for-https # # listeners=https://0.0.0.0:8088 +# advertised.listener=? # ssl.keystore.location=? # ssl.keystore.password=? # ssl.key.password=? diff --git a/config/ksql-server.properties b/config/ksql-server.properties index f11b09d8e24e..1737ca0ba58a 100644 --- a/config/ksql-server.properties +++ b/config/ksql-server.properties @@ -18,17 +18,23 @@ ### HTTP ### # The URL the KSQL server will listen on: # The default is any IPv4 interface on the machine. +# NOTE: If set to wildcard or loopback set 'advertised.listener' to enable pull queries across machines listeners=http://0.0.0.0:8088 # Use the 'listeners' line below for any IPv6 interface on the machine. 
# listeners=http://[::]:8088 +# If running a multi-node cluster across multiple machines and 'listeners' is set to a wildcard or loopback address +# 'advertised.listener' must be set to the URL other KSQL nodes should use to reach this node. +# advertised.listener=? + ### HTTPS ### # To switch KSQL over to communicating using HTTPS comment out the 'listeners' line above # uncomment and complete the properties below. # See: https://docs.confluent.io/current/ksql/docs/installation/server-config/security.html#configuring-ksql-cli-for-https # # listeners=https://0.0.0.0:8088 +# advertised.listener=? # ssl.keystore.location=? # ssl.keystore.password=? # ssl.key.password=? diff --git a/ksql-common/src/main/java/io/confluent/ksql/configdef/ConfigValidators.java b/ksql-common/src/main/java/io/confluent/ksql/configdef/ConfigValidators.java index db8075ef6f1a..3db0708f0bbd 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/configdef/ConfigValidators.java +++ b/ksql-common/src/main/java/io/confluent/ksql/configdef/ConfigValidators.java @@ -17,6 +17,7 @@ import static java.util.Objects.requireNonNull; +import java.net.URL; import java.util.Arrays; import java.util.EnumSet; import java.util.List; @@ -78,6 +79,19 @@ public static > Validator enumValues(final Class enumClass) return ValidCaseInsensitiveString.in(validValues); } + public static Validator validUrl() { + return (name, val) -> { + if (!(val instanceof String)) { + throw new IllegalArgumentException("validator should only be used with STRING defs"); + } + try { + new URL((String)val); + } catch (Exception e) { + throw new ConfigException(name, val, "Not valid URL: " + e.getMessage()); + } + }; + } + public static final class ValidCaseInsensitiveString implements Validator { private final List validStrings; diff --git a/ksql-common/src/test/java/io/confluent/ksql/configdef/ConfigValidatorsTest.java b/ksql-common/src/test/java/io/confluent/ksql/configdef/ConfigValidatorsTest.java index 52d2c5180796..b635e9a72f86 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/configdef/ConfigValidatorsTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/configdef/ConfigValidatorsTest.java @@ -182,6 +182,31 @@ public void shouldThrowIfParserThrows() { validator.ensureValid("propName", "value"); } + @Test + public void shouldThrowOnInvalidURL() { + // Given: + final Validator validator = ConfigValidators.validUrl(); + + // Then: + expectedException.expect(ConfigException.class); + expectedException.expectMessage( + "Invalid value INVALID for configuration propName: Not valid URL: no protocol: INVALID"); + + // When: + validator.ensureValid("propName", "INVALID"); + } + + @Test + public void shouldNotThrowOnValidURL() { + // Given: + final Validator validator = ConfigValidators.validUrl(); + + // When: + validator.ensureValid("propName", "http://valid:25896/somePath"); + + // Then: did not throw. 
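+    // (validUrl() simply delegates to java.net.URL, so anything the URL(String)
+    // constructor rejects, e.g. a value with no protocol, surfaces as a ConfigException.)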
+ } + private enum TestEnum { FOO, BAR } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java index 24bff5b12850..549293a091c2 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java @@ -16,6 +16,7 @@ package io.confluent.ksql.rest.server; import static io.confluent.ksql.rest.server.KsqlRestConfig.DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG; +import static io.confluent.rest.RestConfig.LISTENERS_CONFIG; import static java.util.Objects.requireNonNull; import com.fasterxml.jackson.jaxrs.base.JsonParseExceptionMapper; @@ -77,6 +78,7 @@ import io.confluent.ksql.services.SimpleKsqlClient; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlServerException; import io.confluent.ksql.util.RetryUtil; import io.confluent.ksql.util.Version; import io.confluent.ksql.util.WelcomeMsgUtils; @@ -87,6 +89,7 @@ import java.io.Console; import java.io.OutputStreamWriter; import java.io.PrintWriter; +import java.net.MalformedURLException; import java.net.URI; import java.net.URL; import java.nio.charset.StandardCharsets; @@ -106,6 +109,7 @@ import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.websocket.DeploymentException; import javax.websocket.server.ServerEndpoint; import javax.websocket.server.ServerEndpointConfig; @@ -213,7 +217,7 @@ public void setupResources(final Configurable config, final KsqlRestConfig ap } @Override - public void startAsync() throws Exception { + public void startAsync() { log.info("KSQL RESTful API listening on {}", StringUtils.join(getListeners(), ", ")); final KsqlConfig ksqlConfigWithPort = buildConfigWithPort(); configurables.forEach(c -> c.configure(ksqlConfigWithPort)); @@ -323,23 +327,46 @@ public void onShutdown() { } List getListeners() { - return Arrays.stream(server.getConnectors()) - .filter(connector -> connector instanceof ServerConnector) - .map(ServerConnector.class::cast) - .map(connector -> { - try { - final String protocol = new HashSet<>(connector.getProtocols()) - .stream() - .map(String::toLowerCase) - .anyMatch(s -> s.equals("ssl")) ? "https" : "http"; - - final int localPort = connector.getLocalPort(); - - return new URL(protocol, "localhost", localPort, ""); - } catch (final Exception e) { - throw new RuntimeException("Malformed listener", e); - } - }) + final Function> resolvePort = url -> + Arrays.stream(server.getConnectors()) + .filter(connector -> connector instanceof ServerConnector) + .map(ServerConnector.class::cast) + .filter(connector -> { + final String connectorProtocol = connector.getProtocols().stream() + .map(String::toLowerCase) + .anyMatch(p -> p.equals("ssl")) ? 
"https" : "http"; + + return connectorProtocol.equalsIgnoreCase(url.getProtocol()); + }) + .map(ServerConnector::getLocalPort) + .collect(Collectors.toSet()); + + final Function> resolveUrl = listener -> { + try { + final URL url = new URL(listener); + if (url.getPort() != 0) { + return Stream.of(url); + } + + // Need to resolve port using actual listeners: + return resolvePort.apply(url).stream() + .map(port -> { + try { + return new URL(url.getProtocol(), url.getHost(), port, url.getFile()); + } catch (MalformedURLException e) { + throw new KsqlServerException("Malformed URL specified in '" + + LISTENERS_CONFIG + "' config: " + listener, e); + } + }); + } catch (MalformedURLException e) { + throw new KsqlServerException("Malformed URL specified in '" + + LISTENERS_CONFIG + "' config: " + listener, e); + } + }; + + return restConfig.getList(LISTENERS_CONFIG).stream() + .flatMap(resolveUrl) + .distinct() .collect(Collectors.toList()); } @@ -681,21 +708,31 @@ private static void maybeCreateProcessingLogStream( * * @return true server config. */ - private KsqlConfig buildConfigWithPort() { + @VisibleForTesting + KsqlConfig buildConfigWithPort() { final Map props = ksqlConfigNoPort.originals(); - // Wire up KS IQ endpoint discovery to the FIRST listener: - final URL firstListener = getListeners().get(0); + // Wire up KS IQ so that pull queries work across KSQL nodes: props.put( KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.APPLICATION_SERVER_CONFIG, - firstListener.toString() + restConfig.getInterNodeListener(this::resolvePort).toString() ); - log.info("Using first listener URL for intra-node communication: {}", firstListener); - return new KsqlConfig(props); } + private int resolvePort(final URL listener) { + return getListeners().stream() + .filter(l -> + l.getProtocol().equals(listener.getProtocol()) + && l.getHost().equals(listener.getHost()) + ) + .map(URL::getPort) + .findFirst() + .orElseThrow(() -> + new IllegalStateException("Failed resolve port for listener: " + listener)); + } + private static KsqlRestConfig injectPathsWithoutAuthentication(final KsqlRestConfig restConfig) { final Set authenticationSkipPaths = new HashSet<>( restConfig.getList(RestConfig.AUTHENTICATION_SKIP_PATHS) diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java index e1971e44e749..398e23bb000b 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java @@ -15,24 +15,49 @@ package io.confluent.ksql.rest.server; +import com.google.common.annotations.VisibleForTesting; +import io.confluent.ksql.configdef.ConfigValidators; import io.confluent.ksql.rest.DefaultErrorMessages; import io.confluent.ksql.rest.ErrorMessages; import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.KsqlServerException; import io.confluent.rest.RestConfig; +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.function.Function; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; +import org.apache.kafka.common.config.ConfigException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class 
KsqlRestConfig extends RestConfig { + private static final Logger LOGGER = LoggerFactory.getLogger(KsqlRestConfig.class); + private static final String KSQL_CONFIG_PREFIX = "ksql."; - private static final String COMMAND_CONSUMER_PREFIX = + private static final String COMMAND_CONSUMER_PREFIX = KSQL_CONFIG_PREFIX + "server.command.consumer."; - private static final String COMMAND_PRODUCER_PREFIX = + private static final String COMMAND_PRODUCER_PREFIX = KSQL_CONFIG_PREFIX + "server.command.producer."; + public static final String ADVERTISED_LISTENER_CONFIG = + KSQL_CONFIG_PREFIX + "advertised.listener"; + private static final String ADVERTISED_LISTENER_DOC = + "The listener used for communication between KSQL nodes in the cluster, if different to the '" + + LISTENERS_CONFIG + "' config property. " + + "In IaaS environments, this may need to be different from the interface to which " + + "the server binds. If this is not set, the first value from listeners will be used. " + + "Unlike listeners, it is not valid to use the 0.0.0.0 (IPv4) or [::] (IPv6) " + + "wildcard addresses."; + static final String STREAMED_QUERY_DISCONNECT_CHECK_MS_CONFIG = "query.stream.disconnect.check"; @@ -96,6 +121,13 @@ public class KsqlRestConfig extends RestConfig { static { CONFIG_DEF = baseConfigDef().define( + ADVERTISED_LISTENER_CONFIG, + Type.STRING, + null, + ConfigValidators.nullsAllowed(ConfigValidators.validUrl()), + Importance.HIGH, + ADVERTISED_LISTENER_DOC + ).define( STREAMED_QUERY_DISCONNECT_CHECK_MS_CONFIG, Type.LONG, 1000L, @@ -155,10 +187,14 @@ public class KsqlRestConfig extends RestConfig { public KsqlRestConfig(final Map props) { super(CONFIG_DEF, props); - if (getList(RestConfig.LISTENERS_CONFIG).isEmpty()) { - throw new KsqlException(RestConfig.LISTENERS_CONFIG + " must be supplied. " - + RestConfig.LISTENERS_DOC); + + final List listeners = getList(LISTENERS_CONFIG); + if (listeners.isEmpty()) { + throw new KsqlException(LISTENERS_CONFIG + " must be supplied. " + LISTENERS_DOC); } + + listeners + .forEach(listener -> ConfigValidators.validUrl().ensureValid(LISTENERS_CONFIG, listener)); } // Bit of a hack to get around the fact that RestConfig.originals() is private for some reason @@ -183,4 +219,185 @@ Map getCommandProducerProperties() { public Map getKsqlConfigProperties() { return getOriginals(); } + + /** + * Determines which URL should be used to contact this node from other KSQL nodes. + * + *
+   * <p>Uses {@code ADVERTISED_LISTENER_CONFIG} by default, or the first listener defined in
+   * {@code LISTENERS_CONFIG} if {@code ADVERTISED_LISTENER_CONFIG} is not set.
+   *
+   * <p>Method takes a {@code portResolver} to resolve any auto-assigned port in
+   * {@code LISTENERS_CONFIG} (i.e. port {@code 0}).
+   *
+   * <p>Any loopback or localhost in {@code LISTENERS_CONFIG} will be replaced with the local
+   * machine name, though this is not guaranteed to work across machines in all circumstances.
+   *
+   * @param portResolver called to resolve the port in the first {@code LISTENERS_CONFIG} if {@code 0}.
+   * @return the resolved inter-node endpoint to use.
+   */
+  public URL getInterNodeListener(
+      final Function<URL, Integer> portResolver
+  ) {
+    return getInterNodeListener(portResolver, LOGGER);
+  }
+
+  @VisibleForTesting
+  URL getInterNodeListener(
+      final Function<URL, Integer> portResolver,
+      final Logger logger
+  ) {
+    return getString(ADVERTISED_LISTENER_CONFIG) == null
+        ? getInterNodeListenerFromFirstListener(portResolver, logger)
+        : getInterNodeListenerFromExplicitConfig(logger);
+  }
+
+  private URL getInterNodeListenerFromFirstListener(
+      final Function<URL, Integer> portResolver,
+      final Logger logger
+  ) {
+    final List<String> configValue = getList(LISTENERS_CONFIG);
+
+    final URL firstListener = parseUrl(configValue.get(0), LISTENERS_CONFIG);
+
+    final InetAddress address = parseInetAddress(firstListener.getHost())
+        .orElseThrow(() -> new ConfigException(
+            LISTENERS_CONFIG,
+            configValue,
+            "Could not resolve first host"
+        ));
+
+    final URL listener = sanitizeInterNodeListener(
+        firstListener,
+        portResolver,
+        address.isAnyLocalAddress()
+    );
+
+    logInterNodeListener(
+        logger,
+        listener,
+        Optional.of(address),
+        "first '" + LISTENERS_CONFIG + "'"
+    );
+
+    return listener;
+  }
+
+  private URL getInterNodeListenerFromExplicitConfig(final Logger logger) {
+    final String configValue = getString(ADVERTISED_LISTENER_CONFIG);
+
+    final URL listener = parseUrl(configValue, ADVERTISED_LISTENER_CONFIG);
+
+    if (listener.getPort() <= 0) {
+      throw new ConfigException(ADVERTISED_LISTENER_CONFIG, configValue, "Must have valid port");
+    }
+
+    // Valid for the address to not be resolvable, as it may be _externally_ resolvable:
+    final Optional<InetAddress> address = parseInetAddress(listener.getHost());
+
+    address.ifPresent(a -> {
+      if (a.isAnyLocalAddress()) {
+        throw new ConfigException(ADVERTISED_LISTENER_CONFIG, configValue, "Can not be wildcard");
+      }
+    });
+
+    logInterNodeListener(
+        logger,
+        listener,
+        address,
+        "'" + ADVERTISED_LISTENER_CONFIG + "'"
+    );
+
+    return listener;
+  }
+
+  private static void logInterNodeListener(
+      final Logger logger,
+      final URL listener,
+      final Optional<InetAddress> address,
+      final String sourceConfigName
+  ) {
+    address.ifPresent(a -> {
+      if (a.isLoopbackAddress()) {
+        logger.warn(
+            "{} config is set to a loopback address: {}. Intra-node communication will only work "
+                + "between nodes running on the same machine.",
+            sourceConfigName, listener
+        );
+      }
+
+      if (a.isAnyLocalAddress()) {
+        logger.warn(
+            "{} config uses wildcard address: {}. Intra-node communication will only work "
+                + "between nodes running on the same machine.",
+            sourceConfigName, listener
+        );
+      }
+    });
+
+    logger.info("Using {} config for intra-node communication: {}", sourceConfigName, listener);
+  }
+
+  /**
+   * Used to sanitize the first `listener` config.
+   *
+   * <p>It will:
+   * <ul>
+   * <li>resolve any auto-port assignment to the actual port the server is listening on</li>
+   * <li>potentially, replace the host with localhost. This can be useful where the first
+   * listener is a wildcard address, e.g. {@code 0.0.0.0}</li>
+   * </ul>
+ * + * @param listener the URL to sanitize + * @param portResolver the function to call to resolve the port. + * @param replaceHost flag indicating if the host in the URL should be replaced with localhost. + * @return the sanitized URL. + */ + private static URL sanitizeInterNodeListener( + final URL listener, + final Function portResolver, + final boolean replaceHost + ) { + if (!replaceHost && listener.getPort() > 0) { + return listener; + } + + final String host = replaceHost + ? getLocalHostName() + : listener.getHost(); + + final int port = listener.getPort() == 0 + ? portResolver.apply(listener) + : listener.getPort(); + + try { + return new URL(listener.getProtocol(), host, port, listener.getFile()); + } catch (final MalformedURLException e) { + throw new KsqlServerException("Resolved first listener to malformed URL", e); + } + } + + private static URL parseUrl(final String address, final String configName) { + try { + return new URL(address); + } catch (final MalformedURLException e) { + throw new ConfigException(configName, address, e.getMessage()); + } + } + + + private static Optional parseInetAddress(final String address) { + try { + return Optional.of(InetAddress.getByName(address)); + } catch (final UnknownHostException e) { + return Optional.empty(); + } + } + + private static String getLocalHostName() { + try { + return InetAddress.getLocalHost().getCanonicalHostName(); + } catch (UnknownHostException e) { + throw new KsqlServerException("Failed to obtain local host info", e); + } + } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationFunctionalTest.java new file mode 100644 index 000000000000..6f44fb7f4e38 --- /dev/null +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationFunctionalTest.java @@ -0,0 +1,107 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.rest.server; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +import com.google.common.collect.ImmutableMap; +import io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster; +import io.confluent.ksql.version.metrics.KsqlVersionCheckerAgent; +import io.confluent.rest.RestConfig; +import java.net.URL; +import java.util.List; +import java.util.Map; +import org.apache.kafka.clients.CommonClientConfigs; +import org.junit.After; +import org.junit.ClassRule; +import org.junit.Test; + +public class KsqlRestApplicationFunctionalTest { + + @ClassRule + public static final EmbeddedSingleNodeKafkaCluster CLUSTER = + EmbeddedSingleNodeKafkaCluster.build(); + + private KsqlRestApplication app; + + @After + public void tearDown() throws Exception { + if (app != null) { + app.stop(); + } + } + + @Test + public void shouldResolveListenersWithExplicitAndAutoAssignedPorts() { + // Given: + givenAppStartedWith(ImmutableMap + .of(RestConfig.LISTENERS_CONFIG, "http://127.0.0.1:0,https://localHost:8088")); + + // When: + final List listeners = app.getListeners(); + + // Then: + assertThat(listeners, hasSize(2)); + assertThat(listeners.get(0).getProtocol(), is("http")); + assertThat(listeners.get(0).getHost(), is("127.0.0.1")); + assertThat(listeners.get(0).getPort(), is(not(0))); + assertThat(listeners.get(1).getProtocol(), is("https")); + assertThat(listeners.get(1).getHost(), is("localHost")); + assertThat(listeners.get(1).getPort(), is(8088)); + } + + @Test + public void shouldResolveMultipleListenersPerProtocol() { + // Given: + givenAppStartedWith(ImmutableMap + .of(RestConfig.LISTENERS_CONFIG, "http://localhost:0,http://localhost:0")); + + // When: + final List listeners = app.getListeners(); + + // Then: + assertThat(listeners, hasSize(2)); + assertThat(listeners.get(0).getProtocol(), is("http")); + assertThat(listeners.get(0).getHost(), is("localhost")); + assertThat(listeners.get(0).getPort(), is(not(0))); + assertThat(listeners.get(1).getProtocol(), is("http")); + assertThat(listeners.get(1).getHost(), is("localhost")); + assertThat(listeners.get(1).getPort(), is(not(0))); + } + + private void givenAppStartedWith(final Map config) { + + final KsqlRestConfig restConfig = new KsqlRestConfig(ImmutableMap.builder() + .putAll(config) + .put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()) + .build() + ); + + app = KsqlRestApplication.buildApplication( + restConfig, + KsqlVersionCheckerAgent::new + ); + + try { + app.start(); + } catch (Exception e) { + throw new AssertionError("Failed to start", e); + } + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java index 712fa7fe61bd..cedc84acb5e2 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java @@ -15,6 +15,8 @@ package io.confluent.ksql.rest.server; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; @@ -28,20 +30,16 @@ import static org.mockito.Mockito.when; import 
com.google.common.collect.ImmutableList; -import io.confluent.ksql.KsqlExecutionContext; +import com.google.common.collect.ImmutableMap; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.logging.processing.ProcessingLogConfig; import io.confluent.ksql.logging.processing.ProcessingLogContext; -import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.parser.KsqlParser.ParsedStatement; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.rest.entity.CommandId; import io.confluent.ksql.rest.entity.KsqlErrorMessage; import io.confluent.ksql.rest.entity.KsqlRequest; -import io.confluent.ksql.rest.server.computation.Command; import io.confluent.ksql.rest.server.computation.CommandRunner; import io.confluent.ksql.rest.server.computation.CommandStore; -import io.confluent.ksql.rest.server.computation.QueuedCommandStatus; import io.confluent.ksql.rest.server.context.KsqlRestServiceContextBinder; import io.confluent.ksql.rest.server.filters.KsqlAuthorizationFilter; import io.confluent.ksql.rest.server.resources.KsqlResource; @@ -59,13 +57,16 @@ import io.confluent.rest.RestConfig; import java.util.Collections; import java.util.LinkedList; +import java.util.Map; import java.util.Optional; import java.util.Queue; import java.util.function.Consumer; import javax.ws.rs.core.Configurable; -import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.streams.StreamsConfig; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.InOrder; import org.mockito.Mock; @@ -78,24 +79,16 @@ public class KsqlRestApplicationTest { private static final String LOG_TOPIC_NAME = "log_topic"; private static final String CMD_TOPIC_NAME = "command_topic"; - private final KsqlRestConfig restConfig = - new KsqlRestConfig( - Collections.singletonMap(RestConfig.LISTENERS_CONFIG, - "http://localhost:8088")); + @Rule + public final ExpectedException expectedException = ExpectedException.none(); @Mock private ServiceContext serviceContext; @Mock private KsqlEngine ksqlEngine; @Mock - private MetaStore metaStore; - @Mock - private KsqlExecutionContext sandBox; - @Mock private KsqlConfig ksqlConfig; @Mock - private KsqlRestConfig ksqlRestConfig; - @Mock private ProcessingLogConfig processingLogConfig; @Mock private CommandRunner commandRunner; @@ -112,8 +105,6 @@ public class KsqlRestApplicationTest { @Mock private CommandStore commandQueue; @Mock - private QueuedCommandStatus queuedCommandStatus; - @Mock private KsqlSecurityExtension securityExtension; @Mock private ProcessingLogContext processingLogContext; @@ -131,10 +122,10 @@ public class KsqlRestApplicationTest { private PreparedStatement preparedStatement; @Mock private Consumer rocksDBConfigSetterHandler; - @Mock - private Producer transactionalProducer; + private String logCreateStatement; private KsqlRestApplication app; + private KsqlRestConfig restConfig; @SuppressWarnings("unchecked") @Before @@ -163,26 +154,7 @@ public void setUp() { ksqlConfig ); - app = new KsqlRestApplication( - serviceContext, - ksqlEngine, - ksqlConfig, - restConfig, - commandRunner, - commandQueue, - rootDocument, - statusResource, - streamedQueryResource, - ksqlResource, - versionCheckerAgent, - KsqlRestServiceContextBinder::new, - securityExtension, - serverState, - processingLogContext, - ImmutableList.of(precondition1, precondition2), - ImmutableList.of(ksqlResource, streamedQueryResource), - 
rocksDBConfigSetterHandler - ); + givenAppWithRestConfig(ImmutableMap.of(RestConfig.LISTENERS_CONFIG, "http://localhost:0")); } @Test @@ -378,4 +350,65 @@ public void shouldConfigureRocksDBConfigSetter() { // Then: verify(rocksDBConfigSetterHandler).accept(ksqlConfig); } + + @Test + public void shouldConfigureIQWithInterNodeListenerIfSet() { + // Given: + givenAppWithRestConfig(ImmutableMap.of( + RestConfig.LISTENERS_CONFIG, "http://localhost:0", + KsqlRestConfig.ADVERTISED_LISTENER_CONFIG, "https://some.host:12345" + )); + + // When: + final KsqlConfig ksqlConfig = app.buildConfigWithPort(); + + // Then: + assertThat( + ksqlConfig.getKsqlStreamConfigProps().get(StreamsConfig.APPLICATION_SERVER_CONFIG), + is("https://some.host:12345") + ); + } + + @Test + public void shouldConfigureIQWithFirstListenerIfInterNodeNotSet() { + // Given: + givenAppWithRestConfig(ImmutableMap.of( + RestConfig.LISTENERS_CONFIG, "http://some.host:1244,https://some.other.host:1258" + )); + + // When: + final KsqlConfig ksqlConfig = app.buildConfigWithPort(); + + // Then: + assertThat( + ksqlConfig.getKsqlStreamConfigProps().get(StreamsConfig.APPLICATION_SERVER_CONFIG), + is("http://some.host:1244") + ); + } + + private void givenAppWithRestConfig(final Map restConfigMap) { + + restConfig = new KsqlRestConfig(restConfigMap); + + app = new KsqlRestApplication( + serviceContext, + ksqlEngine, + ksqlConfig, + restConfig, + commandRunner, + commandQueue, + rootDocument, + statusResource, + streamedQueryResource, + ksqlResource, + versionCheckerAgent, + KsqlRestServiceContextBinder::new, + securityExtension, + serverState, + processingLogContext, + ImmutableList.of(precondition1, precondition2), + ImmutableList.of(ksqlResource, streamedQueryResource), + rocksDBConfigSetterHandler + ); + } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java index ce0249253740..f16e20d6133e 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java @@ -16,60 +16,512 @@ package io.confluent.ksql.rest.server; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotSame; +import static io.confluent.ksql.rest.server.KsqlRestConfig.ADVERTISED_LISTENER_CONFIG; +import static io.confluent.rest.RestConfig.LISTENERS_CONFIG; +import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; import com.google.common.collect.ImmutableMap; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.rest.RestConfig; -import java.util.HashMap; +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; import java.util.Map; +import java.util.function.Function; import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.streams.StreamsConfig; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import org.slf4j.Logger; 
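+
+// For context, a minimal usage sketch of the config method exercised by the tests below.
+// The host name and bootstrap address are hypothetical; the config keys and the
+// getInterNodeListener API are the ones added in this patch.
+final class AdvertisedListenerExample {
+  public static void main(final String[] args) {
+    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.of(
+        "bootstrap.servers", "localhost:9092",
+        "listeners", "http://0.0.0.0:8088",
+        "ksql.advertised.listener", "http://ksql-a.internal.example.com:8088"
+    ));
+
+    // The port resolver is only consulted for auto-assigned (port 0) listeners; the
+    // explicitly advertised listener wins here, so it is returned untouched:
+    final URL interNode = config.getInterNodeListener(listener -> 8088);
+    System.out.println(interNode); // http://ksql-a.internal.example.com:8088
+  }
+}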
+@RunWith(MockitoJUnitRunner.class) public class KsqlRestConfigTest { - private static final Map MIN_VALID_CONFIGS = ImmutableMap.builder() + private static final Map MIN_VALID_CONFIGS = ImmutableMap.builder() .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") - .put(RestConfig.LISTENERS_CONFIG, "http://localhost:8088") + .put(LISTENERS_CONFIG, "http://localhost:8088") .build(); - @Test - public void testGetKsqlConfigProperties() { - final Map inputProperties = new HashMap<>(MIN_VALID_CONFIGS); - inputProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - inputProperties.put(KsqlConfig.KSQL_SERVICE_ID_CONFIG, "test"); + private static final String QUOTED_INTER_NODE_LISTENER_CONFIG = + "'" + ADVERTISED_LISTENER_CONFIG + "'"; + + private static final String QUOTED_FIRST_LISTENER_CONFIG = + "first '" + LISTENERS_CONFIG + "'"; + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + @Mock + private Function portResolver; + @Mock + private Logger logger; - final KsqlRestConfig config = new KsqlRestConfig(inputProperties); + @Test + public void shouldGetKsqlConfigProperties() { + // Given: + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") + .put(KsqlConfig.KSQL_SERVICE_ID_CONFIG, "test") + .build() + ); + // When: final Map ksqlConfigProperties = config.getKsqlConfigProperties(); - final Map expectedKsqlConfigProperties = new HashMap<>(); - expectedKsqlConfigProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); - expectedKsqlConfigProperties.put(RestConfig.LISTENERS_CONFIG, "http://localhost:8088"); - expectedKsqlConfigProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - expectedKsqlConfigProperties.put(KsqlConfig.KSQL_SERVICE_ID_CONFIG, "test"); - assertThat(ksqlConfigProperties, equalTo(expectedKsqlConfigProperties)); + + // Then: + assertThat(ksqlConfigProperties, is(ImmutableMap.of( + StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092", + LISTENERS_CONFIG, "http://localhost:8088", + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest", + KsqlConfig.KSQL_SERVICE_ID_CONFIG, "test")) + ); } // Just a sanity check to make sure that, although they contain identical mappings, successive maps returned by calls // to KsqlRestConfig.getOriginals() do not actually return the same object (mutability would then be an issue) @Test - public void testOriginalsReplicability() { - final String COMMIT_INTERVAL_MS = "10"; - - final Map inputProperties = new HashMap<>(MIN_VALID_CONFIGS); - inputProperties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, COMMIT_INTERVAL_MS); - final KsqlRestConfig config = new KsqlRestConfig(inputProperties); + public void shouldReturnDifferentMapOnEachCallToOriginals() { + // Given: + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "10") + .build() + ); final Map originals1 = config.getOriginals(); final Map originals2 = config.getOriginals(); - assertEquals(originals1, originals2); - assertNotSame(originals1, originals2); - assertEquals(COMMIT_INTERVAL_MS, originals1.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)); - assertEquals(COMMIT_INTERVAL_MS, originals2.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)); + // When: + originals1.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "99"); + + // Then: + assertThat(originals2.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG), 
is("10")); + } + + @Test + public void shouldThrowIfAnyListenerIsInvalidUrl() { + // Expect: + expectedException.expect(ConfigException.class); + expectedException.expectMessage("Invalid value INVALID for configuration " + + LISTENERS_CONFIG + + ": Not valid URL: no protocol: INVALID" + ); + + // Given: + new KsqlRestConfig(ImmutableMap.builder() + .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + .put(LISTENERS_CONFIG, "http://localhost:9875,INVALID") + .build() + ); + } + + @Test + public void shouldThrowIfExplicitInterNodeListenerIsInvalidUrl() { + // Expect: + expectedException.expect(ConfigException.class); + expectedException.expectMessage("Invalid value INVALID for configuration " + + ADVERTISED_LISTENER_CONFIG + + ": Not valid URL: no protocol: INVALID" + ); + + // Given: + new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, "INVALID") + .build() + ); + } + + @Test + public void shouldUseExplicitInterNodeListenerSetToUnresolvableHost() { + // Given: + final URL expected = url("https://unresolvable.host:12345"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, expected.toString()) + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_INTER_NODE_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldUseExplicitInterNodeListenerSetToResolvableHost() { + // Given: + final URL expected = url("https://example.com:12345"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, expected.toString()) + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_INTER_NODE_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldUseExplicitInterNodeListenerIfSetToLocalHost() { + // Given: + final URL expected = url("https://localHost:52368"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, expected.toString()) + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_INTER_NODE_LISTENER_CONFIG); + verifyLogsLoopBackWarning(expected, QUOTED_INTER_NODE_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldUseExplicitInterNodeListenerIfSetToIpv4Loopback() { + // Given: + final URL expected = url("https://127.0.0.2:12345"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, expected.toString()) + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_INTER_NODE_LISTENER_CONFIG); + verifyLogsLoopBackWarning(expected, QUOTED_INTER_NODE_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldUseExplicitInterNodeListenerIfSetToIpv6Loopback() { + // Given: + final URL expected = url("https://[::1]:12345"); + + 
final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, expected.toString()) + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_INTER_NODE_LISTENER_CONFIG); + verifyLogsLoopBackWarning(expected, QUOTED_INTER_NODE_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldThrowIfExplicitInterNodeListenerHasAutoPortAssignment() { + // Given: + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, "https://unresolvable.host:0") + .build() + ); + + // Expect: + expectedException.expect(ConfigException.class); + expectedException.expectMessage("Invalid value https://unresolvable.host:0 for configuration " + + ADVERTISED_LISTENER_CONFIG + + ": Must have valid port" + ); + + + // When: + config.getInterNodeListener(portResolver, logger); + } + + @Test + public void shouldThrowIfExplicitInterNodeListenerHasIpv4WildcardAddress() { + // Given: + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, "https://0.0.0.0:12589") + .build() + ); + + // Expect: + expectedException.expect(ConfigException.class); + expectedException.expectMessage("Invalid value https://0.0.0.0:12589 for configuration " + + ADVERTISED_LISTENER_CONFIG + + ": Can not be wildcard" + ); + + // When: + config.getInterNodeListener(portResolver, logger); + } + + @Test + public void shouldThrowIfExplicitInterNodeListenerHasIpv6WildcardAddress() { + // Given: + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .putAll(MIN_VALID_CONFIGS) + .put(ADVERTISED_LISTENER_CONFIG, "https://[::]:1236") + .build() + ); + + // Expect: + expectedException.expect(ConfigException.class); + expectedException.expectMessage("Invalid value https://[::]:1236 for configuration " + + ADVERTISED_LISTENER_CONFIG + + ": Can not be wildcard" + ); + + // When: + config.getInterNodeListener(portResolver, logger); + } + + @Test + public void shouldThrowIfOnGetInterNodeListenerIfFirstListenerSetToUnresolvableHost() { + // Given: + final URL expected = url("https://unresolvable.host:12345"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + .put(LISTENERS_CONFIG, expected.toString() + ",http://localhost:2589") + .build() + ); + + // Expect: + expectedException.expect(ConfigException.class); + expectedException.expectMessage("Invalid value " + + "[https://unresolvable.host:12345, http://localhost:2589] for configuration " + + LISTENERS_CONFIG + + ": Could not resolve first host" + ); + + // When: + config.getInterNodeListener(portResolver, logger); + } + + @Test + public void shouldResolveInterNodeListenerToFirstListenerSetToResolvableHost() { + // Given: + final URL expected = url("https://example.com:12345"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + .put(LISTENERS_CONFIG, expected.toString() + ",http://localhost:2589") + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG); + 
verifyNoMoreInteractions(logger); + } + + @Test + public void shouldResolveInterNodeListenerToFirstListenerSetToLocalHost() { + // Given: + final URL expected = url("https://localHost:52368"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + .put(LISTENERS_CONFIG, expected.toString() + ",http://localhost:2589") + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG); + verifyLogsLoopBackWarning(expected, QUOTED_FIRST_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldResolveInterNodeListenerToFirstListenerSetToIpv4Loopback() { + // Given: + final URL expected = url("https://127.0.0.2:12345"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + .put(LISTENERS_CONFIG, expected.toString() + ",http://localhost:2589") + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG); + verifyLogsLoopBackWarning(expected, QUOTED_FIRST_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldResolveInterNodeListenerToFirstListenerSetToIpv6Loopback() { + // Given: + final URL expected = url("https://[::1]:12345"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + .put(LISTENERS_CONFIG, expected.toString() + ",http://localhost:2589") + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG); + verifyLogsLoopBackWarning(expected, QUOTED_FIRST_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldResolveInterNodeListenerToFirstListenerWithAutoPortAssignment() { + // Given: + final URL autoPort = url("https://example.com:0"); + + when(portResolver.apply(any())).thenReturn(2222); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + .put(LISTENERS_CONFIG, autoPort.toString() + ",http://localhost:2589") + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + final URL expected = url("https://example.com:2222"); + + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG); + verifyNoMoreInteractions(logger); + } + + @Test + public void shouldResolveInterNodeListenerToFirstListenerWithIpv4WildcardAddress() { + // Given: + final URL wildcard = url("https://0.0.0.0:12589"); + + final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder() + .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + .put(LISTENERS_CONFIG, wildcard.toString() + ",http://localhost:2589") + .build() + ); + + // When: + final URL actual = config.getInterNodeListener(portResolver, logger); + + // Then: + final URL expected = url("https://" + getLocalHostName() + ":12589"); + + assertThat(actual, is(expected)); + verifyLogsInterNodeListener(expected, 
QUOTED_FIRST_LISTENER_CONFIG);
+    verifyLogsWildcardWarning(expected);
+    verifyNoMoreInteractions(logger);
+  }
+
+  @Test
+  public void shouldResolveInterNodeListenerToFirstListenerWithIpv6WildcardAddress() {
+    // Given:
+    final URL wildcard = url("https://[::]:12345");
+
+    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.builder()
+        .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
+        .put(LISTENERS_CONFIG, wildcard.toString() + ",http://localhost:2589")
+        .build()
+    );
+
+    // When:
+    final URL actual = config.getInterNodeListener(portResolver, logger);
+
+    // Then:
+    final URL expected = url("https://" + getLocalHostName() + ":12345");
+
+    assertThat(actual, is(expected));
+    verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG);
+    verifyLogsWildcardWarning(expected);
+    verifyNoMoreInteractions(logger);
+  }
+
+  private void verifyLogsInterNodeListener(final URL listener, final String sourceConfig) {
+    verify(logger).info(
+        "Using {} config for intra-node communication: {}",
+        sourceConfig,
+        listener
+    );
+  }
+
+  private void verifyLogsLoopBackWarning(final URL listener, final String sourceConfig) {
+    verify(logger).warn(
+        "{} config is set to a loopback address: {}. Intra-node communication will only work "
+            + "between nodes running on the same machine.",
+        sourceConfig,
+        listener
+    );
+  }
+
+  private void verifyLogsWildcardWarning(final URL listener) {
+    verify(logger).warn(
+        "{} config uses wildcard address: {}. Intra-node communication will only work "
+            + "between nodes running on the same machine.",
+        QUOTED_FIRST_LISTENER_CONFIG,
+        listener
+    );
+  }
+
+  private static URL url(final String address) {
+    try {
+      return new URL(address);
+    } catch (final MalformedURLException e) {
+      throw new AssertionError("Invalid URL in test: " + address, e);
+    }
+  }
+
+  private static String getLocalHostName() {
+    try {
+      return InetAddress.getLocalHost().getCanonicalHostName();
+    } catch (UnknownHostException e) {
+      throw new AssertionError("Failed to obtain local host info", e);
+    }
+  }
+}

From c239990b40e6f450df9779e7e3f26e852757c9e0 Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Fri, 20 Dec 2019 15:28:22 +0000
Subject: [PATCH 054/123] docs: add basic docker-compose file and instructions
 (#4175)

* docs: add basic docker-compose file and instructions
---
 CONTRIBUTING.md    |   4 ++
 docker-compose.yml | 107 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 111 insertions(+)
 create mode 100644 docker-compose.yml

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7af53063baf0..3de353d60cdc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -32,6 +32,10 @@ To build and test changes locally, run the following commands:
 $ mvn clean install checkstyle:check integration-test
 ```

+### Testing docker image
+
+See comments at the top of the [docker compose file in the root of the project](docker-compose.yml) for instructions
+on how to build and run docker images.

 ## How to Contribute

diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 000000000000..33571806ec2a
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,107 @@
+# Docker compose file for bringing up a local KSQL cluster and its dependencies.
+#
+# By default, the cluster has two KSQL servers. You can scale the number of KSQL nodes in the
+# cluster by using the docker `--scale` command line arg.
+#
+# e.g. for a 4 node cluster, run:
+# > docker-compose up --scale additional-ksqldb-server=3
+#
+# or, for a 1 node cluster, run:
+# > docker-compose up --scale additional-ksqldb-server=0
+#
+# The default is one `primary-ksqldb-server` and one `additional-ksqldb-server`. The only
+# difference is that the primary node has a well-known port exposed so clients can connect,
+# whereas the additional nodes use auto-port assignment so that ports don't clash.
+#
+# If you wish to run with locally built KSQL docker images, then build them:
+#
+# 1. ensure you are logged in to docker:
+# > docker login
+#
+# 2. ensure you can log in to AWS ECR:
+# > aws ecr get-login --no-include-email | sh
+#
+# 3. build docker images from local changes (Note: access to the Confluent docker registry is required):
+#    Change `docker.upstream-tag` if you want to depend on anything other than the latest master upstream, e.g. 5.4.x-latest
+# > mvn -Pdocker package -DskipTests -Dspotbugs.skip -Dcheckstyle.skip -Ddockerfile.skip=false -Dskip.docker.build=false -Ddocker.upstream-tag=master-latest -Ddocker.tag=local.build -Ddocker.upstream-registry='368821881613.dkr.ecr.us-west-2.amazonaws.com/'
+#
+# 4. check the images built:
+# > docker image ls | grep ksql.local.build
+#    You should see your new images listed
+#
+# 5. update this file below, replacing all references to "confluentinc/ksqldb-server:latest" with your image, e.g. "placeholder/confluentinc/ksql-rest-app:local.build"
+
+---
+version: '2'
+services:
+  zookeeper:
+    image: confluentinc/cp-zookeeper:latest
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 32181
+      ZOOKEEPER_TICK_TIME: 2000
+
+  kafka:
+    image: confluentinc/cp-enterprise-kafka:latest
+    ports:
+      - "29092:29092"
+    depends_on:
+      - zookeeper
+    environment:
+      KAFKA_BROKER_ID: 1
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
+
+  schema-registry:
+    image: confluentinc/cp-schema-registry:latest
+    depends_on:
+      - zookeeper
+      - kafka
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:32181
+
+  primary-ksqldb-server:
+    image: confluentinc/ksqldb-server:latest
+    hostname: primary-ksqldb-server
+    container_name: primary-ksqldb-server
+    depends_on:
+      - kafka
+      - schema-registry
+    ports:
+      - "8088:8088"
+    environment:
+      KSQL_LISTENERS: http://0.0.0.0:8088
+      KSQL_BOOTSTRAP_SERVERS: kafka:9092
+      KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
+      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
+
+  additional-ksqldb-server:
+    image: confluentinc/ksqldb-server:latest
+    hostname: additional-ksqldb-server
+    container_name: additional-ksqldb-server
+    depends_on:
+      - primary-ksqldb-server
+    ports:
+      - "8090"
+    environment:
+      KSQL_LISTENERS: http://0.0.0.0:8090
+      KSQL_BOOTSTRAP_SERVERS: kafka:9092
+      KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schema-registry:8081
+
+  # Access the CLI by running:
+  # > docker-compose exec ksqldb-cli ksql http://primary-ksqldb-server:8088
+  ksqldb-cli:
+    image: confluentinc/ksqldb-cli:latest
+    container_name: ksqldb-cli
+    depends_on:
+      - primary-ksqldb-server
+    entrypoint: /bin/sh
+    tty: true
+

From
cbd3bab35666aa676c1f3722aaef8dcbbdc8243f Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Fri, 20 Dec 2019 11:25:56 -0800 Subject: [PATCH 055/123] fix: immutability in some more classes (MINOR) (#4179) --- .../io/confluent/ksql/execution/codegen/CodeGenSpec.java | 5 +++-- .../execution/expression/tree/CreateStructExpression.java | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java index ee5a5b9bd334..56ede106bf78 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java @@ -32,6 +32,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.kafka.connect.data.ConnectSchema; import org.apache.kafka.connect.data.Schema; @Immutable @@ -239,14 +240,14 @@ public String toString() { @Immutable public static final class SchemaArgumentSpec extends BaseArgumentSpec { - private final Schema schema; + private final ConnectSchema schema; SchemaArgumentSpec( String name, Schema schema ) { super(name, Schema.class); - this.schema = requireNonNull(schema, "schema"); + this.schema = (ConnectSchema) requireNonNull(schema, "schema").schema(); } @Override diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java index 925e49638c2b..3a07370f3ca5 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java @@ -67,6 +67,7 @@ public int hashCode() { return Objects.hash(fields); } + @Immutable public static class Field { private final String name; private final Expression value; From a5a16206ffefee7d5906288a6cd3edc3ccecc784 Mon Sep 17 00:00:00 2001 From: Confluent Jenkins Bot Date: Fri, 20 Dec 2019 22:31:48 +0000 Subject: [PATCH 056/123] Bump Confluent to 5.3.3-SNAPSHOT, Kafka to 5.3.3-SNAPSHOT --- build-tools/pom.xml | 2 +- docs/conf.py | 2 +- ksql-benchmark/pom.xml | 2 +- ksql-cli/pom.xml | 2 +- ksql-clickstream-demo/pom.xml | 2 +- ksql-common/pom.xml | 2 +- ksql-console-scripts/pom.xml | 2 +- ksql-engine/pom.xml | 2 +- ksql-etc/pom.xml | 2 +- ksql-examples/pom.xml | 2 +- ksql-functional-tests/pom.xml | 2 +- ksql-metastore/pom.xml | 2 +- ksql-package/pom.xml | 2 +- ksql-parser/pom.xml | 4 ++-- ksql-rest-app/pom.xml | 2 +- ksql-rocksdb-config-setter/pom.xml | 2 +- ksql-serde/pom.xml | 2 +- ksql-test-util/pom.xml | 2 +- ksql-tools/pom.xml | 2 +- ksql-udf-quickstart/pom.xml | 2 +- ksql-udf/pom.xml | 2 +- ksql-version-metrics-client/pom.xml | 2 +- licenses/licenses.html | 12 ++++++------ pom.xml | 4 ++-- 24 files changed, 31 insertions(+), 31 deletions(-) diff --git a/build-tools/pom.xml b/build-tools/pom.xml index f0f3f8a73704..55424c84eed4 100644 --- a/build-tools/pom.xml +++ b/build-tools/pom.xml @@ -19,6 +19,6 @@ 4.0.0 io.confluent build-tools - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT Build Tools diff --git a/docs/conf.py b/docs/conf.py index 6cfe7ae2c868..6b0c55a114de 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ def setup(app): # The short X.Y version. version = '5.3' # The full version, including alpha/beta/rc tags. 
-release = '5.3.2-SNAPSHOT' +release = '5.3.3-SNAPSHOT' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/ksql-benchmark/pom.xml b/ksql-benchmark/pom.xml index 1c9e620ce989..e6e54d77015a 100644 --- a/ksql-benchmark/pom.xml +++ b/ksql-benchmark/pom.xml @@ -47,7 +47,7 @@ questions. io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-benchmark diff --git a/ksql-cli/pom.xml b/ksql-cli/pom.xml index f0807e6b167b..cc0a1b0b9808 100644 --- a/ksql-cli/pom.xml +++ b/ksql-cli/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-cli diff --git a/ksql-clickstream-demo/pom.xml b/ksql-clickstream-demo/pom.xml index bdc701df5816..12f454f716f8 100644 --- a/ksql-clickstream-demo/pom.xml +++ b/ksql-clickstream-demo/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT io.confluent.ksql diff --git a/ksql-common/pom.xml b/ksql-common/pom.xml index 3f8adc0adbc0..ec29a1d0a25a 100644 --- a/ksql-common/pom.xml +++ b/ksql-common/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-common diff --git a/ksql-console-scripts/pom.xml b/ksql-console-scripts/pom.xml index 630280e8388b..75809a375955 100644 --- a/ksql-console-scripts/pom.xml +++ b/ksql-console-scripts/pom.xml @@ -22,7 +22,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT io.confluent.ksql diff --git a/ksql-engine/pom.xml b/ksql-engine/pom.xml index 794547724c89..8a30fc3d509a 100644 --- a/ksql-engine/pom.xml +++ b/ksql-engine/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-engine diff --git a/ksql-etc/pom.xml b/ksql-etc/pom.xml index 095c7869457c..efa8f8c7163f 100644 --- a/ksql-etc/pom.xml +++ b/ksql-etc/pom.xml @@ -22,7 +22,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT io.confluent.ksql diff --git a/ksql-examples/pom.xml b/ksql-examples/pom.xml index 17ab44b46eaa..e11029ee6479 100644 --- a/ksql-examples/pom.xml +++ b/ksql-examples/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-examples diff --git a/ksql-functional-tests/pom.xml b/ksql-functional-tests/pom.xml index 91e445ed365e..0352037a55ea 100644 --- a/ksql-functional-tests/pom.xml +++ b/ksql-functional-tests/pom.xml @@ -5,7 +5,7 @@ ksql-parent io.confluent.ksql - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT 4.0.0 diff --git a/ksql-metastore/pom.xml b/ksql-metastore/pom.xml index 7db629e3ebf1..d4254ace83e9 100644 --- a/ksql-metastore/pom.xml +++ b/ksql-metastore/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-metastore diff --git a/ksql-package/pom.xml b/ksql-package/pom.xml index 1c18d0f7c619..dcb9e2c26b85 100644 --- a/ksql-package/pom.xml +++ b/ksql-package/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-package diff --git a/ksql-parser/pom.xml b/ksql-parser/pom.xml index 6b4dae6eb549..cbbe57c84635 100644 --- a/ksql-parser/pom.xml +++ b/ksql-parser/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-parser @@ -40,7 +40,7 @@ io.confluent.ksql ksql-metastore - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT test-jar test diff --git a/ksql-rest-app/pom.xml b/ksql-rest-app/pom.xml index d89d37a5f078..e86199bc88b5 100644 --- a/ksql-rest-app/pom.xml +++ 
b/ksql-rest-app/pom.xml @@ -23,7 +23,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-rest-app diff --git a/ksql-rocksdb-config-setter/pom.xml b/ksql-rocksdb-config-setter/pom.xml index 3b1c9ee3a5e2..1a1f289a9d33 100644 --- a/ksql-rocksdb-config-setter/pom.xml +++ b/ksql-rocksdb-config-setter/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-rocksdb-config-setter diff --git a/ksql-serde/pom.xml b/ksql-serde/pom.xml index aeb95f7820d3..cf1b78a4d069 100644 --- a/ksql-serde/pom.xml +++ b/ksql-serde/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-serde diff --git a/ksql-test-util/pom.xml b/ksql-test-util/pom.xml index 14fe1eb92e99..ff51c7639169 100644 --- a/ksql-test-util/pom.xml +++ b/ksql-test-util/pom.xml @@ -20,7 +20,7 @@ ksql-parent io.confluent.ksql - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT 4.0.0 diff --git a/ksql-tools/pom.xml b/ksql-tools/pom.xml index e466d0427d38..792f65897e65 100644 --- a/ksql-tools/pom.xml +++ b/ksql-tools/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-tools diff --git a/ksql-udf-quickstart/pom.xml b/ksql-udf-quickstart/pom.xml index 15b24d795cb9..1772da1e2eb1 100644 --- a/ksql-udf-quickstart/pom.xml +++ b/ksql-udf-quickstart/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-udf-quickstart diff --git a/ksql-udf/pom.xml b/ksql-udf/pom.xml index 03fb8b31c65b..2d2da1917bff 100644 --- a/ksql-udf/pom.xml +++ b/ksql-udf/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-udf diff --git a/ksql-version-metrics-client/pom.xml b/ksql-version-metrics-client/pom.xml index b2a37b04dc25..9aad7fd78cca 100644 --- a/ksql-version-metrics-client/pom.xml +++ b/ksql-version-metrics-client/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.3.2-SNAPSHOT + 5.3.3-SNAPSHOT ksql-version-metrics-client diff --git a/licenses/licenses.html b/licenses/licenses.html index 38d8722d80bf..77e596fbe8ee 100644 --- a/licenses/licenses.html +++ b/licenses/licenses.html @@ -67,15 +67,15 @@

License Report

 slice-0.29 | jar | 0.29
-common-config-5.3.2-SNAPSHOT | jar | 5.3.2-SNAPSHOT
+common-config-5.3.3-SNAPSHOT | jar | 5.3.3-SNAPSHOT
-common-utils-5.3.2-SNAPSHOT | jar | 5.3.2-SNAPSHOT
+common-utils-5.3.3-SNAPSHOT | jar | 5.3.3-SNAPSHOT
-kafka-avro-serializer-5.3.2-SNAPSHOT | jar | 5.3.2-SNAPSHOT
+kafka-avro-serializer-5.3.3-SNAPSHOT | jar | 5.3.3-SNAPSHOT
-kafka-connect-avro-converter-5.3.2-SNAPSHOT | jar | 5.3.2-SNAPSHOT
+kafka-connect-avro-converter-5.3.3-SNAPSHOT | jar | 5.3.3-SNAPSHOT
-kafka-schema-registry-client-5.3.2-SNAPSHOT | jar | 5.3.2-SNAPSHOT
+kafka-schema-registry-client-5.3.3-SNAPSHOT | jar | 5.3.3-SNAPSHOT
 ksql-engine-0.1-SNAPSHOT | jar | 0.1-SNAPSHOT
@@ -123,7 +123,7 @@

License Report

 kafka-streams-0.11.0.0-cp1 | jar | included file
-kafka_2.11-5.3.2-ccs-SNAPSHOT | jar | included file
+kafka_2.11-5.3.3-ccs-SNAPSHOT | jar | included file
 lz4-1.3.0 | jar | 1.3.0

diff --git a/pom.xml b/pom.xml
index 5e9a6dafd420..2e2152bffa4d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -22,14 +22,14 @@
 io.confluent
 rest-utils-parent
-5.3.2-SNAPSHOT
+5.3.3-SNAPSHOT
 io.confluent.ksql
 ksql-parent
 pom
 ksql-parent
-5.3.2-SNAPSHOT
+5.3.3-SNAPSHOT
 Confluent Community License

From acb656b6166d4263558d3f033af1db5166e2bc39 Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Sat, 21 Dec 2019 17:00:19 +0000
Subject: [PATCH 057/123] chore: pull query support for primitive keys (#4178)

---
 ...eries-against-materialized-aggregates.json |  52 ++++++++++++++-----
 .../server/execution/PullQueryExecutor.java   |  34 ++++++++----
 2 files changed, 63 insertions(+), 23 deletions(-)

diff --git a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/pull-queries-against-materialized-aggregates.json b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/pull-queries-against-materialized-aggregates.json
index d2041b5b3da4..af6e91052c37 100644
--- a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/pull-queries-against-materialized-aggregates.json
+++ b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/pull-queries-against-materialized-aggregates.json
@@ -4,7 +4,7 @@
   ],
   "tests": [
     {
-      "name": "non-windowed single key lookup",
+      "name": "non-windowed single key lookup - STRING",
       "statements": [
         "CREATE STREAM INPUT (IGNORED INT) WITH (kafka_topic='test_topic', value_format='JSON');",
         "CREATE TABLE AGGREGATE AS SELECT COUNT(1) AS COUNT FROM INPUT GROUP BY ROWKEY;",
@@ -27,6 +27,43 @@
       ]}
     ]
   },
+  {
+    "name": "non-windowed single key lookup - INT",
+    "statements": [
+      "CREATE STREAM INPUT (ROWKEY INT KEY, IGNORED INT) WITH (kafka_topic='test_topic', value_format='JSON');",
+      "CREATE TABLE AGGREGATE AS SELECT COUNT(1) AS COUNT FROM INPUT GROUP BY ROWKEY;",
+      "SELECT * FROM AGGREGATE WHERE ROWKEY=10;",
+      "SELECT * FROM AGGREGATE WHERE ROWKEY=123369;"
+    ],
+    "inputs": [
+      {"topic": "test_topic", "timestamp": 12345, "key": 11, "value": {}},
+      {"topic": "test_topic", "timestamp": 12365, "key": 10, "value": {}}
+    ],
+    "responses": [
+      {"admin": {"@type": "currentStatus"}},
+      {"admin": {"@type": "currentStatus"}},
+      {"query": [
+        {"header":{"schema":"`ROWKEY` INTEGER KEY, `ROWTIME` BIGINT, `COUNT` BIGINT"}},
+        {"row":{"columns":[10, 12365, 1]}}
+      ]},
+      {"query": [
+        {"header":{"schema":"`ROWKEY` INTEGER KEY, `ROWTIME` BIGINT, `COUNT` BIGINT"}}
+      ]}
+    ]
+  },
+  {
+    "name": "lookup on wrong key type",
+    "statements": [
+      "CREATE STREAM INPUT (ROWKEY INT KEY, IGNORED INT) WITH (kafka_topic='test_topic', value_format='JSON');",
+      "CREATE TABLE AGGREGATE AS SELECT COUNT(1) AS COUNT FROM INPUT GROUP BY ROWKEY;",
+      "SELECT * FROM AGGREGATE WHERE ROWKEY='10';"
+    ],
+    "expectedError": {
+      "type": "io.confluent.ksql.rest.entity.KsqlStatementErrorMessage",
+      "message": "ROWKEY not INTEGER",
+      "status": 400
+    }
+  },
   {
     "name": "tumbling windowed single key lookup with exact window start",
     "statements": [
@@ -719,19 +756,6 @@
       "status": 400
     }
   },
-  {
-    "name": "fail on unsupported query feature: where rowkey not string",
-    "statements": [
-      "CREATE STREAM INPUT (IGNORED INT) WITH (kafka_topic='test_topic', value_format='JSON');",
-      "CREATE TABLE AGGREGATE AS SELECT COUNT(1) AS COUNT FROM INPUT GROUP BY ROWKEY;",
-      "SELECT * FROM AGGREGATE WHERE ROWKEY = 100;"
- ], - "expectedError": { - "type": "io.confluent.ksql.rest.entity.KsqlStatementErrorMessage", - "message": "ROWKEY must be compared to STRING literal.", - "status": 400 - } - }, { "name": "fail on unsupported query feature: where not on rowkey", "statements": [ diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/PullQueryExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/PullQueryExecutor.java index 37c186dc9f59..615dfb2c9bf3 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/PullQueryExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/PullQueryExecutor.java @@ -68,6 +68,7 @@ import io.confluent.ksql.rest.entity.TableRowsEntity; import io.confluent.ksql.rest.entity.TableRowsEntityFactory; import io.confluent.ksql.rest.server.resources.KsqlRestException; +import io.confluent.ksql.schema.ksql.DefaultSqlValueCoercer; import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.LogicalSchema.Builder; @@ -283,7 +284,11 @@ private static WhereInfo extractWhereInfo( throw invalidWhereClauseException("WHERE clause missing ROWKEY", windowed); } - final Object rowKey = extractRowKeyWhereClause(rowKeyComparison, windowed); + final Object rowKey = extractRowKeyWhereClause( + rowKeyComparison, + windowed, + query.getLogicalSchema() + ); if (!windowed) { if (comparisons.size() > 1) { @@ -303,26 +308,37 @@ private static WhereInfo extractWhereInfo( private static Object extractRowKeyWhereClause( final List comparisons, - final boolean windowed + final boolean windowed, + final LogicalSchema schema ) { if (comparisons.size() != 1) { throw invalidWhereClauseException("Multiple bounds on ROWKEY", windowed); } final ComparisonExpression comparison = comparisons.get(0); + if (comparison.getType() != Type.EQUAL) { + throw invalidWhereClauseException("ROWKEY bound must currently be '='", windowed); + } final Expression other = getNonColumnRefSide(comparison); + final Object right = ((Literal) other).getValue(); - if (!(other instanceof StringLiteral)) { - throw invalidWhereClauseException("ROWKEY must be compared to STRING literal", false); - } + return coerceRowKey(schema, right, windowed); + } - if (comparison.getType() != Type.EQUAL) { - throw invalidWhereClauseException("ROWKEY bound must currently be '='", false); + private static Object coerceRowKey( + final LogicalSchema schema, + final Object right, + final boolean windowed + ) { + if (schema.key().size() != 1) { + throw invalidWhereClauseException("Only single KEY column supported", windowed); } - final Literal right = (Literal) other; - return right.getValue(); + final SqlType sqlType = schema.key().get(0).type(); + + return DefaultSqlValueCoercer.INSTANCE.coerce(right, sqlType) + .orElseThrow(() -> new KsqlException("ROWKEY not " + sqlType)); } private static Range extractWhereClauseWindowBounds( From 53cbf1034cfc1d616ee0a59a20ce1c94a13acf94 Mon Sep 17 00:00:00 2001 From: Rohan Date: Mon, 23 Dec 2019 10:12:13 -0800 Subject: [PATCH 058/123] test: qtt generate/validate execution plans (#4176) * feat: qtt generate/validate execution plans This patch extends our query validation tests to generate, execute, and validate execution plans. TestExecutor/TestExecutorUtil have been extended to support executing from a plan as an alternative to a statement, if included in the TestCase spec. 
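A condensed sketch of that dispatch, taken from the `TestExecutorUtil.planTestCase` implementation later in this patch (the generic type parameters are assumptions here, since the rendered diff elides them):

```java
// Sketch: plan-or-statement dispatch (see TestExecutorUtil.planTestCase in the diff below).
public static Iterable<ConfiguredKsqlPlan> planTestCase(
    final KsqlEngine engine,
    final TestCase testCase,
    final KsqlConfig ksqlConfig,
    final Optional<SchemaRegistryClient> srClient,
    final StubKafkaService stubKafkaService
) {
  initializeTopics(testCase, engine.getServiceContext(), stubKafkaService);
  if (testCase.getExpectedTopology().isPresent()) {
    // Historical test case: replay the plans persisted under historical_plans verbatim.
    return testCase.getExpectedTopology().get().getPlan()
        .stream()
        .map(p -> ConfiguredKsqlPlan.of(p, testCase.properties(), ksqlConfig))
        .collect(Collectors.toList());
  }
  // Otherwise plan each statement with the current engine, one at a time.
  return PlannedStatementIterator.of(engine, testCase, ksqlConfig, srClient, stubKafkaService);
}
```

Driving both paths through ConfiguredKsqlPlan is what lets the same executor verify both current and historical plans.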
In both cases, TestExecutorUtil first builds an Iterable that yields instances of
ConfiguredKsqlPlan. If the plans are included in the TestCase, the Iterable just
iterates over the supplied plans. If not, the Iterable builds each statement into
a plan.

Execution plans are derived from the qtt test cases and checked into the repository
under `ksql-functional-tests/src/test/resources/historical_plans`. The plans are
organized as follows:

`<test name>`: all the plans for a given test case
`<test name>/<version>_<timestamp>`: the plan for a given test case at a point in time
`<test name>/<version>_<timestamp>`/spec.json - a json containing the config, plan, and any generated schemas.
`<test name>/<version>_<timestamp>`/topology - the streams topology description.

A new test loader called PlannedTestLoader iterates over all the plans and builds
instances of TestCase out of them.

To ensure we include new query plans over time, this patch includes a test called
PlannedTestsUpToDateTest, which iterates over each qtt test case and ensures that we
have persisted the current plan that the engine builds.

Finally, this patch includes PlannedTestGenerator, which you can use to generate any
new plans that need to be generated.
---
 .../io/confluent/ksql/util/QuerySchemas.java  |  11 +
 .../io/confluent/ksql/engine/KsqlPlanV1.java  |  20 +
 .../io/confluent/ksql/engine/QueryPlan.java   |  21 +
 .../planner/PlanSourceExtractorVisitor.java   |   7 +
 .../ksql/planner/plan/PlanVisitor.java        |   3 +
 .../ksql/planner/plan/RepartitionNode.java    |   5 +
 .../ksql/util/PersistentQueryMetadata.java    |   4 +-
 .../ddl/commands/CreateSourceCommand.java     |  24 +
 ksql-functional-tests/README.md               |  55 +--
 .../ksql/test/model/KsqlVersion.java          |  19 +-
 .../confluent/ksql/test/tools/TestCase.java   |  11 +-
 .../ksql/test/tools/TestExecutor.java         |  17 +-
 .../ksql/test/tools/TestExecutorUtil.java     | 368 +++++++--------
 .../ksql/test/tools/TopologyAndConfigs.java   |  24 +-
 .../test/tools/conditions/PostConditions.java |   4 +-
 ...est.java => PlannedTestGeneratorTest.java} |  26 +-
 .../ksql/test/PlannedTestsUpToDateTest.java   | 120 +++++
 .../ksql/test/QueryTranslationTest.java       |   5 +-
 .../ksql/test/TopologyFileGenerator.java      | 265 -----------
 .../ksql/test/TopologyFileRewriter.java       | 424 ------------------
 .../loader/ExpectedTopologiesTestLoader.java  | 319 -------------
 .../ksql/test/model/KsqlVersionTest.java      |  31 ++
 .../test/planned/PlannedTestGenerator.java    |  94 ++++
 .../ksql/test/planned/PlannedTestLoader.java  |  85 ++++
 .../ksql/test/planned/PlannedTestUtils.java   |  98 ++++
 .../ksql/test/planned/TestCasePlan.java       |  70 +++
 .../ksql/test/planned/TestCasePlanLoader.java | 196 ++++++++
 .../ksql/test/planned/TestCasePlanNode.java   |  64 +++
 .../ksql/test/tools/TestExecutorTest.java     |  17 +-
 .../ksql/test/tools/TestExecutorUtilTest.java |  44 ++
 .../5.5.0_1576794350087/spec.json             | 204 +++++++++
 .../5.5.0_1576794350087/topology              |  25 ++
 32 files changed, 1396 insertions(+), 1284 deletions(-)
 rename ksql-functional-tests/src/test/java/io/confluent/ksql/test/{TopologyFileGeneratorTest.java => PlannedTestGeneratorTest.java} (51%)
 create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestsUpToDateTest.java
 delete mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGenerator.java
 delete mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java
 delete mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java
 create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java
 create mode
100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestUtils.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlan.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanNode.java create mode 100644 ksql-functional-tests/src/test/resources/historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/spec.json create mode 100644 ksql-functional-tests/src/test/resources/historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/topology diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/QuerySchemas.java b/ksql-common/src/main/java/io/confluent/ksql/util/QuerySchemas.java index e05b85840bc3..ea610452adc8 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/util/QuerySchemas.java +++ b/ksql-common/src/main/java/io/confluent/ksql/util/QuerySchemas.java @@ -19,6 +19,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableMap.Builder; import com.google.errorprone.annotations.Immutable; import io.confluent.ksql.schema.connect.SchemaFormatter; import io.confluent.ksql.schema.connect.SqlSchemaFormatter; @@ -27,6 +29,7 @@ import io.confluent.ksql.testing.EffectivelyImmutable; import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; @@ -94,6 +97,14 @@ public int hashCode() { return Objects.hash(schemas); } + public Map getSchemasDescription() { + final ImmutableMap.Builder builder = new Builder<>(); + for (final Entry e : schemas) { + builder.put(e.loggerNamePrefix, schemaFormatter.format(e.schema.serializedSchema())); + } + return builder.build(); + } + @Override public String toString() { return schemas.stream() diff --git a/ksql-engine/src/main/java/io/confluent/ksql/engine/KsqlPlanV1.java b/ksql-engine/src/main/java/io/confluent/ksql/engine/KsqlPlanV1.java index e889500ca7a8..2e3c51a0c093 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/engine/KsqlPlanV1.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/engine/KsqlPlanV1.java @@ -46,4 +46,24 @@ public Optional getQueryPlan() { public String getStatementText() { return statementText; } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final KsqlPlanV1 that = (KsqlPlanV1) o; + return Objects.equals(statementText, that.statementText) + && Objects.equals(ddlCommand, that.ddlCommand) + && Objects.equals(queryPlan, that.queryPlan); + } + + @Override + public int hashCode() { + + return Objects.hash(statementText, ddlCommand, queryPlan); + } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/engine/QueryPlan.java b/ksql-engine/src/main/java/io/confluent/ksql/engine/QueryPlan.java index 5e710676343a..bfeb5906e1eb 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/engine/QueryPlan.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/engine/QueryPlan.java @@ -55,4 +55,25 @@ public ExecutionStep getPhysicalPlan() { public QueryId getQueryId() { return queryId; } + + @Override + public boolean equals(final 
Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final QueryPlan queryPlan = (QueryPlan) o; + return Objects.equals(sources, queryPlan.sources) + && Objects.equals(sink, queryPlan.sink) + && Objects.equals(physicalPlan, queryPlan.physicalPlan) + && Objects.equals(queryId, queryPlan.queryId); + } + + @Override + public int hashCode() { + + return Objects.hash(sources, sink, physicalPlan, queryId); + } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/PlanSourceExtractorVisitor.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/PlanSourceExtractorVisitor.java index e8252d18b81e..b3e6315d9956 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/PlanSourceExtractorVisitor.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/PlanSourceExtractorVisitor.java @@ -25,6 +25,7 @@ import io.confluent.ksql.planner.plan.PlanNode; import io.confluent.ksql.planner.plan.PlanVisitor; import io.confluent.ksql.planner.plan.ProjectNode; +import io.confluent.ksql.planner.plan.RepartitionNode; import java.util.HashSet; import java.util.Set; @@ -80,6 +81,12 @@ protected R visitFlatMap(final FlatMapNode node, final C context) { return null; } + @Override + protected R visitRepartition(final RepartitionNode node, final C context) { + process(node.getSources().get(0), context); + return null; + } + public Set getSourceNames() { return sourceNames; } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/PlanVisitor.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/PlanVisitor.java index 97365e776469..6ba716a9929d 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/PlanVisitor.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/PlanVisitor.java @@ -49,4 +49,7 @@ protected R visitFlatMap(final FlatMapNode node, final C context) { return visitPlan(node, context); } + protected R visitRepartition(final RepartitionNode node, final C context) { + return visitPlan(node, context); + } } \ No newline at end of file diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java index b29b32f10636..60689e5b7b47 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java @@ -86,4 +86,9 @@ public SchemaKStream buildStream(KsqlQueryBuilder builder) { public Expression getPartitionBy() { return partitionBy; } + + @Override + public R accept(final PlanVisitor visitor, final C context) { + return visitor.visitRepartition(this, context); + } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java index b44dc9c0dde0..fb38d2a3a3f0 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java @@ -123,8 +123,8 @@ public SourceName getSinkName() { return sinkName; } - public String getSchemasDescription() { - return schemas.toString(); + public Map getSchemasDescription() { + return schemas.getSchemasDescription(); } public PhysicalSchema getPhysicalSchema() { diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommand.java 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommand.java index 32a80dcaebbf..95f93f8115e3 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommand.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateSourceCommand.java @@ -124,4 +124,28 @@ private static void validate(final LogicalSchema schema, final Optional/_`. +`QueryTranslationTest` runs by iterating over the saved plans, building them, and verifying that +queries execute correctly. + +### Generating new topology files + +Plans evolve over time, and we need to make sure that KSQL tests the latest way of executing +queries. To ensure this, we run a test (called `PlannedTestGeneratorTest`) that ensures that the +latest plan for each test case is saved to the local tree. If it's not, you will need to run the +generator to do so. + +To generate new plans, just run `PlannedTestGeneratorTest.manuallyGeneratePlans` + ## Topology comparision These tests also validate the generated topology matches the expected topology, i.e. a test will fail if the topology has changed from previous runs. This is needed to detect potentially non-backwards compatible changes to the generated topology. -The expected topology files, and the configuration used to generated them are found in -`src/test/resources/expected_topology/` - -By default, the test will check topology compatibility against all previously released versions -of KSQL (for which expected topology files exist). +The expected topologies are stored alongside the query plans described above. ### Running a subset of tests: @@ -33,33 +51,6 @@ mvn test -pl ksql-functional-tests -Dtest=QueryTranslationTest -Dksql.test.files The above commands can execute only a single test (sum.json) or multiple tests (sum.json and substring.json). -### Running against different previous versions: - -To run this test against specific previously released versions, set the system property -"topology.versions" to the desired version(s). The property value should be a comma-delimited list of -version number(s) found under the `src/test/resources/expected_topology` directory, -for example, `"5_0,5_3_0"`, or `latest-only` if only the current version is required. - -The are two places system properties may be set: - * Within Intellij - 1. Click Run/Edit configurations - 1. Select the QueryTranslationTest - 1. Enter `-Dtopology.versions=X` in the "VM options:" form entry - where X is a comma-delimited list of the desired previously released version number(s), - or `latest-only` if only the current version is required. - * From the command line - 1. run `mvn clean package -DskipTests=true` from the base of the KSQL project - 1. Then run `mvn test -Dtopology.versions=X -Dtest=QueryTranslationTest -pl ksql-functional-tests`. - Again X is a list of the versions you want to run the tests against, - or `latest-only` if only the current version is required. - - Note that for both options above the version(s) must exist - under the `src/test/resources/expected_topology` directory. 
- -### Generating new topology files - -For instructions on how to generate new topologies, see `TopologyFileGenerator.java` - ## Adding new tests The following is a template test file: diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/KsqlVersion.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/KsqlVersion.java index ef0ac0e04e0a..1f4e2ecbe266 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/KsqlVersion.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/KsqlVersion.java @@ -32,17 +32,19 @@ public final class KsqlVersion implements Comparable { @EffectivelyImmutable private static final Comparator COMPARATOR = - Comparator.comparing(KsqlVersion::getVersion); + Comparator.comparing(KsqlVersion::getVersion) + .thenComparingLong(KsqlVersion::getTimestamp); private final transient String name; private final SemanticVersion version; + private final long timestamp; public static KsqlVersion current() { return parse(Version.getVersion()); } public static KsqlVersion of(final String name, final SemanticVersion version) { - return new KsqlVersion(name, version); + return new KsqlVersion(name, version, Long.MAX_VALUE); } public static KsqlVersion parse(final String version) { @@ -61,7 +63,11 @@ public static KsqlVersion parse(final String version) { : Integer.parseInt(matcher.group(3).substring(1)); final SemanticVersion v = SemanticVersion.of(major, minor, patch); - return KsqlVersion.of(version, v); + return new KsqlVersion(version, v, Long.MAX_VALUE); + } + + public KsqlVersion withTimestamp(final long timestamp) { + return new KsqlVersion(name, version, timestamp); } public String getName() { @@ -72,6 +78,10 @@ public SemanticVersion getVersion() { return version; } + public long getTimestamp() { + return timestamp; + } + @Override public int compareTo(final KsqlVersion other) { return COMPARATOR.compare(this, other); @@ -99,8 +109,9 @@ public String toString() { return name + " (" + version + ")"; } - private KsqlVersion(final String name, final SemanticVersion version) { + private KsqlVersion(final String name, final SemanticVersion version, final long timestamp) { this.name = Objects.requireNonNull(name, "name"); this.version = Objects.requireNonNull(version, "version"); + this.timestamp = timestamp; } } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCase.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCase.java index 49b68c21b118..e47a8855c4b3 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCase.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCase.java @@ -42,7 +42,7 @@ public class TestCase implements VersionedTest { private final List statements; private final Optional> expectedException; private List generatedTopologies; - private List generatedSchemas; + private Map generatedSchemas; private final Optional expectedTopology; private final PostConditions postConditions; @@ -160,17 +160,18 @@ public Optional getExpectedTopology() { return expectedTopology; } - public void setGeneratedSchemas(final List generatedSchemas) { - this.generatedSchemas = Objects.requireNonNull(generatedSchemas, "generatedSchemas"); + public void setGeneratedSchemas(final Map generatedSchemas) { + this.generatedSchemas = ImmutableMap.copyOf( + Objects.requireNonNull(generatedSchemas, "generatedSchemas")); } - public List getGeneratedSchemas() { + public Map getGeneratedSchemas() { 
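+    // safe to expose directly: setGeneratedSchemas stores an ImmutableMap copy of the supplied map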
return generatedSchemas; } public Map persistedProperties() { return expectedTopology - .flatMap(TopologyAndConfigs::getConfigs) + .map(TopologyAndConfigs::getConfigs) .orElseGet(HashMap::new); } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java index fdc02b704295..d4bcda5c6e27 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java @@ -16,7 +16,7 @@ package io.confluent.ksql.test.tools; import static java.util.Objects.requireNonNull; -import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import static org.junit.matchers.JUnitMatchers.isThrowable; @@ -63,7 +63,6 @@ import io.confluent.ksql.util.KsqlServerException; import io.confluent.ksql.util.PersistentQueryMetadata; import java.io.Closeable; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -151,9 +150,7 @@ private TestExecutor( public void buildAndExecuteQuery(final TestCase testCase) { final KsqlConfig currentConfigs = new KsqlConfig(config); - final Map persistedConfigs = testCase.persistedProperties(); - final KsqlConfig ksqlConfig = persistedConfigs.isEmpty() ? currentConfigs : currentConfigs.overrideBreakingConfigsWithOriginalValues(persistedConfigs); @@ -446,15 +443,15 @@ private static void verifyTopology(final TestCase testCase) { + "THIS IS BAD!", actualTopology, is(expectedTopology)); - expected.getSchemas().ifPresent(schemas -> { - final List generated = Arrays.asList( - testCase.getGeneratedSchemas().get(0).split(System.lineSeparator())); + final Map generated = testCase.getGeneratedSchemas(); + for (final Map.Entry e : expected.getSchemas().entrySet()) { assertThat("Schemas used by topology differ " + "from those used by previous versions" - + " of KSQL - this is likely to mean there is a non-backwards compatible change.\n" + + " of KSQL - this is likely to mean there is a non-backwards compatible change." 
+ + "\n" + "THIS IS BAD!", - generated, hasItems(schemas.split(System.lineSeparator()))); - }); + generated, hasEntry(e.getKey(), e.getValue())); + } }); } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java index e94b4edbf5fa..c60e89eeb4fc 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java @@ -22,12 +22,14 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; import io.confluent.kafka.schemaregistry.client.SchemaMetadata; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.KsqlExecutionContext.ExecuteResult; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.engine.KsqlPlan; +import io.confluent.ksql.engine.QueryPlan; import io.confluent.ksql.engine.SqlFormatInjector; import io.confluent.ksql.engine.StubInsertValuesExecutor; import io.confluent.ksql.execution.json.PlanJsonMapper; @@ -36,14 +38,7 @@ import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.KsqlParser.ParsedStatement; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.tree.AliasedRelation; -import io.confluent.ksql.parser.tree.CreateAsSelect; -import io.confluent.ksql.parser.tree.InsertInto; import io.confluent.ksql.parser.tree.InsertValues; -import io.confluent.ksql.parser.tree.Join; -import io.confluent.ksql.parser.tree.Query; -import io.confluent.ksql.parser.tree.Relation; -import io.confluent.ksql.parser.tree.Table; import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan; import io.confluent.ksql.schema.ksql.inference.DefaultSchemaInjector; import io.confluent.ksql.schema.ksql.inference.SchemaRegistryTopicSchemaSupplier; @@ -51,7 +46,6 @@ import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; -import io.confluent.ksql.test.TestFrameworkException; import io.confluent.ksql.test.serde.SerdeSupplier; import io.confluent.ksql.test.tools.stubs.StubKafkaService; import io.confluent.ksql.test.utils.SerdeUtil; @@ -61,10 +55,13 @@ import io.confluent.ksql.util.KsqlStatementException; import io.confluent.ksql.util.PersistentQueryMetadata; import java.io.IOException; -import java.time.Duration; import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Objects; import java.util.Optional; import java.util.Properties; import java.util.stream.Collectors; @@ -82,19 +79,6 @@ public final class TestExecutorUtil { private TestExecutorUtil() { } - public static List buildQueries( - final TestCase testCase, - final ServiceContext serviceContext, - final KsqlEngine ksqlEngine, - final KsqlConfig ksqlConfig, - final StubKafkaService stubKafkaService - ) { - return doBuildQueries(testCase, serviceContext, ksqlEngine, ksqlConfig, stubKafkaService) - .stream() - .map(q -> q.persistentQueryMetadata) - .collect(Collectors.toList()); - } - static List buildStreamsTopologyTestDrivers( final TestCase testCase, final ServiceContext serviceContext, @@ -105,16 +89,15 @@ static 
List buildStreamsTopologyTestDrivers( final KsqlConfig maybeUpdatedConfigs = persistedConfigs.isEmpty() ? ksqlConfig : ksqlConfig.overrideBreakingConfigsWithOriginalValues(persistedConfigs); - final List queryMetadataList = doBuildQueries( + final List queryMetadataList = doBuildQueries( testCase, serviceContext, ksqlEngine, maybeUpdatedConfigs, stubKafkaService); final List topologyTestDrivers = new ArrayList<>(); - for (final PersistentQueryAndSortedSources persistentQueryAndSortedSources: - queryMetadataList) { - final PersistentQueryMetadata persistentQueryMetadata = persistentQueryAndSortedSources + for (final PersistentQueryAndSources persistentQueryAndSources: queryMetadataList) { + final PersistentQueryMetadata persistentQueryMetadata = persistentQueryAndSources .getPersistentQueryMetadata(); final Properties streamsProperties = new Properties(); streamsProperties.putAll(persistentQueryMetadata.getStreamsProperties()); @@ -123,7 +106,7 @@ static List buildStreamsTopologyTestDrivers( topology, streamsProperties, 0); - final List sourceTopics = persistentQueryAndSortedSources.getSources() + final List sourceTopics = persistentQueryAndSources.getSources() .stream() .map(dataSource -> { stubKafkaService.requireTopicExists(dataSource.getKafkaTopicName()); @@ -137,8 +120,7 @@ static List buildStreamsTopologyTestDrivers( serviceContext.getSchemaRegistryClient()); testCase.setGeneratedTopologies( ImmutableList.of(persistentQueryMetadata.getTopologyDescription())); - testCase.setGeneratedSchemas( - ImmutableList.of(persistentQueryMetadata.getSchemasDescription())); + testCase.setGeneratedSchemas(persistentQueryMetadata.getSchemasDescription()); topologyTestDrivers.add(TopologyTestDriverContainer.of( topologyTestDriver, sourceTopics, @@ -148,6 +130,23 @@ static List buildStreamsTopologyTestDrivers( return topologyTestDrivers; } + public static Iterable planTestCase( + final KsqlEngine engine, + final TestCase testCase, + final KsqlConfig ksqlConfig, + final Optional srClient, + final StubKafkaService stubKafkaService + ) { + initializeTopics(testCase, engine.getServiceContext(), stubKafkaService); + if (testCase.getExpectedTopology().isPresent()) { + return testCase.getExpectedTopology().get().getPlan() + .stream() + .map(p -> ConfiguredKsqlPlan.of(p, testCase.properties(), ksqlConfig)) + .collect(Collectors.toList()); + } + return PlannedStatementIterator.of(engine, testCase, ksqlConfig, srClient, stubKafkaService); + } + private static Topic buildSinkTopic( final DataSource sinkDataSource, final StubKafkaService stubKafkaService, @@ -200,23 +199,17 @@ private static Optional getAvroSchema( return Optional.empty(); } - private static List doBuildQueries( + private static List doBuildQueries( final TestCase testCase, final ServiceContext serviceContext, final KsqlEngine ksqlEngine, final KsqlConfig ksqlConfig, final StubKafkaService stubKafkaService ) { - initializeTopics(testCase, serviceContext, stubKafkaService); - - final String sql = testCase.statements().stream() - .collect(Collectors.joining(System.lineSeparator())); - - final List queries = execute( + final List queries = execute( ksqlEngine, - sql, + testCase, ksqlConfig, - testCase.properties(), Optional.of(serviceContext.getSchemaRegistryClient()), stubKafkaService ); @@ -254,174 +247,192 @@ private static void initializeTopics( /** * @param srClient if supplied, then schemas can be inferred from the schema registry. 
*/ - private static List execute( + private static List execute( final KsqlEngine engine, - final String sql, + final TestCase testCase, final KsqlConfig ksqlConfig, - final Map overriddenProperties, final Optional srClient, final StubKafkaService stubKafkaService ) { - final List statements = engine.parse(sql); - - final Optional schemaInjector = srClient - .map(SchemaRegistryTopicSchemaSupplier::new) - .map(DefaultSchemaInjector::new); - - return statements.stream() - .map(stmt -> execute( - engine, stmt, ksqlConfig, overriddenProperties, schemaInjector, stubKafkaService)) - .filter(executeResultAndSortedSources -> - executeResultAndSortedSources.getSources() != null) - .map( - executeResultAndSortedSources -> new PersistentQueryAndSortedSources( - (PersistentQueryMetadata) executeResultAndSortedSources - .getExecuteResult().getQuery().get(), - executeResultAndSortedSources.getSources(), - executeResultAndSortedSources.getWindowSize() - )) - .collect(Collectors.toList()); + final ImmutableList.Builder queriesBuilder = new Builder<>(); + for (final ConfiguredKsqlPlan plan + : planTestCase(engine, testCase, ksqlConfig, srClient, stubKafkaService)) { + final ExecuteResultAndSources result = executePlan(engine, plan); + if (result.getSources() == null) { + continue; + } + queriesBuilder.add(new PersistentQueryAndSources( + (PersistentQueryMetadata) result.getExecuteResult().getQuery().get(), + result.getSources() + )); + } + return queriesBuilder.build(); } @SuppressWarnings({"rawtypes", "unchecked"}) - private static ExecuteResultAndSortedSources execute( + private static ExecuteResultAndSources executePlan( final KsqlExecutionContext executionContext, - final ParsedStatement stmt, - final KsqlConfig ksqlConfig, - final Map overriddenProperties, - final Optional schemaInjector, - final StubKafkaService stubKafkaService + final ConfiguredKsqlPlan plan ) { - final PreparedStatement prepared = executionContext.prepare(stmt); - final ConfiguredStatement configured = ConfiguredStatement.of( - prepared, overriddenProperties, ksqlConfig); - - if (prepared.getStatement() instanceof InsertValues) { - StubInsertValuesExecutor.of(stubKafkaService).execute( - (ConfiguredStatement) configured, - overriddenProperties, - executionContext, - executionContext.getServiceContext() - ); - return new ExecuteResultAndSortedSources(null, null, null); - } - - final ConfiguredStatement withSchema = - schemaInjector - .map(injector -> injector.inject(configured)) - .orElse((ConfiguredStatement) configured); - final ConfiguredStatement reformatted = - new SqlFormatInjector(executionContext).inject(withSchema); - - final ExecuteResult executeResult; - try { - executeResult = executeConfiguredStatement(executionContext, reformatted); - } catch (final KsqlStatementException statementException) { - // use the original statement text in the exception so that tests - // can easily check that the failed statement is the input statement - throw new KsqlStatementException( - statementException.getMessage(), - withSchema.getStatementText(), - statementException.getCause()); - } - if (prepared.getStatement() instanceof CreateAsSelect) { - return new ExecuteResultAndSortedSources( - executeResult, - getSortedSources( - ((CreateAsSelect) prepared.getStatement()).getQuery(), - executionContext.getMetaStore()), - getWindowSize(((CreateAsSelect) prepared.getStatement()).getQuery())); - } - if (prepared.getStatement() instanceof InsertInto) { - return new ExecuteResultAndSortedSources( + final ExecuteResult executeResult = 
executionContext.execute( + executionContext.getServiceContext(), + plan + ); + final Optional maybeQueryPlan = plan.getPlan().getQueryPlan(); + if (maybeQueryPlan.isPresent()) { + return new ExecuteResultAndSources( executeResult, - getSortedSources(((InsertInto) prepared.getStatement()).getQuery(), - executionContext.getMetaStore()), - getWindowSize(((InsertInto) prepared.getStatement()).getQuery()) + getSources(maybeQueryPlan.get().getSources(), executionContext.getMetaStore()) ); } - return new ExecuteResultAndSortedSources( + return new ExecuteResultAndSources( executeResult, - null, - Optional.empty()); + null + ); } - @SuppressWarnings("unchecked") - private static ExecuteResult executeConfiguredStatement( - final KsqlExecutionContext executionContext, - final ConfiguredStatement stmt) { - final ConfiguredKsqlPlan configuredPlan; - try { - configuredPlan = buildConfiguredPlan(executionContext, stmt); - } catch (final IOException e) { - throw new TestFrameworkException("Error (de)serializing plan: " + e.getMessage(), e); + private static List> getSources( + final Collection sources, + final MetaStore metaStore) { + final ImmutableList.Builder> sourceBuilder = new Builder<>(); + for (final SourceName name : sources) { + if (metaStore.getSource(name) == null) { + throw new KsqlException("Source does not exist: " + name.toString()); + } + sourceBuilder.add(metaStore.getSource(name)); } - return executionContext.execute(executionContext.getServiceContext(), configuredPlan); + return sourceBuilder.build(); } - private static ConfiguredKsqlPlan buildConfiguredPlan( - final KsqlExecutionContext executionContext, - final ConfiguredStatement stmt - ) throws IOException { - final KsqlPlan plan = executionContext.plan(executionContext.getServiceContext(), stmt); - final String serialized = PLAN_MAPPER.writeValueAsString(plan); - return ConfiguredKsqlPlan.of( - PLAN_MAPPER.readValue(serialized, KsqlPlan.class), - stmt.getOverrides(), - stmt.getConfig()); - } + private static final class PlannedStatementIterator implements + Iterable, Iterator { + private final Iterator statements; + private final KsqlExecutionContext executionContext; + private final Map overrides; + private final KsqlConfig ksqlConfig; + private final StubKafkaService stubKafkaService; + private final Optional schemaInjector; + private Optional next = Optional.empty(); + + private PlannedStatementIterator( + final Iterator statements, + final KsqlExecutionContext executionContext, + final Map overrides, + final KsqlConfig ksqlConfig, + final StubKafkaService stubKafkaService, + final Optional schemaInjector + ) { + this.statements = Objects.requireNonNull(statements, "statements"); + this.executionContext = Objects.requireNonNull(executionContext, "executionContext"); + this.overrides = Objects.requireNonNull(overrides, "overrides"); + this.ksqlConfig = Objects.requireNonNull(ksqlConfig, "ksqlConfig"); + this.stubKafkaService = Objects.requireNonNull(stubKafkaService, "stubKafkaService"); + this.schemaInjector = Objects.requireNonNull(schemaInjector, "schemaInjector"); + } - private static Optional getWindowSize(final Query query) { - return query.getWindow().flatMap(window -> window - .getKsqlWindowExpression() - .getWindowInfo() - .getSize() - .map(Duration::toMillis)); - } + public static PlannedStatementIterator of( + final KsqlExecutionContext executionContext, + final TestCase testCase, + final KsqlConfig ksqlConfig, + final Optional srClient, + final StubKafkaService stubKafkaService + ) { + final Optional schemaInjector 
= srClient + .map(SchemaRegistryTopicSchemaSupplier::new) + .map(DefaultSchemaInjector::new); + final String sql = testCase.statements().stream() + .collect(Collectors.joining(System.lineSeparator())); + final Iterator statements = executionContext.parse(sql).iterator(); + return new PlannedStatementIterator( + statements, + executionContext, + testCase.properties(), + ksqlConfig, + stubKafkaService, + schemaInjector + ); + } - private static List> getSortedSources( - final Query query, - final MetaStore metaStore) { - final Relation from = query.getFrom(); - if (from instanceof Join) { - final Join join = (Join) from; - final AliasedRelation left = (AliasedRelation) join.getLeft(); - final AliasedRelation right = (AliasedRelation) join.getRight(); + @Override + public boolean hasNext() { + while (!next.isPresent() && statements.hasNext()) { + next = planStatement(statements.next()); + } + return next.isPresent(); + } + + @Override + public ConfiguredKsqlPlan next() { + hasNext(); + final ConfiguredKsqlPlan current = next.orElseThrow(NoSuchElementException::new); + next = Optional.empty(); + return current; + } - final SourceName leftName = ((Table) left.getRelation()).getName(); - final SourceName rightName = ((Table) right.getRelation()).getName(); + @Override + public Iterator iterator() { + return this; + } - if (metaStore.getSource(leftName) == null) { - throw new KsqlException("Source does not exist: " + left.getRelation().toString()); + @SuppressWarnings("unchecked") + private Optional planStatement(final ParsedStatement stmt) { + final PreparedStatement prepared = executionContext.prepare(stmt); + final ConfiguredStatement configured = ConfiguredStatement.of( + prepared, overrides, ksqlConfig); + + if (prepared.getStatement() instanceof InsertValues) { + StubInsertValuesExecutor.of(stubKafkaService).execute( + (ConfiguredStatement) configured, + overrides, + executionContext, + executionContext.getServiceContext() + ); + return Optional.empty(); } - if (metaStore.getSource(rightName) == null) { - throw new KsqlException("Source does not exist: " + right.getRelation().toString()); + + final ConfiguredStatement withSchema = + schemaInjector + .map(injector -> injector.inject(configured)) + .orElse((ConfiguredStatement) configured); + final ConfiguredStatement reformatted = + new SqlFormatInjector(executionContext).inject(withSchema); + + try { + final KsqlPlan plan = executionContext + .plan(executionContext.getServiceContext(), reformatted); + return Optional.of( + ConfiguredKsqlPlan.of( + rewritePlan(plan), + reformatted.getOverrides(), + reformatted.getConfig() + ) + ); + } catch (final KsqlStatementException e) { + throw new KsqlStatementException( + e.getMessage(), withSchema.getStatementText(), e.getCause()); } - return ImmutableList.of( - metaStore.getSource(leftName), - metaStore.getSource(rightName)); - } else { - final SourceName fromName = ((Table) ((AliasedRelation) from).getRelation()).getName(); - if (metaStore.getSource(fromName) == null) { - throw new KsqlException("Source does not exist: " + fromName); + } + + private KsqlPlan rewritePlan(final KsqlPlan plan) { + try { + final String serialized = PLAN_MAPPER.writeValueAsString(plan); + return PLAN_MAPPER.readValue(serialized, KsqlPlan.class); + } catch (final IOException e) { + throw new RuntimeException(e); } - return ImmutableList.of(metaStore.getSource(fromName)); } } - private static final class ExecuteResultAndSortedSources { + private static final class ExecuteResultAndSources { private final ExecuteResult 
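/*
 * (Sketch only; the two lines are lifted from rewritePlan() above.) Every
 * KsqlPlan is deliberately forced through a JSON serialize/deserialize round
 * trip before execution, so the test runs the plan exactly as it would look
 * after being persisted and re-read:
 *
 *   final String serialized = PLAN_MAPPER.writeValueAsString(plan);
 *   final KsqlPlan rewritten = PLAN_MAPPER.readValue(serialized, KsqlPlan.class);
 *
 * Any plan field that does not survive the round trip shows up as a test
 * failure instead of silently passing unserialized.
 */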
executeResult; private final List> sources; - private final Optional windowSize; - ExecuteResultAndSortedSources( + ExecuteResultAndSources( final ExecuteResult executeResult, - final List> sources, - final Optional windowSize) { + final List> sources) { this.executeResult = executeResult; this.sources = sources; - this.windowSize = windowSize; } ExecuteResult getExecuteResult() { @@ -431,26 +442,19 @@ ExecuteResult getExecuteResult() { List> getSources() { return sources; } - - public Optional getWindowSize() { - return windowSize; - } } - private static final class PersistentQueryAndSortedSources { + private static final class PersistentQueryAndSources { private final PersistentQueryMetadata persistentQueryMetadata; private final List> sources; - private final Optional windowSize; - PersistentQueryAndSortedSources( + PersistentQueryAndSources( final PersistentQueryMetadata persistentQueryMetadata, - final List> sources, - final Optional windowSize + final List> sources ) { this.persistentQueryMetadata = persistentQueryMetadata; this.sources = sources; - this.windowSize = windowSize; } PersistentQueryMetadata getPersistentQueryMetadata() { @@ -460,9 +464,5 @@ PersistentQueryMetadata getPersistentQueryMetadata() { List> getSources() { return sources; } - - public Optional getWindowSize() { - return windowSize; - } } } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopologyAndConfigs.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopologyAndConfigs.java index 2ea437bef7c1..08cd2c4b7723 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopologyAndConfigs.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopologyAndConfigs.java @@ -15,21 +15,25 @@ package io.confluent.ksql.test.tools; +import io.confluent.ksql.engine.KsqlPlan; +import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; public class TopologyAndConfigs { + private final List plan; private final String topology; - private final Optional schemas; - private Optional> configs; + private final Map schemas; + private final Map configs; public TopologyAndConfigs( + final List plan, final String topology, - final Optional schemas, - final Optional> configs + final Map schemas, + final Map configs ) { + this.plan = Objects.requireNonNull(plan, "plan"); this.topology = Objects.requireNonNull(topology, "topology"); this.schemas = Objects.requireNonNull(schemas, "schemas"); this.configs = Objects.requireNonNull(configs, "configs"); @@ -39,11 +43,15 @@ public String getTopology() { return topology; } - public Optional getSchemas() { + public Map getSchemas() { return schemas; } - public Optional> getConfigs() { + public Map getConfigs() { return configs; } -} \ No newline at end of file + + public List getPlan() { + return plan; + } +} diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/conditions/PostConditions.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/conditions/PostConditions.java index 15da5adb69ce..c83a559b1585 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/conditions/PostConditions.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/conditions/PostConditions.java @@ -56,7 +56,7 @@ public void verify( final Collection topicNames ) { verifyMetaStore(metaStore); - veriftyTopics(topicNames); + verifyTopics(topicNames); } private void verifyMetaStore(final MetaStore metaStore) { @@ 
-74,7 +74,7 @@ private void verifyMetaStore(final MetaStore metaStore) { + System.lineSeparator() + text, values, sourcesMatcher); } - private void veriftyTopics(final Collection topicNames) { + private void verifyTopics(final Collection topicNames) { final Set blackListed = topicNames.stream() .filter(topicBlackList.asPredicate()) .collect(Collectors.toSet()); diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGeneratorTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestGeneratorTest.java similarity index 51% rename from ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGeneratorTest.java rename to ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestGeneratorTest.java index 059323ad74c2..58089a2ceabd 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGeneratorTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestGeneratorTest.java @@ -15,24 +15,14 @@ package io.confluent.ksql.test; -import org.apache.kafka.test.IntegrationTest; -import org.junit.ClassRule; +import io.confluent.ksql.test.planned.PlannedTestGenerator; +import org.junit.Ignore; import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TemporaryFolder; -/** - * Do not combine this with `TopologyFileGenerator` as mvn will ignore the tests as the class - * does not end in `Test`. - */ -@Category(IntegrationTest.class) -public final class TopologyFileGeneratorTest { - - @ClassRule - public static final TemporaryFolder TMP = new TemporaryFolder(); - - @Test - public void shouldGenerateTopologies() throws Exception { - TopologyFileGenerator.generateTopologies(TMP.newFolder().toPath()); - } +public class PlannedTestGeneratorTest { + @Test + @Ignore + public void manuallyGeneratePlans() { + PlannedTestGenerator.generatePlans(QueryTranslationTest.findTestCases()); + } } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestsUpToDateTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestsUpToDateTest.java new file mode 100644 index 000000000000..6d571de9f0d9 --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestsUpToDateTest.java @@ -0,0 +1,120 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.test; + +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.confluent.ksql.execution.json.PlanJsonMapper; +import io.confluent.ksql.test.planned.TestCasePlan; +import io.confluent.ksql.test.planned.TestCasePlanLoader; +import io.confluent.ksql.test.planned.PlannedTestLoader; +import io.confluent.ksql.test.planned.PlannedTestUtils; +import io.confluent.ksql.test.tools.TestCase; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collection; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; +import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Test that ensures that each QTT test case that should be tested from a physical + * plan has the latest physical plan written to the local filesystem. + */ +@RunWith(Parameterized.class) +public class PlannedTestsUpToDateTest { + private static final ObjectMapper MAPPER = PlanJsonMapper.create(); + + private final TestCase testCase; + + @Parameterized.Parameters(name = "{0}") + public static Collection data() { + return QueryTranslationTest.findTestCases() + .filter(PlannedTestUtils::isPlannedTestCase) + .map(testCase -> new Object[]{testCase.getName(), testCase}) + .collect(Collectors.toList()); + } + + /** + * Test to check for qtt cases that require a new plan to be generated/persisted + * + * @param name unused - included just so the test has a name + * @param testCase test case to check for requiring plan generation + */ + public PlannedTestsUpToDateTest(final String name, final TestCase testCase) { + this.testCase = Objects.requireNonNull(testCase); + } + + @Test + public void shouldHaveLatestPlans() { + final Path testCaseDir = Paths.get( + PlannedTestLoader.PLANS_DIR, + PlannedTestUtils.formatName(testCase.getName()) + ); + + assertThat( + String.format( + "Missing test plan directory for: %s. Please re-generate QTT plans." + + " See `ksql-functional-tests/README.md` for more info.", + testCase.getName() + ), + Files.isDirectory(PlannedTestUtils.findBaseDir().resolve(testCaseDir)), is(true) + ); + + final Optional latest = TestCasePlanLoader.fromLatest(testCaseDir); + final TestCasePlan current = TestCasePlanLoader.fromTestCase(testCase); + assertThat( + String.format( + "Current query plan differs from latest for: %s. Please re-generate QTT plans." 
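/*
 * (Illustrative.) The assertion above reduces to comparing the plan built from
 * the current code against the most recently persisted plan for the case:
 *
 *   final Optional<TestCasePlan> latest = TestCasePlanLoader.fromLatest(testCaseDir);
 *   final TestCasePlan current = TestCasePlanLoader.fromTestCase(testCase);
 *   assertThat(PlannedTestUtils.isSamePlan(latest, current), is(true));
 *
 * Note that isSamePlan() compares only the KsqlPlan lists, so a topology or
 * config difference alone does not require regenerating the saved plan.
 */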
+ + " See `ksql-functional-tests/README.md` for more info.", + testCase.getName() + ), + current, isLatestPlan(latest) + ); + } + + private static TypeSafeMatcher isLatestPlan(final Optional latest) { + return new TypeSafeMatcher() { + @Override + protected boolean matchesSafely(final TestCasePlan current) { + return PlannedTestUtils.isSamePlan(latest, current); + } + + @Override + public void describeTo(final Description description) { + description.appendText( + latest.map(PlannedTestsUpToDateTest::planText).orElse("no saved plan")); + } + }; + } + + private static String planText(final TestCasePlan plan) { + try { + return MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(plan.getPlan()); + } catch (final IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/QueryTranslationTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/QueryTranslationTest.java index 0f1615f729f5..b7c75706a061 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/QueryTranslationTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/QueryTranslationTest.java @@ -20,10 +20,10 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableList; -import io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader; import io.confluent.ksql.test.loader.JsonTestLoader; import io.confluent.ksql.test.loader.TestFile; import io.confluent.ksql.test.model.TestCaseNode; +import io.confluent.ksql.test.planned.PlannedTestLoader; import io.confluent.ksql.test.tools.TestCase; import io.confluent.ksql.test.tools.TestCaseBuilder; import java.nio.file.Path; @@ -47,11 +47,10 @@ public class QueryTranslationTest { private static final Path QUERY_VALIDATION_TEST_DIR = Paths.get("query-validation-tests"); - private static final String TOPOLOGY_CHECKS_DIR = "expected_topology/"; @Parameterized.Parameters(name = "{0}") public static Collection data() { - return ExpectedTopologiesTestLoader.of(testFileLoader(), TOPOLOGY_CHECKS_DIR) + return PlannedTestLoader.of(testFileLoader()) .load() .map(testCase -> new Object[]{testCase.getName(), testCase}) .collect(Collectors.toCollection(ArrayList::new)); diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGenerator.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGenerator.java deleted file mode 100644 index b746f8e57660..000000000000 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGenerator.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright 2018 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.test; - -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; - -import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient; -import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; -import io.confluent.ksql.engine.KsqlEngine; -import io.confluent.ksql.engine.KsqlEngineTestUtil; -import io.confluent.ksql.function.TestFunctionRegistry; -import io.confluent.ksql.metastore.MetaStore; -import io.confluent.ksql.metastore.MetaStoreImpl; -import io.confluent.ksql.metastore.MutableMetaStore; -import io.confluent.ksql.services.ServiceContext; -import io.confluent.ksql.services.TestServiceContext; -import io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader; -import io.confluent.ksql.test.serde.SerdeSupplier; -import io.confluent.ksql.test.tools.TestCase; -import io.confluent.ksql.test.tools.TestExecutor; -import io.confluent.ksql.test.tools.TestExecutorUtil; -import io.confluent.ksql.test.tools.Topic; -import io.confluent.ksql.test.tools.stubs.StubKafkaService; -import io.confluent.ksql.test.utils.SerdeUtil; -import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.PersistentQueryMetadata; -import io.confluent.ksql.util.QueryMetadata; -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardOpenOption; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import org.junit.Ignore; -import org.junit.Test; -import org.w3c.dom.Document; -import org.w3c.dom.NodeList; - -/** - * This class is used to generate the topology files to ensure safe - * upgrades of KSQL across releases. - * - * There are some manual steps in using this class but this should be ok as - * we only need to create new topology files at the end of a release cycle. - * - * The steps to generate topology files: - * - * 1. Run this class by running the test {@link #manuallyGenerateTopologies} BEFORE you update the - * pom with a new version. - * - * 2. This class will generate expected topology files - * for the version specified in the pom file. The program writes the files to - * ksql-engine/src/test/resources/expected_topology/VERSION_NUM directory. Where - * VERSION_NUM is the version defined in ksql-engine/pom.xml <parent><version> element. - * - */ -@Ignore -public final class TopologyFileGenerator { - - /** - * This test exists only to be able to generate topologies as part of the release process - * It can be run manually from the IDE - * It is deliberately excluded from the test suite - */ - @Test - public void manuallyGenerateTopologies() throws Exception { - generateTopologies(); - } - - private static final StubKafkaService stubKafkaService = StubKafkaService.create(); - private static final String BASE_DIRECTORY = "src/test/resources/expected_topology/"; - - static Path findBaseDir() { - Path path = Paths.get("./ksql-functional-tests"); - if (Files.exists(path)) { - return path.resolve(BASE_DIRECTORY); - } - path = Paths.get("../ksql-functional-tests"); - if (Files.exists(path)) { - return path.resolve(BASE_DIRECTORY); - } - throw new RuntimeException("Failed to determine location of expected topologies directory. 
" - + "App should be run with current directory set to either the root of the repo or the " - + "root of the ksql-functional-tests module"); - } - - private static void generateTopologies() throws Exception { - generateTopologies(findBaseDir()); - } - - static void generateTopologies(final Path base) throws Exception { - final String formattedVersion = "0_6_0-pre"; - final Path generatedTopologyPath = base.resolve(formattedVersion); - - System.out.println(String.format("Starting to write topology files to %s", generatedTopologyPath)); - - if (!generatedTopologyPath.toFile().exists()) { - Files.createDirectory(generatedTopologyPath); - } else { - System.out.println("Warning: Directory already exists, " - + "this will re-generate topology files. dir: " + generatedTopologyPath); - } - - writeExpectedTopologyFiles(generatedTopologyPath, getTestCases()); - - System.out - .println(String.format("Done writing topology files to %s", generatedTopologyPath)); - } - - static List getTestCases() { - return QueryTranslationTest.findTestCases() - .filter(q -> !q.expectedException().isPresent()) - .collect(Collectors.toList()); - } - - private static String getFormattedVersionFromPomFile() throws Exception { - final File pomFile = new File("pom.xml"); - final DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newInstance(); - final DocumentBuilder documentBuilder = documentBuilderFactory.newDocumentBuilder(); - final Document pomDoc = documentBuilder.parse(pomFile); - - final NodeList versionNodeList = pomDoc.getElementsByTagName("version"); - final String versionName = versionNodeList.item(0).getTextContent(); - - return versionName.replaceAll("-SNAPSHOT?", "").replaceAll("\\.", "_"); - } - - private static void writeExpectedTopologyFiles( - final Path topologyDir, - final List testCases - ) { - testCases.forEach(testCase -> writeExpectedToplogyFile(topologyDir, testCase)); - } - - private static void writeExpectedToplogyFile(final Path topologyDir, final TestCase testCase) { - try { - final Path topologyFile = buildExpectedTopologyPath(topologyDir, testCase); - - final String topologyContent = buildExpectedTopologyContent(testCase, Optional.empty()); - - Files.write(topologyFile, - topologyContent.getBytes(StandardCharsets.UTF_8), - StandardOpenOption.CREATE, - StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING - ); - } catch (final IOException e) { - throw new RuntimeException(e); - } - } - - static Path buildExpectedTopologyPath(final Path topologyDir, final TestCase testCase) { - return ExpectedTopologiesTestLoader.buildExpectedTopologyPath( - testCase.getName(), - topologyDir - ); - } - - static String buildExpectedTopologyContent( - final TestCase testCase, - final Optional> persistedConfigs - ) { - final KsqlConfig baseConfigs = new KsqlConfig(TestExecutor.baseConfig()) - .cloneWithPropertyOverwrite(testCase.properties()); - - final KsqlConfig ksqlConfig = persistedConfigs - .map(baseConfigs::overrideBreakingConfigsWithOriginalValues) - .orElse(baseConfigs); - - try (final ServiceContext serviceContext = getServiceContext(); - final KsqlEngine ksqlEngine = getKsqlEngine(serviceContext) - ) { - final PersistentQueryMetadata queryMetadata = - buildQuery(testCase, serviceContext, ksqlEngine, ksqlConfig); - - final Map configsToPersist - = new HashMap<>(ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); - - // Ignore the KStreams state directory as its different every time: - configsToPersist.remove("ksql.streams.state.dir"); - - return 
ExpectedTopologiesTestLoader.buildExpectedTopologyContent( - queryMetadata, - configsToPersist - ); - } catch (final Exception e) { - throw new RuntimeException(e); - } - } - - private static ServiceContext getServiceContext() { - final SchemaRegistryClient schemaRegistryClient = new MockSchemaRegistryClient(); - return TestServiceContext.create(() -> schemaRegistryClient); - } - - private static KsqlEngine getKsqlEngine(final ServiceContext serviceContext) { - final MutableMetaStore metaStore = new MetaStoreImpl(TestFunctionRegistry.INSTANCE.get()); - return KsqlEngineTestUtil.createKsqlEngine(serviceContext, metaStore); - } - - private static PersistentQueryMetadata buildQuery( - final TestCase testCase, - final ServiceContext serviceContext, - final KsqlEngine ksqlEngine, - final KsqlConfig ksqlConfig - ) { - final List queries = TestExecutorUtil - .buildQueries(testCase, serviceContext, ksqlEngine, ksqlConfig, stubKafkaService); - - final MetaStore metaStore = ksqlEngine.getMetaStore(); - for (QueryMetadata queryMetadata: queries) { - final PersistentQueryMetadata persistentQueryMetadata - = (PersistentQueryMetadata) queryMetadata; - final String sinkKafkaTopicName = metaStore - .getSource(persistentQueryMetadata.getSinkName()) - .getKafkaTopicName(); - - final SerdeSupplier keySerdes = SerdeUtil.getKeySerdeSupplier( - persistentQueryMetadata.getResultTopic().getKeyFormat(), - queryMetadata::getLogicalSchema - ); - - final SerdeSupplier valueSerdes = SerdeUtil.getSerdeSupplier( - persistentQueryMetadata.getResultTopic().getValueFormat().getFormat(), - queryMetadata::getLogicalSchema - ); - - final Topic sinkTopic = new Topic( - sinkKafkaTopicName, - Optional.empty(), - keySerdes, - valueSerdes, - 1, - 1 - ); - - stubKafkaService.createTopic(sinkTopic); - } - - assertThat("test did not generate any queries.", queries.isEmpty(), is(false)); - return queries.get(queries.size() - 1); - } -} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java deleted file mode 100644 index dadb063c7cc7..000000000000 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Copyright 2019 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.test; - -import static io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader.CONFIG_END_MARKER; -import static io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader.SCHEMAS_END_MARKER; -import static java.nio.charset.StandardCharsets.UTF_8; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.google.common.collect.ImmutableSet; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader; -import io.confluent.ksql.test.tools.TestCase; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; -import org.junit.Ignore; -import org.junit.Test; - -/** - * Utility to help re-write the expected topology files used by {@link QueryTranslationTest}. - * - * Occasionally, things change in the way KStreams generates topologies and we need to update the - * previously saved topologies to bring them back inline. Obviously, care should be taken when - * doing so to ensure no backwards incompatible changes are being hidden by any changes made. * - */ -@Ignore -public final class TopologyFileRewriter { - - /** - * Set {@code REWRITER} to an appropriate rewriter impl. - */ - private static final Rewriter REWRITER = new RewriteTopologyOnly(); - - /** - * Exclude some versions. Anything version starting with one of these strings is excluded: - */ - private static final Set EXCLUDE_VERSIONS = ImmutableSet.builder() - //.add("5_0") - //.add("5_1") - .build(); - - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - - public TopologyFileRewriter() { - } - - @Test - public void runMeToRewrite() throws Exception { - final Path baseDir = TopologyFileGenerator.findBaseDir(); - final List testCases = TopologyFileGenerator.getTestCases(); - - Files.list(baseDir) - .filter(Files::isDirectory) - .filter(TopologyFileRewriter::includedVersion) - .forEach(dir -> rewriteTopologyDirectory(dir, testCases)); - } - - private static boolean includedVersion(final Path path) { - - final String version = getVersion(path); - - return EXCLUDE_VERSIONS.stream() - .noneMatch(version::startsWith); - } - - @SuppressFBWarnings("NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE") - private static String getVersion(final Path versionDir) { - try { - final Path versionFile = versionDir - .resolve(ExpectedTopologiesTestLoader.TOPOLOGY_VERSION_FILE); - if (Files.exists(versionFile)) { - return new String(Files.readAllBytes(versionFile), UTF_8); - } - - return versionDir.getFileName().toString(); - } catch (final Exception e) { - throw new RuntimeException("Failed to determine version in " + versionDir, e); - } - } - - private static void rewriteTopologyDirectory( - final Path versionDir, - final List testCases - ) { - try { - System.out.println("Starting to rewrite topology files in " + versionDir); - - for (TestCase testCase : testCases) { - rewriteTopologyFile(versionDir, testCase); - } - - deleteOrphanedFiles(versionDir, testCases); - - System.out.println("Done rewrite topology files in " + versionDir); - } catch (final Exception e) { - throw new RuntimeException("Failed processing version dir: " + versionDir, e); - } - } - - private static void rewriteTopologyFile( - final Path topologyDir, - final TestCase testCase - ) { - final Path path = 
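/*
 * (Sketch, using only types defined later in this file.) Each rewrite strategy
 * is a Rewriter implementation, so changing behaviour only means re-pointing
 * the REWRITER constant, e.g.:
 *
 *   private static final Rewriter REWRITER = new RewriteSchemasOnly();
 *
 * RewriteTopologyOnly regenerates the topology section, RewriteSchemasOnly the
 * schemas section, and RegexRewriter applies purely textual schema fix-ups.
 */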
TopologyFileGenerator.buildExpectedTopologyPath(topologyDir, testCase); - if (!Files.exists(path)) { - System.err.println("WARING: Missing topology file: " + path); - return; - } - - try { - final String rewritten = REWRITER.rewrite(testCase, path); - - Files.write(path, rewritten.getBytes(UTF_8)); - - System.out.println("Rewritten topology file: " + path); - } catch (final Exception e) { - throw new RuntimeException("Failed processing topology file: " + path, e); - } - } - - private static void deleteOrphanedFiles( - final Path versionDir, - final List testCases - ) throws IOException { - final Set paths = testCases.stream() - .map(testCase -> TopologyFileGenerator.buildExpectedTopologyPath(versionDir, testCase)) - .collect(Collectors.toSet()); - - Files.list(versionDir) - .filter(Files::isRegularFile) - .filter(path -> !path.endsWith(ExpectedTopologiesTestLoader.TOPOLOGY_VERSION_FILE)) - .filter(path -> !paths.contains(path)) - .forEach(TopologyFileRewriter::deleteOrphanedFile); - } - - private static void deleteOrphanedFile(final Path orphan) { - try { - System.out.println("WARNING: Deleting orphaned topology file: " + orphan); - Files.delete(orphan); - } catch (final Exception e) { - throw new RuntimeException("Failed to delete orphaned expected topology file", e); - } - } - - private static String grabContent( - final String contents, - final Optional startMarker, - final Optional endMarker - ) { - final int start = startMarker - .map(marker -> { - final int idx = contents.indexOf(marker + System.lineSeparator()); - return idx < 0 ? idx : idx + marker.length() + 1; - }) - .orElse(0); - - if (start < 0) { - throw new RuntimeException("Failed to find marker for start of section: " + startMarker); - } - - final int end = endMarker - .map(contents::indexOf) - .orElse(contents.length()); - - if (end < 0) { - throw new RuntimeException("Failed to find marker for end of section: " + startMarker); - } - - return contents.substring(start, end); - } - - private static Map parseConfigs(final String configs) { - try { - final ObjectReader objectReader = OBJECT_MAPPER.readerFor(Map.class); - final Map parsed = objectReader.readValue(configs); - - final Set toRemove = parsed.entrySet().stream() - .filter(e -> e.getValue() == null) - .map(Entry::getKey) - .collect(Collectors.toSet()); - - parsed.remove("ksql.streams.state.dir"); - parsed.keySet().removeAll(toRemove); - return parsed; - } catch (final Exception e) { - throw new RuntimeException("Failed to parse configs: " + configs, e); - } - } - - private interface Rewriter { - - String rewrite(final TestCase testCase, final Path path) throws Exception; - } - - private interface StructuredRewriter extends Rewriter { - - default String rewrite(final TestCase testCase, final Path path) throws Exception { - final String contents = new String(Files.readAllBytes(path), UTF_8); - - final String newConfig = rewriteConfig( - testCase, - path, - grabContent(contents, Optional.empty(), Optional.of(CONFIG_END_MARKER)) - ) - + CONFIG_END_MARKER - + System.lineSeparator(); - - final boolean hasSchemas = contents.contains(SCHEMAS_END_MARKER); - - final String newSchemas = hasSchemas - ? rewriteSchemas( - testCase, - path, - grabContent(contents, Optional.of(CONFIG_END_MARKER), Optional.of(SCHEMAS_END_MARKER)) - ) - + SCHEMAS_END_MARKER - + System.lineSeparator() - : ""; - - final Optional topologyStart = hasSchemas - ? 
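/*
 * (Illustrative.) grabContent, defined above, slices the marker-delimited file
 * into its sections; for example, pulling just the schemas block:
 *
 *   final String schemas = grabContent(
 *       contents,
 *       Optional.of(CONFIG_END_MARKER),
 *       Optional.of(SCHEMAS_END_MARKER));
 *
 * An empty start marker reads from the top of the file and an empty end marker
 * reads to the end of it.
 */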
Optional.of(SCHEMAS_END_MARKER) - : Optional.of(CONFIG_END_MARKER); - - final String newTopologies = rewriteTopologies( - testCase, - path, - grabContent(contents, topologyStart, Optional.empty()) - ); - - return newConfig + newSchemas + newTopologies; - } - - // Overwrite below methods as needed: - default String rewriteConfig( - final TestCase testCase, - final Path path, - final String configs - ) { - return configs; - } - - default String rewriteSchemas( - final TestCase testCase, - final Path path, - final String schemas - ) { - return schemas; - } - - default String rewriteTopologies( - final TestCase testCase, - final Path path, - final String topologies - ) { - return topologies; - } - } - - private static final class RegexRewriter implements StructuredRewriter { - - @Override - public String rewriteSchemas(final TestCase testCase, final Path path, final String schemas) { - - int start; - String result = schemas; - - while ((start = result.indexOf("optional<")) != -1) { - final int end = findCloseTagFor(result, start + "optional".length()); - - final String contents = result.substring(start + "optional<".length(), end); - - result = result.substring(0, start) - + contents - + result.substring(end + 1); - } - - return result - .replaceAll(",(\\S)", ", $1") - .replaceAll("\\n", " NOT NULL" + System.lineSeparator()) - .replaceAll("struct<", "STRUCT<") - .replaceAll("map<", "MAP<") - .replaceAll("array<", "ARRAY<") - .replaceAll("boolean", "BOOLEAN") - .replaceAll("int32", "INT") - .replaceAll("int64", "BIGINT") - .replaceAll("float64", "DOUBLE") - .replaceAll("string", "VARCHAR"); - } - - private static int findCloseTagFor(final String contents, final int startIdx) { - assert (contents.charAt(startIdx) == '<'); - - int depth = 1; - int idx = startIdx + 1; - - while (depth > 0 && idx < contents.length()) { - final char c = contents.charAt(idx++); - switch (c) { - case '<': - depth++; - break; - - case '>': - depth--; - break; - - default: - break; - } - } - - if (depth > 0) { - throw new RuntimeException("Reached end of file before finding close tag"); - } - - return idx - 1; - } - } - - private static final class CustomRewriter implements StructuredRewriter { - - @Override - public String rewriteSchemas(final TestCase testCase, final Path path, final String schemas) { - return Arrays.stream(schemas.split(System.lineSeparator())) - // Add any steps you need to rewrite the schemas here. - // The is generally no need to check such changes in. - .collect(Collectors.joining(System.lineSeparator(), "", System.lineSeparator())); - } - } - - /** - * Uses the standard topology generation code to rewrite expected topology, i.e. it updates - * the topology to match what the current code would output, taking into account any config - */ - private static final class RewriteTopologyOnly implements StructuredRewriter { - - private Map configs; - - @Override - public String rewriteConfig( - final TestCase testCase, - final Path path, - final String configs - ) { - this.configs = parseConfigs(configs); - return configs; - } - - @Override - public String rewriteTopologies( - final TestCase testCase, - final Path path, - final String existing - ) { - final String newContent = TopologyFileGenerator - .buildExpectedTopologyContent(testCase, Optional.of(configs)); - - final boolean hasSchemas = newContent.contains(SCHEMAS_END_MARKER); - - final Optional topologyStart = hasSchemas - ? 
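/*
 * (Note, paraphrasing the code below.) RewriteTopologyOnly keeps the persisted
 * configs and schemas verbatim: it captures the parsed configs in
 * rewriteConfig() and then rebuilds only the topology section via
 * TopologyFileGenerator.buildExpectedTopologyContent(testCase,
 * Optional.of(configs)).
 */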
Optional.of(SCHEMAS_END_MARKER) - : Optional.of(CONFIG_END_MARKER); - - return grabContent(newContent, topologyStart, Optional.empty()); - } - } - - /** - * Uses the standard topology generation code to rewrite expected schemas, i.e. it updates - * the schemes to match what the current code would output, taking into account any config - */ - private static final class RewriteSchemasOnly implements StructuredRewriter { - - private Map configs; - - @Override - public String rewriteConfig( - final TestCase testCase, - final Path path, - final String configs - ) { - this.configs = parseConfigs(configs); - return configs; - } - - @Override - public String rewriteSchemas( - final TestCase testCase, - final Path path, - final String schemas - ) { - final String newContent = TopologyFileGenerator - .buildExpectedTopologyContent(testCase, Optional.of(configs)); - - return grabContent( - newContent, - Optional.of(CONFIG_END_MARKER), - Optional.of(SCHEMAS_END_MARKER) - ); - } - } -} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java deleted file mode 100644 index d9ac0b52489f..000000000000 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Copyright 2019 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package io.confluent.ksql.test.loader; - -import static java.nio.charset.StandardCharsets.UTF_8; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import io.confluent.ksql.model.SemanticVersion; -import io.confluent.ksql.test.model.KsqlVersion; -import io.confluent.ksql.test.tools.TopologyAndConfigs; -import io.confluent.ksql.test.tools.VersionedTest; -import io.confluent.ksql.util.PersistentQueryMetadata; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import org.apache.commons.lang3.StringUtils; - -/** - * Loads the expected topology files for each test and creates a new test for each expected version - * and sets the expected topology of the test. 
- */ -public class ExpectedTopologiesTestLoader implements TestLoader { - - public static final String TOPOLOGY_VERSION_FILE = "__version"; - public static final String CONFIG_END_MARKER = "CONFIGS_END"; - public static final String SCHEMAS_END_MARKER = "SCHEMAS_END"; - - private static final Pattern TOPOLOGY_VERSION_PATTERN = Pattern.compile("(\\d+)_(\\d+)(_\\d+)?"); - private static final String TOPOLOGY_VERSIONS_DELIMITER = ","; - private static final String TOPOLOGY_VERSIONS_PROP = "topology.versions"; - private static final String TOPOLOGY_VERSION_LATEST = "latest-only"; - - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - private static final KsqlVersion CURRENT_VERSION = KsqlVersion.current(); - private static final String INVALID_FILENAME_CHARS_PATTERN = "\\s|/|\\\\|:|\\*|\\?|\"|<|>|\\|"; - - private final String topologyChecksDir; - private final TestLoader innerLoader; - - public static ExpectedTopologiesTestLoader of( - final TestLoader innerLoader, - final String topologyChecksDir - ) { - return new ExpectedTopologiesTestLoader<>(innerLoader, topologyChecksDir); - } - - private ExpectedTopologiesTestLoader( - final TestLoader innerLoader, - final String topologyChecksDir - ) { - this.topologyChecksDir = Objects.requireNonNull(topologyChecksDir, "topologyChecksDir"); - this.innerLoader = Objects.requireNonNull(innerLoader, "innerLoader"); - } - - public Stream load() { - final List expectedTopologies = loadTopologiesAndVersions(); - - return innerLoader.load() - .flatMap(q -> buildVersionedTestCases(q, expectedTopologies)); - } - - public static Path buildExpectedTopologyPath(final String queryName, final Path topologyDir) { - final String updatedQueryName = formatQueryName(queryName); - return topologyDir.resolve(updatedQueryName); - } - - public static String buildExpectedTopologyContent( - final PersistentQueryMetadata query, - final Map configs - ) { - try { - final ObjectWriter objectWriter = OBJECT_MAPPER.writerWithDefaultPrettyPrinter(); - - final String configString = objectWriter.writeValueAsString(configs); - final String topologyString = query.getTopology().describe().toString(); - final String schemasString = query.getSchemasDescription(); - - return configString + "\n" - + CONFIG_END_MARKER + "\n" - + schemasString + "\n" - + SCHEMAS_END_MARKER + "\n" - + topologyString; - } catch (final Exception e) { - throw new RuntimeException(e); - } - } - - private List loadTopologiesAndVersions() { - return getTopologyVersions().stream() - .map(version -> new TopologiesAndVersion( - version, - loadExpectedTopologies(topologyChecksDir + version.getName()) - )) - .collect(Collectors.toList()); - } - - private List getTopologyVersions() { - final String versionProp = System.getProperty(TOPOLOGY_VERSIONS_PROP, ""); - - final Stream versionStrings = versionProp.isEmpty() - ? findExpectedTopologyDirectories().stream() - : versionProp.equalsIgnoreCase(TOPOLOGY_VERSION_LATEST) - ? 
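/*
 * (Usage sketch; the exact command lines are illustrative.) Which persisted
 * versions were loaded was driven by the "topology.versions" system property:
 *
 *   mvn test -Dtopology.versions=5_3_0,5_4_0   # only the listed versions
 *   mvn test -Dtopology.versions=latest-only   # skip all persisted versions
 *
 * When the property was unset, every directory under the topology checks dir
 * was scanned for versions.
 */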
Stream.of() - : Arrays.stream(versionProp.split(TOPOLOGY_VERSIONS_DELIMITER)); - - return versionStrings - .map(this::getVersion) - .collect(Collectors.toList()); - } - - private List findExpectedTopologyDirectories() { - try { - return findContentsOfDirectory(topologyChecksDir).stream() - .filter(file -> !file.endsWith(".md")) - .collect(Collectors.toList()); - } catch (final Exception e) { - throw new RuntimeException("Could not find expected topology directories.", e); - } - } - - private KsqlVersion getVersion(final String dir) { - final Path versionFile = Paths.get(topologyChecksDir, dir, TOPOLOGY_VERSION_FILE); - - try { - final String versionString = loadContents(versionFile.toString()) - .map(content -> String.join("", content)) - .orElse(dir); - - final Matcher matcher = TOPOLOGY_VERSION_PATTERN.matcher(versionString); - if (!matcher.matches()) { - throw new RuntimeException("Version does not match required pattern. " - + TOPOLOGY_VERSION_PATTERN - + ". Correct the directory name, or add a " + TOPOLOGY_VERSION_FILE + "."); - } - - final int major = Integer.parseInt(matcher.group(1)); - final int minor = Integer.parseInt(matcher.group(2)); - final int patch = matcher.groupCount() == 3 - ? 0 - : Integer.parseInt(matcher.group(3).substring(1)); - - return KsqlVersion.of(dir, SemanticVersion.of(major, minor, patch)); - } catch (Exception e) { - throw new RuntimeException("Failed to load version file: " + versionFile, e); - } - } - - @SuppressWarnings("unchecked") - private static Stream buildVersionedTestCases( - final T test, - final List expectedTopologies - ) { - Stream.Builder builder = Stream.builder(); - if (test.getVersionBounds().contains(CURRENT_VERSION)) { - builder.add(test); - } - - for (final TopologiesAndVersion topologies : expectedTopologies) { - if (!test.getVersionBounds().contains(topologies.getVersion())) { - continue; - } - - final TopologyAndConfigs topologyAndConfigs = - topologies.getTopology(formatQueryName(test.getName())); - // could be null if the testCase has expected errors, no topology or configs saved - if (topologyAndConfigs != null) { - final T versionedTest = (T) test.withExpectedTopology( - topologies.getVersion(), - topologyAndConfigs - ); - - builder = builder.add(versionedTest); - } - } - return builder.build(); - } - - private static Map loadExpectedTopologies(final String dir) { - final HashMap expectedTopologyAndConfigs = new HashMap<>(); - final ObjectReader objectReader = new ObjectMapper().readerFor(Map.class); - final List topologyFiles = findExpectedTopologyFiles(dir); - topologyFiles.forEach(fileName -> { - final TopologyAndConfigs topologyAndConfigs = readTopologyFile(dir + "/" + fileName, - objectReader); - expectedTopologyAndConfigs.put(fileName, topologyAndConfigs); - }); - return expectedTopologyAndConfigs; - } - - private static List findExpectedTopologyFiles(final String dir) { - try { - return findContentsOfDirectory(dir); - } catch (final Exception e) { - throw new RuntimeException("Could not find expected topology files. 
dir: " + dir, e); - } - } - - private static TopologyAndConfigs readTopologyFile( - final String file, - final ObjectReader objectReader - ) { - final InputStream s = ExpectedTopologiesTestLoader.class.getClassLoader() - .getResourceAsStream(file); - if (s == null) { - throw new AssertionError("Resource not found: " + file); - } - - try (BufferedReader reader = new BufferedReader(new InputStreamReader(s, UTF_8)) - ) { - final StringBuilder topologyFileBuilder = new StringBuilder(); - - String schemas = null; - String topologyAndConfigLine; - Optional> persistedConfigs = Optional.empty(); - - while ((topologyAndConfigLine = reader.readLine()) != null) { - if (topologyAndConfigLine.contains(CONFIG_END_MARKER)) { - persistedConfigs = Optional - .of(objectReader.readValue(topologyFileBuilder.toString())); - topologyFileBuilder.setLength(0); - } else if (topologyAndConfigLine.contains(SCHEMAS_END_MARKER)) { - schemas = StringUtils.stripEnd(topologyFileBuilder.toString(), "\n"); - topologyFileBuilder.setLength(0); - } else { - topologyFileBuilder.append(topologyAndConfigLine).append("\n"); - } - } - - return new TopologyAndConfigs( - topologyFileBuilder.toString(), - Optional.ofNullable(schemas), - persistedConfigs - ); - - } catch (final IOException e) { - throw new RuntimeException(String.format("Couldn't read topology file %s %s", file, e)); - } - } - - private static Optional> loadContents(final String path) { - final InputStream s = ExpectedTopologiesTestLoader.class.getClassLoader() - .getResourceAsStream(path); - - if (s == null) { - return Optional.empty(); - } - - try (BufferedReader reader = new BufferedReader(new InputStreamReader(s, UTF_8))) { - final List contents = new ArrayList<>(); - String file; - while ((file = reader.readLine()) != null) { - contents.add(file); - } - return Optional.of(contents); - } catch (final IOException e) { - throw new AssertionError("Failed to read path: " + path, e); - } - } - - private static List findContentsOfDirectory(final String path) { - return loadContents(path) - .orElseThrow(() -> new AssertionError("Dir not found: " + path)); - } - - private static String formatQueryName(final String originalQueryName) { - return originalQueryName - .replaceAll(" - (AVRO|JSON|DELIMITED|KAFKA)$", "") - .replaceAll(INVALID_FILENAME_CHARS_PATTERN, "_"); - } - - private static class TopologiesAndVersion { - - private final KsqlVersion version; - private final Map topologies; - - TopologiesAndVersion(final KsqlVersion version, - final Map topologies) { - this.version = Objects.requireNonNull(version, "version"); - this.topologies = Objects.requireNonNull(topologies, "topologies"); - } - - KsqlVersion getVersion() { - return version; - } - - TopologyAndConfigs getTopology(final String name) { - return topologies.get(name); - } - } -} \ No newline at end of file diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/model/KsqlVersionTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/model/KsqlVersionTest.java index ba76740f3665..459198a1aa75 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/model/KsqlVersionTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/model/KsqlVersionTest.java @@ -17,6 +17,7 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; import io.confluent.ksql.model.SemanticVersion; import org.junit.Test; @@ -62,4 +63,34 @@ public void shouldParseMajorMinorPointSnapshot() { 
assertThat(result.getName(), is("5.4.1-SNAPSHOT")); assertThat(result.getVersion(), is(SemanticVersion.of(5, 4, 1))); } + + @Test + public void shouldCompareUsingTimestamps() { + // Given: + final KsqlVersion v1 = KsqlVersion.parse("5.4.1").withTimestamp(123); + final KsqlVersion v2 = KsqlVersion.parse("5.4.1").withTimestamp(456); + + // Then: + assertThat(v1, lessThan(v2)); + } + + @Test + public void shouldTreatNoTimestampAsHigher() { + // Given: + final KsqlVersion v1 = KsqlVersion.parse("5.4.1").withTimestamp(123); + final KsqlVersion v2 = KsqlVersion.parse("5.4.1"); + + // Then: + assertThat(v1, lessThan(v2)); + } + + @Test + public void shouldCompareVersionBeforeTimestamp() { + // Given: + final KsqlVersion v1 = KsqlVersion.parse("5.4.1").withTimestamp(456); + final KsqlVersion v2 = KsqlVersion.parse("5.4.2").withTimestamp(123); + + // Then: + assertThat(v1, lessThan(v2)); + } } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java new file mode 100644 index 000000000000..312b9d9e4cdc --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java @@ -0,0 +1,94 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.test.planned; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Charsets; +import io.confluent.ksql.execution.json.PlanJsonMapper; +import io.confluent.ksql.test.tools.TestCase; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.Optional; +import java.util.stream.Stream; + +/** + * Tool for generating new TestCasePlans and writing them to the local filesystem + */ +public class PlannedTestGenerator { + private static final ObjectMapper MAPPER = PlanJsonMapper.create(); + + public static void generatePlans(Stream testCases) { + testCases + .filter(PlannedTestUtils::isPlannedTestCase) + .forEach(PlannedTestGenerator::maybeGenerateTestCase); + } + + private static void maybeGenerateTestCase(final TestCase testCase) { + final String testCaseName = PlannedTestUtils.formatName(testCase.getName()); + final Path testCaseDir = Paths.get(PlannedTestLoader.PLANS_DIR, testCaseName); + createDirectory(testCaseDir); + final Optional latest = TestCasePlanLoader.fromLatest(testCaseDir); + final TestCasePlan current = TestCasePlanLoader.fromTestCase(testCase); + if (PlannedTestUtils.isSamePlan(latest, current)) { + return; + } + dumpTestCase(testCaseDir, current); + } + + private static String getTestDirName(final TestCasePlan planAtVersionNode) { + return String.format("%s_%s", planAtVersionNode.getVersion(), planAtVersionNode.getTimestamp()); + } + + private static void dumpTestCase(final Path dir, final TestCasePlan planAtVersion) { + final Path parent = PlannedTestUtils.findBaseDir() + .resolve(dir) + .resolve(getTestDirName(planAtVersion)); + final Path specPath = parent.resolve(PlannedTestLoader.SPEC_FILE); + final Path topologyPath = parent.resolve(PlannedTestLoader.TOPOLOGY_FILE); + try { + Files.createDirectories(parent); + Files.write( + specPath, + MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(planAtVersion.getNode()) + .getBytes(Charsets.UTF_8), + StandardOpenOption.CREATE, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING + ); + Files.write( + topologyPath, + planAtVersion.getTopology().getBytes(Charsets.UTF_8), + StandardOpenOption.CREATE, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING + ); + } catch (final IOException e) { + throw new RuntimeException(e); + } + } + + private static void createDirectory(final Path path) { + try { + Files.createDirectories(PlannedTestUtils.findBaseDir().resolve(path)); + } catch (final IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java new file mode 100644 index 000000000000..1d37aea44b2f --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.test.planned; + +import io.confluent.ksql.test.loader.TestLoader; +import io.confluent.ksql.test.model.KsqlVersion; +import io.confluent.ksql.test.tools.TestCase; +import io.confluent.ksql.test.tools.TopologyAndConfigs; +import io.confluent.ksql.test.tools.VersionedTest; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Stream; + +/** + * Loads test cases that include physical plan for any QTT test case that should be tested + * against a saved physical plan (according to {@link PlannedTestUtils#isPlannedTestCase}) + */ +public class PlannedTestLoader implements TestLoader { + public static final String PLANS_DIR = "historical_plans/"; + public static final String SPEC_FILE = "spec.json"; + public static final String TOPOLOGY_FILE = "topology"; + + private final TestLoader innerLoader; + + private PlannedTestLoader( + final TestLoader innerLoader + ) { + this.innerLoader = Objects.requireNonNull(innerLoader, "innerLoader"); + } + + public static PlannedTestLoader of(final TestLoader innerLoader) { + return new PlannedTestLoader(innerLoader); + } + + @Override + public Stream load() { + return innerLoader.load().flatMap(this::buildHistoricalTestCases); + } + + private Stream buildHistoricalTestCases(final TestCase testCase) { + if (PlannedTestUtils.isPlannedTestCase(testCase)) { + final Path rootforCase + = Paths.get(PLANS_DIR, PlannedTestUtils.formatName(testCase.getName())); + return PlannedTestUtils.findContentsOfDirectory(rootforCase.toString()).stream() + .map(d -> buildHistoricalTestCase(testCase, rootforCase.resolve(d))); + } else if (testCase.getVersionBounds().contains(KsqlVersion.current())) { + return Stream.of(testCase); + } else { + return Stream.empty(); + } + } + + private VersionedTest buildHistoricalTestCase( + final VersionedTest testCase, + final Path dir + ) { + final TestCasePlan planAtVersionNode = TestCasePlanLoader.fromSpecific(dir); + final KsqlVersion version = KsqlVersion.parse(planAtVersionNode.getVersion()) + .withTimestamp(planAtVersionNode.getTimestamp()); + return testCase.withExpectedTopology( + version, + new TopologyAndConfigs( + planAtVersionNode.getPlan(), + planAtVersionNode.getTopology(), + planAtVersionNode.getSchemas(), + planAtVersionNode.getConfigs() + ) + ); + } +} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestUtils.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestUtils.java new file mode 100644 index 000000000000..281c49419d73 --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestUtils.java @@ -0,0 +1,98 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */
+
+package io.confluent.ksql.test.planned;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import com.google.common.collect.ImmutableList;
+import io.confluent.ksql.test.tools.TestCase;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+
+public final class PlannedTestUtils {
+  private static final String BASE_DIRECTORY = "src/test/resources/";
+  private static final String INVALID_FILENAME_CHARS_PATTERN = "\\s|/|\\\\|:|\\*|\\?|\"|<|>|\\|";
+  // this is temporary
+  private static final List<String> WHITELIST = ImmutableList.of(
+      "average - calculate average in select"
+  );
+
+  private PlannedTestUtils() {
+  }
+
+  public static boolean isPlannedTestCase(final TestCase testCase) {
+    return !testCase.expectedException().isPresent() && WHITELIST.contains(testCase.getName());
+  }
+
+  public static boolean isSamePlan(
+      final Optional<TestCasePlan> latest,
+      final TestCasePlan current) {
+    return latest.isPresent() && current.getPlan().equals(latest.get().getPlan());
+  }
+
+  public static Optional<List<String>> loadContents(final String path) {
+    final InputStream s = PlannedTestUtils.class.getClassLoader()
+        .getResourceAsStream(path);
+
+    if (s == null) {
+      return Optional.empty();
+    }
+
+    try (BufferedReader reader = new BufferedReader(new InputStreamReader(s, UTF_8))) {
+      final List<String> contents = new ArrayList<>();
+      String file;
+      while ((file = reader.readLine()) != null) {
+        contents.add(file);
+      }
+      return Optional.of(contents);
+    } catch (final IOException e) {
+      throw new AssertionError("Failed to read path: " + path, e);
+    }
+  }
+
+  public static List<String> findContentsOfDirectory(final String path) {
+    return loadContents(path)
+        .orElseThrow(() -> new AssertionError("Dir not found: " + path));
+  }
+
+  public static String formatName(final String originalName) {
+    return originalName
+        .replaceAll(" - (AVRO|JSON|DELIMITED|KAFKA)$", "")
+        .replaceAll(INVALID_FILENAME_CHARS_PATTERN, "_");
+  }
+
+  public static Path findBaseDir() {
+    Path path = Paths.get("./ksql-functional-tests");
+    if (Files.exists(path)) {
+      return path.resolve(BASE_DIRECTORY);
+    }
+    path = Paths.get("../ksql-functional-tests");
+    if (Files.exists(path)) {
+      return path.resolve(BASE_DIRECTORY);
+    }
+    throw new RuntimeException("Failed to determine location of expected topologies directory. "
+        + "App should be run with current directory set to either the root of the repo or the "
+        + "root of the ksql-functional-tests module");
+  }
+}
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlan.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlan.java
new file mode 100644
index 000000000000..a34d9518dc18
--- /dev/null
+++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlan.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.test.planned;
+
+import io.confluent.ksql.engine.KsqlPlan;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public final class TestCasePlan {
+  private final TestCasePlanNode node;
+  private final String topology;
+
+  TestCasePlan(
+      final String version,
+      final long timestamp,
+      final List<KsqlPlan> plan,
+      final String topology,
+      final Map<String, String> schemas,
+      final Map<String, String> configs
+  ) {
+    this(new TestCasePlanNode(version, timestamp, plan, schemas, configs), topology);
+  }
+
+  TestCasePlan(final TestCasePlanNode node, final String topology) {
+    this.node = Objects.requireNonNull(node, "node");
+    this.topology = Objects.requireNonNull(topology, "topology");
+  }
+
+  public List<KsqlPlan> getPlan() {
+    return node.getPlan();
+  }
+
+  public long getTimestamp() {
+    return node.getTimestamp();
+  }
+
+  public Map<String, String> getConfigs() {
+    return node.getConfigs();
+  }
+
+  public Map<String, String> getSchemas() {
+    return node.getSchemas();
+  }
+
+  public String getTopology() {
+    return topology;
+  }
+
+  public String getVersion() {
+    return node.getVersion();
+  }
+
+  TestCasePlanNode getNode() {
+    return node;
+  }
+}
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java
new file mode 100644
index 000000000000..caaa5bfc75b3
--- /dev/null
+++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.test.planned;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.ksql.KsqlExecutionContext.ExecuteResult;
+import io.confluent.ksql.engine.KsqlEngine;
+import io.confluent.ksql.engine.KsqlEngineTestUtil;
+import io.confluent.ksql.engine.KsqlPlan;
+import io.confluent.ksql.execution.json.PlanJsonMapper;
+import io.confluent.ksql.function.TestFunctionRegistry;
+import io.confluent.ksql.metastore.MetaStoreImpl;
+import io.confluent.ksql.metastore.MutableMetaStore;
+import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan;
+import io.confluent.ksql.services.ServiceContext;
+import io.confluent.ksql.services.TestServiceContext;
+import io.confluent.ksql.test.model.KsqlVersion;
+import io.confluent.ksql.test.tools.TestCase;
+import io.confluent.ksql.test.tools.TestExecutor;
+import io.confluent.ksql.test.tools.TestExecutorUtil;
+import io.confluent.ksql.test.tools.stubs.StubKafkaService;
+import io.confluent.ksql.util.KsqlConfig;
+import io.confluent.ksql.util.PersistentQueryMetadata;
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Optional;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.NodeList;
+
+/**
+ * Loads saved test case plans or builds them from a TestCase
+ */
+public final class TestCasePlanLoader {
+
+  private static final StubKafkaService KAFKA_STUB = StubKafkaService.create();
+  private static final ObjectMapper MAPPER = PlanJsonMapper.create();
+  private static final String CURRENT_VERSION = getFormattedVersionFromPomFile();
+  private static final KsqlConfig BASE_CONFIG = new KsqlConfig(TestExecutor.baseConfig());
+
+  private TestCasePlanLoader() {
+  }
+
+  /**
+   * Create a TestCasePlan from a TestCase by executing it against an engine
+   * @param testCase the test case to build plans for
+   * @return the built plan.
+   */
+  public static TestCasePlan fromTestCase(final TestCase testCase) {
+    final KsqlConfig configs = BASE_CONFIG.cloneWithPropertyOverwrite(testCase.properties());
+    try (
+        final ServiceContext serviceContext = getServiceContext();
+        final KsqlEngine engine = getKsqlEngine(serviceContext)) {
+      return buildStatementsInTestCase(testCase, configs, serviceContext, engine);
+    }
+  }
+
+  /**
+   * Create a TestCasePlan by loading it from the local filesystem. This factory loads the
+   * most recent plan from a given test case directory.
+   * @param testCaseDir The directory to load the plan from.
+   * @return the loaded plan.
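+   *
+   * <p>A sketch of the on-disk layout this expects, using names taken from this
+   * module's own checked-in resources (illustrative, not exhaustive): each version
+   * directory is named {@code <version>_<timestamp>} and holds a {@code spec.json}
+   * plus a {@code topology} file, e.g.
+   * {@code historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/spec.json}.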
+   */
+  public static Optional<TestCasePlan> fromLatest(final Path testCaseDir) {
+    final Optional<List<String>> existing = PlannedTestUtils.loadContents(testCaseDir.toString());
+    if (!existing.isPresent()) {
+      return Optional.empty();
+    }
+    KsqlVersion latestVersion = null;
+    TestCasePlan latest = null;
+    for (final String versionDir : existing.get()) {
+      final TestCasePlan planAtVersionNode = parseSpec(testCaseDir.resolve(versionDir));
+      final KsqlVersion version = KsqlVersion.parse(planAtVersionNode.getVersion())
+          .withTimestamp(planAtVersionNode.getTimestamp());
+      if (latestVersion == null || latestVersion.compareTo(version) < 0) {
+        latestVersion = version;
+        latest = planAtVersionNode;
+      }
+    }
+    return Optional.ofNullable(latest);
+  }
+
+  /**
+   * Create a TestCasePlan by loading a specific plan from the local filesystem.
+   * @param versionDir the directory to load the plan from.
+   * @return the loaded plan.
+   */
+  public static TestCasePlan fromSpecific(final Path versionDir) {
+    return parseSpec(versionDir);
+  }
+
+  private static TestCasePlan parseSpec(final Path versionDir) {
+    final Path specPath = versionDir.resolve(PlannedTestLoader.SPEC_FILE);
+    final Path topologyPath = versionDir.resolve(PlannedTestLoader.TOPOLOGY_FILE);
+    try {
+      return new TestCasePlan(
+          MAPPER.readValue(slurp(specPath), TestCasePlanNode.class),
+          slurp(topologyPath)
+      );
+    } catch (final IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private static String slurp(final Path path) throws IOException {
+    return new String(
+        Files.readAllBytes(PlannedTestUtils.findBaseDir().resolve(path)),
+        Charset.defaultCharset()
+    );
+  }
+
+  private static TestCasePlan buildStatementsInTestCase(
+      final TestCase testCase,
+      final KsqlConfig ksqlConfig,
+      final ServiceContext serviceContext,
+      final KsqlEngine ksqlEngine) {
+    final Iterable<ConfiguredKsqlPlan> configuredPlans = TestExecutorUtil.planTestCase(
+        ksqlEngine,
+        testCase,
+        ksqlConfig,
+        Optional.of(serviceContext.getSchemaRegistryClient()),
+        KAFKA_STUB
+    );
+    final ImmutableList.Builder<KsqlPlan> plansBuilder = new Builder<>();
+    PersistentQueryMetadata queryMetadata = null;
+    for (final ConfiguredKsqlPlan configuredPlan : configuredPlans) {
+      plansBuilder.add(configuredPlan.getPlan());
+      final ExecuteResult executeResult = ksqlEngine.execute(
+          ksqlEngine.getServiceContext(),
+          configuredPlan
+      );
+      if (executeResult.getQuery().isPresent()) {
+        queryMetadata = (PersistentQueryMetadata) executeResult.getQuery().get();
+      }
+    }
+    if (queryMetadata == null) {
+      throw new AssertionError("test case does not build a query");
+    }
+    return new TestCasePlan(
+        CURRENT_VERSION,
+        System.currentTimeMillis(),
+        plansBuilder.build(),
+        queryMetadata.getTopologyDescription(),
+        queryMetadata.getSchemasDescription(),
+        BASE_CONFIG.getAllConfigPropsWithSecretsObfuscated()
+    );
+  }
+
+  private static ServiceContext getServiceContext() {
+    final SchemaRegistryClient schemaRegistryClient = new MockSchemaRegistryClient();
+    return TestServiceContext.create(() -> schemaRegistryClient);
+  }
+
+  private static KsqlEngine getKsqlEngine(final ServiceContext serviceContext) {
+    final MutableMetaStore metaStore = new MetaStoreImpl(TestFunctionRegistry.INSTANCE.get());
+    return KsqlEngineTestUtil.createKsqlEngine(serviceContext, metaStore);
+  }
+
+  private static String getFormattedVersionFromPomFile() {
+    try {
+      final File pomFile = new File("pom.xml");
+      final DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newInstance();
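+      // The first <version> element found in the pom is taken as the project
+      // version, and any -SNAPSHOT suffix is stripped below; so e.g. a
+      // hypothetical 5.5.0-SNAPSHOT pom version yields the "5.5.0" seen in the
+      // checked-in spec.json files.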
+      final DocumentBuilder documentBuilder = documentBuilderFactory.newDocumentBuilder();
+      final Document pomDoc = documentBuilder.parse(pomFile);
+
+      final NodeList versionNodeList = pomDoc.getElementsByTagName("version");
+      final String versionName = versionNodeList.item(0).getTextContent();
+
+      return versionName.replaceAll("-SNAPSHOT?", "");
+    } catch (final Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+}
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanNode.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanNode.java
new file mode 100644
index 000000000000..43e423a97e69
--- /dev/null
+++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanNode.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.test.planned;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import io.confluent.ksql.engine.KsqlPlan;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+class TestCasePlanNode {
+  private final String version;
+  private final long timestamp;
+  private final List<KsqlPlan> plan;
+  private final Map<String, String> schemas;
+  private final Map<String, String> configs;
+
+  public TestCasePlanNode(
+      @JsonProperty("version") final String version,
+      @JsonProperty("timestamp") final long timestamp,
+      @JsonProperty("plan") final List<KsqlPlan> plan,
+      @JsonProperty("schemas") final Map<String, String> schemas,
+      @JsonProperty("configs") final Map<String, String> configs
+  ) {
+    this.version = Objects.requireNonNull(version, "version");
+    this.timestamp = timestamp;
+    this.plan = Objects.requireNonNull(plan, "plan");
+    this.schemas = Objects.requireNonNull(schemas, "schemas");
+    this.configs = Objects.requireNonNull(configs, "configs");
+  }
+
+  public List<KsqlPlan> getPlan() {
+    return plan;
+  }
+
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  public Map<String, String> getConfigs() {
+    return configs;
+  }
+
+  public Map<String, String> getSchemas() {
+    return schemas;
+  }
+
+  public String getVersion() {
+    return version;
+  }
+}
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java
index c2eb6c164c9b..3d8621552b99 100644
--- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java
+++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java
@@ -24,6 +24,7 @@
 import com.fasterxml.jackson.databind.node.TextNode;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import io.confluent.ksql.engine.KsqlEngine;
@@ -166,8 +167,8 @@ public void shouldVerifyTopology() {
   @Test
   public void shouldVerifyTopologySchemas() {
     // Given:
-    givenExpectedTopology("a-topology", "matching-schemas");
-    givenActualTopology("a-topology", "matching-schemas");
+    givenExpectedTopology("a-topology", ImmutableMap.of("matching", "schemas"));
ImmutableMap.of("matching", "schemas")); + givenActualTopology("a-topology", ImmutableMap.of("matching", "schemas")); // When: executor.buildAndExecuteQuery(testCase); @@ -198,8 +199,8 @@ public void shouldFailOnTopologyMismatch() { @Test public void shouldFailOnSchemasMismatch() { // Given: - givenExpectedTopology("the-topology", "expected-schemas"); - givenActualTopology("the-topology", "actual-schemas"); + givenExpectedTopology("the-topology", ImmutableMap.of("expected", "schemas")); + givenActualTopology("the-topology", ImmutableMap.of("actual", "schemas")); // Then: expectedException.expect(AssertionError.class); @@ -332,18 +333,18 @@ private void givenExpectedTopology(final String topology) { when(expectedTopologyAndConfig.getTopology()).thenReturn(topology); } - private void givenExpectedTopology(final String topology, final String schemas) { + private void givenExpectedTopology(final String topology, final Map schemas) { givenExpectedTopology(topology); - when(expectedTopologyAndConfig.getSchemas()).thenReturn(Optional.of(schemas)); + when(expectedTopologyAndConfig.getSchemas()).thenReturn(schemas); } private void givenActualTopology(final String topology) { when(testCase.getGeneratedTopologies()).thenReturn(ImmutableList.of(topology)); } - private void givenActualTopology(final String topology, final String schemas) { + private void givenActualTopology(final String topology, final Map schemas) { givenActualTopology(topology); - when(testCase.getGeneratedSchemas()).thenReturn(ImmutableList.of(schemas)); + when(testCase.getGeneratedSchemas()).thenReturn(schemas); } private void givenDataSourceTopic(final LogicalSchema schema) { diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorUtilTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorUtilTest.java index d3a90293dd4f..03d721a56029 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorUtilTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorUtilTest.java @@ -16,11 +16,14 @@ package io.confluent.ksql.test.tools; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.MatcherAssert.assertThat; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.json.JsonMapper; +import io.confluent.ksql.planner.plan.ConfiguredKsqlPlan; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.test.model.QttTestFile; import io.confluent.ksql.test.model.TestCaseNode; @@ -29,6 +32,7 @@ import io.confluent.ksql.util.KsqlConfig; import java.io.File; import java.io.IOException; +import java.util.LinkedList; import java.util.List; import java.util.Optional; import org.junit.After; @@ -67,6 +71,46 @@ public void tearDown() { serviceContext.close(); } + @Test + public void shouldPlanTestCase() { + // Given: + final Topic sourceTopic = new Topic( + "test_topic", + Optional.empty(), + new StringSerdeSupplier(), + new StringSerdeSupplier(), + 1, + 1 + ); + + stubKafkaService.createTopic(sourceTopic); + + // When: + final Iterable plans = TestExecutorUtil.planTestCase( + ksqlEngine, + testCase, + ksqlConfig, + Optional.of(serviceContext.getSchemaRegistryClient()), + stubKafkaService + ); + + // Then: + final List asList = new LinkedList<>(); + for (final ConfiguredKsqlPlan plan : plans) { + 
ksqlEngine.execute(ksqlEngine.getServiceContext(), plan); + asList.add(plan); + } + assertThat(asList.size(), is(2)); + assertThat( + asList.get(0).getPlan().getStatementText(), + startsWith("CREATE STREAM TEST") + ); + assertThat( + asList.get(1).getPlan().getStatementText(), + startsWith("CREATE STREAM S1 AS SELECT") + ); + } + @Test public void shouldBuildStreamsTopologyTestDrivers() { diff --git a/ksql-functional-tests/src/test/resources/historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/spec.json b/ksql-functional-tests/src/test/resources/historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/spec.json new file mode 100644 index 000000000000..ba38f628847a --- /dev/null +++ b/ksql-functional-tests/src/test/resources/historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/spec.json @@ -0,0 +1,204 @@ +{ + "version" : "5.5.0", + "timestamp" : 1576794350087, + "plan" : [ { + "@type" : "ksqlPlanV1", + "statementText" : "CREATE STREAM TEST (ROWKEY BIGINT KEY, ID BIGINT, NAME STRING, VALUE BIGINT) WITH (KAFKA_TOPIC='test_topic', KEY='ID', VALUE_FORMAT='DELIMITED');", + "ddlCommand" : { + "@type" : "createStreamV1", + "sourceName" : "TEST", + "schema" : "`ROWKEY` BIGINT KEY, `ID` BIGINT, `NAME` STRING, `VALUE` BIGINT", + "keyField" : "ID", + "timestampColumn" : null, + "topicName" : "test_topic", + "formats" : { + "keyFormat" : { + "format" : "KAFKA", + "fullSchemaName" : null, + "delimiter" : null + }, + "valueFormat" : { + "format" : "DELIMITED", + "fullSchemaName" : null, + "delimiter" : null + }, + "options" : [ ] + }, + "windowInfo" : null + }, + "queryPlan" : null + }, { + "@type" : "ksqlPlanV1", + "statementText" : "CREATE TABLE AVG AS SELECT\n TEST.ID ID,\n (ABS((SUM(TEST.VALUE) / COUNT(TEST.ID))) * 10) AVG\nFROM TEST TEST\nGROUP BY TEST.ID\nEMIT CHANGES", + "ddlCommand" : { + "@type" : "createTableV1", + "sourceName" : "AVG", + "schema" : "`ROWKEY` BIGINT KEY, `ID` BIGINT, `AVG` BIGINT", + "keyField" : "ID", + "timestampColumn" : null, + "topicName" : "AVG", + "formats" : { + "keyFormat" : { + "format" : "KAFKA", + "fullSchemaName" : null, + "delimiter" : null + }, + "valueFormat" : { + "format" : "DELIMITED", + "fullSchemaName" : null, + "delimiter" : null + }, + "options" : [ ] + }, + "windowInfo" : null + }, + "queryPlan" : { + "sources" : [ "TEST" ], + "sink" : "AVG", + "physicalPlan" : { + "@type" : "tableSinkV1", + "properties" : { + "queryContext" : "AVG" + }, + "source" : { + "@type" : "tableSelectV1", + "properties" : { + "queryContext" : "Aggregate/Project" + }, + "source" : { + "@type" : "streamAggregateV1", + "properties" : { + "queryContext" : "Aggregate/Aggregate" + }, + "source" : { + "@type" : "streamGroupByKeyV1", + "properties" : { + "queryContext" : "Aggregate/GroupBy" + }, + "source" : { + "@type" : "streamSelectV1", + "properties" : { + "queryContext" : "Aggregate/Prepare" + }, + "source" : { + "@type" : "streamSourceV1", + "properties" : { + "queryContext" : "KsqlTopic/Source" + }, + "topicName" : "test_topic", + "formats" : { + "keyFormat" : { + "format" : "KAFKA", + "fullSchemaName" : null, + "delimiter" : null + }, + "valueFormat" : { + "format" : "DELIMITED", + "fullSchemaName" : null, + "delimiter" : null + }, + "options" : [ ] + }, + "timestampColumn" : null, + "sourceSchema" : "`ROWKEY` BIGINT KEY, `ID` BIGINT, `NAME` STRING, `VALUE` BIGINT", + "alias" : "TEST" + }, + "selectExpressions" : [ "TEST.ID AS KSQL_INTERNAL_COL_0", "TEST.VALUE AS KSQL_INTERNAL_COL_1" ] + }, + "internalFormats" : { + 
"keyFormat" : { + "format" : "KAFKA", + "fullSchemaName" : null, + "delimiter" : null + }, + "valueFormat" : { + "format" : "DELIMITED", + "fullSchemaName" : null, + "delimiter" : null + }, + "options" : [ ] + } + }, + "internalFormats" : { + "keyFormat" : { + "format" : "KAFKA", + "fullSchemaName" : null, + "delimiter" : null + }, + "valueFormat" : { + "format" : "DELIMITED", + "fullSchemaName" : null, + "delimiter" : null + }, + "options" : [ ] + }, + "nonAggregateColumns" : [ "KSQL_INTERNAL_COL_0", "KSQL_INTERNAL_COL_1" ], + "aggregationFunctions" : [ "SUM(KSQL_INTERNAL_COL_1)", "COUNT(KSQL_INTERNAL_COL_0)" ] + }, + "selectExpressions" : [ "KSQL_INTERNAL_COL_0 AS ID", "(ABS((KSQL_AGG_VARIABLE_0 / KSQL_AGG_VARIABLE_1)) * 10) AS AVG" ] + }, + "formats" : { + "keyFormat" : { + "format" : "KAFKA", + "fullSchemaName" : null, + "delimiter" : null + }, + "valueFormat" : { + "format" : "DELIMITED", + "fullSchemaName" : null, + "delimiter" : null + }, + "options" : [ ] + }, + "topicName" : "AVG" + }, + "queryId" : "CTAS_AVG_0" + } + } ], + "schemas" : { + "CTAS_AVG_0.KsqlTopic.Source" : "STRUCT NOT NULL", + "CTAS_AVG_0.Aggregate.GroupBy" : "STRUCT NOT NULL", + "CTAS_AVG_0.Aggregate.Aggregate.Materialize" : "STRUCT NOT NULL", + "CTAS_AVG_0.AVG" : "STRUCT NOT NULL" + }, + "configs" : { + "ksql.extension.dir" : "ext", + "ksql.streams.cache.max.bytes.buffering" : "0", + "ksql.security.extension.class" : null, + "ksql.transient.prefix" : "transient_", + "ksql.persistence.wrap.single.values" : "true", + "ksql.schema.registry.url" : "http://localhost:8081", + "ksql.streams.default.deserialization.exception.handler" : "io.confluent.ksql.errors.LogMetricAndContinueExceptionHandler", + "ksql.output.topic.name.prefix" : "", + "ksql.streams.auto.offset.reset" : "earliest", + "ksql.connect.url" : "http://localhost:8083", + "ksql.service.id" : "some.ksql.service.id", + "ksql.internal.topic.min.insync.replicas" : "1", + "ksql.streams.shutdown.timeout.ms" : "300000", + "ksql.streams.state.dir" : "/var/folders/p9/bk8xks6s2lndncftdbq36xh80000gp/T/confluent8507459536865147930", + "ksql.internal.topic.replicas" : "1", + "ksql.insert.into.values.enabled" : "true", + "ksql.streams.default.production.exception.handler" : "io.confluent.ksql.errors.ProductionExceptionHandlerUtil$LogAndFailProductionExceptionHandler", + "ksql.access.validator.enable" : "auto", + "ksql.streams.bootstrap.servers" : "localhost:0", + "ksql.streams.commit.interval.ms" : "2000", + "ksql.metric.reporters" : "", + "ksql.streams.auto.commit.interval.ms" : "0", + "ksql.metrics.extension" : null, + "ksql.streams.topology.optimization" : "all", + "ksql.execution.plan.enable" : "false", + "ksql.query.pull.streamsstore.rebalancing.timeout.ms" : "10000", + "ksql.streams.num.stream.threads" : "4", + "ksql.metrics.tags.custom" : "", + "ksql.pull.queries.enable" : "true", + "ksql.udfs.enabled" : "true", + "ksql.udf.enable.security.manager" : "true", + "ksql.query.pull.skip.access.validator" : "false", + "ksql.connect.worker.config" : "", + "ksql.query.pull.routing.timeout.ms" : "30000", + "ksql.sink.window.change.log.additional.retention" : "1000000", + "ksql.udf.collect.metrics" : "false", + "ksql.persistent.prefix" : "query_", + "ksql.query.persistent.active.limit" : "2147483647" + } +} \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/topology 
b/ksql-functional-tests/src/test/resources/historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/topology new file mode 100644 index 000000000000..357afa2a96d7 --- /dev/null +++ b/ksql-functional-tests/src/test/resources/historical_plans/average_-_calculate_average_in_select/5.5.0_1576794350087/topology @@ -0,0 +1,25 @@ +Topologies: + Sub-topology: 0 + Source: KSTREAM-SOURCE-0000000000 (topics: [test_topic]) + --> KSTREAM-TRANSFORMVALUES-0000000001 + Processor: KSTREAM-TRANSFORMVALUES-0000000001 (stores: []) + --> Aggregate-Prepare + <-- KSTREAM-SOURCE-0000000000 + Processor: Aggregate-Prepare (stores: []) + --> KSTREAM-AGGREGATE-0000000003 + <-- KSTREAM-TRANSFORMVALUES-0000000001 + Processor: KSTREAM-AGGREGATE-0000000003 (stores: [Aggregate-Aggregate-Materialize]) + --> Aggregate-Aggregate-ToOutputSchema + <-- Aggregate-Prepare + Processor: Aggregate-Aggregate-ToOutputSchema (stores: []) + --> Aggregate-Project + <-- KSTREAM-AGGREGATE-0000000003 + Processor: Aggregate-Project (stores: []) + --> KTABLE-TOSTREAM-0000000006 + <-- Aggregate-Aggregate-ToOutputSchema + Processor: KTABLE-TOSTREAM-0000000006 (stores: []) + --> KSTREAM-SINK-0000000007 + <-- Aggregate-Project + Sink: KSTREAM-SINK-0000000007 (topic: AVG) + <-- KTABLE-TOSTREAM-0000000006 + From a50a665c62c8d24d4c9e8962b43146897b1cbe3a Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Mon, 23 Dec 2019 11:17:37 -0800 Subject: [PATCH 059/123] fix: reintroduce FetchFieldFromStruct as a public UDF (#4185) --- .../execution/codegen/SqlToJavaVisitor.java | 4 ++++ .../execution/util/ExpressionTypeManager.java | 15 ++++++++++++++ .../util/ExpressionTypeManagerTest.java | 11 +++++----- .../fetch-field-from-struct.json | 20 +++++++++++++++++++ .../query-validation-tests/simple-struct.json | 11 ---------- 5 files changed, 44 insertions(+), 17 deletions(-) create mode 100644 ksql-functional-tests/src/test/resources/query-validation-tests/fetch-field-from-struct.json diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java index 7b14f2ba005a..23f840bf1694 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java @@ -366,6 +366,10 @@ public Pair visitFunctionCall( private Schema getFunctionReturnSchema( final FunctionCall node ) { + if (node.getName().equals(FetchFieldFromStruct.FUNCTION_NAME)) { + return expressionTypeManager.getExpressionSchema(node); + } + final UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName().name()); final List argumentSchemas = node.getArguments().stream() .map(expressionTypeManager::getExpressionSchema) diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java index 07ee31929203..a07d1582b71e 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java @@ -48,6 +48,7 @@ import io.confluent.ksql.execution.expression.tree.Type; import io.confluent.ksql.execution.expression.tree.WhenClause; import io.confluent.ksql.execution.function.UdafUtil; +import io.confluent.ksql.execution.function.udf.structfieldextractor.FetchFieldFromStruct; import 
io.confluent.ksql.function.AggregateFunctionInitArguments; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.function.KsqlAggregateFunction; @@ -407,6 +408,20 @@ public Void visitFunctionCall( return null; } + if (node.getName().equals(FetchFieldFromStruct.FUNCTION_NAME)) { + process(node.getArguments().get(0), expressionTypeContext); + final Schema firstArgSchema = expressionTypeContext.getSchema(); + final String fieldName = ((StringLiteral) node.getArguments().get(1)).getValue(); + if (firstArgSchema.field(fieldName) == null) { + throw new KsqlException(String.format("Could not find field %s in %s.", + fieldName, + node.getArguments().get(0).toString())); + } + final Schema returnSchema = firstArgSchema.field(fieldName).schema(); + expressionTypeContext.setSchema(returnSchema); + return null; + } + final UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName().name()); final UdfMetadata metadata = udfFactory.getMetadata(); if (metadata.isInternal()) { diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java index f718a25b467d..ce6fbfe5a015 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java @@ -297,19 +297,18 @@ public void shouldHandleStructFieldDereference() { } @Test - public void shouldThrowOnFetchFieldFromStructFunctionCall() { + public void shouldHandleFetchFieldFromStructFunctionCall() { // Given: final Expression expression = new FunctionCall( FetchFieldFromStruct.FUNCTION_NAME, ImmutableList.of(ADDRESS, new StringLiteral("NUMBER")) ); - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage("Can't find any functions with the name 'FETCH_FIELD_FROM_STRUCT'"); - // When: - expressionTypeManager.getExpressionSqlType(expression); + final SqlType sqlType = expressionTypeManager.getExpressionSqlType(expression); + + // Then: + assertThat(sqlType, is(SqlTypes.BIGINT)); } @Test diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/fetch-field-from-struct.json b/ksql-functional-tests/src/test/resources/query-validation-tests/fetch-field-from-struct.json new file mode 100644 index 000000000000..52fd58af776f --- /dev/null +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/fetch-field-from-struct.json @@ -0,0 +1,20 @@ +{ + "comments": [ + "External usage of FETCH_FIELD_FROM_STRUCT" + ], + "tests": [ + { + "name": "Fetch Field", + "statements": [ + "CREATE STREAM TEST (s STRUCT) WITH (kafka_topic='test_topic', value_format='JSON');", + "CREATE STREAM OUTPUT as SELECT FETCH_FIELD_FROM_STRUCT(s, 'VAL') AS value FROM test;" + ], + "inputs": [ + {"topic": "test_topic", "key": "foo", "value": {"s": {"val": "foo"}}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "foo", "value": {"VALUE": "foo"}} + ] + } + ] +} diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json b/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json index fc4906db3278..6c0b66a07a1e 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/simple-struct.json @@ -1821,17 +1821,6 @@ "key": 0 } ] - }, - { - "name": "direct use of 
FETCH_FIELD_FROM_STRUCT", - "statements": [ - "CREATE STREAM input (s STRUCT) WITH (kafka_topic='input_topic', value_format='JSON');", - "CREATE STREAM output AS SELECT FETCH_FIELD_FROM_STRUCT(s, 'f0') FROM input;" - ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Can't find any functions with the name 'FETCH_FIELD_FROM_STRUCT'" - } } ] } \ No newline at end of file From 240c3dc1d1e920fc5a53fb147a3f275f29db7d0c Mon Sep 17 00:00:00 2001 From: Steven Zhang <35498506+stevenpyzhang@users.noreply.github.com> Date: Sat, 28 Dec 2019 21:06:20 -0800 Subject: [PATCH 060/123] chore: use UsePartitionTimeOnInvalidTimestamp instead of UsePreviousTimeOnInvalidTimeStamp (#4199) --- .../timestamp/MetadataTimestampExtractionPolicyTest.java | 6 +++--- .../timestamp/TimestampExtractionPolicyFactoryTest.java | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/MetadataTimestampExtractionPolicyTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/MetadataTimestampExtractionPolicyTest.java index 239f85e22ce0..9a13cd9f9ec8 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/MetadataTimestampExtractionPolicyTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/MetadataTimestampExtractionPolicyTest.java @@ -16,7 +16,7 @@ package io.confluent.ksql.execution.streams.timestamp; import com.google.common.testing.EqualsTester; -import org.apache.kafka.streams.processor.UsePreviousTimeOnInvalidTimestamp; +import org.apache.kafka.streams.processor.UsePartitionTimeOnInvalidTimestamp; import org.junit.Test; public class MetadataTimestampExtractionPolicyTest { @@ -27,8 +27,8 @@ public void shouldTestEqualityCorrectly() { new MetadataTimestampExtractionPolicy(), new MetadataTimestampExtractionPolicy()) .addEqualityGroup( - new MetadataTimestampExtractionPolicy(new UsePreviousTimeOnInvalidTimestamp()), - new MetadataTimestampExtractionPolicy(new UsePreviousTimeOnInvalidTimestamp()) + new MetadataTimestampExtractionPolicy(new UsePartitionTimeOnInvalidTimestamp()), + new MetadataTimestampExtractionPolicy(new UsePartitionTimeOnInvalidTimestamp()) ) .testEquals(); } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/TimestampExtractionPolicyFactoryTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/TimestampExtractionPolicyFactoryTest.java index 1b49e720713f..b822cc197f2d 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/TimestampExtractionPolicyFactoryTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/TimestampExtractionPolicyFactoryTest.java @@ -31,7 +31,7 @@ import java.util.Optional; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.processor.FailOnInvalidTimestamp; -import org.apache.kafka.streams.processor.UsePreviousTimeOnInvalidTimestamp; +import org.apache.kafka.streams.processor.UsePartitionTimeOnInvalidTimestamp; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -105,11 +105,11 @@ public void shouldCreateMetadataPolicyWithDefaultFailedOnInvalidTimestamp() { } @Test - public void shouldCreateMetadataPolicyWithConfiguredUsePreviousTimeOnInvalidTimestamp() { + public void shouldCreateMetadataPolicyWithConfiguredUsePartitionTimeOnInvalidTimestamp() { // Given: final KsqlConfig ksqlConfig = 
new KsqlConfig(ImmutableMap.of( StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, - UsePreviousTimeOnInvalidTimestamp.class + UsePartitionTimeOnInvalidTimestamp.class )); // When: @@ -122,7 +122,7 @@ public void shouldCreateMetadataPolicyWithConfiguredUsePreviousTimeOnInvalidTime // Then: assertThat(result, instanceOf(MetadataTimestampExtractionPolicy.class)); - assertThat(result.create(0), instanceOf(UsePreviousTimeOnInvalidTimestamp.class)); + assertThat(result.create(0), instanceOf(UsePartitionTimeOnInvalidTimestamp.class)); } @Test From 8005542e0771cba92eecd6eae9a05e930edaf684 Mon Sep 17 00:00:00 2001 From: Andy Coates Date: Mon, 30 Dec 2019 15:46:47 +0000 Subject: [PATCH 061/123] chore: re-enable final param and var checkstyle rule --- checkstyle/checkstyle.properties | 2 + .../ksql/benchmark/UdfInvokerBenchmark.java | 8 +- .../confluent/ksql/cli/console/Console.java | 2 +- .../ksql/cli/console/cmd/RunScript.java | 2 +- .../builder/ErrorEntityTableBuilder.java | 2 +- .../java/io/confluent/ksql/cli/CliTest.java | 4 +- .../confluent/ksql/cli/SslFunctionalTest.java | 2 +- .../ksql/cli/console/JLineReaderTest.java | 6 +- .../ksql/configdef/ConfigValidators.java | 4 +- .../confluent/ksql/function/GenericsUtil.java | 7 +- .../ksql/function/KsqlScalarFunction.java | 6 +- .../ksql/function/ParameterInfo.java | 7 +- .../io/confluent/ksql/function/UdfIndex.java | 4 +- .../ksql/function/types/ArrayType.java | 8 +- .../ksql/function/types/BooleanType.java | 2 +- .../ksql/function/types/DecimalType.java | 2 +- .../ksql/function/types/DoubleType.java | 2 +- .../ksql/function/types/GenericType.java | 8 +- .../ksql/function/types/IntegerType.java | 2 +- .../ksql/function/types/LongType.java | 2 +- .../ksql/function/types/MapType.java | 8 +- .../ksql/function/types/StringType.java | 2 +- .../ksql/function/types/StructType.java | 10 +- .../confluent/ksql/metrics/TopicSensors.java | 4 +- .../ksql/schema/connect/SchemaWalker.java | 30 +- .../ksql/schema/ksql/SchemaConverters.java | 8 +- .../ksql/schema/ksql/SqlTypeWalker.java | 24 +- .../io/confluent/ksql/util/DecimalUtil.java | 4 +- .../io/confluent/ksql/util/KsqlConfig.java | 2 +- .../io/confluent/ksql/util/SchemaUtil.java | 10 +- .../io/confluent/ksql/GenericRowTest.java | 6 +- .../ProductionExceptionHandlerUtilTest.java | 4 +- .../confluent/ksql/function/UdfIndexTest.java | 2 +- .../ksql/metrics/MetricCollectorsTest.java | 2 +- .../schema/ksql/SchemaConvertersTest.java | 4 +- .../StringToTimestampParserTest.java | 26 +- .../ksql/analyzer/QueryAnalyzer.java | 2 +- .../ksql/connect/supported/JdbcSource.java | 2 +- .../confluent/ksql/embedded/KsqlContext.java | 2 +- .../ksql/engine/InsertValuesExecutor.java | 4 +- .../io/confluent/ksql/engine/KsqlPlanV1.java | 4 +- .../rewrite/ExpressionTreeRewriter.java | 4 +- .../ksql/execution/json/PlanJsonMapper.java | 2 +- .../ksql/function/DynamicFunctionInvoker.java | 2 +- .../ksql/function/FunctionLoaderUtils.java | 8 +- .../ksql/function/UdafFactoryInvoker.java | 2 +- .../confluent/ksql/function/UdafLoader.java | 2 +- .../io/confluent/ksql/function/UdfLoader.java | 2 +- .../confluent/ksql/function/UdtfLoader.java | 2 +- .../function/udaf/count/CountDistinct.java | 6 +- .../ksql/function/udaf/sum/ListSumUdaf.java | 2 +- .../ksql/function/udf/array/Entries.java | 2 +- .../function/udf/json/JsonArrayContains.java | 2 +- .../ksql/function/udf/list/ArrayContains.java | 4 +- .../ksql/function/udf/string/SplitKudf.java | 2 +- .../io/confluent/ksql/function/udtf/Cube.java | 10 +- 
.../ksql/planner/plan/RepartitionNode.java | 4 +- .../KsqlAuthorizationValidatorImpl.java | 2 +- .../ksql/services/DefaultConnectClient.java | 6 +- .../ksql/services/KafkaClusterUtil.java | 2 +- .../ksql/statement/InjectorChain.java | 2 +- .../ksql/structured/SchemaKStream.java | 4 +- .../ksql/topic/TopicDeleteInjector.java | 2 +- .../confluent/ksql/topic/TopicProperties.java | 4 +- .../java/io/confluent/ksql/util/AvroUtil.java | 2 +- .../src/test/java/TestUdfWithNoPackage.java | 2 +- .../ksql/KsqlContextTestUtilTest.java | 2 +- .../analyzer/QueryAnalyzerFunctionalTest.java | 2 +- .../ksql/ddl/commands/DdlCommandExecTest.java | 2 +- .../InternalFunctionRegistryTest.java | 16 +- .../ksql/function/UdfClassLoaderTest.java | 2 +- .../ksql/function/UdfLoaderTest.java | 16 +- .../ksql/function/UdtfLoaderTest.java | 6 +- .../udaf/average/AverageUdafTest.java | 2 +- .../udaf/count/CountDistinctKudafTest.java | 8 +- .../function/udaf/sum/ListSumUdafTest.java | 18 +- .../function/udaf/topk/IntTopkKudafTest.java | 2 +- .../confluent/ksql/function/udf/TestUdtf.java | 30 +- .../ksql/function/udf/array/EntriesTest.java | 44 +-- .../udf/array/GenerateSeriesTest.java | 50 +-- .../function/udf/geo/GeoDistanceTest.java | 2 +- .../udf/json/JsonArrayContainsTest.java | 2 +- .../function/udf/list/ArrayContainsTest.java | 3 +- .../ksql/function/udf/string/FieldTest.java | 12 +- .../ksql/function/udtf/CubeTest.java | 22 +- .../integration/IntegrationTestHarness.java | 6 +- .../ksql/internal/KsqlEngineMetricsTest.java | 2 +- .../ks/KsMaterializationFunctionalTest.java | 22 +- .../ExtensionSecurityManagerTest.java | 2 +- .../services/KafkaTopicClientImplTest.java | 6 +- .../ksql/services/MemoizedSupplierTest.java | 10 +- .../ksql/util/LimitedProxyBuilderTest.java | 10 +- .../io/confluent/ksql/datagen/DataGen.java | 2 +- .../exception/KafkaDeleteTopicsException.java | 5 +- .../KafkaResponseGetFailedException.java | 2 +- .../exception/KafkaTopicClientException.java | 4 +- .../exception/KafkaTopicExistsException.java | 2 +- .../execution/builder/KsqlQueryBuilder.java | 42 ++- .../ksql/execution/codegen/CodeGenRunner.java | 64 ++-- .../ksql/execution/codegen/CodeGenSpec.java | 28 +- .../ksql/execution/codegen/CodeGenUtil.java | 6 +- .../execution/codegen/ExpressionMetadata.java | 16 +- .../execution/codegen/SqlToJavaVisitor.java | 303 ++++++++++-------- .../codegen/helpers/ArrayAccess.java | 2 +- .../codegen/helpers/SearchedCaseFunction.java | 9 +- .../ksql/execution/context/QueryContext.java | 16 +- .../execution/context/QueryLoggerUtil.java | 2 +- .../ddl/commands/CreateStreamCommand.java | 12 +- .../ddl/commands/CreateTableCommand.java | 12 +- .../ddl/commands/DdlCommandResult.java | 2 +- .../ddl/commands/DropSourceCommand.java | 4 +- .../ddl/commands/DropTypeCommand.java | 4 +- .../execution/ddl/commands/KsqlTopic.java | 6 +- .../formatter/ExpressionFormatter.java | 105 +++--- .../tree/ArithmeticBinaryExpression.java | 17 +- .../tree/ArithmeticUnaryExpression.java | 16 +- .../expression/tree/BetweenPredicate.java | 13 +- .../expression/tree/BooleanLiteral.java | 10 +- .../ksql/execution/expression/tree/Cast.java | 10 +- .../expression/tree/ColumnReferenceExp.java | 10 +- .../expression/tree/ComparisonExpression.java | 15 +- .../tree/CreateStructExpression.java | 12 +- .../expression/tree/DecimalLiteral.java | 10 +- .../tree/DereferenceExpression.java | 12 +- .../expression/tree/DoubleLiteral.java | 12 +- .../execution/expression/tree/Expression.java | 2 +- .../expression/tree/ExpressionVisitor.java | 2 +- 
.../expression/tree/FunctionCall.java | 12 +- .../expression/tree/InListExpression.java | 10 +- .../expression/tree/InPredicate.java | 12 +- .../expression/tree/IntegerLiteral.java | 10 +- .../expression/tree/IsNotNullPredicate.java | 10 +- .../expression/tree/IsNullPredicate.java | 10 +- .../expression/tree/LikePredicate.java | 14 +- .../execution/expression/tree/Literal.java | 2 +- .../tree/LogicalBinaryExpression.java | 17 +- .../expression/tree/LongLiteral.java | 10 +- .../expression/tree/NotExpression.java | 10 +- .../expression/tree/NullLiteral.java | 6 +- .../tree/SearchedCaseExpression.java | 15 +- .../expression/tree/SimpleCaseExpression.java | 16 +- .../expression/tree/StringLiteral.java | 10 +- .../expression/tree/SubscriptExpression.java | 14 +- .../expression/tree/TimeLiteral.java | 10 +- .../expression/tree/TimestampLiteral.java | 10 +- .../tree/TraversalExpressionVisitor.java | 68 ++-- .../ksql/execution/expression/tree/Type.java | 10 +- .../tree/VisitParentExpressionVisitor.java | 66 ++-- .../execution/expression/tree/WhenClause.java | 14 +- .../ksql/execution/function/UdafUtil.java | 18 +- .../ksql/execution/function/UdfUtil.java | 14 +- .../ksql/execution/function/UdtfUtil.java | 13 +- .../function/udaf/KudafAggregator.java | 32 +- .../function/udaf/KudafInitializer.java | 4 +- .../function/udaf/KudafUndoAggregator.java | 12 +- .../function/udtf/KudtfFlatMapper.java | 10 +- .../function/udtf/TableFunctionApplier.java | 6 +- .../materialization/MaterializationInfo.java | 14 +- .../execution/plan/AbstractStreamSource.java | 20 +- .../execution/plan/KGroupedStreamHolder.java | 8 +- .../execution/plan/KGroupedTableHolder.java | 8 +- .../LogicalSchemaWithMetaAndKeyFields.java | 10 +- .../ksql/execution/plan/SelectExpression.java | 10 +- .../ksql/execution/plan/StreamAggregate.java | 18 +- .../ksql/execution/plan/StreamFilter.java | 14 +- .../ksql/execution/plan/StreamFlatMap.java | 10 +- .../ksql/execution/plan/StreamGroupBy.java | 16 +- .../ksql/execution/plan/StreamGroupByKey.java | 14 +- .../ksql/execution/plan/StreamSelect.java | 14 +- .../ksql/execution/plan/StreamSelectKey.java | 14 +- .../ksql/execution/plan/StreamSink.java | 16 +- .../ksql/execution/plan/StreamSource.java | 16 +- .../ksql/execution/plan/StreamStreamJoin.java | 30 +- .../ksql/execution/plan/StreamTableJoin.java | 18 +- .../plan/StreamWindowedAggregate.java | 20 +- .../ksql/execution/plan/TableAggregate.java | 18 +- .../ksql/execution/plan/TableFilter.java | 14 +- .../ksql/execution/plan/TableGroupBy.java | 16 +- .../ksql/execution/plan/TableSelect.java | 14 +- .../ksql/execution/plan/TableSink.java | 16 +- .../ksql/execution/plan/TableSource.java | 2 +- .../ksql/execution/plan/TableTableJoin.java | 16 +- .../execution/plan/WindowedStreamSource.java | 18 +- .../execution/plan/WindowedTableSource.java | 16 +- .../execution/timestamp/TimestampColumn.java | 8 +- .../transform/sqlpredicate/SqlPredicate.java | 18 +- .../transform/window/WindowSelectMapper.java | 11 +- .../ksql/execution/util/ComparisonUtil.java | 2 +- .../EngineProcessingLogMessageFactory.java | 12 +- .../execution/util/ExpressionTypeManager.java | 142 ++++---- .../windows/HoppingWindowExpression.java | 18 +- .../windows/KsqlWindowExpression.java | 2 +- .../windows/SessionWindowExpression.java | 14 +- .../windows/TumblingWindowExpression.java | 14 +- .../ksql/services/ConnectClient.java | 6 +- .../ksql/services/DisabledKsqlClient.java | 2 +- .../ksql/services/KafkaTopicClient.java | 30 +- .../builder/KsqlQueryBuilderTest.java | 8 +- 
.../codegen/ExpressionMetadataTest.java | 10 +- .../codegen/SqlToJavaVisitorTest.java | 184 +++++------ .../codegen/helpers/ArrayAccessTest.java | 19 +- .../helpers/SearchedCaseFunctionTest.java | 36 +-- .../formatter/ExpressionFormatterTest.java | 38 +-- .../ksql/execution/function/UdafUtilTest.java | 2 +- .../ksql/execution/function/UdfUtilTest.java | 32 +- .../function/udtf/KudtfFlatMapperTest.java | 22 +- .../execution/testutil/TestExpressions.java | 4 +- .../sqlpredicate/SqlPredicateTest.java | 14 +- .../window/WindowSelectMapperTest.java | 10 +- .../execution/util/ComparisonUtilTest.java | 10 +- ...EngineProcessingLogMessageFactoryTest.java | 22 +- .../util/ExpressionTypeManagerTest.java | 110 +++---- .../json/ValueSpecJsonSerdeSupplier.java | 2 +- .../KafkaStreamsInternalTopicsAccessor.java | 2 +- .../ksql/test/tools/TestExecutor.java | 4 +- .../ksql/test/SchemaTranslationTest.java | 4 +- .../test/planned/PlannedTestGenerator.java | 2 +- .../ksql/test/rest/RestTestExecutor.java | 2 +- .../io/confluent/ksql/parser/AstBuilder.java | 4 +- .../json/KsqlTypesDeserializationModule.java | 2 +- .../ksql/parser/tree/WindowExpression.java | 2 +- .../schema/ksql/DefaultSqlValueCoercer.java | 10 +- .../confluent/ksql/parser/KsqlParserTest.java | 6 +- .../json/LogicalSchemaDeserializerTest.java | 2 +- .../ksql/rest/server/ExecutableServer.java | 7 +- .../ksql/rest/server/KsqlRestApplication.java | 4 +- .../ksql/rest/server/KsqlRestConfig.java | 2 +- .../ksql/rest/server/StandaloneExecutor.java | 2 +- .../execution/DescribeConnectorExecutor.java | 2 +- .../execution/ListPropertiesExecutor.java | 10 +- .../rest/server/execution/RequestHandler.java | 2 +- .../filters/KsqlAuthorizationFilter.java | 2 +- .../rest/server/resources/KsqlResource.java | 2 +- .../streaming/StreamedQueryResource.java | 2 +- .../server/validation/RequestValidator.java | 2 +- .../rest/util/RocksDBConfigSetterHandler.java | 2 +- .../confluent/ksql/parser/ParserMatchers.java | 4 +- .../client/KsqlRestClientFunctionalTest.java | 2 +- .../entity/TableRowsEntityFactoryTest.java | 2 +- .../healthcheck/HealthCheckAgentTest.java | 2 +- .../MockKsqlSecurityExtension.java | 2 +- .../KsqlRestApplicationFunctionalTest.java | 2 +- .../rest/server/KsqlRestApplicationTest.java | 4 +- .../ksql/rest/server/KsqlRestConfigTest.java | 2 +- .../ksql/rest/server/ServerUtilTest.java | 4 +- .../StandaloneExecutorFunctionalTest.java | 2 +- .../ksql/rest/server/TemporaryEngine.java | 10 +- .../ksql/rest/server/TestKsqlRestApp.java | 8 +- .../server/computation/CommandRunnerTest.java | 14 +- .../computation/DistributingExecutorTest.java | 2 +- .../InteractiveStatementExecutorTest.java | 35 +- .../rest/server/computation/RecoveryTest.java | 4 +- .../SequenceNumberFutureStoreTest.java | 4 +- .../DescribeConnectorExecutorTest.java | 2 +- .../DescribeFunctionExecutorTest.java | 12 +- .../server/execution/ExplainExecutorTest.java | 2 +- .../execution/ListFunctionsExecutorTest.java | 2 +- .../execution/ListPropertiesExecutorTest.java | 6 +- .../server/execution/RequestHandlerTest.java | 6 +- .../filters/KsqlAuthorizationFilterTest.java | 12 +- .../rest/server/mock/MockApplication.java | 2 +- .../server/resources/KsqlResourceTest.java | 4 +- .../streaming/PollingSubscriptionTest.java | 35 +- .../streaming/PrintSubscriptionTest.java | 42 +-- .../streaming/StreamedQueryResourceTest.java | 2 +- .../streaming/StreamingTestUtils.java | 39 ++- .../streaming/TopicStreamWriterTest.java | 2 +- .../rest/server/state/ServerStateTest.java | 2 +- 
.../validation/RequestValidatorTest.java | 2 +- .../ksql/rest/client/KsqlTargetTest.java | 2 +- .../ksql/rest/entity/PropertiesList.java | 4 +- .../ksql/rest/entity/StreamedRow.java | 4 +- .../io/confluent/ksql/rest/ErrorsTest.java | 4 +- .../rest/entity/KsqlErrorMessageTest.java | 4 +- .../ksql/rest/entity/KsqlRequestTest.java | 4 +- .../ksql/rest/entity/ServerInfoTest.java | 2 +- .../KsqlBoundedMemoryRocksDBConfigSetter.java | 2 +- .../ksql/serde/KsqlSerdeFactory.java | 8 +- .../ksql/serde/json/KsqlJsonDeserializer.java | 10 +- .../serde/avro/AvroDataTranslatorTest.java | 2 +- .../serde/avro/KsqlAvroSerializerTest.java | 2 +- .../KsqlDelimitedDeserializerTest.java | 4 +- .../serde/json/KsqlJsonDeserializerTest.java | 4 +- .../serde/tls/ThreadLocalCloseableTest.java | 19 +- .../ksql/execution/streams/SourceBuilder.java | 2 +- .../streams/StreamFlatMapBuilder.java | 6 +- .../ks/KsMaterializationFactory.java | 2 +- .../materialization/ks/KsLocatorTest.java | 2 +- .../ksql/test/util/KafkaEmbedded.java | 2 +- .../confluent/ksql/test/util/TestMethods.java | 4 +- 290 files changed, 1873 insertions(+), 1653 deletions(-) diff --git a/checkstyle/checkstyle.properties b/checkstyle/checkstyle.properties index 3d89946c8dfb..bc6dd0a47039 100644 --- a/checkstyle/checkstyle.properties +++ b/checkstyle/checkstyle.properties @@ -18,4 +18,6 @@ checkstyle.hideutilityclassconstructor.severity=error checkstyle.redundantmodifier.severity=error checkstyle.customimportorder.severity=error checkstyle.finalclass.severity=error +checkstyle.finalparameters.severity=error +checkstyle.finallocalvariable.severity=error checkstyle.todocomment.severity=error diff --git a/ksql-benchmark/src/main/java/io/confluent/ksql/benchmark/UdfInvokerBenchmark.java b/ksql-benchmark/src/main/java/io/confluent/ksql/benchmark/UdfInvokerBenchmark.java index dbeda95a8338..fba3096f9b28 100644 --- a/ksql-benchmark/src/main/java/io/confluent/ksql/benchmark/UdfInvokerBenchmark.java +++ b/ksql-benchmark/src/main/java/io/confluent/ksql/benchmark/UdfInvokerBenchmark.java @@ -64,7 +64,7 @@ public void setUp() { private Method createMethod(final String methodName, final Class... params) { try { return getClass().getMethod(methodName, params); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException(e); } } @@ -73,7 +73,7 @@ private PluggableUdf createPluggableUdf(final Method method) { try { final FunctionInvoker invoker = FunctionLoaderUtils.createFunctionInvoker(method); return new PluggableUdf(invoker, this); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException(e); } } @@ -91,7 +91,7 @@ public int varArgsMethod(final int x, final long... 
arr) { public int invokeSimpleMethod(final UdfInvokerState state) { try { return (Integer) state.simpleMethod.invoke(state, 1); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException(e); } } @@ -100,7 +100,7 @@ public int invokeSimpleMethod(final UdfInvokerState state) { public int invokeVarargsMethod(final UdfInvokerState state) { try { return (Integer) state.varArgsMethod.invoke(state, 1, vargs2); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException(e); } } diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java index f27bcffd0ba7..47493f2d5a63 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java @@ -250,7 +250,7 @@ public void setSpool(final File file) { spoolFile = Optional.of(file); terminal.writer().println("Session will be spooled to " + file.getAbsolutePath()); terminal.writer().println("Enter SPOOL OFF to disable"); - } catch (IOException e) { + } catch (final IOException e) { throw new KsqlException("Cannot SPOOL to file: " + file, e); } } diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/cmd/RunScript.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/cmd/RunScript.java index 05934a62d336..3a5342d9395a 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/cmd/RunScript.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/cmd/RunScript.java @@ -65,7 +65,7 @@ private static String loadScript(final String filePath) { try { return Files.readAllLines(Paths.get(filePath), StandardCharsets.UTF_8).stream() .collect(Collectors.joining(System.lineSeparator())); - } catch (IOException e) { + } catch (final IOException e) { throw new KsqlException("Failed to read file: " + filePath, e); } } diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/ErrorEntityTableBuilder.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/ErrorEntityTableBuilder.java index 5242cc05955c..64a107466e3d 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/ErrorEntityTableBuilder.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/ErrorEntityTableBuilder.java @@ -35,7 +35,7 @@ public Table buildTable(final ErrorEntity entity) { formatted = MAPPER .writerWithDefaultPrettyPrinter() .writeValueAsString(MAPPER.readTree(message)); - } catch (IOException e) { + } catch (final IOException e) { formatted = String.join("\n", Splitter.fixedLength(60).splitToList(message)); } diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java index 73a74bfc29ae..e985a4ad7f57 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java @@ -262,7 +262,7 @@ public static void classTearDown() { } private void testCreateStreamAsSelect( - String selectQuery, + final String selectQuery, final PhysicalSchema resultSchema, final Map expectedResults ) { @@ -405,7 +405,7 @@ public void testPrint() { try { thread.join(0); - } catch (InterruptedException e) { + } catch (final InterruptedException e) { // } } diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java index 07acd2145b31..475cf85fe561 100644 --- 
a/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java @@ -241,7 +241,7 @@ public void onError(final Throwable t) { @SuppressWarnings("unused") // Invoked via reflection @OnWebSocketMessage - public void onMessage(String msg) { + public void onMessage(final String msg) { closeSilently(); latch.countDown(); } diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/JLineReaderTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/JLineReaderTest.java index 99ee9eefc7c2..8b7f35fbb14a 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/JLineReaderTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/JLineReaderTest.java @@ -259,10 +259,10 @@ private static List readAllLines(final JLineReader reader) { final List commands = new ArrayList<>(); try { while (true) { - String line = reader.readLine(); + final String line = reader.readLine(); commands.add(line.trim()); } - } catch (EndOfFileException e) { + } catch (final EndOfFileException e) { // this indicates end of input in JLine } return commands; @@ -279,7 +279,7 @@ private JLineReader createReaderForInput(final String input) throws IOException new ByteArrayInputStream(input.getBytes(StandardCharsets.UTF_8)); final OutputStream outputStream = new ByteArrayOutputStream(512); final Terminal terminal = new DumbTerminal(inputStream, outputStream); - File tempHistoryFile = tempFolder.newFile("ksql-history.txt"); + final File tempHistoryFile = tempFolder.newFile("ksql-history.txt"); final Path historyFilePath = Paths.get(tempHistoryFile.getAbsolutePath()); return new JLineReader(terminal, historyFilePath, cliLinePredicate); } diff --git a/ksql-common/src/main/java/io/confluent/ksql/configdef/ConfigValidators.java b/ksql-common/src/main/java/io/confluent/ksql/configdef/ConfigValidators.java index 3db0708f0bbd..163ce2dd9af3 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/configdef/ConfigValidators.java +++ b/ksql-common/src/main/java/io/confluent/ksql/configdef/ConfigValidators.java @@ -46,7 +46,7 @@ public static Validator parses(final Function parser) { } try { parser.apply((String)val); - } catch (Exception e) { + } catch (final Exception e) { throw new ConfigException("Configuration " + name + " is invalid: " + e.getMessage()); } }; @@ -86,7 +86,7 @@ public static Validator validUrl() { } try { new URL((String)val); - } catch (Exception e) { + } catch (final Exception e) { throw new ConfigException(name, val, "Not valid URL: " + e.getMessage()); } }; diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/GenericsUtil.java b/ksql-common/src/main/java/io/confluent/ksql/function/GenericsUtil.java index 62f1e4af9eca..688b464e4077 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/GenericsUtil.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/GenericsUtil.java @@ -104,7 +104,10 @@ public static boolean hasGenerics(final ParamType type) { * @throws KsqlException if there is a generic in {@code schema} that is not present * in {@code mapping} */ - public static SqlType applyResolved(ParamType schema, Map resolved) { + public static SqlType applyResolved( + final ParamType schema, + final Map resolved + ) { if (schema instanceof ArrayType) { return SqlTypes.array(applyResolved(((ArrayType) schema).element(), resolved)); } @@ -210,7 +213,7 @@ private static boolean resolveGenerics( return true; } - private static boolean matches(ParamType schema, SqlType instance) { + 
private static boolean matches(final ParamType schema, final SqlType instance) { switch (instance.baseType()) { case BOOLEAN: return schema instanceof BooleanType; case INTEGER: return schema instanceof IntegerType; diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/KsqlScalarFunction.java b/ksql-common/src/main/java/io/confluent/ksql/function/KsqlScalarFunction.java index 164c4f286f50..dd6de2b9eb63 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/KsqlScalarFunction.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/KsqlScalarFunction.java @@ -65,8 +65,10 @@ public static KsqlScalarFunction createLegacyBuiltIn( final FunctionName functionName, final Class kudfClass ) { - ParamType javaReturnType = SchemaConverters.sqlToFunctionConverter().toFunctionType(returnType); - List paramInfos = arguments + final ParamType javaReturnType = SchemaConverters.sqlToFunctionConverter() + .toFunctionType(returnType); + + final List paramInfos = arguments .stream() .map(type -> new ParameterInfo("", type, "", false)) .collect(Collectors.toList()); diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/ParameterInfo.java b/ksql-common/src/main/java/io/confluent/ksql/function/ParameterInfo.java index 10123793c460..17d5b34a6ccf 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/ParameterInfo.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/ParameterInfo.java @@ -26,7 +26,12 @@ public final class ParameterInfo { private final String description; private final boolean isVariadic; - public ParameterInfo(String name, ParamType type, String description, boolean isVariadic) { + public ParameterInfo( + final String name, + final ParamType type, + final String description, + final boolean isVariadic + ) { this.name = name; this.type = type; this.description = description; diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/UdfIndex.java b/ksql-common/src/main/java/io/confluent/ksql/function/UdfIndex.java index a3f19e89ee4a..4ba92a9b24bb 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/UdfIndex.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/UdfIndex.java @@ -312,7 +312,9 @@ private static boolean reserveGenerics( return false; } - Map genericMapping = GenericsUtil.resolveGenerics(schema, argument); + final Map genericMapping = GenericsUtil + .resolveGenerics(schema, argument); + for (final Entry entry : genericMapping.entrySet()) { final SqlType old = reservedGenerics.putIfAbsent(entry.getKey(), entry.getValue()); if (old != null && !old.equals(entry.getValue())) { diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/ArrayType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/ArrayType.java index faec535f103a..82361948b245 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/ArrayType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/ArrayType.java @@ -21,11 +21,11 @@ public final class ArrayType extends ObjectType { private final ParamType element; - private ArrayType(ParamType element) { + private ArrayType(final ParamType element) { this.element = element; } - public static ArrayType of(ParamType element) { + public static ArrayType of(final ParamType element) { return new ArrayType(element); } @@ -34,14 +34,14 @@ public ParamType element() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { 
return false; } - ArrayType arrayType = (ArrayType) o; + final ArrayType arrayType = (ArrayType) o; return Objects.equals(element, arrayType.element); } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/BooleanType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/BooleanType.java index c4b4583327f5..97ef23e51faa 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/BooleanType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/BooleanType.java @@ -28,7 +28,7 @@ public int hashCode() { } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { return obj instanceof BooleanType; } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/DecimalType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/DecimalType.java index adb0dc4acae5..73feab1eca12 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/DecimalType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/DecimalType.java @@ -28,7 +28,7 @@ public int hashCode() { } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { return obj instanceof DecimalType; } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/DoubleType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/DoubleType.java index 159e056d3626..af889abd96e7 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/DoubleType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/DoubleType.java @@ -28,7 +28,7 @@ public int hashCode() { } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { return obj instanceof DoubleType; } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/GenericType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/GenericType.java index aa1bc41c81f3..fa4bfdcbd6b0 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/GenericType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/GenericType.java @@ -21,11 +21,11 @@ public final class GenericType extends ObjectType { private final String name; - private GenericType(String name) { + private GenericType(final String name) { this.name = name; } - public static GenericType of(String name) { + public static GenericType of(final String name) { return new GenericType(name); } @@ -34,14 +34,14 @@ public String name() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - GenericType that = (GenericType) o; + final GenericType that = (GenericType) o; return Objects.equals(name, that.name); } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/IntegerType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/IntegerType.java index 34534343832f..95a414b8dcbd 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/IntegerType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/IntegerType.java @@ -29,7 +29,7 @@ public int hashCode() { } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { return obj instanceof IntegerType; } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/LongType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/LongType.java index 
bf293b69c8ae..58cfafde3792 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/LongType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/LongType.java @@ -28,7 +28,7 @@ public int hashCode() { } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { return obj instanceof LongType; } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/MapType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/MapType.java index 0a223d12a1a5..10b4ec43ce4b 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/MapType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/MapType.java @@ -21,11 +21,11 @@ public final class MapType extends ObjectType { private final ParamType value; - private MapType(ParamType value) { + private MapType(final ParamType value) { this.value = value; } - public static MapType of(ParamType value) { + public static MapType of(final ParamType value) { return new MapType(value); } @@ -38,14 +38,14 @@ public ParamType value() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - MapType mapType = (MapType) o; + final MapType mapType = (MapType) o; return Objects.equals(value, mapType.value); } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/StringType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/StringType.java index 69c5e77bab6e..ffef4d3a601c 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/StringType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/StringType.java @@ -28,7 +28,7 @@ public int hashCode() { } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { return obj instanceof StringType; } diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/types/StructType.java b/ksql-common/src/main/java/io/confluent/ksql/function/types/StructType.java index 1f19da85b337..fc6cb18eba92 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/types/StructType.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/types/StructType.java @@ -24,7 +24,7 @@ public final class StructType extends ObjectType { private final ImmutableMap schema; - private StructType(Map schema) { + private StructType(final Map schema) { this.schema = ImmutableMap.copyOf(Objects.requireNonNull(schema, "schema")); } @@ -37,14 +37,14 @@ public Map getSchema() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - StructType that = (StructType) o; + final StructType that = (StructType) o; return Objects.equals(schema, that.schema); } @@ -64,12 +64,12 @@ public String toString() { } public static final class Builder { - private ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); + private final ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); private Builder() { } - public Builder field(String name, ParamType value) { + public Builder field(final String name, final ParamType value) { builder.put(name, value); return this; } diff --git a/ksql-common/src/main/java/io/confluent/ksql/metrics/TopicSensors.java b/ksql-common/src/main/java/io/confluent/ksql/metrics/TopicSensors.java index e2d9c18e5244..520d4766b8bd 100644 --- 
a/ksql-common/src/main/java/io/confluent/ksql/metrics/TopicSensors.java +++ b/ksql-common/src/main/java/io/confluent/ksql/metrics/TopicSensors.java @@ -167,8 +167,8 @@ static class SensorMetric
{ private final Sensor sensor; private final KafkaMetric metric; - private Time time; - private boolean errorMetric; + private final Time time; + private final boolean errorMetric; private long lastEvent = 0; SensorMetric(final Sensor sensor, final KafkaMetric metric, diff --git a/ksql-common/src/main/java/io/confluent/ksql/schema/connect/SchemaWalker.java b/ksql-common/src/main/java/io/confluent/ksql/schema/connect/SchemaWalker.java index 0b46e2041b11..f654012ce8f7 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/schema/connect/SchemaWalker.java +++ b/ksql-common/src/main/java/io/confluent/ksql/schema/connect/SchemaWalker.java @@ -50,63 +50,63 @@ private SchemaWalker() { public interface Visitor { - default S visitSchema(Schema schema) { + default S visitSchema(final Schema schema) { throw new UnsupportedOperationException("Unsupported schema type: " + schema); } - default S visitPrimitive(Schema schema) { + default S visitPrimitive(final Schema schema) { return visitSchema(schema); } - default S visitBoolean(Schema schema) { + default S visitBoolean(final Schema schema) { return visitPrimitive(schema); } - default S visitInt8(Schema schema) { + default S visitInt8(final Schema schema) { return visitPrimitive(schema); } - default S visitInt16(Schema schema) { + default S visitInt16(final Schema schema) { return visitPrimitive(schema); } - default S visitInt32(Schema schema) { + default S visitInt32(final Schema schema) { return visitPrimitive(schema); } - default S visitInt64(Schema schema) { + default S visitInt64(final Schema schema) { return visitPrimitive(schema); } - default S visitFloat32(Schema schema) { + default S visitFloat32(final Schema schema) { return visitPrimitive(schema); } - default S visitFloat64(Schema schema) { + default S visitFloat64(final Schema schema) { return visitPrimitive(schema); } - default S visitString(Schema schema) { + default S visitString(final Schema schema) { return visitPrimitive(schema); } - default S visitBytes(Schema schema) { + default S visitBytes(final Schema schema) { return visitSchema(schema); } - default S visitArray(Schema schema, S element) { + default S visitArray(final Schema schema, final S element) { return visitSchema(schema); } - default S visitMap(Schema schema, S key, S value) { + default S visitMap(final Schema schema, final S key, final S value) { return visitSchema(schema); } - default S visitStruct(Schema schema, List fields) { + default S visitStruct(final Schema schema, final List fields) { return visitSchema(schema); } - default F visitField(Field field, S type) { + default F visitField(final Field field, final S type) { return null; } } diff --git a/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/SchemaConverters.java b/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/SchemaConverters.java index 8229a548206b..88c3fac43c2e 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/SchemaConverters.java +++ b/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/SchemaConverters.java @@ -143,7 +143,7 @@ public interface SqlToJavaTypeConverter { */ Class toJavaType(SqlBaseType sqlBaseType); - default Class toJavaType(SqlType sqlType) { + default Class toJavaType(final SqlType sqlType) { return toJavaType(sqlType.baseType()); } } @@ -352,7 +352,7 @@ private static class FunctionToSql implements FunctionToSqlConverter { .build(); @Override - public SqlType toSqlType(ParamType paramType) { + public SqlType toSqlType(final ParamType paramType) { final SqlType sqlType = 
FUNCTION_TO_SQL.get(paramType); if (sqlType != null) { return sqlType; @@ -380,7 +380,7 @@ public SqlType toSqlType(ParamType paramType) { private static class SqlToFunction implements SqlToFunctionConverter { @Override - public ParamType toFunctionType(SqlType sqlType) { + public ParamType toFunctionType(final SqlType sqlType) { final ParamType paramType = FunctionToSql.FUNCTION_TO_SQL.inverse().get(sqlType); if (paramType != null) { return paramType; @@ -400,7 +400,7 @@ public ParamType toFunctionType(SqlType sqlType) { if (sqlType.baseType() == SqlBaseType.STRUCT) { final StructType.Builder builder = StructType.builder(); - for (Field field : ((SqlStruct) sqlType).fields()) { + for (final Field field : ((SqlStruct) sqlType).fields()) { builder.field(field.name(), toFunctionType(field.type())); } return builder.build(); diff --git a/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/SqlTypeWalker.java b/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/SqlTypeWalker.java index 1f222e3d929c..ce3d43523831 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/SqlTypeWalker.java +++ b/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/SqlTypeWalker.java @@ -52,51 +52,51 @@ private SqlTypeWalker() { public interface Visitor { - default S visitType(SqlType schema) { + default S visitType(final SqlType schema) { throw new UnsupportedOperationException("Unsupported sql type: " + schema); } - default S visitPrimitive(SqlPrimitiveType type) { + default S visitPrimitive(final SqlPrimitiveType type) { return visitType(type); } - default S visitBoolean(SqlPrimitiveType type) { + default S visitBoolean(final SqlPrimitiveType type) { return visitPrimitive(type); } - default S visitInt(SqlPrimitiveType type) { + default S visitInt(final SqlPrimitiveType type) { return visitPrimitive(type); } - default S visitBigInt(SqlPrimitiveType type) { + default S visitBigInt(final SqlPrimitiveType type) { return visitPrimitive(type); } - default S visitDouble(SqlPrimitiveType type) { + default S visitDouble(final SqlPrimitiveType type) { return visitPrimitive(type); } - default S visitString(SqlPrimitiveType type) { + default S visitString(final SqlPrimitiveType type) { return visitPrimitive(type); } - default S visitDecimal(SqlDecimal type) { + default S visitDecimal(final SqlDecimal type) { return visitType(type); } - default S visitArray(SqlArray type, S element) { + default S visitArray(final SqlArray type, final S element) { return visitType(type); } - default S visitMap(SqlMap type, S value) { + default S visitMap(final SqlMap type, final S value) { return visitType(type); } - default S visitStruct(SqlStruct type, List fields) { + default S visitStruct(final SqlStruct type, final List fields) { return visitType(type); } - default F visitField(Field field, S type) { + default F visitField(final Field field, final S type) { return null; } } diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/DecimalUtil.java b/ksql-common/src/main/java/io/confluent/ksql/util/DecimalUtil.java index 602c94cd5d9e..3df1697c5e10 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/util/DecimalUtil.java +++ b/ksql-common/src/main/java/io/confluent/ksql/util/DecimalUtil.java @@ -88,7 +88,7 @@ public static int scale(final Schema schema) { try { return Integer.parseInt(scaleString); - } catch (NumberFormatException e) { + } catch (final NumberFormatException e) { throw new DataException("Invalid scale parameter found in Decimal schema: ", e); } } @@ -106,7 +106,7 @@ public static int 
precision(final Schema schema) { try { return Integer.parseInt(precisionString); - } catch (NumberFormatException e) { + } catch (final NumberFormatException e) { throw new DataException("Invalid precision parameter found in Decimal schema: ", e); } } diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java index 2cd95dc7fad9..3c2bfb659720 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java +++ b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java @@ -751,7 +751,7 @@ public Map getStringAsMap(final String key) { return value.equals("") ? Collections.emptyMap() : Splitter.on(",").trimResults().withKeyValueSeparator(":").split(value); - } catch (IllegalArgumentException e) { + } catch (final IllegalArgumentException e) { throw new KsqlException( String.format( "Invalid config value for '%s'. value: %s. reason: %s", diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/SchemaUtil.java b/ksql-common/src/main/java/io/confluent/ksql/util/SchemaUtil.java index fc364ca8b1ca..63fb7625a764 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/util/SchemaUtil.java +++ b/ksql-common/src/main/java/io/confluent/ksql/util/SchemaUtil.java @@ -141,7 +141,7 @@ public static Schema ensureOptional(final Schema schema) { .build(); } - public static boolean areCompatible(SqlType actual, ParamType declared) { + public static boolean areCompatible(final SqlType actual, final ParamType declared) { if (actual.baseType() == SqlBaseType.ARRAY && declared instanceof ArrayType) { return areCompatible(((SqlArray) actual).getItemType(), ((ArrayType) declared).element()); } @@ -160,7 +160,7 @@ public static boolean areCompatible(SqlType actual, ParamType declared) { return isPrimitiveMatch(actual, declared); } - private static boolean isStructCompatible(SqlType actual, ParamType declared) { + private static boolean isStructCompatible(final SqlType actual, final ParamType declared) { final SqlStruct actualStruct = (SqlStruct) actual; // consider a struct that is empty to match any other struct @@ -168,8 +168,8 @@ private static boolean isStructCompatible(SqlType actual, ParamType declared) { return true; } - for (Entry entry : ((StructType) declared).getSchema().entrySet()) { - String k = entry.getKey(); + for (final Entry entry : ((StructType) declared).getSchema().entrySet()) { + final String k = entry.getKey(); final Optional field = actualStruct.field(k); if (!field.isPresent() || !areCompatible(field.get().type(), entry.getValue())) { return false; @@ -179,7 +179,7 @@ private static boolean isStructCompatible(SqlType actual, ParamType declared) { } // CHECKSTYLE_RULES.OFF: CyclomaticComplexity - private static boolean isPrimitiveMatch(SqlType actual, ParamType declared) { + private static boolean isPrimitiveMatch(final SqlType actual, final ParamType declared) { // CHECKSTYLE_RULES.ON: CyclomaticComplexity // CHECKSTYLE_RULES.OFF: BooleanExpressionComplexity return actual.baseType() == SqlBaseType.STRING && declared instanceof StringType diff --git a/ksql-common/src/test/java/io/confluent/ksql/GenericRowTest.java b/ksql-common/src/test/java/io/confluent/ksql/GenericRowTest.java index b30a8f2c8377..d67d4a7ce781 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/GenericRowTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/GenericRowTest.java @@ -128,8 +128,8 @@ public void shouldPrintStructRowCorrectly() { @Test public void shouldHandleRowWithNoElements() { - List 
linkedList = new LinkedList<>(); - GenericRow genericRow = new GenericRow(linkedList); + final List linkedList = new LinkedList<>(); + final GenericRow genericRow = new GenericRow(linkedList); assertThat(genericRow.getColumns().size(), is(0) ); } @@ -137,7 +137,7 @@ public void shouldHandleRowWithNoElements() { @Test public void testEquals(){ - List columnListWithString = ImmutableList.of("nr"); + final List columnListWithString = ImmutableList.of("nr"); new EqualsTester(). addEqualityGroup( diff --git a/ksql-common/src/test/java/io/confluent/ksql/errors/ProductionExceptionHandlerUtilTest.java b/ksql-common/src/test/java/io/confluent/ksql/errors/ProductionExceptionHandlerUtilTest.java index 17a3c446b6f0..d64833b91bfa 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/errors/ProductionExceptionHandlerUtilTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/errors/ProductionExceptionHandlerUtilTest.java @@ -132,7 +132,7 @@ public void shouldReturnContinueFromLogAndContinueHandler() { assertResponseIs(ProductionExceptionHandlerResponse.CONTINUE); } - private void assertResponseIs(Object o) { + private void assertResponseIs(final Object o) { // When: final ProductionExceptionHandlerResponse response = exceptionHandler.handle(record, new Exception()); @@ -145,7 +145,7 @@ private static class TestLogAndXProductionExceptionHandler extends LogAndXProduc private final ProductionExceptionHandlerResponse response; - private TestLogAndXProductionExceptionHandler(ProductionExceptionHandlerResponse response) { + private TestLogAndXProductionExceptionHandler(final ProductionExceptionHandlerResponse response) { this.response = response; } diff --git a/ksql-common/src/test/java/io/confluent/ksql/function/UdfIndexTest.java b/ksql-common/src/test/java/io/confluent/ksql/function/UdfIndexTest.java index 28d5f513104b..f134fa14485a 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/function/UdfIndexTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/function/UdfIndexTest.java @@ -687,7 +687,7 @@ private static KsqlScalarFunction function( } }; - List paramInfos = Arrays.stream(args) + final List paramInfos = Arrays.stream(args) .map(type -> new ParameterInfo("", type, "", false)) .collect(Collectors.toList()); diff --git a/ksql-common/src/test/java/io/confluent/ksql/metrics/MetricCollectorsTest.java b/ksql-common/src/test/java/io/confluent/ksql/metrics/MetricCollectorsTest.java index 6cbd8c753e17..6790e40573c6 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/metrics/MetricCollectorsTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/metrics/MetricCollectorsTest.java @@ -49,7 +49,7 @@ public class MetricCollectorsTest { private static final String TEST_TOPIC = "shared-topic"; - private KsqlConfig ksqlConfig = mock(KsqlConfig.class); + private final KsqlConfig ksqlConfig = mock(KsqlConfig.class); @Before public void setUp() { diff --git a/ksql-common/src/test/java/io/confluent/ksql/schema/ksql/SchemaConvertersTest.java b/ksql-common/src/test/java/io/confluent/ksql/schema/ksql/SchemaConvertersTest.java index 584eb25bd94e..a34dbd592bdc 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/schema/ksql/SchemaConvertersTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/schema/ksql/SchemaConvertersTest.java @@ -135,7 +135,7 @@ public void shouldHaveConnectTestsForAllSqlTypes() { @Test public void shouldGetLogicalForEverySqlType() { - for (Entry entry : SQL_TO_LOGICAL.entrySet()) { + for (final Entry entry : SQL_TO_LOGICAL.entrySet()) { final SqlType sqlType = 
entry.getKey(); final Schema logical = entry.getValue(); final Schema result = SchemaConverters.sqlToConnectConverter().toConnectSchema(sqlType); @@ -162,7 +162,7 @@ public void shouldHaveJavaTestsForAllSqlTypes() { @Test public void shouldGetJavaTypesForAllSqlTypes() { - for (Entry> entry : SQL_TO_JAVA.entrySet()) { + for (final Entry> entry : SQL_TO_JAVA.entrySet()) { final SqlBaseType sqlType = entry.getKey(); final Class javaType = entry.getValue(); final Class result = SchemaConverters.sqlToJavaConverter().toJavaType(sqlType); diff --git a/ksql-common/src/test/java/io/confluent/ksql/util/timestamp/StringToTimestampParserTest.java b/ksql-common/src/test/java/io/confluent/ksql/util/timestamp/StringToTimestampParserTest.java index 4a5f6576dfcc..4aa972e86201 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/util/timestamp/StringToTimestampParserTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/util/timestamp/StringToTimestampParserTest.java @@ -29,7 +29,7 @@ public void shouldParseBasicLocalDate() { final String timestamp = "1605-11-05 10"; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); // Then assertThat(ts, sameInstant( @@ -45,7 +45,7 @@ public void shouldConvertToMillis() { final String timestamp = "1605-11-05 10"; // When - long ts = new StringToTimestampParser(format).parse(timestamp); + final long ts = new StringToTimestampParser(format).parse(timestamp); // Then assertThat(ts, is( @@ -63,7 +63,7 @@ public void shouldParseFullLocalDateWithPartialSeconds() { final String timestamp = "1605-11-05 10:10:10:010"; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); // Then assertThat(ts, is(sameInstant(FIFTH_OF_NOVEMBER @@ -83,7 +83,7 @@ public void shouldParseFullLocalDateWithNanoSeconds() { final String timestamp = "1605-11-05 10:10:10:001000000"; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); // Then assertThat(ts, is(sameInstant(FIFTH_OF_NOVEMBER @@ -100,7 +100,7 @@ public void shouldParseFullLocalDateWithOptionalElements() { final String timestamp = "1605-11-05"; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); // Then assertThat(ts, is(sameInstant(FIFTH_OF_NOVEMBER))); @@ -113,7 +113,7 @@ public void shouldParseFullLocalDateWithPassedInTimeZone() { final String timestamp = "1605-11-05 10"; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, GMT_3); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, GMT_3); // Then assertThat(ts, is(sameInstant(FIFTH_OF_NOVEMBER.withHour(10).withZoneSameLocal(GMT_3)))); @@ -128,7 +128,7 @@ public void shouldParseFullLocalDateWithTimeZone() { final String timestamp = "1605-11-05 10 GMT+3 "; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, IGNORED); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, IGNORED); // Then assertThat(ts, is(sameInstant(FIFTH_OF_NOVEMBER.withHour(10).withZoneSameLocal(GMT_3)))); @@ -141,7 +141,7 @@ public void 
shouldParseDateTimeWithDayOfYear() { final String timestamp = String.format("1605-%d 10", FIFTH_OF_NOVEMBER.getDayOfYear()); // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); // Then assertThat(ts, is(sameInstant(FIFTH_OF_NOVEMBER.withHour(10)))); @@ -154,7 +154,7 @@ public void shouldResolveDefaultsForEmpty() { final String timestamp = ""; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); // Then assertThat(ts, is(sameInstant(EPOCH.withZoneSameInstant(ZID)))); @@ -167,7 +167,7 @@ public void shouldResolveDefaultsForPartial() { final String timestamp = "2019"; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); // Then assertThat(ts, is(sameInstant(EPOCH.withYear(2019).withZoneSameInstant(ZID)))); @@ -180,7 +180,7 @@ public void shouldResolveDefaultsForDayOfYear() { final String timestamp = "100"; // When - ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); + final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID); // Then assertThat(ts, is(sameInstant(EPOCH.withDayOfYear(100).withZoneSameInstant(ZID)))); @@ -189,12 +189,12 @@ public void shouldResolveDefaultsForDayOfYear() { private static Matcher sameInstant(final ZonedDateTime other) { return new TypeSafeMatcher() { @Override - protected boolean matchesSafely(ZonedDateTime item) { + protected boolean matchesSafely(final ZonedDateTime item) { return item.toInstant().equals(other.toInstant()); } @Override - public void describeTo(Description description) { + public void describeTo(final Description description) { description.appendText(other.toString()); } }; diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/QueryAnalyzer.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/QueryAnalyzer.java index 1b1e87c5fea1..b81107828f03 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/QueryAnalyzer.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/QueryAnalyzer.java @@ -86,7 +86,7 @@ public Analysis analyze( } if (!analysis.getTableFunctions().isEmpty()) { - AliasedDataSource ds = analysis.getFromDataSources().get(0); + final AliasedDataSource ds = analysis.getFromDataSources().get(0); if (ds.getDataSource().getDataSourceType() == DataSourceType.KTABLE) { throw new KsqlException("Table source is not supported with table functions"); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/connect/supported/JdbcSource.java b/ksql-engine/src/main/java/io/confluent/ksql/connect/supported/JdbcSource.java index 865200968ca6..6dcaef8e6d1f 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/connect/supported/JdbcSource.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/connect/supported/JdbcSource.java @@ -90,7 +90,7 @@ private static Optional extractKeyNameFromSmt(final Map } final List transforms = Splitter.on(',').splitToList(transformsString); - for (String transform : transforms) { + for (final String transform : transforms) { final String transformType = properties.get("transforms." 
+ transform + ".type"); if (transformType != null && transformType.contains("ExtractField$Key")) { return Optional.ofNullable(properties.get("transforms." + transform + ".field")); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/embedded/KsqlContext.java b/ksql-engine/src/main/java/io/confluent/ksql/embedded/KsqlContext.java index 065c26a21b9a..6626fa411a1c 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/embedded/KsqlContext.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/embedded/KsqlContext.java @@ -129,7 +129,7 @@ public List sql(final String sql, final Map overridden final KsqlExecutionContext sandbox = ksqlEngine.createSandbox(ksqlEngine.getServiceContext()); final Map validationOverrides = new HashMap<>(overriddenProperties); - for (ParsedStatement stmt : statements) { + for (final ParsedStatement stmt : statements) { execute( sandbox, stmt, diff --git a/ksql-engine/src/main/java/io/confluent/ksql/engine/InsertValuesExecutor.java b/ksql-engine/src/main/java/io/confluent/ksql/engine/InsertValuesExecutor.java index 8fb24f558ad2..28983df9c9fc 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/engine/InsertValuesExecutor.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/engine/InsertValuesExecutor.java @@ -211,7 +211,7 @@ private ProducerRecord buildRecord( key, value ); - } catch (Exception e) { + } catch (final Exception e) { throw new KsqlStatementException( createInsertFailedExceptionMessage(insertValues) + " " + e.getMessage(), statement.getStatementText(), @@ -468,7 +468,7 @@ private static void sendRecord( throw (RuntimeException) e.getCause(); } throw new RuntimeException(e); - } catch (InterruptedException e) { + } catch (final InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/engine/KsqlPlanV1.java b/ksql-engine/src/main/java/io/confluent/ksql/engine/KsqlPlanV1.java index 2e3c51a0c093..d487b6d57d4e 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/engine/KsqlPlanV1.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/engine/KsqlPlanV1.java @@ -22,8 +22,8 @@ final class KsqlPlanV1 implements KsqlPlan { private final String statementText; - private Optional ddlCommand; - private Optional queryPlan; + private final Optional ddlCommand; + private final Optional queryPlan; KsqlPlanV1( @JsonProperty(value = "statementText", required = true) final String statementText, diff --git a/ksql-engine/src/main/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriter.java b/ksql-engine/src/main/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriter.java index a28ff3302297..5cfc228f1b18 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriter.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriter.java @@ -189,9 +189,9 @@ public Expression visitSubscriptExpression( } @Override - public Expression visitStructExpression(CreateStructExpression node, C context) { + public Expression visitStructExpression(final CreateStructExpression node, final C context) { final Builder fields = ImmutableList.builder(); - for (Field field : node.getFields()) { + for (final Field field : node.getFields()) { fields.add(new Field(field.getName(), rewriter.apply(field.getValue(), context))); } return new CreateStructExpression(node.getLocation(), fields.build()); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java 
b/ksql-engine/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java index b84d6e8d8f02..1105a5561744 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/execution/json/PlanJsonMapper.java @@ -33,7 +33,7 @@ private PlanJsonMapper() { * @return ObjectMapper instance */ public static ObjectMapper create() { - ObjectMapper mapper = new ObjectMapper(); + final ObjectMapper mapper = new ObjectMapper(); mapper.registerModules( new Jdk8Module(), new JavaTimeModule(), diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/DynamicFunctionInvoker.java b/ksql-engine/src/main/java/io/confluent/ksql/function/DynamicFunctionInvoker.java index 14757bf263ea..cd5ba7e7693b 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/DynamicFunctionInvoker.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/DynamicFunctionInvoker.java @@ -51,7 +51,7 @@ public Object eval(final Object udf, final Object... args) { try { final Object[] extractedArgs = extractArgs(args); return method.invoke(udf, extractedArgs); - } catch (Exception e) { + } catch (final Exception e) { throw new KsqlFunctionException("Failed to invoke function " + method, e); } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/FunctionLoaderUtils.java b/ksql-engine/src/main/java/io/confluent/ksql/function/FunctionLoaderUtils.java index 3da7d16ad10b..d754ec8511ac 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/FunctionLoaderUtils.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/FunctionLoaderUtils.java @@ -93,8 +93,8 @@ static List createParameters( paramType = UdfUtil.getSchemaFromType(type); } - String doc = annotation.map(UdfParameter::description).orElse(""); - boolean isVariadicParam = idx == method.getParameterCount() - 1 && method.isVarArgs(); + final String doc = annotation.map(UdfParameter::description).orElse(""); + final boolean isVariadicParam = idx == method.getParameterCount() - 1 && method.isVarArgs(); return new ParameterInfo(name, paramType, doc, isVariadicParam); }).collect(Collectors.toList()); } @@ -197,7 +197,7 @@ static SchemaProvider handleUdfReturnSchema( return (parameters, arguments) -> { if (schemaProvider != null) { - SqlType returnType = schemaProvider.apply(arguments); + final SqlType returnType = schemaProvider.apply(arguments); if (!(SchemaUtil.areCompatible(returnType, javaReturnSchema))) { throw new KsqlException(String.format( "Return type %s of UDF %s does not match the declared " @@ -260,7 +260,7 @@ private static Method findSchemaProvider( )); } return m; - } catch (NoSuchMethodException e) { + } catch (final NoSuchMethodException e) { throw new KsqlException(String.format( "Cannot find schema provider method with name %s and parameter List in class " + "%s.", schemaProviderName, theClass.getName()), e); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/UdafFactoryInvoker.java b/ksql-engine/src/main/java/io/confluent/ksql/function/UdafFactoryInvoker.java index 67410bd6dd5d..969d33354af1 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/UdafFactoryInvoker.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/UdafFactoryInvoker.java @@ -105,7 +105,7 @@ KsqlAggregateFunction createFunction(final AggregateFunctionInitArguments initAr method.getName()); } return function; - } catch (Exception e) { + } catch (final Exception e) { throw new KsqlException("Failed to invoke UDAF factory method", e); 
} } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/UdafLoader.java b/ksql-engine/src/main/java/io/confluent/ksql/function/UdafLoader.java index 307233af6453..e72819125906 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/UdafLoader.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/UdafLoader.java @@ -54,7 +54,7 @@ void loadUdafFromClass(final Class theClass, final String path) { final UdafDescription udafAnnotation = theClass.getAnnotation(UdafDescription.class); final List invokers = new ArrayList<>(); - for (Method method : theClass.getMethods()) { + for (final Method method : theClass.getMethods()) { if (method.getAnnotation(UdafFactory.class) != null) { if (!Modifier.isStatic(method.getModifiers())) { LOGGER.warn( diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/UdfLoader.java b/ksql-engine/src/main/java/io/confluent/ksql/function/UdfLoader.java index 35b72ec90c28..2eb6f4875fbc 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/UdfLoader.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/UdfLoader.java @@ -98,7 +98,7 @@ void loadUdfFromClass( functionRegistry.ensureFunctionFactory(factory); - for (Method method : theClass.getMethods()) { + for (final Method method : theClass.getMethods()) { final Udf udfAnnotation = method.getAnnotation(Udf.class); if (udfAnnotation != null) { final KsqlScalarFunction function; diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/UdtfLoader.java b/ksql-engine/src/main/java/io/confluent/ksql/function/UdtfLoader.java index cdfbc01bee36..3dc6bec4bfc8 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/UdtfLoader.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/UdtfLoader.java @@ -79,7 +79,7 @@ public void loadUdtfFromClass( final TableFunctionFactory factory = new TableFunctionFactory(metadata); - for (Method method : theClass.getMethods()) { + for (final Method method : theClass.getMethods()) { if (method.getAnnotation(Udtf.class) != null) { final Udtf annotation = method.getAnnotation(Udtf.class); try { diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/count/CountDistinct.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/count/CountDistinct.java index 284a1caa0794..3e2a04a2e667 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/count/CountDistinct.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/count/CountDistinct.java @@ -55,7 +55,7 @@ public List initialize() { } @Override - public List aggregate(T current, List aggregate) { + public List aggregate(final T current, final List aggregate) { if (current == null) { return aggregate; } @@ -71,7 +71,7 @@ public List aggregate(T current, List aggregate) { } @Override - public List merge(List aggOne, List aggTwo) { + public List merge(final List aggOne, final List aggTwo) { final RegisterSet registerSet = new RegisterSet(M, Ints.toArray(aggOne)); registerSet.merge(new RegisterSet(M, Ints.toArray(aggTwo))); @@ -79,7 +79,7 @@ public List merge(List aggOne, List aggTwo) { } @Override - public Long map(List agg) { + public Long map(final List agg) { return toHyperLogLog(new RegisterSet(M, Ints.toArray(agg))).cardinality(); } }; diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/sum/ListSumUdaf.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/sum/ListSumUdaf.java index 7d86649411a5..aa7e90f46c33 100644 --- 
a/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/sum/ListSumUdaf.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udaf/sum/ListSumUdaf.java @@ -157,7 +157,7 @@ private static T sum( final BinaryOperator summer) { T sum = initial; - for (T v: list) { + for (final T v: list) { if (v == null) { continue; } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/array/Entries.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/array/Entries.java index 6eb62c1d41ac..65e1212cdc28 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/array/Entries.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/array/Entries.java @@ -115,7 +115,7 @@ private List entries( list.sort(Comparator.comparing(Entry::getKey)); entries = list; } - for (Map.Entry entry : entries) { + for (final Map.Entry entry : entries) { final Struct struct = new Struct(structSchema); struct.put(KEY_FIELD_NAME, entry.getKey()).put(VALUE_FIELD_NAME, entry.getValue()); structs.add(struct); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/JsonArrayContains.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/JsonArrayContains.java index c1d7424b61ae..1319de9992f0 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/JsonArrayContains.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/json/JsonArrayContains.java @@ -90,7 +90,7 @@ public Boolean contains( } return false; - } catch (IOException e) { + } catch (final IOException e) { return false; } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/list/ArrayContains.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/list/ArrayContains.java index 9d2f6017d5dd..743fa695d220 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/list/ArrayContains.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/list/ArrayContains.java @@ -31,8 +31,8 @@ public class ArrayContains { @Udf public boolean contains( - @UdfParameter List array, - @UdfParameter T val + @UdfParameter final List array, + @UdfParameter final T val ) { return array != null && array.contains(val); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/string/SplitKudf.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/string/SplitKudf.java index 87a6413cc172..e650cf46c81b 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/udf/string/SplitKudf.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udf/string/SplitKudf.java @@ -61,7 +61,7 @@ public List split( } else { return Splitter.on(delimiter).splitToList(string); } - } catch (Exception e) { + } catch (final Exception e) { throw new KsqlFunctionException( String.format("Invalid delimiter '%s' in the split() function.", delimiter), e); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/udtf/Cube.java b/ksql-engine/src/main/java/io/confluent/ksql/function/udtf/Cube.java index 8b66c8438758..46847322ecd5 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/udtf/Cube.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/udtf/Cube.java @@ -36,9 +36,9 @@ public List> cube(final List columns) { } - private List> createAllCombinations(List columns) { + private List> createAllCombinations(final List columns) { - int combinations = 1 << columns.size(); + final int combinations = 1 << columns.size(); // when a column value is null there is only a single possibility for the 
output // value [null] instead of two [null, original]. in order to avoid creating duplicate // rows, we use nullMask: a binary number with set bits at non-null column @@ -50,7 +50,7 @@ private List> createAllCombinations(List columns) { } } - List> result = new ArrayList<>(combinations); + final List> result = new ArrayList<>(combinations); // bitmask is a binary number where a set bit represents that the value at that index of input // should be included - (e.g. the bitmask 5 (101) represents that cols[2] and cols[0] // should be set while cols[1] should be null). @@ -59,14 +59,14 @@ private List> createAllCombinations(List columns) { // canonicalBitMask represents which indices in the output // row will be null after taking into consideration which values // in columns were originally null - int canonicalBitMask = bitMask & nullMask; + final int canonicalBitMask = bitMask & nullMask; if (canonicalBitMask != bitMask) { // if the canonicalBitMask is not the same as bitMask, then this row is a logical // duplicate of another row and we should not emit it continue; } - List row = new ArrayList<>(columns.size()); + final List row = new ArrayList<>(columns.size()); for (int i = 0; i < columns.size(); i++) { row.add(0, (bitMask & (1 << i)) == 0 ? null : columns.get(columns.size() - 1 - i)); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java index 60689e5b7b47..ba3bea90020d 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/RepartitionNode.java @@ -72,12 +72,12 @@ public List getSelectExpressions() { } @Override - protected int getPartitions(KafkaTopicClient kafkaTopicClient) { + protected int getPartitions(final KafkaTopicClient kafkaTopicClient) { return source.getPartitions(kafkaTopicClient); } @Override - public SchemaKStream buildStream(KsqlQueryBuilder builder) { + public SchemaKStream buildStream(final KsqlQueryBuilder builder) { return source.buildStream(builder) .selectKey(partitionBy, builder.buildNodeContext(getId().toString())); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImpl.java b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImpl.java index c1ce69478c2d..3204f156131e 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImpl.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImpl.java @@ -65,7 +65,7 @@ private void validateQuery( ) { final SourceTopicsExtractor extractor = new SourceTopicsExtractor(metaStore); extractor.process(query, null); - for (String kafkaTopic : extractor.getSourceTopics()) { + for (final String kafkaTopic : extractor.getSourceTopics()) { checkAccess(serviceContext, kafkaTopic, AclOperation.READ); } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/services/DefaultConnectClient.java b/ksql-engine/src/main/java/io/confluent/ksql/services/DefaultConnectClient.java index 114b0f203551..0c98c731e0d5 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/services/DefaultConnectClient.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/services/DefaultConnectClient.java @@ -74,7 +74,7 @@ public DefaultConnectClient( try { this.connectUri = new URI(connectUri); - } catch (URISyntaxException e) { + } catch (final URISyntaxException e) { throw new KsqlException( "Could not 
initialize connect client due to invalid URI: " + connectUri, e); } @@ -233,10 +233,10 @@ private static ConnectResponse withRetries(final Callable) e.getLastFailedAttempt().getResult(); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/services/KafkaClusterUtil.java b/ksql-engine/src/main/java/io/confluent/ksql/services/KafkaClusterUtil.java index 1553cecd164a..d14f1d456275 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/services/KafkaClusterUtil.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/services/KafkaClusterUtil.java @@ -47,7 +47,7 @@ public static boolean isAuthorizedOperationsSupported(final Admin adminClient) { ); return authorizedOperations.authorizedOperations().get() != null; - } catch (Exception e) { + } catch (final Exception e) { throw new KsqlServerException("Could not get Kafka authorized operations!", e); } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/statement/InjectorChain.java b/ksql-engine/src/main/java/io/confluent/ksql/statement/InjectorChain.java index 5bba80c89ca2..4b45e38d9520 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/statement/InjectorChain.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/statement/InjectorChain.java @@ -38,7 +38,7 @@ private InjectorChain(final Injector... injectors) { public ConfiguredStatement inject( final ConfiguredStatement statement) { ConfiguredStatement injected = statement; - for (Injector injector : injectors) { + for (final Injector injector : injectors) { injected = injector.inject(injected); } return injected; diff --git a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java index 3597d04ce806..28762d95e2c8 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java @@ -173,7 +173,7 @@ KeyField findKeyField(final List selectExpressions) { Optional found = Optional.empty(); - for (SelectExpression selectExpression : selectExpressions) { + for (final SelectExpression selectExpression : selectExpressions) { final ColumnName toName = selectExpression.getAlias(); final Expression toExpression = selectExpression.getExpression(); @@ -534,7 +534,7 @@ LogicalSchema resolveSchema(final ExecutionStep step) { return new StepSchemaResolver(ksqlConfig, functionRegistry).resolve(step, schema); } - LogicalSchema resolveSchema(final ExecutionStep step, SchemaKStream right) { + LogicalSchema resolveSchema(final ExecutionStep step, final SchemaKStream right) { return new StepSchemaResolver(ksqlConfig, functionRegistry).resolve( step, schema, diff --git a/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicDeleteInjector.java b/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicDeleteInjector.java index cc4899326007..54d40cdeced0 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicDeleteInjector.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicDeleteInjector.java @@ -102,7 +102,7 @@ public ConfiguredStatement inject( ExecutorUtil.executeWithRetries( () -> topicClient.deleteTopics(ImmutableList.of(source.getKafkaTopicName())), ExecutorUtil.RetryBehaviour.ALWAYS); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException("Could not delete the corresponding kafka topic: " + source.getKafkaTopicName(), e); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicProperties.java 
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicProperties.java b/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicProperties.java
index a137930b0a1d..354a07eae36a 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicProperties.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/topic/TopicProperties.java
@@ -93,8 +93,8 @@ public static final class Builder {
     private String name;
     private TopicProperties fromWithClause = new TopicProperties(null, null, null);
-    private TopicProperties fromOverrides = new TopicProperties(null, null, null);
-    private TopicProperties fromKsqlConfig = new TopicProperties(null, null, null);
+    private final TopicProperties fromOverrides = new TopicProperties(null, null, null);
+    private final TopicProperties fromKsqlConfig = new TopicProperties(null, null, null);
     private Supplier fromSource = () -> new TopicProperties(null, null, null);
 
     Builder withName(final String name) {
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/AvroUtil.java b/ksql-engine/src/main/java/io/confluent/ksql/util/AvroUtil.java
index 69977d4f4e71..47b35f397741 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/util/AvroUtil.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/util/AvroUtil.java
@@ -75,7 +75,7 @@ private static String getRegisteredSchema(
       return schemaRegistryClient
           .getLatestSchemaMetadata(topicName + KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX)
           .getSchema();
-    } catch (Exception e) {
+    } catch (final Exception e) {
       return "Could not get registered schema due to exception: " + e.getMessage();
     }
   }
diff --git a/ksql-engine/src/test/java/TestUdfWithNoPackage.java b/ksql-engine/src/test/java/TestUdfWithNoPackage.java
index c64318309ec0..ea188e24b305 100644
--- a/ksql-engine/src/test/java/TestUdfWithNoPackage.java
+++ b/ksql-engine/src/test/java/TestUdfWithNoPackage.java
@@ -26,7 +26,7 @@ public class TestUdfWithNoPackage {
   @Test
   public void shouldCompileMethodsWithNoPackage() throws Exception {
     // Given:
-    double version = Double.parseDouble(System.getProperty("java.specification.version"));
+    final double version = Double.parseDouble(System.getProperty("java.specification.version"));
     if (version < 1.9) {
       assertThat(this.getClass().getPackage(), nullValue());
     } else {
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/KsqlContextTestUtilTest.java b/ksql-engine/src/test/java/io/confluent/ksql/KsqlContextTestUtilTest.java
index d04cef5e12f8..4d99b1937eec 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/KsqlContextTestUtilTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/KsqlContextTestUtilTest.java
@@ -52,7 +52,7 @@ public void shouldBeAbleToHaveTwoInstancesWithDifferentNames() {
     first.terminateQuery(new QueryId("avoid compiler warning"));
 
     // When:
-    KsqlContext second = KsqlContextTestUtil.create(
+    final KsqlContext second = KsqlContextTestUtil.create(
         BASE_CONFIG,
         srClient,
         functionRegistry
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/QueryAnalyzerFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/QueryAnalyzerFunctionalTest.java
index a68ef7fedbd0..5499c8be8c11 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/QueryAnalyzerFunctionalTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/QueryAnalyzerFunctionalTest.java
@@ -199,7 +199,7 @@ public void shouldCreateAnalysisForInsertInto() {
   public void shouldAnalyseTableFunctions() {
 
     // We need to load udfs for this
-    UserFunctionLoader loader = new UserFunctionLoader(functionRegistry, new File(""),
+    final UserFunctionLoader loader = new UserFunctionLoader(functionRegistry, new File(""),
         Thread.currentThread().getContextClassLoader(), s -> false, Optional.empty(), true
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/DdlCommandExecTest.java b/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/DdlCommandExecTest.java
index 83ddfc31f9ce..39321f224698 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/DdlCommandExecTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/DdlCommandExecTest.java
@@ -60,7 +60,7 @@ public class DdlCommandExecTest {
   private DropSourceCommand dropSource;
   private DropTypeCommand dropType;
 
-  private MutableMetaStore metaStore
+  private final MutableMetaStore metaStore
       = MetaStoreFixture.getNewMetaStore(new InternalFunctionRegistry());
 
   @Mock
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java
index 874b541d8ea4..b394b2cdde28 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java
@@ -303,9 +303,9 @@ public void shouldThrowExceptionIfNoFunctionsWithNameExist() {
   @Test
   public void shouldHaveAllInitializedFunctionNamesInUppercase() {
-    for (UdfFactory udfFactory : functionRegistry.listFunctions()) {
-      String actual = udfFactory.getName();
-      String expected = actual.toUpperCase();
+    for (final UdfFactory udfFactory : functionRegistry.listFunctions()) {
+      final String actual = udfFactory.getName();
+      final String expected = actual.toUpperCase();
       assertThat("UDF name must be registered in uppercase", actual, equalTo(expected));
     }
 
@@ -315,7 +315,7 @@ public void shouldHaveAllInitializedFunctionNamesInUppercase() {
   public void shouldHaveBuiltInUDFRegistered() {
 
     // Verify that all built-in UDF are correctly registered in the InternalFunctionRegistry
-    List buildtInUDF = Arrays.asList(
+    final List buildtInUDF = Arrays.asList(
        // String UDF
        "LCASE", "UCASE", "CONCAT", "TRIM", "IFNULL", "LEN",
        // Math UDF
@@ -324,7 +324,7 @@
        "EXTRACTJSONFIELD"
     );
 
-    Collection names = Collections2.transform(functionRegistry.listFunctions(),
+    final Collection names = Collections2.transform(functionRegistry.listFunctions(),
        UdfFactory::getName);
 
     assertThat("More or less UDF are registered in the InternalFunctionRegistry",
@@ -333,11 +333,11 @@
   @Test
   public void shouldHaveBuiltInUDAFRegistered() {
-    Collection builtInUDAF = Arrays.asList(
+    final Collection builtInUDAF = Arrays.asList(
        "COUNT", "SUM", "MAX", "MIN", "TOPK", "TOPKDISTINCT"
     );
 
-    Collection names = Collections2.transform(functionRegistry.listAggregateFunctions(),
+    final Collection names = Collections2.transform(functionRegistry.listAggregateFunctions(),
        AggregateFunctionFactory::getName);
 
     assertThat("More or less UDAF are registered in the InternalFunctionRegistry",
@@ -382,7 +382,7 @@ private TableFunctionFactory createTableFunctionFactory() {
     return new TableFunctionFactory(new UdfMetadata("my_tablefunction", "", "", "", "")) {
 
       @Override
-      public KsqlTableFunction createTableFunction(List argTypeList) {
+      public KsqlTableFunction createTableFunction(final List argTypeList) {
         return tableFunction;
       }
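The InternalFunctionRegistryTest hunks above pin down the invariant that every function name is registered in uppercase, which is what makes ksql's function lookup case-insensitive. A small sketch of that normalization idea (a generic illustrative registry, not ksql's actual implementation):

```java
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public final class CaseInsensitiveRegistry<V> {

  // Keys are stored uppercased, so "lcase", "LCase" and "LCASE" all resolve
  // to the same entry -- the invariant the test above checks.
  private final Map<String, V> byName = new HashMap<>();

  public void register(final String name, final V value) {
    byName.put(name.toUpperCase(Locale.ROOT), value);
  }

  public V lookup(final String name) {
    return byName.get(name.toUpperCase(Locale.ROOT));
  }

  public static void main(final String[] args) {
    final CaseInsensitiveRegistry<Integer> reg = new CaseInsensitiveRegistry<>();
    reg.register("lcase", 1);
    System.out.println(reg.lookup("LCASE")); // prints 1
  }
}
```

Normalizing at both write and read time keeps the invariant local to the registry instead of relying on every caller to uppercase names.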
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/UdfClassLoaderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/UdfClassLoaderTest.java
index 50e525b9f50c..bf8f53e0a34f 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/UdfClassLoaderTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/UdfClassLoaderTest.java
@@ -72,7 +72,7 @@ static PrivilegedAction factory() {
     }
 
     @Override
-    public Class findClass(String name) throws ClassNotFoundException {
+    public Class findClass(final String name) throws ClassNotFoundException {
       switch (name) {
         case ONLY_IN_PARENT: return OnlyInParent.class;
         case IN_PARENT_AND_JAR: return InParentAndJar.class;
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/UdfLoaderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/UdfLoaderTest.java
index 854daec40dfa..1ac708308cf3 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/UdfLoaderTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/UdfLoaderTest.java
@@ -272,7 +272,7 @@ public void shouldThrowOnMissingAnnotation() throws ClassNotFoundException {
     final UdfClassLoader udfClassLoader = UdfClassLoader.newClassLoader(udfJar,
        PARENT_CLASS_LOADER, resourceName -> false);
-    Class clazz = udfClassLoader.loadClass("org.damian.ksql.udf.MissingAnnotationUdf");
+    final Class clazz = udfClassLoader.loadClass("org.damian.ksql.udf.MissingAnnotationUdf");
     final UdfLoader udfLoader = new UdfLoader(
        functionRegistry,
        Optional.empty(),
@@ -298,7 +298,7 @@ public void shouldThrowOnMissingSchemaProvider() throws ClassNotFoundException {
     final UdfClassLoader udfClassLoader = UdfClassLoader.newClassLoader(udfJar,
        PARENT_CLASS_LOADER, resourceName -> false);
-    Class clazz = udfClassLoader.loadClass("org.damian.ksql.udf.MissingSchemaProviderUdf");
+    final Class clazz = udfClassLoader.loadClass("org.damian.ksql.udf.MissingSchemaProviderUdf");
     final UdfLoader udfLoader = new UdfLoader(
        functionRegistry,
        Optional.empty(),
@@ -324,7 +324,7 @@ public void shouldThrowOnReturnDecimalWithoutSchemaProvider() throws ClassNotFou
     final UdfClassLoader udfClassLoader = UdfClassLoader.newClassLoader(udfJar,
        PARENT_CLASS_LOADER, resourceName -> false);
-    Class clazz = udfClassLoader.loadClass("org.damian.ksql.udf."
+    final Class clazz = udfClassLoader.loadClass("org.damian.ksql.udf."
        + "ReturnDecimalWithoutSchemaProviderUdf");
     final UdfLoader udfLoader = new UdfLoader(
        functionRegistry,
@@ -363,7 +363,7 @@ public void shouldPutJarUdfsInClassLoaderForJar() throws Exception {
 
   @Test
   public void shouldAllowClassesWithSameFQCNInDifferentUDFJars() throws Exception {
-    File pluginDir = tempFolder.newFolder();
+    final File pluginDir = tempFolder.newFolder();
     Files.copy(Paths.get("src/test/resources/udf-example.jar"),
        new File(pluginDir, "udf-example.jar").toPath());
     Files.copy(Paths.get("src/test/resources/udf-isolated.jar"),
@@ -1147,7 +1147,7 @@ public static Udaf, Map, Map, Map, Map> createMapMap(
-      int ignored) {
+      final int ignored) {
     return null;
   }
 
@@ -1195,7 +1195,7 @@ private static UdafLoader createUdafLoader() {
     return createUdafLoader(Optional.empty());
   }
 
-  private static UdafLoader createUdafLoader(Optional metrics) {
+  private static UdafLoader createUdafLoader(final Optional metrics) {
     return new UdafLoader(new InternalFunctionRegistry(), metrics, SqlTypeParser.create(
        TypeRegistry.EMPTY));
   }
@@ -1298,7 +1298,7 @@ public BigDecimal foo(@UdfParameter("justValue") final BigDecimal p) {
     }
 
     @UdfSchemaProvider
-    public SqlType provideSchema(List params) {
+    public SqlType provideSchema(final List params) {
       return SqlDecimal.of(2, 1);
     }
   }
@@ -1316,7 +1316,7 @@ public String foo(@UdfParameter("justValue") final BigDecimal p) {
     }
 
     @UdfSchemaProvider
-    public SqlType provideSchema(List params) {
+    public SqlType provideSchema(final List params) {
       return SqlDecimal.of(2, 1);
     }
   }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/UdtfLoaderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/UdtfLoaderTest.java
index 820d6603086c..c5c3dd063dd9 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/UdtfLoaderTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/UdtfLoaderTest.java
@@ -277,7 +277,7 @@ public void shouldNotLoadUdtfWithBigDecimalReturnAndNoSchemaProvider() {
   static class UdtfBadReturnValue {
 
     @Udtf
-    public Map badReturn(int foo) {
+    public Map badReturn(final int foo) {
       return new HashMap<>();
     }
   }
@@ -286,7 +286,7 @@ public Map badReturn(int foo) {
   static class RawListReturn {
 
     @Udtf
-    public List badReturn(int foo) {
+    public List badReturn(final int foo) {
       return new ArrayList();
     }
   }
@@ -295,7 +295,7 @@ public List badReturn(int foo) {
   static class BigDecimalNoSchemaProvider {
 
     @Udtf
-    public List badReturn(int foo) {
+    public List badReturn(final int foo) {
       return ImmutableList.of(new BigDecimal("123"));
     }
   }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/average/AverageUdafTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/average/AverageUdafTest.java
index 2211d66e46b4..36a2344d2a5f 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/average/AverageUdafTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/average/AverageUdafTest.java
@@ -94,7 +94,7 @@ public void shouldAverageZeroes() {
   @Test
   public void shouldAverageEmpty() {
     final TableUdaf udaf = AverageUdaf.averageInt();
-    Struct agg = udaf.initialize();
+    final Struct agg = udaf.initialize();
     final double avg = udaf.map(agg);
 
     assertThat(0.0, equalTo(avg));
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/count/CountDistinctKudafTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/count/CountDistinctKudafTest.java
index f00fd67c78fb..9ddaa867d138 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/count/CountDistinctKudafTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/count/CountDistinctKudafTest.java
@@ -39,7 +39,7 @@ public void shouldCountStrings() {
     List agg = udaf.initialize();
 
     // When:
-    for (String value : values) {
+    for (final String value : values) {
       agg = udaf.aggregate(value, agg);
     }
 
@@ -59,7 +59,7 @@ public void shouldCountList() {
     List agg = udaf.initialize();
 
     // When:
-    for (List value : values) {
+    for (final List value : values) {
       agg = udaf.aggregate(value, agg);
     }
 
@@ -93,11 +93,11 @@ public void shouldMerge() {
     List agg2 = udaf.initialize();
 
     // When:
-    for (String value : values1) {
+    for (final String value : values1) {
       agg1 = udaf.aggregate(value, agg1);
     }
 
-    for (String value : new String[]{"5"}) {
+    for (final String value : new String[]{"5"}) {
       agg2 = udaf.aggregate(value, agg2);
     }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/sum/ListSumUdafTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/sum/ListSumUdafTest.java
index 20a3ad7fb547..b5eccb099bb1 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/sum/ListSumUdafTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/sum/ListSumUdafTest.java
@@ -31,7 +31,7 @@ public void shouldSumLongList() {
     final TableUdaf, Long, Long> udaf = ListSumUdaf.sumLongList();
     final Long[] values = new Long[] {1L, 1L, 1L, 1L, 1L};
     final List list = Arrays.asList(values);
-    Long sum = udaf.aggregate(list, 0L);
+    final Long sum = udaf.aggregate(list, 0L);
 
     assertThat(5L, equalTo(sum));
   }
@@ -41,7 +41,7 @@ public void shouldSumIntList() {
     final TableUdaf, Integer, Integer> udaf = ListSumUdaf.sumIntList();
     final Integer[] values = new Integer[] {1, 1, 1, 1, 1};
     final List list = Arrays.asList(values);
-    Integer sum = udaf.aggregate(list, 0);
+    final Integer sum = udaf.aggregate(list, 0);
 
     assertThat(5, equalTo(sum));
   }
@@ -51,7 +51,7 @@ public void shouldSumDoubleList() {
     final TableUdaf, Double, Double> udaf = ListSumUdaf.sumDoubleList();
     final Double[] values = new Double[] {1.0, 1.0, 1.0, 1.0, 1.0};
     final List list = Arrays.asList(values);
-    Double sum = udaf.aggregate(list, 0.0);
+    final Double sum = udaf.aggregate(list, 0.0);
 
     assertThat(5.0, equalTo(sum));
   }
@@ -61,7 +61,7 @@ public void shouldASumZeroes() {
     final TableUdaf, Integer, Integer> udaf = ListSumUdaf.sumIntList();
     final Integer[] values = new Integer[] {0, 0, 0, 0, 0};
     final List list = Arrays.asList(values);
-    Integer sum = udaf.aggregate(list, 0);
+    final Integer sum = udaf.aggregate(list, 0);
 
     assertThat(0, equalTo(sum));
   }
@@ -80,7 +80,7 @@ public void shouldIgnoreNull() {
     final TableUdaf, Integer, Integer> udaf = ListSumUdaf.sumIntList();
     final Integer[] values = new Integer[] {1, 1, null, 1};
     final List list = Arrays.asList(values);
-    Integer sum = udaf.aggregate(list, 0);
+    final Integer sum = udaf.aggregate(list, 0);
 
     assertThat(3, equalTo(sum));
   }
@@ -91,11 +91,11 @@ public void shouldMergeSums() {
     final Integer[] leftValues = new Integer[] {1, 1, 1, 1};
     final List leftList = Arrays.asList(leftValues);
-    Integer sumLeft = udaf.aggregate(leftList, 0);
+    final Integer sumLeft = udaf.aggregate(leftList, 0);
 
     final Integer[] rightValues = new Integer[] {2, 2, 2};
     final List rightList = Arrays.asList(rightValues);
-    Integer sumRight = udaf.aggregate(rightList, 0);
+    final Integer sumRight = udaf.aggregate(rightList, 0);
 
     final Integer merged = udaf.merge(sumLeft, sumRight);
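The ListSumUdaf tests above (and the undo test that follows) rely on sum being both mergeable and undoable: partial aggregates combine by addition, and a retracted input is subtracted back out. A plain-Java sketch of that contract, hedged as an illustration rather than the ListSumUdaf source:

```java
import java.util.Arrays;
import java.util.List;

public final class SumAggregate {

  static int aggregate(final List<Integer> values, final int agg) {
    int sum = agg;
    for (final Integer v : values) {
      if (v != null) { // nulls are ignored, as shouldIgnoreNull verifies
        sum += v;
      }
    }
    return sum;
  }

  // Two partial sums combine by addition...
  static int merge(final int left, final int right) {
    return left + right;
  }

  // ...and a retracted input is removed by subtracting it back out,
  // which is what makes the aggregate usable over a changelog table.
  static int undo(final List<Integer> values, final int agg) {
    return agg - aggregate(values, 0);
  }

  public static void main(final String[] args) {
    final int left = aggregate(Arrays.asList(1, 1, 1, 1), 0);  // 4
    final int right = aggregate(Arrays.asList(2, 2, 2), 0);    // 6
    System.out.println(merge(left, right));                    // 10
    System.out.println(undo(Arrays.asList(1, 1, 1), left));    // 1
  }
}
```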
@@ -107,11 +107,11 @@ public void shouldUndoSum() {
     final TableUdaf, Integer, Integer> udaf = ListSumUdaf.sumIntList();
     final Integer[] values = new Integer[] {1, 1, 1, 1};
     final List list = Arrays.asList(values);
-    Integer sum = udaf.aggregate(list, 0);
+    final Integer sum = udaf.aggregate(list, 0);
 
     final Integer[] undoValues = new Integer[] {1, 1, 1};
     final List undoList = Arrays.asList(undoValues);
-    int undo = udaf.undo(undoList, sum);
+    final int undo = udaf.undo(undoList, sum);
 
     assertThat(1, equalTo(undo));
   }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/topk/IntTopkKudafTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/topk/IntTopkKudafTest.java
index 85274a19ab92..8fbd8315f00d 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/topk/IntTopkKudafTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udaf/topk/IntTopkKudafTest.java
@@ -38,7 +38,7 @@ public class IntTopkKudafTest {
   private final List valuesArray = ImmutableList.of(10, 30, 45, 10, 50, 60, 20, 60, 80, 35, 25);
   private KsqlAggregateFunction, List> topkKudaf;
 
-  private AggregateFunctionInitArguments createArgs(int k) {
+  private AggregateFunctionInitArguments createArgs(final int k) {
     return new AggregateFunctionInitArguments(0, k);
   }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/TestUdtf.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/TestUdtf.java
index 203a3d693ce7..e735d932b158 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/TestUdtf.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/TestUdtf.java
@@ -31,8 +31,8 @@ public class TestUdtf {
   @Udtf
   public List standardParams(
-      int i, long l, double d, boolean b, String s,
-      BigDecimal bd, @UdfParameter(schema = "STRUCT") Struct struct
+      final int i, final long l, final double d, final boolean b, final String s,
+      final BigDecimal bd, @UdfParameter(schema = "STRUCT") final Struct struct
   ) {
     return ImmutableList.of(String.valueOf(i), String.valueOf(l), String.valueOf(d),
        String.valueOf(b), s, bd.toString(), struct.toString()
@@ -41,8 +41,8 @@ public List standardParams(
   @Udtf
   public List parameterizedListParams(
-      List i, List l, List d, List b, List s,
-      List bd, @UdfParameter(schema = "ARRAY>") List struct
+      final List i, final List l, final List d, final List b, final List s,
+      final List bd, @UdfParameter(schema = "ARRAY>") final List struct
   ) {
     return ImmutableList
        .of(String.valueOf(i.get(0)), String.valueOf(l.get(0)), String.valueOf(d.get(0)),
@@ -52,10 +52,10 @@ public List parameterizedListParams(
   @Udtf
   public List parameterizedMapParams(
-      Map i, Map l, Map d, Map b,
-      Map s,
-      Map bd,
-      @UdfParameter(schema = "MAP>") Map struct
+      final Map i, final Map l, final Map d, final Map b,
+      final Map s,
+      final Map bd,
+      @UdfParameter(schema = "MAP>") final Map struct
   ) {
     return ImmutableList
        .of(
@@ -70,37 +70,37 @@
   }
 
   @Udtf
-  public List listIntegerReturn(int i) {
+  public List listIntegerReturn(final int i) {
     return ImmutableList.of(i);
   }
 
   @Udtf
-  public List listLongReturn(long l) {
+  public List listLongReturn(final long l) {
     return ImmutableList.of(l);
   }
 
   @Udtf
-  public List listDoubleReturn(double d) {
+  public List listDoubleReturn(final double d) {
     return ImmutableList.of(d);
   }
 
   @Udtf
-  public List listBooleanReturn(boolean b) {
+  public List listBooleanReturn(final boolean b) {
     return ImmutableList.of(b);
   }
 
   @Udtf
-  public List listStringReturn(String s) {
     return ImmutableList.of(s);
   }
 
   @Udtf(schemaProvider = "provideSchema")
-  public List listBigDecimalReturnWithSchemaProvider(BigDecimal bd) {
+  public List listBigDecimalReturnWithSchemaProvider(final BigDecimal bd) {
     return ImmutableList.of(bd);
   }
 
   @Udtf(schema = "STRUCT")
-  public List listStructReturn(@UdfParameter(schema = "STRUCT") Struct struct) {
+  public List listStructReturn(@UdfParameter(schema = "STRUCT") final Struct struct) {
     return ImmutableList.of(struct);
   }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/array/EntriesTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/array/EntriesTest.java
index a02d417b8482..1665ea298053 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/array/EntriesTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/array/EntriesTest.java
@@ -36,65 +36,65 @@ public class EntriesTest {
   private static final int ENTRIES = 20;
 
-  private Entries entriesUdf = new Entries();
+  private final Entries entriesUdf = new Entries();
 
   @Test
   public void shouldComputeIntEntries() {
-    Map map = createMap(i -> i);
+    final Map map = createMap(i -> i);
     shouldComputeEntries(map, () -> entriesUdf.entriesInt(map, false));
   }
 
   @Test
   public void shouldComputeBigIntEntries() {
-    Map map = createMap(Long::valueOf);
+    final Map map = createMap(Long::valueOf);
     shouldComputeEntries(map, () -> entriesUdf.entriesBigInt(map, false));
   }
 
   @Test
   public void shouldComputeDoubleEntries() {
-    Map map = createMap(Double::valueOf);
+    final Map map = createMap(Double::valueOf);
     shouldComputeEntries(map, () -> entriesUdf.entriesDouble(map, false));
   }
 
   @Test
   public void shouldComputeBooleanEntries() {
-    Map map = createMap(i -> i % 2 == 0);
+    final Map map = createMap(i -> i % 2 == 0);
     shouldComputeEntries(map, () -> entriesUdf.entriesBoolean(map, false));
   }
 
   @Test
   public void shouldComputeStringEntries() {
-    Map map = createMap(String::valueOf);
+    final Map map = createMap(String::valueOf);
     shouldComputeEntries(map, () -> entriesUdf.entriesString(map, false));
   }
 
   @Test
   public void shouldComputeIntEntriesSorted() {
-    Map map = createMap(i -> i);
+    final Map map = createMap(i -> i);
     shouldComputeEntriesSorted(map, () -> entriesUdf.entriesInt(map, true));
   }
 
   @Test
   public void shouldComputeBigIntEntriesSorted() {
-    Map map = createMap(Long::valueOf);
+    final Map map = createMap(Long::valueOf);
     shouldComputeEntriesSorted(map, () -> entriesUdf.entriesBigInt(map, true));
   }
 
   @Test
   public void shouldComputeDoubleEntriesSorted() {
-    Map map = createMap(Double::valueOf);
+    final Map map = createMap(Double::valueOf);
     shouldComputeEntriesSorted(map, () -> entriesUdf.entriesDouble(map, true));
   }
 
   @Test
   public void shouldComputeBooleanEntriesSorted() {
-    Map map = createMap(i -> i % 2 == 0);
+    final Map map = createMap(i -> i % 2 == 0);
     shouldComputeEntriesSorted(map, () -> entriesUdf.entriesBoolean(map, true));
   }
 
   @Test
   public void shouldComputeStringEntriesSorted() {
-    Map map = createMap(String::valueOf);
+    final Map map = createMap(String::valueOf);
     shouldComputeEntriesSorted(map, () -> entriesUdf.entriesString(map, true));
   }
 
@@ -124,33 +124,33 @@ public void shouldReturnNullListForNullMapString() {
   }
 
   private void shouldComputeEntries(
-      Map map, Supplier> supplier
+      final Map map, final Supplier> supplier
   ) {
-    List out = supplier.get();
+    final List out = supplier.get();
 
     assertThat(out, hasSize(map.size()));
     for (int i = 0; i < out.size(); i++) {
-      Struct struct = out.get(i);
-      T val = map.get(struct.getString("K"));
+      final Struct struct = out.get(i);
+      final T val = map.get(struct.getString("K"));
       assertThat(val == null, is(false));
       assertThat(val, is(struct.get("V")));
     }
   }
 
-  private void shouldComputeEntriesSorted(Map map, Supplier> supplier) {
-    List out = supplier.get();
-    List> entries = new ArrayList<>(map.entrySet());
+  private void shouldComputeEntriesSorted(final Map map, final Supplier> supplier) {
+    final List out = supplier.get();
+    final List> entries = new ArrayList<>(map.entrySet());
     entries.sort(Comparator.comparing(Entry::getKey));
 
     assertThat(out.size(), is(entries.size()));
     for (int i = 0; i < entries.size(); i++) {
-      Struct struct = out.get(i);
-      Map.Entry entry = entries.get(i);
+      final Struct struct = out.get(i);
+      final Map.Entry entry = entries.get(i);
       assertThat(struct.get("K"), is(entry.getKey()));
       assertThat(struct.get("V"), is(entry.getValue()));
     }
   }
 
-  private Map createMap(Function valueSupplier) {
-    Map map = new HashMap<>();
+  private Map createMap(final Function valueSupplier) {
+    final Map map = new HashMap<>();
     for (int i = 0; i < ENTRIES; i++) {
       map.put(UUID.randomUUID().toString(), valueSupplier.apply(i));
     }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/array/GenerateSeriesTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/array/GenerateSeriesTest.java
index 993043812a22..30300630842d 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/array/GenerateSeriesTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/array/GenerateSeriesTest.java
@@ -27,57 +27,57 @@ public class GenerateSeriesTest {
 
-  private GenerateSeries rangeUdf = new GenerateSeries();
+  private final GenerateSeries rangeUdf = new GenerateSeries();
 
   @Rule
   public final ExpectedException expectedException = ExpectedException.none();
 
   @Test
   public void shouldComputePositiveIntRange() {
-    List range = rangeUdf.generateSeriesInt(0, 9);
+    final List range = rangeUdf.generateSeriesInt(0, 9);
     assertThat(range, hasSize(10));
     int val = 0;
-    for (Integer i : range) {
+    for (final Integer i : range) {
       assertThat(val++, is(i));
     }
   }
 
   @Test
   public void shouldComputeNegativeIntRange() {
-    List range = rangeUdf.generateSeriesInt(9, 0);
+    final List range = rangeUdf.generateSeriesInt(9, 0);
     assertThat(range, hasSize(10));
     int val = 9;
-    for (Integer i : range) {
+    for (final Integer i : range) {
       assertThat(val--, is(i));
     }
   }
 
   @Test
   public void shouldComputeLongRange() {
-    List range = rangeUdf.generateSeriesLong(0, 9);
+    final List range = rangeUdf.generateSeriesLong(0, 9);
     assertThat(range, hasSize(10));
     long val = 0;
-    for (Long i : range) {
+    for (final Long i : range) {
       assertThat(val++, is(i));
     }
   }
 
   @Test
   public void shouldComputeNegativeLongRange() {
-    List range = rangeUdf.generateSeriesLong(9, 0);
+    final List range = rangeUdf.generateSeriesLong(9, 0);
     assertThat(range, hasSize(10));
     long val = 9;
-    for (Long i : range) {
+    for (final Long i : range) {
       assertThat(val--, is(i));
     }
   }
 
   @Test
   public void shouldComputeIntRangeWithPositiveEvenStepInt() {
-    List range = rangeUdf.generateSeriesInt(0, 9, 2);
+    final List range = rangeUdf.generateSeriesInt(0, 9, 2);
     assertThat(range, hasSize(5));
     int val = 0;
-    for (int i : range) {
+    for (final int i : range) {
       assertThat(val, is(i));
       val += 2;
     }
@@ -85,10 +85,10 @@ public void shouldComputeIntRangeWithPositiveEvenStepInt() {
 
   @Test
   public void shouldComputeIntRangeWithPositiveOddStepInt() {
-    List range = rangeUdf.generateSeriesInt(0, 9, 3);
+    final List range = rangeUdf.generateSeriesInt(0, 9, 3);
     assertThat(range, hasSize(4));
     int val = 0;
-    for (int i : range) {
+    for (final int i : range) {
       assertThat(val, is(i));
       val += 3;
     }
@@ -96,10 +96,10 @@ public void shouldComputeIntRangeWithPositiveOddStepInt() {
 
   @Test
   public void shouldComputeIntRangeWithNegativeEvenStepInt() {
-    List range = rangeUdf.generateSeriesInt(9, 0, -2);
+    final List range = rangeUdf.generateSeriesInt(9, 0, -2);
     assertThat(range, hasSize(5));
     int val = 9;
-    for (int i : range) {
+    for (final int i : range) {
       assertThat(val, is(i));
       val -= 2;
     }
@@ -107,10 +107,10 @@ public void shouldComputeIntRangeWithNegativeEvenStepInt() {
 
   @Test
   public void shouldComputeIntRangeWithNegativeOddStepInt() {
-    List range = rangeUdf.generateSeriesInt(9, 0, -3);
+    final List range = rangeUdf.generateSeriesInt(9, 0, -3);
     assertThat(range, hasSize(4));
     int val = 9;
-    for (int i : range) {
+    for (final int i : range) {
       assertThat(val, is(i));
       val -= 3;
     }
@@ -118,10 +118,10 @@ public void shouldComputeIntRangeWithNegativeOddStepInt() {
 
   @Test
   public void shouldComputeIntRangeWithEvenStepLong() {
-    List range = rangeUdf.generateSeriesLong(0, 9, 2);
+    final List range = rangeUdf.generateSeriesLong(0, 9, 2);
     assertThat(range, hasSize(5));
     long index = 0;
-    for (long i : range) {
+    for (final long i : range) {
       assertThat(index, is(i));
       index += 2;
     }
@@ -129,10 +129,10 @@ public void shouldComputeIntRangeWithEvenStepLong() {
 
   @Test
   public void shouldComputeIntRangeWithOddStepLong() {
-    List range = rangeUdf.generateSeriesLong(0, 9, 3);
+    final List range = rangeUdf.generateSeriesLong(0, 9, 3);
     assertThat(range, hasSize(4));
     long index = 0;
-    for (long i : range) {
+    for (final long i : range) {
       assertThat(index, is(i));
       index += 3;
     }
@@ -140,10 +140,10 @@ public void shouldComputeIntRangeWithOddStepLong() {
 
   @Test
   public void shouldComputeIntRangeWithNegativeEvenStepLong() {
-    List range = rangeUdf.generateSeriesLong(9, 0, -2);
+    final List range = rangeUdf.generateSeriesLong(9, 0, -2);
     assertThat(range, hasSize(5));
     long val = 9;
-    for (long i : range) {
+    for (final long i : range) {
       assertThat(val, is(i));
       val -= 2;
     }
@@ -151,10 +151,10 @@ public void shouldComputeIntRangeWithNegativeEvenStepLong() {
 
   @Test
   public void shouldComputeIntRangeWithNegativeOddStepLong() {
-    List range = rangeUdf.generateSeriesLong(9, 0, -3);
+    final List range = rangeUdf.generateSeriesLong(9, 0, -3);
     assertThat(range, hasSize(4));
     long val = 9;
-    for (long i : range) {
+    for (final long i : range) {
       assertThat(val, is(i));
       val -= 3;
     }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/geo/GeoDistanceTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/geo/GeoDistanceTest.java
index b021832d6628..2fa9c7aeca1e 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/geo/GeoDistanceTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/geo/GeoDistanceTest.java
@@ -23,7 +23,7 @@ import org.junit.rules.ExpectedException;
 
 public class GeoDistanceTest {
-  private GeoDistance distanceUdf = new GeoDistance();
+  private final GeoDistance distanceUdf = new GeoDistance();
 
   @Rule
   public final ExpectedException expectedException = ExpectedException.none();
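The GenerateSeriesTest hunks above pin down the series semantics: endpoints are inclusive, the sign of the step sets the direction, and a range like (0, 9, 3) yields four elements. A sketch of that behavior with a hypothetical helper (illustrative, not the ksql GenerateSeries source):

```java
import java.util.ArrayList;
import java.util.List;

public final class Series {

  // Inclusive series from start to end; the step's sign must point from
  // start toward end, mirroring the tests above.
  static List<Integer> generateSeriesInt(final int start, final int end, final int step) {
    if (step == 0 || (long) (end - start) * step < 0) {
      throw new IllegalArgumentException("step must move from start toward end");
    }
    final List<Integer> result = new ArrayList<>();
    for (int i = start; step > 0 ? i <= end : i >= end; i += step) {
      result.add(i);
    }
    return result;
  }

  public static void main(final String[] args) {
    System.out.println(generateSeriesInt(0, 9, 3));  // [0, 3, 6, 9]
    System.out.println(generateSeriesInt(9, 0, -2)); // [9, 7, 5, 3, 1]
  }
}
```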
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/JsonArrayContainsTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/JsonArrayContainsTest.java
index 5c092c208f34..d2b68ede3666 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/JsonArrayContainsTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/json/JsonArrayContainsTest.java
@@ -21,7 +21,7 @@
 public class JsonArrayContainsTest {
 
-  private JsonArrayContains jsonUdf = new JsonArrayContains();
+  private final JsonArrayContains jsonUdf = new JsonArrayContains();
 
   @Test
   public void shouldReturnFalseOnEmptyArray() {
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/list/ArrayContainsTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/list/ArrayContainsTest.java
index 14c4b682513d..a411f27e63c5 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/list/ArrayContainsTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/list/ArrayContainsTest.java
@@ -15,7 +15,6 @@
 
 package io.confluent.ksql.function.udf.list;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
@@ -25,7 +24,7 @@
 public class ArrayContainsTest {
 
-  private ArrayContains udf = new ArrayContains();
+  private final ArrayContains udf = new ArrayContains();
 
   @Test
   public void shouldReturnFalseOnEmptyList() {
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/string/FieldTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/string/FieldTest.java
index 3fe447508dbe..ac3027067853 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udf/string/FieldTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udf/string/FieldTest.java
@@ -27,7 +27,7 @@ public class FieldTest {
   @Test
   public void shouldFindFirstArgument() {
     // When:
-    int pos = field.field("hello", "hello", "world");
+    final int pos = field.field("hello", "hello", "world");
 
     // Then:
     assertThat(pos, equalTo(1));
@@ -36,7 +36,7 @@ public void shouldFindFirstArgument() {
   @Test
   public void shouldFindSecondArgument() {
     // When:
-    int pos = field.field("world", "hello", "world");
+    final int pos = field.field("world", "hello", "world");
 
     // Then:
     assertThat(pos, equalTo(2));
@@ -45,7 +45,7 @@ public void shouldFindSecondArgument() {
   @Test
   public void shouldFindArgumentWhenOneIsNull() {
     // When:
-    int pos = field.field("world", null, "world");
+    final int pos = field.field("world", null, "world");
 
     // Then:
     assertThat(pos, equalTo(2));
@@ -54,7 +54,7 @@ public void shouldFindArgumentWhenOneIsNull() {
   @Test
   public void shouldNotFindMissing() {
     // When:
-    int pos = field.field("missing", "hello", "world");
+    final int pos = field.field("missing", "hello", "world");
 
     // Then:
     assertThat(pos, equalTo(0));
@@ -63,7 +63,7 @@ public void shouldNotFindMissing() {
   @Test
   public void shouldNotFindIfNoArgs() {
     // When:
-    int pos = field.field("missing");
+    final int pos = field.field("missing");
 
     // Then:
     assertThat(pos, equalTo(0));
@@ -72,7 +72,7 @@ public void shouldNotFindIfNoArgs() {
   @Test
   public void shouldNotFindNull() {
     // When:
-    int pos = field.field(null, null, "world");
+    final int pos = field.field(null, null, "world");
 
     // Then:
     assertThat(pos, equalTo(0));
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/udtf/CubeTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/udtf/CubeTest.java
index e863d9aedb7f..e3584631523b 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/function/udtf/CubeTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/function/udtf/CubeTest.java
@@ -26,15 +26,15 @@
 public class CubeTest {
 
-  private Cube cubeUdtf = new Cube();
+  private final Cube cubeUdtf = new Cube();
 
   @Test
   public void shouldCubeSingleColumn() {
     // Given:
-    Object[] args = {1};
+    final Object[] args = {1};
 
     // When:
-    List> result = cubeUdtf.cube(Arrays.asList(args));
+    final List> result = cubeUdtf.cube(Arrays.asList(args));
 
     // Then:
     assertThat(result.size(), is(2));
@@ -45,10 +45,10 @@ public void shouldCubeSingleColumn() {
   @Test
   public void shouldCubeSingleNullColumn() {
     // Given:
-    Object[] oneNull = {null};
+    final Object[] oneNull = {null};
 
     // When:
-    List> result = cubeUdtf.cube(Arrays.asList(oneNull));
+    final List> result = cubeUdtf.cube(Arrays.asList(oneNull));
 
     // Then:
     assertThat(result.size(), is(1));
@@ -58,10 +58,10 @@ public void shouldCubeSingleNullColumn() {
   @Test
   public void shouldCubeColumnsWithDifferentTypes() {
     // Given:
-    Object[] args = {1, "foo"};
+    final Object[] args = {1, "foo"};
 
     // When:
-    List> result = cubeUdtf.cube(Arrays.asList(args));
+    final List> result = cubeUdtf.cube(Arrays.asList(args));
 
     // Then:
     assertThat(result.size(), is(4));
@@ -74,10 +74,10 @@ public void shouldCubeColumnsWithDifferentTypes() {
   @Test
   public void shouldHandleOneNull() {
     // Given:
-    Object[] oneNull = {1, null};
+    final Object[] oneNull = {1, null};
 
     // When:
-    List> result = cubeUdtf.cube(Arrays.asList(oneNull));
+    final List> result = cubeUdtf.cube(Arrays.asList(oneNull));
 
     // Then:
     assertThat(result.size(), is(2));
@@ -88,10 +88,10 @@ public void shouldHandleOneNull() {
   @Test
   public void shouldHandleAllNulls() {
     // Given:
-    Object[] allNull = {null, null};
+    final Object[] allNull = {null, null};
 
     // When:
-    List> result = cubeUdtf.cube(Arrays.asList(allNull));
+    final List> result = cubeUdtf.cube(Arrays.asList(allNull));
 
     // Then:
     assertThat(result.size(), is(1));
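The CubeTest hunks above establish the cube semantics: N input columns produce every value-or-null combination, 2^N rows before duplicate rows collapse, which is why an all-null input yields a single row. A sketch of that enumeration (hypothetical, not the ksql Cube source):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public final class CubeSketch {

  static List<List<Object>> cube(final List<Object> columns) {
    final int n = columns.size();
    // LinkedHashSet drops duplicate combinations, so null inputs collapse:
    // cube({1, null}) has 2 rows and cube({null, null}) has just 1.
    final Set<List<Object>> rows = new LinkedHashSet<>();
    for (int mask = 0; mask < (1 << n); mask++) {
      final List<Object> row = new ArrayList<>(n);
      for (int col = 0; col < n; col++) {
        // Each bit decides whether the column keeps its value or is nulled.
        row.add((mask & (1 << col)) != 0 ? columns.get(col) : null);
      }
      rows.add(row);
    }
    return new ArrayList<>(rows);
  }

  public static void main(final String[] args) {
    System.out.println(cube(Arrays.<Object>asList(1, "foo")).size()); // 4
    System.out.println(cube(Arrays.<Object>asList(1, null)).size());  // 2
    System.out.println(cube(Arrays.<Object>asList(null, null)).size()); // 1
  }
}
```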
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/IntegrationTestHarness.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/IntegrationTestHarness.java
index bbbd5ce0e4e4..02558557e8ff 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/integration/IntegrationTestHarness.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/IntegrationTestHarness.java
@@ -469,7 +469,7 @@ public void waitForTopicsToBePresent(final String... topicNames) throws Exception {
           final KafkaTopicClient topicClient = serviceContext.get().getTopicClient();
           return Arrays.stream(topicNames)
              .allMatch(topicClient::isTopicExists);
-        } catch (Exception e) {
+        } catch (final Exception e) {
           throw new RuntimeException("could not get subjects");
         }
       },
@@ -487,7 +487,7 @@ public void waitForSubjectToBePresent(final String subjectName) throws Exception {
         () -> {
           try {
             return getSchemaRegistryClient().getAllSubjects().contains(subjectName);
-          } catch (Exception e) {
+          } catch (final Exception e) {
            throw new RuntimeException("could not get subjects");
          }
        },
@@ -505,7 +505,7 @@ public void waitForSubjectToBeAbsent(final String subjectName) throws Exception {
         () -> {
           try {
             return !getSchemaRegistryClient().getAllSubjects().contains(subjectName);
-          } catch (Exception e) {
+          } catch (final Exception e) {
            throw new RuntimeException("could not get subjects");
          }
        },
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/internal/KsqlEngineMetricsTest.java b/ksql-engine/src/test/java/io/confluent/ksql/internal/KsqlEngineMetricsTest.java
index e58c3d662194..6c7a9a9def4e 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/internal/KsqlEngineMetricsTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/internal/KsqlEngineMetricsTest.java
@@ -358,7 +358,7 @@ private static Answer> returnQueriesInState(
   private static class TestKsqlMetricsExtension implements KsqlMetricsExtension {
 
     @Override
-    public void configure(Map config) {
+    public void configure(final Map config) {
     }
 
     @Override
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/materialization/ks/KsMaterializationFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/materialization/ks/KsMaterializationFunctionalTest.java
index c249c05246f4..459b12e71a81 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/materialization/ks/KsMaterializationFunctionalTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/materialization/ks/KsMaterializationFunctionalTest.java
@@ -146,7 +146,7 @@ public void after() {
   @Test
   public void shouldReturnEmptyIfNotMaterializedTable() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT * FROM " + USER_TABLE + ";"
     );
@@ -161,7 +161,7 @@ public void shouldReturnEmptyIfNotMaterializedTable() {
   @Test
   public void shouldReturnEmptyIfNotMaterializedStream() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE STREAM " + output + " AS"
             + " SELECT * FROM " + USER_STREAM + ";"
     );
@@ -179,7 +179,7 @@ public void shouldReturnEmptyIfAppServerNotConfigured() {
     try (TestKsqlContext ksqlNoAppServer = TEST_HARNESS.ksqlContextBuilder().build()) {
       initializeKsql(ksqlNoAppServer);
 
-      PersistentQueryMetadata query = executeQuery(
+      final PersistentQueryMetadata query = executeQuery(
           ksqlNoAppServer,
           "CREATE TABLE " + output + " AS"
              + " SELECT COUNT(*) AS COUNT FROM " + USER_TABLE
@@ -197,7 +197,7 @@
   @Test
   public void shouldQueryMaterializedTableForAggregatedTable() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT COUNT(*) FROM " + USER_TABLE
             + " GROUP BY USERID;"
@@ -231,7 +231,7 @@ public void shouldQueryMaterializedTableForAggregatedTable() {
   @Test
   public void shouldQueryMaterializedTableForAggregatedStream() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT COUNT(*) AS COUNT FROM " + USER_STREAM
             + " GROUP BY USERID;"
@@ -265,7 +265,7 @@ public void shouldQueryMaterializedTableForAggregatedStream() {
   @Test
   public void shouldQueryMaterializedTableForTumblingWindowed() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT COUNT(*) AS COUNT FROM " + USER_STREAM
             + " WINDOW TUMBLING (SIZE " + WINDOW_SIZE.getSeconds() + " SECONDS)"
@@ -310,7 +310,7 @@ public void shouldQueryMaterializedTableForTumblingWindowed() {
   @Test
   public void shouldQueryMaterializedTableForHoppingWindowed() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT COUNT(*) AS COUNT FROM " + USER_STREAM
             + " WINDOW HOPPING (SIZE " + WINDOW_SIZE.getSeconds() + " SECONDS,"
@@ -355,7 +355,7 @@ public void shouldQueryMaterializedTableForHoppingWindowed() {
   @Test
   public void shouldQueryMaterializedTableForSessionWindowed() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT COUNT(*) AS COUNT FROM " + USER_STREAM
             + " WINDOW SESSION (" + WINDOW_SIZE.getSeconds() + " SECONDS)"
@@ -399,7 +399,7 @@ public void shouldQueryMaterializedTableForSessionWindowed() {
   @Test
   public void shouldQueryMaterializedTableWithKeyFieldsInProjection() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT USERID, COUNT(*), USERID AS USERID_2 FROM " + USER_TABLE
             + " GROUP BY USERID;"
@@ -436,7 +436,7 @@ public void shouldQueryMaterializedTableWithKeyFieldsInProjection() {
   @Test
   public void shouldQueryMaterializedTableWitMultipleAggregationColumns() {
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT COUNT(1) AS COUNT, SUM(REGISTERTIME) AS SUM FROM " + USER_TABLE
             + " GROUP BY USERID;"
@@ -472,7 +472,7 @@ public void shouldIgnoreHavingClause() {
     // Note: HAVING clause are handled centrally by KsqlMaterialization
 
     // Given:
-    PersistentQueryMetadata query = executeQuery(
+    final PersistentQueryMetadata query = executeQuery(
         "CREATE TABLE " + output + " AS"
             + " SELECT COUNT(*) AS COUNT FROM " + USER_TABLE
             + " GROUP BY USERID"
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/security/ExtensionSecurityManagerTest.java b/ksql-engine/src/test/java/io/confluent/ksql/security/ExtensionSecurityManagerTest.java
index 0b1b7975838d..6ed7fde5b2ab 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/security/ExtensionSecurityManagerTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/security/ExtensionSecurityManagerTest.java
@@ -72,7 +72,7 @@ public void shouldNotAllowExecWhenPluggableUDF() throws NoSuchMethodException {
     public static Process exec() {
       try {
         return Runtime.getRuntime().exec("cmd");
-      } catch (IOException e) {
+      } catch (final IOException e) {
        return null;
      }
    }
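The ExtensionSecurityManagerTest hunk above expects `Runtime.exec` from a pluggable UDF to be rejected. The enforcement mechanism is a `SecurityManager` whose `checkExec` refuses process creation; a minimal sketch of the concept only (the real ExtensionSecurityManager is more selective about when it blocks, and `SecurityManager` is deprecated on recent JDKs):

```java
public final class NoExecSecurityManager extends SecurityManager {

  @Override
  public void checkExec(final String cmd) {
    // Reject any attempt to spawn a process; a UDF calling
    // Runtime.getRuntime().exec("cmd") lands here first.
    throw new SecurityException("A UDF attempted to run: " + cmd);
  }

  @Override
  public void checkPermission(final java.security.Permission perm) {
    // Permissive for everything else in this sketch.
  }

  public static void main(final String[] args) throws Exception {
    System.setSecurityManager(new NoExecSecurityManager());
    try {
      Runtime.getRuntime().exec("cmd");
    } catch (final SecurityException e) {
      System.out.println(e.getMessage()); // exec was blocked
    }
  }
}
```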
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/services/KafkaTopicClientImplTest.java b/ksql-engine/src/test/java/io/confluent/ksql/services/KafkaTopicClientImplTest.java
index 1f8f09ee5b4a..061e422737b7 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/services/KafkaTopicClientImplTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/services/KafkaTopicClientImplTest.java
@@ -684,7 +684,7 @@ private Collection describeBrokerRequest() {
     return Collections.singleton(new ConfigResource(ConfigResource.Type.BROKER, node.idString()));
   }
 
-  private static DeleteTopicsResult deleteTopicException(Exception e)
+  private static DeleteTopicsResult deleteTopicException(final Exception e)
       throws InterruptedException, ExecutionException, TimeoutException {
     final DeleteTopicsResult deleteTopicsResult = mock(DeleteTopicsResult.class);
     final KafkaFuture kafkaFuture = mock(KafkaFuture.class);
@@ -848,13 +848,13 @@ public void appendTo(final StringBuffer buffer) {
   private CreateTopicsOptions shouldValidateCreate(final boolean validateOnly) {
     EasyMock.reportMatcher(new IArgumentMatcher() {
       @Override
-      public boolean matches(Object argument) {
+      public boolean matches(final Object argument) {
         return argument instanceof CreateTopicsOptions
            && ((CreateTopicsOptions) argument).shouldValidateOnly() == validateOnly;
       }
 
       @Override
-      public void appendTo(StringBuffer buffer) {
+      public void appendTo(final StringBuffer buffer) {
         buffer.append("validateOnly(\"" + validateOnly + "\")");
       }
     });
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/services/MemoizedSupplierTest.java b/ksql-engine/src/test/java/io/confluent/ksql/services/MemoizedSupplierTest.java
index 3614a2278b2f..377235e98ef3 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/services/MemoizedSupplierTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/services/MemoizedSupplierTest.java
@@ -27,7 +27,7 @@ public class MemoizedSupplierTest {
   @Test
   public void shouldReturnIsInitializedAfterConstructor() {
     // Given
-    MemoizedSupplier memoizedSupplier = new MemoizedSupplier<>(() -> "");
+    final MemoizedSupplier memoizedSupplier = new MemoizedSupplier<>(() -> "");
 
     // When
     memoizedSupplier.get();
@@ -39,7 +39,7 @@ public void shouldReturnIsInitializedAfterConstructor() {
   @Test
   public void shouldReturnNotInitializedAfterConstructor() {
     // Given
-    MemoizedSupplier memoizedSupplier = new MemoizedSupplier<>(() -> "");
+    final MemoizedSupplier memoizedSupplier = new MemoizedSupplier<>(() -> "");
 
     // Then
     assertThat(memoizedSupplier.isInitialized(), is(false));
@@ -48,11 +48,11 @@ public void shouldReturnNotInitializedAfterConstructor() {
   @Test
   public void shouldReturnSameInstance() {
     // Given
-    MemoizedSupplier memoizedSupplier = new MemoizedSupplier<>(() -> "");
+    final MemoizedSupplier memoizedSupplier = new MemoizedSupplier<>(() -> "");
 
     // When
-    String s1 = memoizedSupplier.get();
-    String s2 = memoizedSupplier.get();
+    final String s1 = memoizedSupplier.get();
+    final String s2 = memoizedSupplier.get();
 
     // Then
     assertThat(s1, sameInstance(s2));
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/LimitedProxyBuilderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/util/LimitedProxyBuilderTest.java
index 088169de31f1..d92c6b0787b4 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/util/LimitedProxyBuilderTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/util/LimitedProxyBuilderTest.java
@@ -477,15 +477,15 @@ int someFunc() {
       return mock.someFunc();
     }
 
-    String someFunc(String a) {
+    String someFunc(final String a) {
       return mock.someFunc(a);
     }
 
-    void someFunc(long a0, double a1) {
+    void someFunc(final long a0, final double a1) {
       mock.someFunc(a0, a1);
     }
 
-    int differentParams(Double a0, String a1) {
+    int differentParams(final Double a0, final String a1) {
       return 0;
     }
 
@@ -497,7 +497,7 @@ void defaultMethods() {
       mock.defaultMethods();
     }
 
-    void defaultMethods(int i) {
+    void defaultMethods(final int i) {
       mock.defaultMethods(i);
     }
   }
@@ -525,7 +525,7 @@ interface TestInterface {
 
     void defaultMethods();
 
-    default void defaultMethods(int i) {
+    default void defaultMethods(final int i) {
       throw new AssertionError("should never be called");
    }
diff --git a/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGen.java b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGen.java
index 90dc40487e56..f154a2af0725 100644
--- a/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGen.java
+++ b/ksql-examples/src/main/java/io/confluent/ksql/datagen/DataGen.java
@@ -495,7 +495,7 @@ private static int parseNumThreads(final String numThreadsString) {
             result));
       }
       return result;
-    } catch (NumberFormatException e) {
+    } catch (final NumberFormatException e) {
       throw new ArgumentParseException(String.format(
           "Invalid number of threads in '%s'; must be a positive number",
           numThreadsString));
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaDeleteTopicsException.java b/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaDeleteTopicsException.java
index adf61a65e829..27c7f316089a 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaDeleteTopicsException.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaDeleteTopicsException.java
@@ -22,7 +22,10 @@ public class KafkaDeleteTopicsException extends KafkaTopicClientException {
 
   private final List> exceptionList;
 
-  public KafkaDeleteTopicsException(String message, List> failList) {
+  public KafkaDeleteTopicsException(
+      final String message,
+      final List> failList
+  ) {
     super(message);
     exceptionList = failList;
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaResponseGetFailedException.java b/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaResponseGetFailedException.java
index fa1a511268d4..76a56d9c3fc7 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaResponseGetFailedException.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaResponseGetFailedException.java
@@ -17,7 +17,7 @@
 
 public class KafkaResponseGetFailedException extends KafkaTopicClientException {
 
-  public KafkaResponseGetFailedException(String message, Throwable throwable) {
+  public KafkaResponseGetFailedException(final String message, final Throwable throwable) {
     super(message, throwable);
   }
 }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaTopicClientException.java b/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaTopicClientException.java
index a4dce786ca34..1cb1064e2e64 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaTopicClientException.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaTopicClientException.java
@@ -17,11 +17,11 @@
 
 abstract class KafkaTopicClientException extends RuntimeException {
 
-  KafkaTopicClientException(String message) {
+  KafkaTopicClientException(final String message) {
     super(message);
   }
 
-  KafkaTopicClientException(String message, Throwable throwable) {
+  KafkaTopicClientException(final String message, final Throwable throwable) {
     super(message, throwable);
   }
 }
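The MemoizedSupplierTest hunks further up assert two properties of ksql's memoizing supplier: `isInitialized()` flips only after the first `get()`, and every `get()` returns the same instance. A sketch of a supplier with those semantics (an assumed shape written for this note, not the class from ksql's services package):

```java
import java.util.function.Supplier;

public final class MemoizedSupplier<T> implements Supplier<T> {

  private final Supplier<T> delegate;
  private volatile boolean initialized = false;
  private T value;

  public MemoizedSupplier(final Supplier<T> delegate) {
    this.delegate = delegate;
  }

  @Override
  public synchronized T get() {
    if (!initialized) {
      value = delegate.get(); // compute once, then always hand back this instance
      initialized = true;
    }
    return value;
  }

  public boolean isInitialized() {
    return initialized;
  }

  public static void main(final String[] args) {
    final MemoizedSupplier<String> s = new MemoizedSupplier<>(() -> new String("x"));
    System.out.println(s.isInitialized());  // false before the first get()
    System.out.println(s.get() == s.get()); // true: same instance both times
    System.out.println(s.isInitialized());  // true afterwards
  }
}
```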
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaTopicExistsException.java b/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaTopicExistsException.java
index 074fe289b6ce..81f3f5c3edea 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaTopicExistsException.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/exception/KafkaTopicExistsException.java
@@ -17,7 +17,7 @@
 
 public class KafkaTopicExistsException extends KafkaTopicClientException {
 
-  public KafkaTopicExistsException(String message) {
+  public KafkaTopicExistsException(final String message) {
     super(message);
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/builder/KsqlQueryBuilder.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/builder/KsqlQueryBuilder.java
index 17d4b238f3e2..f29fe7504f9a 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/builder/KsqlQueryBuilder.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/builder/KsqlQueryBuilder.java
@@ -54,8 +54,12 @@ public final class KsqlQueryBuilder {
   private final LinkedHashMap schemas = new LinkedHashMap<>();
 
   public static KsqlQueryBuilder of(
-      StreamsBuilder streamsBuilder, KsqlConfig ksqlConfig, ServiceContext serviceContext,
-      ProcessingLogContext processingLogContext, FunctionRegistry functionRegistry, QueryId queryId
+      final StreamsBuilder streamsBuilder,
+      final KsqlConfig ksqlConfig,
+      final ServiceContext serviceContext,
+      final ProcessingLogContext processingLogContext,
+      final FunctionRegistry functionRegistry,
+      final QueryId queryId
   ) {
     return new KsqlQueryBuilder(
         streamsBuilder,
@@ -71,9 +75,14 @@ public static KsqlQueryBuilder of(
   @VisibleForTesting
   KsqlQueryBuilder(
-      StreamsBuilder streamsBuilder, KsqlConfig ksqlConfig, ServiceContext serviceContext,
-      ProcessingLogContext processingLogContext, FunctionRegistry functionRegistry, QueryId queryId,
-      KeySerdeFactory keySerdeFactory, ValueSerdeFactory valueSerdeFactory
+      final StreamsBuilder streamsBuilder,
+      final KsqlConfig ksqlConfig,
+      final ServiceContext serviceContext,
+      final ProcessingLogContext processingLogContext,
+      final FunctionRegistry functionRegistry,
+      final QueryId queryId,
+      final KeySerdeFactory keySerdeFactory,
+      final ValueSerdeFactory valueSerdeFactory
   ) {
     this.streamsBuilder = requireNonNull(streamsBuilder, "streamsBuilder");
     this.ksqlConfig = requireNonNull(ksqlConfig, "ksqlConfig");
@@ -113,7 +122,7 @@ public QueryId getQueryId() {
     return queryId;
   }
 
-  public KsqlQueryBuilder withKsqlConfig(KsqlConfig newConfig) {
+  public KsqlQueryBuilder withKsqlConfig(final KsqlConfig newConfig) {
     return of(
         streamsBuilder,
         newConfig,
@@ -124,15 +133,15 @@ public KsqlQueryBuilder withKsqlConfig(KsqlConfig newConfig) {
     );
   }
 
-  public QueryContext.Stacker buildNodeContext(String context) {
+  public QueryContext.Stacker buildNodeContext(final String context) {
     return new QueryContext.Stacker()
        .push(context);
   }
 
   public Serde buildKeySerde(
-      FormatInfo format, PhysicalSchema schema, QueryContext queryContext
+      final FormatInfo format, final PhysicalSchema schema, final QueryContext queryContext
   ) {
-    String loggerNamePrefix = QueryLoggerUtil.queryLoggerName(queryId, queryContext);
+    final String loggerNamePrefix = QueryLoggerUtil.queryLoggerName(queryId, queryContext);
 
     return keySerdeFactory.create(
         format,
@@ -145,9 +154,12 @@
   }
 
   public Serde> buildKeySerde(
-      FormatInfo format, WindowInfo window, PhysicalSchema schema, QueryContext queryContext
+      final FormatInfo format,
+      final WindowInfo window,
+      final PhysicalSchema schema,
+      final QueryContext queryContext
   ) {
-    String loggerNamePrefix = QueryLoggerUtil.queryLoggerName(queryId, queryContext);
+    final String loggerNamePrefix = QueryLoggerUtil.queryLoggerName(queryId, queryContext);
 
     return keySerdeFactory.create(
         format,
@@ -161,9 +173,11 @@ public Serde> buildKeySerde(
   }
 
   public Serde buildValueSerde(
-      FormatInfo format, PhysicalSchema schema, QueryContext queryContext
+      final FormatInfo format,
+      final PhysicalSchema schema,
+      final QueryContext queryContext
   ) {
-    String loggerNamePrefix = QueryLoggerUtil.queryLoggerName(queryId, queryContext);
+    final String loggerNamePrefix = QueryLoggerUtil.queryLoggerName(queryId, queryContext);
 
     track(loggerNamePrefix, schema.valueSchema());
 
@@ -177,7 +191,7 @@
     );
   }
 
-  private void track(String loggerNamePrefix, PersistenceSchema schema) {
+  private void track(final String loggerNamePrefix, final PersistenceSchema schema) {
     if (schemas.containsKey(loggerNamePrefix)) {
       throw new IllegalStateException("Schema with tracked:" + loggerNamePrefix);
     }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java
index f912a874c9cc..3e79f8a48996 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java
@@ -41,7 +41,6 @@
 import java.util.Objects;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-import javax.annotation.Nullable;
 import org.apache.kafka.connect.data.Schema;
 import org.codehaus.commons.compiler.CompileException;
 import org.codehaus.commons.compiler.CompilerFactoryFactory;
@@ -58,10 +57,13 @@ public class CodeGenRunner {
   private final KsqlConfig ksqlConfig;
 
   public static List compileExpressions(
-      Stream expressions, String type, LogicalSchema schema, KsqlConfig ksqlConfig,
-      FunctionRegistry functionRegistry
+      final Stream expressions,
+      final String type,
+      final LogicalSchema schema,
+      final KsqlConfig ksqlConfig,
+      final FunctionRegistry functionRegistry
   ) {
-    CodeGenRunner codeGen = new CodeGenRunner(schema, ksqlConfig, functionRegistry);
+    final CodeGenRunner codeGen = new CodeGenRunner(schema, ksqlConfig, functionRegistry);
 
     return expressions
        .map(exp -> codeGen.buildCodeGenFromParseTree(exp, type))
@@ -69,7 +71,9 @@
 
   public CodeGenRunner(
-      LogicalSchema schema, KsqlConfig ksqlConfig, FunctionRegistry functionRegistry
+      final LogicalSchema schema,
+      final KsqlConfig ksqlConfig,
+      final FunctionRegistry functionRegistry
   ) {
     this.functionRegistry = Objects.requireNonNull(functionRegistry, "functionRegistry");
     this.schema = Objects.requireNonNull(schema, "schema");
@@ -77,29 +81,32 @@ public CodeGenRunner(
     this.expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry);
   }
 
-  public CodeGenSpec getCodeGenSpec(Expression expression) {
-    Visitor visitor =
+  public CodeGenSpec getCodeGenSpec(final Expression expression) {
+    final Visitor visitor =
        new Visitor(schema, functionRegistry, expressionTypeManager, ksqlConfig);
 
     visitor.process(expression, null);
     return visitor.spec.build();
   }
 
-  public ExpressionMetadata buildCodeGenFromParseTree(Expression expression, String type) {
+  public ExpressionMetadata buildCodeGenFromParseTree(
+      final Expression expression,
+      final String type
+  ) {
     try {
-      CodeGenSpec spec = getCodeGenSpec(expression);
-      String javaCode = SqlToJavaVisitor.of(
SqlToJavaVisitor.of( + final CodeGenSpec spec = getCodeGenSpec(expression); + final String javaCode = SqlToJavaVisitor.of( schema, functionRegistry, spec ).process(expression); - IExpressionEvaluator ee = + final IExpressionEvaluator ee = CompilerFactoryFactory.getDefaultCompilerFactory().newExpressionEvaluator(); ee.setDefaultImports(SqlToJavaVisitor.JAVA_IMPORTS.toArray(new String[0])); ee.setParameters(spec.argumentNames(), spec.argumentTypes()); - SqlType expressionType = expressionTypeManager + final SqlType expressionType = expressionTypeManager .getExpressionSqlType(expression); ee.setExpressionType(SQL_TO_JAVA_TYPE_CONVERTER.toJavaType(expressionType)); @@ -116,7 +123,7 @@ public ExpressionMetadata buildCodeGenFromParseTree(Expression expression, Strin throw new KsqlException("Code generation failed for " + type + ": " + e.getMessage() + ". expression:" + expression + ", schema:" + schema, e); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException("Unexpected error generating code for " + type + ". expression:" + expression, e); } @@ -131,8 +138,8 @@ private static final class Visitor extends TraversalExpressionVisitor { private final KsqlConfig ksqlConfig; private Visitor( - LogicalSchema schema, FunctionRegistry functionRegistry, - ExpressionTypeManager expressionTypeManager, KsqlConfig ksqlConfig + final LogicalSchema schema, final FunctionRegistry functionRegistry, + final ExpressionTypeManager expressionTypeManager, final KsqlConfig ksqlConfig ) { this.schema = Objects.requireNonNull(schema, "schema"); this.ksqlConfig = Objects.requireNonNull(ksqlConfig, "ksqlConfig"); @@ -142,22 +149,22 @@ private Visitor( } @Override - public Void visitLikePredicate(LikePredicate node, Void context) { + public Void visitLikePredicate(final LikePredicate node, final Void context) { process(node.getValue(), null); return null; } @Override - public Void visitFunctionCall(FunctionCall node, Void context) { - List argumentTypes = new ArrayList<>(); - FunctionName functionName = node.getName(); - for (Expression argExpr : node.getArguments()) { + public Void visitFunctionCall(final FunctionCall node, final Void context) { + final List argumentTypes = new ArrayList<>(); + final FunctionName functionName = node.getName(); + for (final Expression argExpr : node.getArguments()) { process(argExpr, null); argumentTypes.add(expressionTypeManager.getExpressionSqlType(argExpr)); } - UdfFactory holder = functionRegistry.getUdfFactory(functionName.name()); - KsqlScalarFunction function = holder.getFunction(argumentTypes); + final UdfFactory holder = functionRegistry.getUdfFactory(functionName.name()); + final KsqlScalarFunction function = holder.getFunction(argumentTypes); spec.addFunction( function.name(), function.newInstance(ksqlConfig) @@ -167,9 +174,9 @@ public Void visitFunctionCall(FunctionCall node, Void context) { } @Override - public Void visitSubscriptExpression(SubscriptExpression node, Void context) { + public Void visitSubscriptExpression(final SubscriptExpression node, final Void context) { if (node.getBase() instanceof ColumnReferenceExp) { - ColumnReferenceExp arrayBaseName = (ColumnReferenceExp) node.getBase(); + final ColumnReferenceExp arrayBaseName = (ColumnReferenceExp) node.getBase(); addRequiredColumn(arrayBaseName.getReference()); } else { process(node.getBase(), context); @@ -179,7 +186,10 @@ public Void visitSubscriptExpression(SubscriptExpression node, Void context) { } @Override - public Void visitStructExpression(CreateStructExpression exp, 
@Nullable Void context) { + public Void visitStructExpression( + final CreateStructExpression exp, + final Void context + ) { exp.getFields().forEach(val -> process(val.getValue(), context)); final Schema schema = SchemaConverters .sqlToConnectConverter() @@ -190,13 +200,13 @@ public Void visitStructExpression(CreateStructExpression exp, @Nullable Void con } @Override - public Void visitColumnReference(ColumnReferenceExp node, Void context) { + public Void visitColumnReference(final ColumnReferenceExp node, final Void context) { addRequiredColumn(node.getReference()); return null; } @Override - public Void visitDereferenceExpression(DereferenceExpression node, Void context) { + public Void visitDereferenceExpression(final DereferenceExpression node, final Void context) { process(node.getBase(), null); return null; } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java index 56ede106bf78..ab4a9b5b6ad8 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenSpec.java @@ -44,10 +44,10 @@ public final class CodeGenSpec { private final ImmutableMap structToCodeName; private CodeGenSpec( - ImmutableList arguments, - ImmutableMap columnToCodeName, - ImmutableListMultimap functionToCodeName, - ImmutableMap structToCodeName + final ImmutableList arguments, + final ImmutableMap columnToCodeName, + final ImmutableListMultimap functionToCodeName, + final ImmutableMap structToCodeName ) { this.arguments = arguments; this.columnToCodeName = columnToCodeName; @@ -67,25 +67,25 @@ public List arguments() { return arguments; } - public String getCodeName(ColumnRef columnRef) { + public String getCodeName(final ColumnRef columnRef) { return columnToCodeName.get(columnRef); } - public String getUniqueNameForFunction(FunctionName functionName, int index) { - List names = functionToCodeName.get(functionName); + public String getUniqueNameForFunction(final FunctionName functionName, final int index) { + final List names = functionToCodeName.get(functionName); if (names.size() <= index) { throw new KsqlException("Cannot get name for " + functionName + " " + index + " times"); } return names.get(index); } - public void resolve(GenericRow row, Object[] parameters) { + public void resolve(final GenericRow row, final Object[] parameters) { for (int paramIdx = 0; paramIdx < arguments.size(); paramIdx++) { parameters[paramIdx] = arguments.get(paramIdx).resolve(row); } } - public String getStructSchemaName(CreateStructExpression createStructExpression) { + public String getStructSchemaName(final CreateStructExpression createStructExpression) { final String schemaName = structToCodeName.get(createStructExpression); if (schemaName == null) { throw new KsqlException( @@ -117,13 +117,13 @@ void addParameter( argumentBuilder.add(new ValueArgumentSpec(codeName, type, colIndex)); } - void addFunction(FunctionName functionName, Kudf function) { + void addFunction(final FunctionName functionName, final Kudf function) { final String codeName = CodeGenUtil.functionName(functionName, argumentCount++); functionNameBuilder.put(functionName, codeName); argumentBuilder.add(new FunctionArgumentSpec(codeName, function.getClass(), function)); } - void addStructSchema(CreateStructExpression struct, Schema schema) { + void addStructSchema(final CreateStructExpression struct, final Schema schema) { 
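      // Illustrative note: each struct expression is registered under a
      // generated schema name (via CodeGenUtil.schemaName below), so the
      // compiled expression can later resolve its Connect Schema as a bound
      // argument through getStructSchemaName(...).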
      final String structSchemaName = CodeGenUtil.schemaName(structSchemaCount++);
      structToSchemaName.put(struct, structSchemaName);
      argumentBuilder.add(new SchemaArgumentSpec(structSchemaName, schema));
@@ -243,15 +243,15 @@ public static final class SchemaArgumentSpec extends BaseArgumentSpec {

    private final ConnectSchema schema;

    SchemaArgumentSpec(
-        String name,
-        Schema schema
+        final String name,
+        final Schema schema
    ) {
      super(name, Schema.class);
      this.schema = (ConnectSchema) requireNonNull(schema, "schema").schema();
    }

    @Override
-    public Object resolve(GenericRow value) {
+    public Object resolve(final GenericRow value) {
      return schema;
    }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenUtil.java
index 8279121166f1..4d2aabec32c7 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenUtil.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenUtil.java
@@ -25,15 +25,15 @@ public final class CodeGenUtil {
  private CodeGenUtil() {
  }

-  public static String paramName(int index) {
+  public static String paramName(final int index) {
    return PARAM_NAME_PREFIX + index;
  }

-  public static String schemaName(int index) {
+  public static String schemaName(final int index) {
    return SCHEMA_NAME_PREFIX + index;
  }

-  public static String functionName(FunctionName fun, int index) {
+  public static String functionName(final FunctionName fun, final int index) {
    return fun.name() + "_" + index;
  }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/ExpressionMetadata.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/ExpressionMetadata.java
index 41a6387f0cc3..7b6988deaa39 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/ExpressionMetadata.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/ExpressionMetadata.java
@@ -38,10 +38,10 @@ public class ExpressionMetadata {
  private final CodeGenSpec spec;

  public ExpressionMetadata(
-      IExpressionEvaluator expressionEvaluator,
-      CodeGenSpec spec,
-      SqlType expressionType,
-      Expression expression
+      final IExpressionEvaluator expressionEvaluator,
+      final CodeGenSpec spec,
+      final SqlType expressionType,
+      final Expression expression
  ) {
    this.expressionEvaluator = Objects.requireNonNull(expressionEvaluator, "expressionEvaluator");
    this.expressionType = Objects.requireNonNull(expressionType, "expressionType");
@@ -62,16 +62,16 @@ public Expression getExpression() {
    return expression;
  }

-  public Object evaluate(GenericRow row) {
+  public Object evaluate(final GenericRow row) {
    try {
      return expressionEvaluator.evaluate(getParameters(row));
-    } catch (InvocationTargetException e) {
+    } catch (final InvocationTargetException e) {
      throw new KsqlException(e.getCause().getMessage(), e.getCause());
    }
  }

-  private Object[] getParameters(GenericRow row) {
-    Object[] parameters = this.threadLocalParameters.get();
+  private Object[] getParameters(final GenericRow row) {
+    final Object[] parameters = this.threadLocalParameters.get();
    spec.resolve(row, parameters);
    return parameters;
  }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java
index 61821035fe45..b8e8515994c0 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java
+++ 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java @@ -142,15 +142,15 @@ public class SqlToJavaVisitor { private final Function structToCodeName; public static SqlToJavaVisitor of( - LogicalSchema schema, FunctionRegistry functionRegistry, CodeGenSpec spec + final LogicalSchema schema, final FunctionRegistry functionRegistry, final CodeGenSpec spec ) { - Multiset nameCounts = HashMultiset.create(); + final Multiset nameCounts = HashMultiset.create(); return new SqlToJavaVisitor( schema, functionRegistry, spec::getCodeName, name -> { - int index = nameCounts.add(name, 1); + final int index = nameCounts.add(name, 1); return spec.getUniqueNameForFunction(name, index); }, spec::getStructSchemaName); @@ -158,10 +158,10 @@ public static SqlToJavaVisitor of( @VisibleForTesting SqlToJavaVisitor( - LogicalSchema schema, FunctionRegistry functionRegistry, - Function colRefToCodeName, - Function funNameToCodeName, - Function structToCodeName + final LogicalSchema schema, final FunctionRegistry functionRegistry, + final Function colRefToCodeName, + final Function funNameToCodeName, + final Function structToCodeName ) { this.expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry); @@ -172,12 +172,12 @@ public static SqlToJavaVisitor of( this.structToCodeName = Objects.requireNonNull(structToCodeName, "structToCodeName"); } - public String process(Expression expression) { + public String process(final Expression expression) { return formatExpression(expression); } - private String formatExpression(Expression expression) { - Pair expressionFormatterResult = + private String formatExpression(final Expression expression) { + final Pair expressionFormatterResult = new Formatter(functionRegistry).process(expression, null); return expressionFormatterResult.getLeft(); } @@ -187,16 +187,16 @@ private class Formatter implements ExpressionVisitor, Void private final FunctionRegistry functionRegistry; - Formatter(FunctionRegistry functionRegistry) { + Formatter(final FunctionRegistry functionRegistry) { this.functionRegistry = functionRegistry; } - private Pair visitIllegalState(Expression expression) { + private Pair visitIllegalState(final Expression expression) { throw new IllegalStateException( format("expression type %s should never be visited", expression.getClass())); } - private Pair visitUnsupported(Expression expression) { + private Pair visitUnsupported(final Expression expression) { throw new UnsupportedOperationException( format( "not yet implemented: %s.visit%s", @@ -207,58 +207,70 @@ private Pair visitUnsupported(Expression expression) { } @Override - public Pair visitType(Type node, Void context) { + public Pair visitType(final Type node, final Void context) { return visitIllegalState(node); } @Override - public Pair visitWhenClause(WhenClause whenClause, Void context) { + public Pair visitWhenClause(final WhenClause whenClause, final Void context) { return visitIllegalState(whenClause); } @Override - public Pair visitInPredicate(InPredicate inPredicate, Void context) { + public Pair visitInPredicate( + final InPredicate inPredicate, + final Void context + ) { return visitUnsupported(inPredicate); } @Override public Pair visitInListExpression( - InListExpression inListExpression, Void context + final InListExpression inListExpression, final Void context ) { return visitUnsupported(inListExpression); } @Override public Pair visitTimestampLiteral( - TimestampLiteral timestampLiteral, Void context + final TimestampLiteral timestampLiteral, 
final Void context ) { return visitUnsupported(timestampLiteral); } @Override - public Pair visitTimeLiteral(TimeLiteral timeLiteral, Void context) { + public Pair visitTimeLiteral( + final TimeLiteral timeLiteral, + final Void context + ) { return visitUnsupported(timeLiteral); } @Override - public Pair visitDecimalLiteral(DecimalLiteral decimalLiteral, Void context) { + public Pair visitDecimalLiteral( + final DecimalLiteral decimalLiteral, + final Void context + ) { return visitUnsupported(decimalLiteral); } @Override public Pair visitSimpleCaseExpression( - SimpleCaseExpression simpleCaseExpression, Void context + final SimpleCaseExpression simpleCaseExpression, final Void context ) { return visitUnsupported(simpleCaseExpression); } @Override - public Pair visitBooleanLiteral(BooleanLiteral node, Void context) { + public Pair visitBooleanLiteral( + final BooleanLiteral node, + final Void context + ) { return new Pair<>(String.valueOf(node.getValue()), SqlTypes.BOOLEAN); } @Override - public Pair visitStringLiteral(StringLiteral node, Void context) { + public Pair visitStringLiteral(final StringLiteral node, final Void context) { return new Pair<>( "\"" + StringEscapeUtils.escapeJava(node.getValue()) + "\"", SqlTypes.STRING @@ -266,19 +278,22 @@ public Pair visitStringLiteral(StringLiteral node, Void context } @Override - public Pair visitDoubleLiteral(DoubleLiteral node, Void context) { + public Pair visitDoubleLiteral(final DoubleLiteral node, final Void context) { return new Pair<>(Double.toString(node.getValue()), SqlTypes.DOUBLE); } @Override - public Pair visitNullLiteral(NullLiteral node, Void context) { + public Pair visitNullLiteral(final NullLiteral node, final Void context) { return new Pair<>("null", null); } @Override - public Pair visitColumnReference(ColumnReferenceExp node, Void context) { - ColumnRef fieldName = node.getReference(); - Column schemaColumn = schema.findValueColumn(node.getReference()) + public Pair visitColumnReference( + final ColumnReferenceExp node, + final Void context + ) { + final ColumnRef fieldName = node.getReference(); + final Column schemaColumn = schema.findValueColumn(node.getReference()) .orElseThrow(() -> new KsqlException("Field not found: " + node.getReference())); @@ -287,49 +302,52 @@ public Pair visitColumnReference(ColumnReferenceExp node, Void @Override public Pair visitDereferenceExpression( - DereferenceExpression node, Void context + final DereferenceExpression node, final Void context ) { - SqlType functionReturnSchema = expressionTypeManager.getExpressionSqlType(node); - String javaReturnType = + final SqlType functionReturnSchema = expressionTypeManager.getExpressionSqlType(node); + final String javaReturnType = SchemaConverters.sqlToJavaConverter().toJavaType(functionReturnSchema).getSimpleName(); - String struct = process(node.getBase(), context).getLeft(); - String field = process(new StringLiteral(node.getFieldName()), context).getLeft(); - String codeString = "((" + javaReturnType + ") " + final String struct = process(node.getBase(), context).getLeft(); + final String field = process(new StringLiteral(node.getFieldName()), context).getLeft(); + final String codeString = "((" + javaReturnType + ") " + struct + ".get(" + field + "))"; return new Pair<>(codeString, functionReturnSchema); } - public Pair visitLongLiteral(LongLiteral node, Void context) { + public Pair visitLongLiteral(final LongLiteral node, final Void context) { return new Pair<>(node.getValue() + "L", SqlTypes.BIGINT); } @Override - public Pair 
visitIntegerLiteral(IntegerLiteral node, Void context) { + public Pair visitIntegerLiteral( + final IntegerLiteral node, + final Void context + ) { return new Pair<>(Integer.toString(node.getValue()), SqlTypes.INTEGER); } @Override - public Pair visitFunctionCall(FunctionCall node, Void context) { - FunctionName functionName = node.getName(); + public Pair visitFunctionCall(final FunctionCall node, final Void context) { + final FunctionName functionName = node.getName(); - String instanceName = funNameToCodeName.apply(functionName); + final String instanceName = funNameToCodeName.apply(functionName); - SqlType functionReturnSchema = getFunctionReturnSchema(node); - String javaReturnType = + final SqlType functionReturnSchema = getFunctionReturnSchema(node); + final String javaReturnType = SchemaConverters.sqlToJavaConverter().toJavaType(functionReturnSchema).getSimpleName(); - String arguments = node.getArguments().stream() + final String arguments = node.getArguments().stream() .map(arg -> process(arg, context).getLeft()) .collect(Collectors.joining(", ")); - String codeString = "((" + javaReturnType + ") " + instanceName + final String codeString = "((" + javaReturnType + ") " + instanceName + ".evaluate(" + arguments + "))"; return new Pair<>(codeString, functionReturnSchema); } - private SqlType getFunctionReturnSchema(FunctionCall node) { - UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName().name()); - List argumentSchemas = node.getArguments().stream() + private SqlType getFunctionReturnSchema(final FunctionCall node) { + final UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName().name()); + final List argumentSchemas = node.getArguments().stream() .map(expressionTypeManager::getExpressionSqlType) .collect(Collectors.toList()); @@ -338,7 +356,7 @@ private SqlType getFunctionReturnSchema(FunctionCall node) { @Override public Pair visitLogicalBinaryExpression( - LogicalBinaryExpression node, Void context + final LogicalBinaryExpression node, final Void context ) { if (node.getType() == LogicalBinaryExpression.Type.OR) { return new Pair<>( @@ -359,12 +377,12 @@ public Pair visitLogicalBinaryExpression( } @Override - public Pair visitNotExpression(NotExpression node, Void context) { - String exprString = process(node.getValue(), context).getLeft(); + public Pair visitNotExpression(final NotExpression node, final Void context) { + final String exprString = process(node.getValue(), context).getLeft(); return new Pair<>("(!" + exprString + ")", SqlTypes.BOOLEAN); } - private String nullCheckPrefix(ComparisonExpression.Type type) { + private String nullCheckPrefix(final ComparisonExpression.Type type) { if (type == ComparisonExpression.Type.IS_DISTINCT_FROM) { return "(((Object)(%1$s)) == null || ((Object)(%2$s)) == null) ? " + "((((Object)(%1$s)) == null ) ^ (((Object)(%2$s)) == null )) : "; @@ -372,7 +390,7 @@ private String nullCheckPrefix(ComparisonExpression.Type type) { return "(((Object)(%1$s)) == null || ((Object)(%2$s)) == null) ? 
false : "; } - private String visitStringComparisonExpression(ComparisonExpression.Type type) { + private String visitStringComparisonExpression(final ComparisonExpression.Type type) { switch (type) { case EQUAL: return "(%1$s.equals(%2$s))"; @@ -389,7 +407,7 @@ private String visitStringComparisonExpression(ComparisonExpression.Type type) { } } - private String visitScalarComparisonExpression(ComparisonExpression.Type type) { + private String visitScalarComparisonExpression(final ComparisonExpression.Type type) { switch (type) { case EQUAL: return "((%1$s <= %2$s) && (%1$s >= %2$s))"; @@ -407,9 +425,9 @@ private String visitScalarComparisonExpression(ComparisonExpression.Type type) { } private String visitBytesComparisonExpression( - ComparisonExpression.Type type, SqlType left, SqlType right + final ComparisonExpression.Type type, final SqlType left, final SqlType right ) { - String comparator = SQL_COMPARE_TO_JAVA.get(type); + final String comparator = SQL_COMPARE_TO_JAVA.get(type); if (comparator == null) { throw new KsqlException("Unexpected scalar comparison: " + type.getValue()); } @@ -422,7 +440,7 @@ private String visitBytesComparisonExpression( ); } - private String toDecimal(SqlType schema, int index) { + private String toDecimal(final SqlType schema, final int index) { if (schema.baseType() == SqlBaseType.DECIMAL) { return "%" + index + "$s"; } @@ -430,7 +448,7 @@ private String toDecimal(SqlType schema, int index) { return "new BigDecimal(%" + index + "$s)"; } - private String visitBooleanComparisonExpression(ComparisonExpression.Type type) { + private String visitBooleanComparisonExpression(final ComparisonExpression.Type type) { switch (type) { case EQUAL: return "(Boolean.compare(%1$s, %2$s) == 0)"; @@ -444,10 +462,10 @@ private String visitBooleanComparisonExpression(ComparisonExpression.Type type) @Override public Pair visitComparisonExpression( - ComparisonExpression node, Void context + final ComparisonExpression node, final Void context ) { - Pair left = process(node.getLeft(), context); - Pair right = process(node.getRight(), context); + final Pair left = process(node.getLeft(), context); + final Pair right = process(node.getRight(), context); String exprFormat = nullCheckPrefix(node.getType()); @@ -472,33 +490,39 @@ public Pair visitComparisonExpression( break; } } - String expr = "(" + String.format(exprFormat, left.getLeft(), right.getLeft()) + ")"; + final String expr = "(" + String.format(exprFormat, left.getLeft(), right.getLeft()) + ")"; return new Pair<>(expr, SqlTypes.BOOLEAN); } @Override - public Pair visitCast(Cast node, Void context) { - Pair expr = process(node.getExpression(), context); + public Pair visitCast(final Cast node, final Void context) { + final Pair expr = process(node.getExpression(), context); return CastVisitor.getCast(expr, node.getType().getSqlType()); } @Override - public Pair visitIsNullPredicate(IsNullPredicate node, Void context) { - Pair value = process(node.getValue(), context); + public Pair visitIsNullPredicate( + final IsNullPredicate node, + final Void context + ) { + final Pair value = process(node.getValue(), context); return new Pair<>("((" + value.getLeft() + ") == null )", SqlTypes.BOOLEAN); } @Override - public Pair visitIsNotNullPredicate(IsNotNullPredicate node, Void context) { - Pair value = process(node.getValue(), context); + public Pair visitIsNotNullPredicate( + final IsNotNullPredicate node, + final Void context + ) { + final Pair value = process(node.getValue(), context); return new Pair<>("((" + value.getLeft() 
+ ") != null )", SqlTypes.BOOLEAN); } @Override public Pair visitArithmeticUnary( - ArithmeticUnaryExpression node, Void context + final ArithmeticUnaryExpression node, final Void context ) { - Pair value = process(node.getValue(), context); + final Pair value = process(node.getValue(), context); switch (node.getSign()) { case MINUS: return visitArithmeticMinus(value); @@ -509,7 +533,7 @@ public Pair visitArithmeticUnary( } } - private Pair visitArithmeticMinus(Pair value) { + private Pair visitArithmeticMinus(final Pair value) { if (value.getRight().baseType() == SqlBaseType.DECIMAL) { return new Pair<>( String.format( @@ -521,12 +545,12 @@ private Pair visitArithmeticMinus(Pair value) ); } else { // this is to avoid turning a sequence of "-" into a comment (i.e., "-- comment") - String separator = value.getLeft().startsWith("-") ? " " : ""; + final String separator = value.getLeft().startsWith("-") ? " " : ""; return new Pair<>("-" + separator + value.getLeft(), value.getRight()); } } - private Pair visitArithmeticPlus(Pair value) { + private Pair visitArithmeticPlus(final Pair value) { if (value.getRight().baseType() == SqlBaseType.DECIMAL) { return new Pair<>( String.format( @@ -543,18 +567,18 @@ private Pair visitArithmeticPlus(Pair value) { @Override public Pair visitArithmeticBinary( - ArithmeticBinaryExpression node, Void context + final ArithmeticBinaryExpression node, final Void context ) { - Pair left = process(node.getLeft(), context); - Pair right = process(node.getRight(), context); + final Pair left = process(node.getLeft(), context); + final Pair right = process(node.getRight(), context); - SqlType schema = expressionTypeManager.getExpressionSqlType(node); + final SqlType schema = expressionTypeManager.getExpressionSqlType(node); if (schema.baseType() == SqlBaseType.DECIMAL) { - SqlDecimal decimal = (SqlDecimal) schema; - String leftExpr = + final SqlDecimal decimal = (SqlDecimal) schema; + final String leftExpr = CastVisitor.getCast(left, DecimalUtil.toSqlDecimal(left.right)).getLeft(); - String rightExpr = + final String rightExpr = CastVisitor.getCast(right, DecimalUtil.toSqlDecimal(right.right)).getLeft(); return new Pair<>( @@ -569,11 +593,11 @@ public Pair visitArithmeticBinary( schema ); } else { - String leftExpr = + final String leftExpr = left.getRight().baseType() == SqlBaseType.DECIMAL ? CastVisitor.getCast(left, SqlTypes.DOUBLE).getLeft() : left.getLeft(); - String rightExpr = + final String rightExpr = right.getRight().baseType() == SqlBaseType.DECIMAL ? 
CastVisitor.getCast(right, SqlTypes.DOUBLE).getLeft() : right.getLeft(); @@ -592,10 +616,10 @@ public Pair visitArithmeticBinary( @Override public Pair visitSearchedCaseExpression( - SearchedCaseExpression node, Void context + final SearchedCaseExpression node, final Void context ) { - String functionClassName = SearchedCaseFunction.class.getSimpleName(); - List whenClauses = node + final String functionClassName = SearchedCaseFunction.class.getSimpleName(); + final List whenClauses = node .getWhenClauses() .stream() .map(whenClause -> new CaseWhenProcessed( @@ -604,11 +628,11 @@ public Pair visitSearchedCaseExpression( )) .collect(Collectors.toList()); - SqlType resultSchema = expressionTypeManager.getExpressionSqlType(node); - String resultSchemaString = + final SqlType resultSchema = expressionTypeManager.getExpressionSqlType(node); + final String resultSchemaString = SchemaConverters.sqlToJavaConverter().toJavaType(resultSchema).getCanonicalName(); - List lazyWhenClause = whenClauses + final List lazyWhenClause = whenClauses .stream() .map(processedWhenClause -> functionClassName + ".whenClause(" + buildSupplierCode( @@ -619,11 +643,11 @@ public Pair visitSearchedCaseExpression( + ")") .collect(Collectors.toList()); - String defaultValue = node.getDefaultValue().isPresent() + final String defaultValue = node.getDefaultValue().isPresent() ? process(node.getDefaultValue().get(), context).getLeft() : "null"; - String codeString = "((" + resultSchemaString + ")" + final String codeString = "((" + resultSchemaString + ")" + functionClassName + ".searchedCaseFunction(ImmutableList.of( " + StringUtils.join(lazyWhenClause, ", ") + ")," + buildSupplierCode(resultSchemaString, defaultValue) @@ -631,17 +655,17 @@ public Pair visitSearchedCaseExpression( return new Pair<>(codeString, resultSchema); } - private String buildSupplierCode(String typeString, String code) { + private String buildSupplierCode(final String typeString, final String code) { return " new " + Supplier.class.getSimpleName() + "<" + typeString + ">() {" + " @Override public " + typeString + " get() { return " + code + "; }}"; } @Override - public Pair visitLikePredicate(LikePredicate node, Void context) { + public Pair visitLikePredicate(final LikePredicate node, final Void context) { // For now we just support simple prefix/suffix cases only. 
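      // Illustrative sketch of the supported translations (hypothetical inputs):
      //   col LIKE 'foo%'  -> generated Java testing a "foo" prefix on col
      //   col LIKE '%foo'  -> generated Java testing a "foo" suffix on col
      //   col LIKE '%foo%' -> generated Java testing for "foo" anywhere in col
      // Patterns with '_' or an interior '%' fall outside these simple cases.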
- String patternString = trimQuotes(process(node.getPattern(), context).getLeft()); - String valueString = process(node.getValue(), context).getLeft(); + final String patternString = trimQuotes(process(node.getPattern(), context).getLeft()); + final String valueString = process(node.getValue(), context).getLeft(); if (patternString.startsWith("%")) { if (patternString.endsWith("%")) { return new Pair<>( @@ -682,18 +706,21 @@ public Pair visitLikePredicate(LikePredicate node, Void context } @Override - public Pair visitSubscriptExpression(SubscriptExpression node, Void context) { - SqlType internalSchema = expressionTypeManager.getExpressionSqlType(node.getBase()); + public Pair visitSubscriptExpression( + final SubscriptExpression node, + final Void context + ) { + final SqlType internalSchema = expressionTypeManager.getExpressionSqlType(node.getBase()); - String internalSchemaJavaType = + final String internalSchemaJavaType = SchemaConverters.sqlToJavaConverter().toJavaType(internalSchema).getCanonicalName(); switch (internalSchema.baseType()) { case ARRAY: - SqlArray array = (SqlArray) internalSchema; - String listName = process(node.getBase(), context).getLeft(); - String suppliedIdx = process(node.getIndex(), context).getLeft(); + final SqlArray array = (SqlArray) internalSchema; + final String listName = process(node.getBase(), context).getLeft(); + final String suppliedIdx = process(node.getIndex(), context).getLeft(); - String code = format( + final String code = format( "((%s) (%s.arrayAccess((%s) %s, ((int) %s))))", SchemaConverters.sqlToJavaConverter().toJavaType(array.getItemType()).getSimpleName(), ArrayAccess.class.getSimpleName(), @@ -705,7 +732,7 @@ public Pair visitSubscriptExpression(SubscriptExpression node, return new Pair<>(code, array.getItemType()); case MAP: - SqlMap map = (SqlMap) internalSchema; + final SqlMap map = (SqlMap) internalSchema; return new Pair<>( String.format( "((%s) ((%s)%s).get(%s))", @@ -723,10 +750,13 @@ public Pair visitSubscriptExpression(SubscriptExpression node, } @Override - public Pair visitStructExpression(CreateStructExpression node, Void context) { + public Pair visitStructExpression( + final CreateStructExpression node, + final Void context + ) { final String schemaName = structToCodeName.apply(node); final StringBuilder struct = new StringBuilder("new Struct(").append(schemaName).append(")"); - for (Field field : node.getFields()) { + for (final Field field : node.getFields()) { struct.append(".put(") .append('"') .append(field.getName()) @@ -742,10 +772,13 @@ public Pair visitStructExpression(CreateStructExpression node, } @Override - public Pair visitBetweenPredicate(BetweenPredicate node, Void context) { - Pair value = process(node.getValue(), context); - Pair min = process(node.getMin(), context); - Pair max = process(node.getMax(), context); + public Pair visitBetweenPredicate( + final BetweenPredicate node, + final Void context + ) { + final Pair value = process(node.getValue(), context); + final Pair min = process(node.getMin(), context); + final Pair max = process(node.getMax(), context); String expression = "(((Object) {value}) == null " + "|| ((Object) {min}) == null " @@ -753,7 +786,7 @@ public Pair visitBetweenPredicate(BetweenPredicate node, Void c + "? 
false " + ": "; - SqlBaseType type = value.getRight().baseType(); + final SqlBaseType type = value.getRight().baseType(); switch (type) { case DOUBLE: case BIGINT: @@ -769,7 +802,7 @@ public Pair visitBetweenPredicate(BetweenPredicate node, Void c // note that the entire expression must be surrounded by parentheses // otherwise negations and other higher level operations will not work - String evaluation = StrSubstitutor.replace( + final String evaluation = StrSubstitutor.replace( "(" + expression + ")", ImmutableMap.of( "value", value.getLeft(), @@ -783,13 +816,13 @@ public Pair visitBetweenPredicate(BetweenPredicate node, Void c } private String formatBinaryExpression( - String operator, Expression left, Expression right, Void context + final String operator, final Expression left, final Expression right, final Void context ) { return "(" + process(left, context).getLeft() + " " + operator + " " + process(right, context).getLeft() + ")"; } - private String trimQuotes(String s) { + private String trimQuotes(final String s) { return s.substring(1, s.length() - 1); } } @@ -809,13 +842,13 @@ private static final class CastVisitor { private CastVisitor() { } - static Pair getCast(Pair expr, SqlType sqlType) { + static Pair getCast(final Pair expr, final SqlType sqlType) { if (!sqlType.supportsCast()) { throw new KsqlFunctionException( "Only casts to primitive types and decimal are supported: " + sqlType); } - SqlType rightSchema = expr.getRight(); + final SqlType rightSchema = expr.getRight(); if (sqlType.equals(rightSchema) || rightSchema == null) { return new Pair<>(expr.getLeft(), sqlType); } @@ -830,14 +863,14 @@ static Pair getCast(Pair expr, SqlType sqlType } private static Pair castString( - Pair expr, SqlType sqltype, SqlType returnType + final Pair expr, final SqlType sqltype, final SqlType returnType ) { - SqlType schema = expr.getRight(); - String exprStr; + final SqlType schema = expr.getRight(); + final String exprStr; if (schema.baseType() == SqlBaseType.DECIMAL) { - SqlDecimal decimal = (SqlDecimal) schema; - int precision = decimal.getPrecision(); - int scale = decimal.getScale(); + final SqlDecimal decimal = (SqlDecimal) schema; + final int precision = decimal.getPrecision(); + final int scale = decimal.getScale(); exprStr = String.format("DecimalUtil.format(%d, %d, %s)", precision, scale, expr.getLeft()); } else { exprStr = "String.valueOf(" + expr.getLeft() + ")"; @@ -846,15 +879,15 @@ private static Pair castString( } private static Pair castBoolean( - Pair expr, SqlType sqltype, SqlType returnType + final Pair expr, final SqlType sqltype, final SqlType returnType ) { return new Pair<>(getCastToBooleanString(expr.getRight(), expr.getLeft()), returnType); } private static Pair castInteger( - Pair expr, SqlType sqltype, SqlType returnType + final Pair expr, final SqlType sqltype, final SqlType returnType ) { - String exprStr = getCastString( + final String exprStr = getCastString( expr.getRight(), expr.getLeft(), "intValue()", @@ -864,9 +897,9 @@ private static Pair castInteger( } private static Pair castLong( - Pair expr, SqlType sqltype, SqlType returnType + final Pair expr, final SqlType sqltype, final SqlType returnType ) { - String exprStr = getCastString( + final String exprStr = getCastString( expr.getRight(), expr.getLeft(), "longValue()", @@ -876,9 +909,9 @@ private static Pair castLong( } private static Pair castDouble( - Pair expr, SqlType sqltype, SqlType returnType + final Pair expr, final SqlType sqltype, final SqlType returnType ) { - String exprStr = 
getCastString(
+      final String exprStr = getCastString(
          expr.getRight(),
          expr.getLeft(),
          "doubleValue()",
@@ -888,13 +921,13 @@ private static Pair<String, SqlType> castDouble(
    }

    private static Pair<String, SqlType> castDecimal(
-        Pair<String, SqlType> expr, SqlType sqltype, SqlType returnType
+        final Pair<String, SqlType> expr, final SqlType sqltype, final SqlType returnType
    ) {
      if (!(sqltype instanceof SqlDecimal)) {
        throw new KsqlException("Expected decimal type: " + sqltype);
      }

-      SqlDecimal sqlDecimal = (SqlDecimal) sqltype;
+      final SqlDecimal sqlDecimal = (SqlDecimal) sqltype;

      if (expr.getRight().baseType() == SqlBaseType.DECIMAL && expr.right.equals(sqlDecimal)) {
        return expr;
@@ -906,7 +939,7 @@ private static Pair<String, SqlType> castDecimal(
      );
    }

-    private static String getCastToBooleanString(SqlType schema, String exprStr) {
+    private static String getCastToBooleanString(final SqlType schema, final String exprStr) {
      if (schema.baseType() == SqlBaseType.STRING) {
        return "Boolean.parseBoolean(" + exprStr + ")";
      } else {
@@ -916,7 +949,10 @@ private static String getCastToBooleanString(SqlType schema, String exprStr) {
    }

    private static String getCastString(
-        SqlType schema, String exprStr, String javaTypeMethod, String javaStringParserMethod
+        final SqlType schema,
+        final String exprStr,
+        final String javaTypeMethod,
+        final String javaStringParserMethod
    ) {
      if (schema.baseType() == SqlBaseType.DECIMAL) {
        return "((" + exprStr + ")." + javaTypeMethod + ")";
@@ -940,8 +976,11 @@ private static String getCastString(
      }
    }

-    private static String getDecimalCastString(SqlType schema, String exprStr, SqlDecimal target) {
-
+    private static String getDecimalCastString(
+        final SqlType schema,
+        final String exprStr,
+        final SqlDecimal target
+    ) {
      switch (schema.baseType()) {
        case INTEGER:
        case BIGINT:
@@ -977,7 +1016,7 @@ private static final class CaseWhenProcessed {

    private final Pair<String, SqlType> thenProcessResult;

    private CaseWhenProcessed(
-        Pair<String, SqlType> whenProcessResult, Pair<String, SqlType> thenProcessResult
+        final Pair<String, SqlType> whenProcessResult, final Pair<String, SqlType> thenProcessResult
    ) {
      this.whenProcessResult = whenProcessResult;
      this.thenProcessResult = thenProcessResult;
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/helpers/ArrayAccess.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/helpers/ArrayAccess.java
index d667367d9a18..69ec2f8fb925 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/helpers/ArrayAccess.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/helpers/ArrayAccess.java
@@ -30,7 +30,7 @@ private ArrayAccess() {
  }

   * @param index the index, base-1 or negative (n from the end)
   * @return the {@code index}-th item in {@code list}
   */
-  public static <T> T arrayAccess(List<T> list, int index) {
+  public static <T> T arrayAccess(final List<T> list, final int index) {
    // subtract by 1 because SQL standard uses 1-based indexing; since
    // SQL standard does not support negative (end-based) indexing, we
    // will use -1 to represent the last element
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/helpers/SearchedCaseFunction.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/helpers/SearchedCaseFunction.java
index f76e0e7c1391..276281335c95 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/helpers/SearchedCaseFunction.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/helpers/SearchedCaseFunction.java
@@ -27,7 +27,7 @@ private SearchedCaseFunction() {
  }

  public static <T> T searchedCaseFunction(
-      List<LazyWhenClause<T>> whenClauses, Supplier<T> defaultValue
+      final List<LazyWhenClause<T>> whenClauses, final Supplier<T> defaultValue
  ) {
    if (whenClauses.isEmpty()) {
      throw new KsqlException("When clause cannot be empty.");
@@ -39,7 +39,10 @@ public static <T> T searchedCaseFunction(
        .orElseGet(defaultValue);
  }

-  public static <T> LazyWhenClause<T> whenClause(Supplier<Boolean> operand, Supplier<T> result) {
+  public static <T> LazyWhenClause<T> whenClause(
+      final Supplier<Boolean> operand,
+      final Supplier<T> result
+  ) {
    return new LazyWhenClause<>(operand, result);
  }

@@ -48,7 +51,7 @@ public static final class LazyWhenClause<T> {
    private final Supplier<Boolean> operand;
    private final Supplier<T> result;

-    private LazyWhenClause(Supplier<Boolean> operand, Supplier<T> result) {
+    private LazyWhenClause(final Supplier<Boolean> operand, final Supplier<T> result) {
      this.operand = Objects.requireNonNull(operand, "operand");
      this.result = Objects.requireNonNull(result, "result");
    }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryContext.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryContext.java
index b2f5cf886c7d..a6e71b35a668 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryContext.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryContext.java
@@ -35,7 +35,7 @@ private QueryContext() {
    this(Collections.emptyList());
  }

-  private QueryContext(List<String> context) {
+  private QueryContext(final List<String> context) {
    this.context = ImmutableList.copyOf(Objects.requireNonNull(context));
    for (final String frame : context) {
      if (frame.contains(DELIMITER)) {
@@ -45,7 +45,7 @@ private QueryContext(List<String> context) {
  }

  @JsonCreator
-  private QueryContext(String context) {
+  private QueryContext(final String context) {
    this(ImmutableList.copyOf(context.split(DELIMITER)));
  }

@@ -63,7 +63,7 @@ public String toString() {
    return formatContext();
  }

-  private QueryContext push(String ...context) {
+  private QueryContext push(final String ...context) {
    return new QueryContext(
        new ImmutableList.Builder<String>()
            .addAll(this.context)
@@ -80,15 +80,15 @@ public Stacker() {
      this.queryContext = new QueryContext();
    }

-    public static Stacker of(QueryContext queryContext) {
+    public static Stacker of(final QueryContext queryContext) {
      return new Stacker(queryContext);
    }

-    private Stacker(QueryContext queryContext) {
+    private Stacker(final QueryContext queryContext) {
      this.queryContext = Objects.requireNonNull(queryContext);
    }

-    public Stacker push(String... context) {
+    public Stacker push(final String... 
context) { return new Stacker(queryContext.push(context)); } @@ -97,7 +97,7 @@ public QueryContext getQueryContext() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { return o instanceof Stacker && Objects.equals(queryContext, ((Stacker) o).queryContext); } @@ -109,7 +109,7 @@ public int hashCode() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { return o instanceof QueryContext && Objects.equals(context, ((QueryContext) o).context); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryLoggerUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryLoggerUtil.java index dfb264c16a25..d08e7b02b769 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryLoggerUtil.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/context/QueryLoggerUtil.java @@ -23,7 +23,7 @@ public final class QueryLoggerUtil { private QueryLoggerUtil() { } - public static String queryLoggerName(QueryId queryId, QueryContext queryContext) { + public static String queryLoggerName(final QueryId queryId, final QueryContext queryContext) { return String.join( ".", new ImmutableList.Builder() diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateStreamCommand.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateStreamCommand.java index 9b08c8970370..9e880244f328 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateStreamCommand.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateStreamCommand.java @@ -28,12 +28,12 @@ @Immutable public class CreateStreamCommand extends CreateSourceCommand { public CreateStreamCommand( - @JsonProperty(value = "sourceName", required = true) SourceName sourceName, - @JsonProperty(value = "schema", required = true) LogicalSchema schema, - @JsonProperty(value = "keyField") Optional keyField, - @JsonProperty(value = "timestampColumn") + @JsonProperty(value = "sourceName", required = true) final SourceName sourceName, + @JsonProperty(value = "schema", required = true) final LogicalSchema schema, + @JsonProperty(value = "keyField") final Optional keyField, + @JsonProperty(value = "timestampColumn") final Optional timestampColumn, - @JsonProperty(value = "topicName", required = true) String topicName, + @JsonProperty(value = "topicName", required = true) final String topicName, @JsonProperty(value = "formats", required = true) final Formats formats, @JsonProperty(value = "windowInfo") final Optional windowInfo ) { @@ -49,7 +49,7 @@ public CreateStreamCommand( } @Override - public DdlCommandResult execute(Executor executor) { + public DdlCommandResult execute(final Executor executor) { return executor.executeCreateStream(this); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateTableCommand.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateTableCommand.java index 46f1b4fd87fa..69fa6a1b2e80 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateTableCommand.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/CreateTableCommand.java @@ -28,11 +28,11 @@ @Immutable public class CreateTableCommand extends CreateSourceCommand { public CreateTableCommand( - @JsonProperty(value = "sourceName", required = true) SourceName sourceName, - @JsonProperty(value = 
"schema", required = true) LogicalSchema schema, - @JsonProperty("keyField") Optional keyField, - @JsonProperty("timestampColumn") Optional timestampColumn, - @JsonProperty(value = "topicName", required = true) String topicName, + @JsonProperty(value = "sourceName", required = true) final SourceName sourceName, + @JsonProperty(value = "schema", required = true) final LogicalSchema schema, + @JsonProperty("keyField") final Optional keyField, + @JsonProperty("timestampColumn") final Optional timestampColumn, + @JsonProperty(value = "topicName", required = true) final String topicName, @JsonProperty(value = "formats", required = true) final Formats formats, @JsonProperty(value = "windowInfo") final Optional windowInfo ) { @@ -48,7 +48,7 @@ public CreateTableCommand( } @Override - public DdlCommandResult execute(Executor executor) { + public DdlCommandResult execute(final Executor executor) { return executor.executeCreateTable(this); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DdlCommandResult.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DdlCommandResult.java index 94778bf9d831..c55ff9cd8295 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DdlCommandResult.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DdlCommandResult.java @@ -24,7 +24,7 @@ public class DdlCommandResult { private final boolean success; private final String message; - public DdlCommandResult(boolean success, String message) { + public DdlCommandResult(final boolean success, final String message) { this.success = success; this.message = Objects.requireNonNull(message, "message"); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DropSourceCommand.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DropSourceCommand.java index b74dfc27ae15..d63e39274306 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DropSourceCommand.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DropSourceCommand.java @@ -25,12 +25,12 @@ public class DropSourceCommand implements DdlCommand { private final SourceName sourceName; public DropSourceCommand( - @JsonProperty(value = "sourceName", required = true) SourceName sourceName) { + @JsonProperty(value = "sourceName", required = true) final SourceName sourceName) { this.sourceName = Objects.requireNonNull(sourceName, "sourceName"); } @Override - public DdlCommandResult execute(Executor executor) { + public DdlCommandResult execute(final Executor executor) { return executor.executeDropSource(this); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DropTypeCommand.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DropTypeCommand.java index aff2aa5489ed..bfd1ea642e46 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DropTypeCommand.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/DropTypeCommand.java @@ -24,11 +24,11 @@ public class DropTypeCommand implements DdlCommand { private final String typeName; @Override - public DdlCommandResult execute(Executor executor) { + public DdlCommandResult execute(final Executor executor) { return executor.executeDropType(this); } - public DropTypeCommand(@JsonProperty(value = "typeName", required = true) String typeName) { + public DropTypeCommand(@JsonProperty(value = "typeName", 
required = true) final String typeName) { this.typeName = Objects.requireNonNull(typeName, "typeName"); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/KsqlTopic.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/KsqlTopic.java index 621e093ec85a..f819f314f8c7 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/KsqlTopic.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/ddl/commands/KsqlTopic.java @@ -28,7 +28,11 @@ public class KsqlTopic { private final KeyFormat keyFormat; private final ValueFormat valueFormat; - public KsqlTopic(String kafkaTopicName, KeyFormat keyFormat, ValueFormat valueFormat) { + public KsqlTopic( + final String kafkaTopicName, + final KeyFormat keyFormat, + final ValueFormat valueFormat + ) { this.kafkaTopicName = requireNonNull(kafkaTopicName, "kafkaTopicName"); this.keyFormat = requireNonNull(keyFormat, "keyFormat"); this.valueFormat = requireNonNull(valueFormat, "valueFormat"); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatter.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatter.java index 95b969512aa3..f4dd6da6e10b 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatter.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatter.java @@ -82,28 +82,28 @@ private Context(final FormatOptions formatOptions) { private static class Formatter implements ExpressionVisitor { @Override - public String visitType(Type node, Context context) { + public String visitType(final Type node, final Context context) { return node.getSqlType().toString(context.formatOptions); } @Override - public String visitBooleanLiteral(BooleanLiteral node, Context context) { + public String visitBooleanLiteral(final BooleanLiteral node, final Context context) { return String.valueOf(node.getValue()); } @Override - public String visitStringLiteral(StringLiteral node, Context context) { + public String visitStringLiteral(final StringLiteral node, final Context context) { return formatStringLiteral(node.getValue()); } @Override - public String visitSubscriptExpression(SubscriptExpression node, Context context) { + public String visitSubscriptExpression(final SubscriptExpression node, final Context context) { return process(node.getBase(), context) + "[" + process(node.getIndex(), context) + "]"; } @Override - public String visitStructExpression(CreateStructExpression exp, Context context) { + public String visitStructExpression(final CreateStructExpression exp, final Context context) { return exp .getFields() .stream() @@ -115,59 +115,62 @@ public String visitStructExpression(CreateStructExpression exp, Context context) } @Override - public String visitLongLiteral(LongLiteral node, Context context) { + public String visitLongLiteral(final LongLiteral node, final Context context) { return Long.toString(node.getValue()); } @Override - public String visitIntegerLiteral(IntegerLiteral node, Context context) { + public String visitIntegerLiteral(final IntegerLiteral node, final Context context) { return Integer.toString(node.getValue()); } @Override - public String visitDoubleLiteral(DoubleLiteral node, Context context) { + public String visitDoubleLiteral(final DoubleLiteral node, final Context context) { return Double.toString(node.getValue()); } @Override - public String 
visitDecimalLiteral(DecimalLiteral node, Context context) { + public String visitDecimalLiteral(final DecimalLiteral node, final Context context) { return "DECIMAL '" + node.getValue() + "'"; } @Override - public String visitTimeLiteral(TimeLiteral node, Context context) { + public String visitTimeLiteral(final TimeLiteral node, final Context context) { return "TIME '" + node.getValue() + "'"; } @Override - public String visitTimestampLiteral(TimestampLiteral node, Context context) { + public String visitTimestampLiteral(final TimestampLiteral node, final Context context) { return "TIMESTAMP '" + node.getValue() + "'"; } @Override - public String visitNullLiteral(NullLiteral node, Context context) { + public String visitNullLiteral(final NullLiteral node, final Context context) { return "null"; } @Override - public String visitColumnReference(ColumnReferenceExp node, Context context) { + public String visitColumnReference(final ColumnReferenceExp node, final Context context) { return node.getReference().toString(context.formatOptions); } @Override - public String visitDereferenceExpression(DereferenceExpression node, Context context) { - String baseString = process(node.getBase(), context); + public String visitDereferenceExpression( + final DereferenceExpression node, + final Context context + ) { + final String baseString = process(node.getBase(), context); return baseString + KsqlConstants.STRUCT_FIELD_REF + context.formatOptions.escape(node.getFieldName()); } - private static String formatName(Name name, Context context) { + private static String formatName(final Name name, final Context context) { return name.toString(context.formatOptions); } @Override - public String visitFunctionCall(FunctionCall node, Context context) { - StringBuilder builder = new StringBuilder(); + public String visitFunctionCall(final FunctionCall node, final Context context) { + final StringBuilder builder = new StringBuilder(); String arguments = joinExpressions(node.getArguments(), context); if (node.getArguments().isEmpty() && "COUNT".equals(node.getName().name())) { @@ -181,42 +184,51 @@ public String visitFunctionCall(FunctionCall node, Context context) { } @Override - public String visitLogicalBinaryExpression(LogicalBinaryExpression node, Context context) { + public String visitLogicalBinaryExpression( + final LogicalBinaryExpression node, + final Context context + ) { return formatBinaryExpression(node.getType().toString(), node.getLeft(), node.getRight(), context ); } @Override - public String visitNotExpression(NotExpression node, Context context) { + public String visitNotExpression(final NotExpression node, final Context context) { return "(NOT " + process(node.getValue(), context) + ")"; } @Override - public String visitComparisonExpression(ComparisonExpression node, Context context) { + public String visitComparisonExpression( + final ComparisonExpression node, + final Context context + ) { return formatBinaryExpression(node.getType().getValue(), node.getLeft(), node.getRight(), context ); } @Override - public String visitIsNullPredicate(IsNullPredicate node, Context context) { + public String visitIsNullPredicate(final IsNullPredicate node, final Context context) { return "(" + process(node.getValue(), context) + " IS NULL)"; } @Override - public String visitIsNotNullPredicate(IsNotNullPredicate node, Context context) { + public String visitIsNotNullPredicate(final IsNotNullPredicate node, final Context context) { return "(" + process(node.getValue(), context) + " IS NOT NULL)"; } @Override - 
public String visitArithmeticUnary(ArithmeticUnaryExpression node, Context context) { - String value = process(node.getValue(), context); + public String visitArithmeticUnary( + final ArithmeticUnaryExpression node, + final Context context + ) { + final String value = process(node.getValue(), context); switch (node.getSign()) { case MINUS: // this is to avoid turning a sequence of "-" into a comment (i.e., "-- comment") - String separator = value.startsWith("-") ? " " : ""; + final String separator = value.startsWith("-") ? " " : ""; return "-" + separator + value; case PLUS: return "+" + value; @@ -226,14 +238,17 @@ public String visitArithmeticUnary(ArithmeticUnaryExpression node, Context conte } @Override - public String visitArithmeticBinary(ArithmeticBinaryExpression node, Context context) { + public String visitArithmeticBinary( + final ArithmeticBinaryExpression node, + final Context context + ) { return formatBinaryExpression(node.getOperator().getSymbol(), node.getLeft(), node.getRight(), context ); } @Override - public String visitLikePredicate(LikePredicate node, Context context) { + public String visitLikePredicate(final LikePredicate node, final Context context) { return "(" + process(node.getValue(), context) + " LIKE " @@ -242,16 +257,19 @@ public String visitLikePredicate(LikePredicate node, Context context) { } @Override - public String visitCast(Cast node, Context context) { + public String visitCast(final Cast node, final Context context) { return "CAST" + "(" + process(node.getExpression(), context) + " AS " + node.getType() + ")"; } @Override - public String visitSearchedCaseExpression(SearchedCaseExpression node, Context context) { - ImmutableList.Builder parts = ImmutableList.builder(); + public String visitSearchedCaseExpression( + final SearchedCaseExpression node, + final Context context + ) { + final ImmutableList.Builder parts = ImmutableList.builder(); parts.add("CASE"); - for (WhenClause whenClause : node.getWhenClauses()) { + for (final WhenClause whenClause : node.getWhenClauses()) { parts.add(process(whenClause, context)); } @@ -264,13 +282,16 @@ public String visitSearchedCaseExpression(SearchedCaseExpression node, Context c } @Override - public String visitSimpleCaseExpression(SimpleCaseExpression node, Context context) { - ImmutableList.Builder parts = ImmutableList.builder(); + public String visitSimpleCaseExpression( + final SimpleCaseExpression node, + final Context context + ) { + final ImmutableList.Builder parts = ImmutableList.builder(); parts.add("CASE") .add(process(node.getOperand(), context)); - for (WhenClause whenClause : node.getWhenClauses()) { + for (final WhenClause whenClause : node.getWhenClauses()) { parts.add(process(whenClause, context)); } @@ -283,13 +304,13 @@ public String visitSimpleCaseExpression(SimpleCaseExpression node, Context conte } @Override - public String visitWhenClause(WhenClause node, Context context) { + public String visitWhenClause(final WhenClause node, final Context context) { return "WHEN " + process(node.getOperand(), context) + " THEN " + process( node.getResult(), context); } @Override - public String visitBetweenPredicate(BetweenPredicate node, Context context) { + public String visitBetweenPredicate(final BetweenPredicate node, final Context context) { return "(" + process(node.getValue(), context) + " BETWEEN " + process(node.getMin(), context) + " AND " + process( node.getMax(), @@ -299,7 +320,7 @@ public String visitBetweenPredicate(BetweenPredicate node, Context context) { } @Override - public String 
visitInPredicate(InPredicate node, Context context) { + public String visitInPredicate(final InPredicate node, final Context context) { return "(" + process(node.getValue(), context) + " IN " + process( node.getValueList(), context @@ -307,12 +328,12 @@ public String visitInPredicate(InPredicate node, Context context) { } @Override - public String visitInListExpression(InListExpression node, Context context) { + public String visitInListExpression(final InListExpression node, final Context context) { return "(" + joinExpressions(node.getValues(), context) + ")"; } private String formatBinaryExpression( - String operator, Expression left, Expression right, Context context + final String operator, final Expression left, final Expression right, final Context context ) { return '(' + process(left, context) + ' ' + operator + ' ' + process( right, @@ -321,13 +342,13 @@ private String formatBinaryExpression( + ')'; } - private String joinExpressions(List expressions, Context context) { + private String joinExpressions(final List expressions, final Context context) { return Joiner.on(", ").join(expressions.stream() .map((e) -> process(e, context)) .iterator()); } - private static String formatStringLiteral(String s) { + private static String formatStringLiteral(final String s) { return "'" + s.replace("'", "''") + "'"; } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ArithmeticBinaryExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ArithmeticBinaryExpression.java index b2c25cb22df3..e7ea93710d68 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ArithmeticBinaryExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ArithmeticBinaryExpression.java @@ -28,12 +28,19 @@ public class ArithmeticBinaryExpression extends Expression { private final Expression left; private final Expression right; - public ArithmeticBinaryExpression(Operator operator, Expression left, Expression right) { + public ArithmeticBinaryExpression( + final Operator operator, + final Expression left, + final Expression right + ) { this(Optional.empty(), operator, left, right); } public ArithmeticBinaryExpression( - Optional location, Operator operator, Expression left, Expression right + final Optional location, + final Operator operator, + final Expression left, + final Expression right ) { super(location); this.operator = Objects.requireNonNull(operator, "operator"); @@ -54,12 +61,12 @@ public Expression getRight() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitArithmeticBinary(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -67,7 +74,7 @@ public boolean equals(Object o) { return false; } - ArithmeticBinaryExpression that = (ArithmeticBinaryExpression) o; + final ArithmeticBinaryExpression that = (ArithmeticBinaryExpression) o; return (operator == that.operator) && Objects.equals(left, that.left) && Objects.equals(right, that.right); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ArithmeticUnaryExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ArithmeticUnaryExpression.java index dfdf7a29dd81..7b4acd9562a5 100644 --- 
a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ArithmeticUnaryExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ArithmeticUnaryExpression.java @@ -33,20 +33,24 @@ public enum Sign { private final Expression value; private final Sign sign; - public ArithmeticUnaryExpression(Optional location, Sign sign, Expression value) { + public ArithmeticUnaryExpression( + final Optional location, + final Sign sign, + final Expression value + ) { super(location); this.value = requireNonNull(value, "value"); this.sign = requireNonNull(sign, "sign"); } public static ArithmeticUnaryExpression positive( - Optional location, Expression value + final Optional location, final Expression value ) { return new ArithmeticUnaryExpression(location, Sign.PLUS, value); } public static ArithmeticUnaryExpression negative( - Optional location, Expression value + final Optional location, final Expression value ) { return new ArithmeticUnaryExpression(location, Sign.MINUS, value); } @@ -60,12 +64,12 @@ public Sign getSign() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitArithmeticUnary(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -73,7 +77,7 @@ public boolean equals(Object o) { return false; } - ArithmeticUnaryExpression that = (ArithmeticUnaryExpression) o; + final ArithmeticUnaryExpression that = (ArithmeticUnaryExpression) o; return Objects.equals(value, that.value) && (sign == that.sign); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/BetweenPredicate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/BetweenPredicate.java index d17af4d9a45a..fd1158ba20c6 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/BetweenPredicate.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/BetweenPredicate.java @@ -29,12 +29,15 @@ public class BetweenPredicate extends Expression { private final Expression min; private final Expression max; - public BetweenPredicate(Expression value, Expression min, Expression max) { + public BetweenPredicate(final Expression value, final Expression min, final Expression max) { this(Optional.empty(), value, min, max); } public BetweenPredicate( - Optional location, Expression value, Expression min, Expression max + final Optional location, + final Expression value, + final Expression min, + final Expression max ) { super(location); this.value = requireNonNull(value, "value"); @@ -55,12 +58,12 @@ public Expression getMax() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitBetweenPredicate(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -68,7 +71,7 @@ public boolean equals(Object o) { return false; } - BetweenPredicate that = (BetweenPredicate) o; + final BetweenPredicate that = (BetweenPredicate) o; return Objects.equals(value, that.value) && Objects.equals(min, that.min) && Objects.equals(max, that.max); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/BooleanLiteral.java 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/BooleanLiteral.java index 52eed3391301..ec268b4cbe0d 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/BooleanLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/BooleanLiteral.java @@ -29,11 +29,11 @@ public class BooleanLiteral extends Literal { private final boolean value; - public BooleanLiteral(String value) { + public BooleanLiteral(final String value) { this(Optional.empty(), value); } - public BooleanLiteral(Optional location, String value) { + public BooleanLiteral(final Optional location, final String value) { super(location); this.value = requireNonNull(value, "value") .toLowerCase(ENGLISH) @@ -49,7 +49,7 @@ public Boolean getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitBooleanLiteral(this, context); } @@ -59,14 +59,14 @@ public int hashCode() { } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } - BooleanLiteral other = (BooleanLiteral) obj; + final BooleanLiteral other = (BooleanLiteral) obj; return Objects.equals(this.value, other.value); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Cast.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Cast.java index 3846b57f54bf..1566f7c16829 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Cast.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Cast.java @@ -28,11 +28,11 @@ public final class Cast extends Expression { private final Expression expression; private final Type type; - public Cast(Expression expression, Type type) { + public Cast(final Expression expression, final Type type) { this(Optional.empty(), expression, type); } - public Cast(Optional location, Expression expression, Type type) { + public Cast(final Optional location, final Expression expression, final Type type) { super(location); this.expression = requireNonNull(expression, "expression"); this.type = requireNonNull(type, "type"); @@ -47,19 +47,19 @@ public Type getType() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitCast(this, context); } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } - Cast o = (Cast) obj; + final Cast o = (Cast) obj; return Objects.equals(this.expression, o.expression) && Objects.equals(this.type, o.type); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ColumnReferenceExp.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ColumnReferenceExp.java index cc4a6222b990..50dd9a367342 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ColumnReferenceExp.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ColumnReferenceExp.java @@ -31,11 +31,11 @@ public class ColumnReferenceExp extends Expression { private final ColumnRef name; - public ColumnReferenceExp(ColumnRef name) { + public ColumnReferenceExp(final 
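The BooleanLiteral hunk above lower-cases the raw token with an explicit ENGLISH locale, which keeps parsing independent of the JVM's default locale. The visible fragment cuts off before the comparison itself, so the sketch below assumes the lower-cased token is tested against "true", with anything else yielding false; the class name is a hypothetical stand-in:

    import java.util.Locale;

    public final class BooleanLiteralParsing {

      // Assumed behavior: lower-case the token with a fixed locale, then compare
      // it against "true"; every other token yields false rather than an error.
      static boolean parse(final String value) {
        return value.toLowerCase(Locale.ENGLISH).equals("true");
      }

      public static void main(final String[] args) {
        System.out.println(parse("TRUE"));  // prints: true
        System.out.println(parse("True"));  // prints: true
        System.out.println(parse("yes"));   // prints: false
      }
    }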
ColumnRef name) { this(Optional.empty(), name); } - public ColumnReferenceExp(Optional location, ColumnRef name) { + public ColumnReferenceExp(final Optional location, final ColumnRef name) { super(location); this.name = requireNonNull(name, "name"); } @@ -45,12 +45,12 @@ public ColumnRef getReference() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitColumnReference(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -58,7 +58,7 @@ public boolean equals(Object o) { return false; } - ColumnReferenceExp that = (ColumnReferenceExp) o; + final ColumnReferenceExp that = (ColumnReferenceExp) o; return Objects.equals(name, that.name); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ComparisonExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ComparisonExpression.java index d863851d7c81..e97a238d8735 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ComparisonExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ComparisonExpression.java @@ -36,7 +36,7 @@ public enum Type { private final String value; - Type(String value) { + Type(final String value) { this.value = value; } @@ -89,12 +89,15 @@ public Type negate() { private final Expression left; private final Expression right; - public ComparisonExpression(Type type, Expression left, Expression right) { + public ComparisonExpression(final Type type, final Expression left, final Expression right) { this(Optional.empty(), type, left, right); } public ComparisonExpression( - Optional location, Type type, Expression left, Expression right + final Optional location, + final Type type, + final Expression left, + final Expression right ) { super(location); this.type = requireNonNull(type, "type"); @@ -115,12 +118,12 @@ public Expression getRight() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitComparisonExpression(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -128,7 +131,7 @@ public boolean equals(Object o) { return false; } - ComparisonExpression that = (ComparisonExpression) o; + final ComparisonExpression that = (ComparisonExpression) o; return (type == that.type) && Objects.equals(left, that.left) && Objects.equals(right, that.right); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java index 3a07370f3ca5..ea485faabb74 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/CreateStructExpression.java @@ -42,7 +42,7 @@ public CreateStructExpression( } @Override - protected R accept(ExpressionVisitor visitor, C context) { + protected R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitStructExpression(this, context); } @@ -51,14 +51,14 @@ public ImmutableList getFields() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if 
(this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - CreateStructExpression that = (CreateStructExpression) o; + final CreateStructExpression that = (CreateStructExpression) o; return Objects.equals(fields, that.fields); } @@ -72,7 +72,7 @@ public static class Field { private final String name; private final Expression value; - public Field(String name, Expression value) { + public Field(final String name, final Expression value) { this.name = Objects.requireNonNull(name, "name"); this.value = Objects.requireNonNull(value, "value"); } @@ -86,14 +86,14 @@ public Expression getValue() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - Field field = (Field) o; + final Field field = (Field) o; return Objects.equals(name, field.name) && Objects.equals(value, field.value); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DecimalLiteral.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DecimalLiteral.java index a02a96165e7e..f0a60496cb19 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DecimalLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DecimalLiteral.java @@ -27,11 +27,11 @@ public class DecimalLiteral extends Literal { private final String value; - public DecimalLiteral(String value) { + public DecimalLiteral(final String value) { this(Optional.empty(), value); } - public DecimalLiteral(Optional location, String value) { + public DecimalLiteral(final Optional location, final String value) { super(location); this.value = requireNonNull(value, "value"); } @@ -42,19 +42,19 @@ public String getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitDecimalLiteral(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - DecimalLiteral that = (DecimalLiteral) o; + final DecimalLiteral that = (DecimalLiteral) o; return Objects.equals(value, that.value); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DereferenceExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DereferenceExpression.java index afd201a86a95..2052852bc527 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DereferenceExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DereferenceExpression.java @@ -36,14 +36,18 @@ public class DereferenceExpression extends Expression { * @param base the base expression that resolves to a struct. * @param fieldName the name of the field within the struct. 
*/ - public DereferenceExpression(Optional location, Expression base, String fieldName) { + public DereferenceExpression( + final Optional location, + final Expression base, + final String fieldName + ) { super(location); this.base = requireNonNull(base, "base"); this.fieldName = requireNonNull(fieldName, "fieldName"); } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitDereferenceExpression(this, context); } @@ -56,14 +60,14 @@ public String getFieldName() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - DereferenceExpression that = (DereferenceExpression) o; + final DereferenceExpression that = (DereferenceExpression) o; return Objects.equals(base, that.base) && Objects.equals(fieldName, that.fieldName); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DoubleLiteral.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DoubleLiteral.java index 0202f75b690e..1527dea7c74f 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DoubleLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/DoubleLiteral.java @@ -24,11 +24,11 @@ public class DoubleLiteral extends Literal { private final double value; - public DoubleLiteral(double value) { + public DoubleLiteral(final double value) { this(Optional.empty(), value); } - public DoubleLiteral(Optional location, double value) { + public DoubleLiteral(final Optional location, final double value) { super(location); this.value = value; } @@ -39,12 +39,12 @@ public Double getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitDoubleLiteral(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -52,7 +52,7 @@ public boolean equals(Object o) { return false; } - DoubleLiteral that = (DoubleLiteral) o; + final DoubleLiteral that = (DoubleLiteral) o; return Double.compare(that.value, value) == 0; } @@ -60,7 +60,7 @@ public boolean equals(Object o) { @SuppressWarnings("UnaryPlus") @Override public int hashCode() { - long temp = value != +0.0d ? Double.doubleToLongBits(value) : 0L; + final long temp = value != +0.0d ? 
Double.doubleToLongBits(value) : 0L; return (int) (temp ^ (temp >>> 32)); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Expression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Expression.java index b4155e9ff7ea..77ddb823be7f 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Expression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Expression.java @@ -28,7 +28,7 @@ @Immutable public abstract class Expression extends Node { - protected Expression(Optional location) { + protected Expression(final Optional location) { super(location); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ExpressionVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ExpressionVisitor.java index d60c7d8dac52..9a4ccd1ee137 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ExpressionVisitor.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/ExpressionVisitor.java @@ -19,7 +19,7 @@ public interface ExpressionVisitor { - default R process(Expression node, @Nullable C context) { + default R process(final Expression node, @Nullable final C context) { return node.accept(this, context); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/FunctionCall.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/FunctionCall.java index e1f626d328d4..474b0b4fc400 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/FunctionCall.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/FunctionCall.java @@ -31,12 +31,14 @@ public class FunctionCall extends Expression { private final FunctionName name; private final ImmutableList arguments; - public FunctionCall(FunctionName name, List arguments) { + public FunctionCall(final FunctionName name, final List arguments) { this(Optional.empty(), name, arguments); } public FunctionCall( - Optional location, FunctionName name, List arguments + final Optional location, + final FunctionName name, + final List arguments ) { super(location); this.name = requireNonNull(name, "name"); @@ -52,19 +54,19 @@ public List getArguments() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitFunctionCall(this, context); } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { if (this == obj) { return true; } if ((obj == null) || (getClass() != obj.getClass())) { return false; } - FunctionCall o = (FunctionCall) obj; + final FunctionCall o = (FunctionCall) obj; return Objects.equals(name, o.name) && Objects.equals(arguments, o.arguments); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/InListExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/InListExpression.java index cafb8311eb92..16a0d6081cb1 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/InListExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/InListExpression.java @@ -29,11 +29,11 @@ public class InListExpression extends Expression { private final ImmutableList values; - public InListExpression(List values) { + public InListExpression(final 
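The guarded conversion in DoubleLiteral.hashCode(), seen a few hunks above, deserves a note: 0.0d and -0.0d compare equal under ==, yet Double.doubleToLongBits gives them different bit patterns, so hashing the raw bits would let two values that == calls equal hash differently. Routing both zeros to 0L avoids that. A runnable check (hypothetical class name):

    public final class DoubleZeroHashing {

      // Mirrors the guarded conversion from the DoubleLiteral.hashCode() hunk.
      static long hashBits(final double value) {
        return value != +0.0d ? Double.doubleToLongBits(value) : 0L;
      }

      public static void main(final String[] args) {
        // The two zeros compare equal with ==, but have different bit patterns:
        System.out.println(0.0d == -0.0d);                   // true
        System.out.println(Double.doubleToLongBits(0.0d));   // 0
        System.out.println(Double.doubleToLongBits(-0.0d));  // -9223372036854775808
        // The guard routes both zeros to 0L, so they never hash differently:
        System.out.println(hashBits(0.0d) == hashBits(-0.0d)); // true
      }
    }

Note that the class's equals() uses Double.compare, which does distinguish the two zeros; unequal values sharing a hash code is still legal, so the guard is safe either way.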
List values) { this(Optional.empty(), values); } - public InListExpression(Optional location, List values) { + public InListExpression(final Optional location, final List values) { super(location); this.values = ImmutableList.copyOf(requireNonNull(values, "values")); @@ -47,12 +47,12 @@ public List getValues() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitInListExpression(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -60,7 +60,7 @@ public boolean equals(Object o) { return false; } - InListExpression that = (InListExpression) o; + final InListExpression that = (InListExpression) o; return Objects.equals(values, that.values); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/InPredicate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/InPredicate.java index a4a7748514f1..269198c0bc81 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/InPredicate.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/InPredicate.java @@ -28,12 +28,14 @@ public class InPredicate extends Expression { private final Expression value; private final InListExpression valueList; - public InPredicate(Expression value, InListExpression valueList) { + public InPredicate(final Expression value, final InListExpression valueList) { this(Optional.empty(), value, valueList); } public InPredicate( - Optional location, Expression value, InListExpression valueList + final Optional location, + final Expression value, + final InListExpression valueList ) { super(location); this.value = requireNonNull(value, "value"); @@ -49,12 +51,12 @@ public InListExpression getValueList() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitInPredicate(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -62,7 +64,7 @@ public boolean equals(Object o) { return false; } - InPredicate that = (InPredicate) o; + final InPredicate that = (InPredicate) o; return Objects.equals(value, that.value) && Objects.equals(valueList, that.valueList); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IntegerLiteral.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IntegerLiteral.java index 322ec27bba45..130ccf494e08 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IntegerLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IntegerLiteral.java @@ -25,11 +25,11 @@ public class IntegerLiteral extends Literal { private final int value; - public IntegerLiteral(int value) { + public IntegerLiteral(final int value) { this(Optional.empty(), value); } - public IntegerLiteral(Optional location, int value) { + public IntegerLiteral(final Optional location, final int value) { super(location); this.value = value; } @@ -40,12 +40,12 @@ public Integer getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitIntegerLiteral(this, context); } @Override - public boolean equals(Object o) { + 
public boolean equals(final Object o) { if (this == o) { return true; } @@ -53,7 +53,7 @@ public boolean equals(Object o) { return false; } - IntegerLiteral that = (IntegerLiteral) o; + final IntegerLiteral that = (IntegerLiteral) o; return value == that.value; } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IsNotNullPredicate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IsNotNullPredicate.java index b1e7facd78c6..53386a969fc8 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IsNotNullPredicate.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IsNotNullPredicate.java @@ -27,11 +27,11 @@ public class IsNotNullPredicate extends Expression { private final Expression value; - public IsNotNullPredicate(Expression value) { + public IsNotNullPredicate(final Expression value) { this(Optional.empty(), value); } - public IsNotNullPredicate(Optional location, Expression value) { + public IsNotNullPredicate(final Optional location, final Expression value) { super(location); this.value = requireNonNull(value, "value"); } @@ -41,12 +41,12 @@ public Expression getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitIsNotNullPredicate(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -54,7 +54,7 @@ public boolean equals(Object o) { return false; } - IsNotNullPredicate that = (IsNotNullPredicate) o; + final IsNotNullPredicate that = (IsNotNullPredicate) o; return Objects.equals(value, that.value); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IsNullPredicate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IsNullPredicate.java index 0da10f2acfc1..ed4857451e5e 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IsNullPredicate.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/IsNullPredicate.java @@ -27,11 +27,11 @@ public class IsNullPredicate extends Expression { private final Expression value; - public IsNullPredicate(Expression value) { + public IsNullPredicate(final Expression value) { this(Optional.empty(), value); } - public IsNullPredicate(Optional location, Expression value) { + public IsNullPredicate(final Optional location, final Expression value) { super(location); this.value = requireNonNull(value, "value"); } @@ -41,12 +41,12 @@ public Expression getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitIsNullPredicate(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -54,7 +54,7 @@ public boolean equals(Object o) { return false; } - IsNullPredicate that = (IsNullPredicate) o; + final IsNullPredicate that = (IsNullPredicate) o; return Objects.equals(value, that.value); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LikePredicate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LikePredicate.java index d8831dab745d..54a9bac72ee4 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LikePredicate.java +++ 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LikePredicate.java @@ -28,11 +28,15 @@ public class LikePredicate extends Expression { private final Expression value; private final Expression pattern; - public LikePredicate(Expression value, Expression pattern) { + public LikePredicate(final Expression value, final Expression pattern) { this(Optional.empty(), value, pattern); } - public LikePredicate(Optional location, Expression value, Expression pattern) { + public LikePredicate( + final Optional location, + final Expression value, + final Expression pattern + ) { super(location); this.value = requireNonNull(value, "value"); this.pattern = requireNonNull(pattern, "pattern"); @@ -47,12 +51,12 @@ public Expression getPattern() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitLikePredicate(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -60,7 +64,7 @@ public boolean equals(Object o) { return false; } - LikePredicate that = (LikePredicate) o; + final LikePredicate that = (LikePredicate) o; return Objects.equals(value, that.value) && Objects.equals(pattern, that.pattern); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Literal.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Literal.java index d4eaa0bd0ca9..cde50ec6ddf8 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Literal.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Literal.java @@ -22,7 +22,7 @@ @Immutable public abstract class Literal extends Expression { - protected Literal(Optional location) { + protected Literal(final Optional location) { super(location); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LogicalBinaryExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LogicalBinaryExpression.java index 82f232ca7889..46286f078580 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LogicalBinaryExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LogicalBinaryExpression.java @@ -33,12 +33,15 @@ public enum Type { private final Expression left; private final Expression right; - public LogicalBinaryExpression(Type type, Expression left, Expression right) { + public LogicalBinaryExpression(final Type type, final Expression left, final Expression right) { this(Optional.empty(), type, left, right); } public LogicalBinaryExpression( - Optional location, Type type, Expression left, Expression right + final Optional location, + final Type type, + final Expression left, + final Expression right ) { super(location); this.type = requireNonNull(type, "type"); @@ -59,20 +62,20 @@ public Expression getRight() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitLogicalBinaryExpression(this, context); } - public static LogicalBinaryExpression and(Expression left, Expression right) { + public static LogicalBinaryExpression and(final Expression left, final Expression right) { return new LogicalBinaryExpression(Optional.empty(), Type.AND, left, right); } - public static LogicalBinaryExpression or(Expression left, Expression 
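The LogicalBinaryExpression hunk above shows the two layers of convenience this refactor keeps intact: a short constructor that defaults the source location to Optional.empty(), and the and()/or() factories that additionally fix the operator. A compact model of that layering, with hypothetical stand-in types rather than the real NodeLocation and Expression classes:

    import java.util.Objects;
    import java.util.Optional;

    public final class FactorySketch {

      enum Op { AND, OR }

      static final class Logical {
        final Optional<String> location; // stand-in for the parser's NodeLocation
        final Op op;
        final String left;
        final String right;

        Logical(final Optional<String> location, final Op op,
            final String left, final String right) {
          this.location = Objects.requireNonNull(location, "location");
          this.op = Objects.requireNonNull(op, "op");
          this.left = Objects.requireNonNull(left, "left");
          this.right = Objects.requireNonNull(right, "right");
        }

        // Convenience constructor: defaults the location, like the short
        // constructors kept throughout these files.
        Logical(final Op op, final String left, final String right) {
          this(Optional.empty(), op, left, right);
        }

        // Named factory: additionally fixes the operator, like and()/or().
        static Logical and(final String left, final String right) {
          return new Logical(Op.AND, left, right);
        }
      }

      public static void main(final String[] args) {
        final Logical l = Logical.and("A > 1", "B < 2");
        System.out.println("(" + l.left + " " + l.op + " " + l.right + ")"); // (A > 1 AND B < 2)
      }
    }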
right) { + public static LogicalBinaryExpression or(final Expression left, final Expression right) { return new LogicalBinaryExpression(Optional.empty(), Type.OR, left, right); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -80,7 +83,7 @@ public boolean equals(Object o) { return false; } - LogicalBinaryExpression that = (LogicalBinaryExpression) o; + final LogicalBinaryExpression that = (LogicalBinaryExpression) o; return type == that.type && Objects.equals(left, that.left) && Objects.equals(right, that.right); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LongLiteral.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LongLiteral.java index 02bf42103538..342b0d300b0a 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LongLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/LongLiteral.java @@ -24,11 +24,11 @@ public class LongLiteral extends Literal { private final long value; - public LongLiteral(long value) { + public LongLiteral(final long value) { this(Optional.empty(), value); } - public LongLiteral(Optional location, long value) { + public LongLiteral(final Optional location, final long value) { super(location); this.value = value; } @@ -39,12 +39,12 @@ public Long getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitLongLiteral(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -52,7 +52,7 @@ public boolean equals(Object o) { return false; } - LongLiteral that = (LongLiteral) o; + final LongLiteral that = (LongLiteral) o; return value == that.value; } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/NotExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/NotExpression.java index cbcc24cd7848..9c0d4bcef811 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/NotExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/NotExpression.java @@ -27,11 +27,11 @@ public class NotExpression extends Expression { private final Expression value; - public NotExpression(Expression value) { + public NotExpression(final Expression value) { this(Optional.empty(), value); } - public NotExpression(Optional location, Expression value) { + public NotExpression(final Optional location, final Expression value) { super(location); this.value = requireNonNull(value, "value"); } @@ -41,12 +41,12 @@ public Expression getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitNotExpression(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -54,7 +54,7 @@ public boolean equals(Object o) { return false; } - NotExpression that = (NotExpression) o; + final NotExpression that = (NotExpression) o; return Objects.equals(value, that.value); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/NullLiteral.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/NullLiteral.java index 
86e503b69e76..571a5dddb12e 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/NullLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/NullLiteral.java @@ -26,12 +26,12 @@ public NullLiteral() { super(Optional.empty()); } - public NullLiteral(Optional location) { + public NullLiteral(final Optional location) { super(location); } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitNullLiteral(this, context); } @@ -41,7 +41,7 @@ public String getValue() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SearchedCaseExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SearchedCaseExpression.java index 17674c6e538c..3f3e1cfaaa4b 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SearchedCaseExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SearchedCaseExpression.java @@ -30,13 +30,16 @@ public class SearchedCaseExpression extends Expression { private final ImmutableList whenClauses; private final Optional defaultValue; - public SearchedCaseExpression(List whenClauses, Optional defaultValue) { + public SearchedCaseExpression( + final List whenClauses, + final Optional defaultValue + ) { this(Optional.empty(), whenClauses, defaultValue); } public SearchedCaseExpression( - Optional location, List whenClauses, - Optional defaultValue + final Optional location, final List whenClauses, + final Optional defaultValue ) { super(location); this.whenClauses = ImmutableList.copyOf(requireNonNull(whenClauses, "whenClauses")); @@ -52,12 +55,12 @@ public Optional getDefaultValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitSearchedCaseExpression(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -65,7 +68,7 @@ public boolean equals(Object o) { return false; } - SearchedCaseExpression that = (SearchedCaseExpression) o; + final SearchedCaseExpression that = (SearchedCaseExpression) o; return Objects.equals(whenClauses, that.whenClauses) && Objects.equals(defaultValue, that.defaultValue); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SimpleCaseExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SimpleCaseExpression.java index 338d769fa1a9..5840c9fa2c19 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SimpleCaseExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SimpleCaseExpression.java @@ -32,14 +32,18 @@ public class SimpleCaseExpression extends Expression { private final Optional defaultValue; public SimpleCaseExpression( - Expression operand, List whenClauses, Optional defaultValue + final Expression operand, + final List whenClauses, + final Optional defaultValue ) { this(Optional.empty(), operand, whenClauses, defaultValue); } public SimpleCaseExpression( - Optional location, Expression operand, List whenClauses, - Optional defaultValue + final Optional location, + final Expression operand, + 
final List whenClauses, + final Optional defaultValue ) { super(location); this.operand = requireNonNull(operand, "operand"); @@ -60,12 +64,12 @@ public Optional getDefaultValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitSimpleCaseExpression(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -73,7 +77,7 @@ public boolean equals(Object o) { return false; } - SimpleCaseExpression that = (SimpleCaseExpression) o; + final SimpleCaseExpression that = (SimpleCaseExpression) o; return Objects.equals(operand, that.operand) && Objects.equals(whenClauses, that.whenClauses) && Objects.equals(defaultValue, that.defaultValue); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/StringLiteral.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/StringLiteral.java index 6fb7adabc849..e041d6bec8fa 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/StringLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/StringLiteral.java @@ -25,11 +25,11 @@ public class StringLiteral extends Literal { private final String value; - public StringLiteral(String value) { + public StringLiteral(final String value) { this(Optional.empty(), value); } - public StringLiteral(Optional location, String value) { + public StringLiteral(final Optional location, final String value) { super(location); this.value = Objects.requireNonNull(value, "value"); } @@ -40,12 +40,12 @@ public String getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitStringLiteral(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -53,7 +53,7 @@ public boolean equals(Object o) { return false; } - StringLiteral that = (StringLiteral) o; + final StringLiteral that = (StringLiteral) o; return Objects.equals(value, that.value); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SubscriptExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SubscriptExpression.java index 04ba4f0f4ab8..011b580a1dcb 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SubscriptExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/SubscriptExpression.java @@ -28,18 +28,22 @@ public class SubscriptExpression extends Expression { private final Expression base; private final Expression index; - public SubscriptExpression(Expression base, Expression index) { + public SubscriptExpression(final Expression base, final Expression index) { this(Optional.empty(), base, index); } - public SubscriptExpression(Optional location, Expression base, Expression index) { + public SubscriptExpression( + final Optional location, + final Expression base, + final Expression index + ) { super(location); this.base = requireNonNull(base, "base"); this.index = requireNonNull(index, "index"); } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitSubscriptExpression(this, context); } @@ -52,7 +56,7 @@ public Expression 
getIndex() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -60,7 +64,7 @@ public boolean equals(Object o) { return false; } - SubscriptExpression that = (SubscriptExpression) o; + final SubscriptExpression that = (SubscriptExpression) o; return Objects.equals(this.base, that.base) && Objects.equals(this.index, that.index); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TimeLiteral.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TimeLiteral.java index a2bc3d118a5d..d631b2a7cea5 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TimeLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TimeLiteral.java @@ -27,11 +27,11 @@ public class TimeLiteral extends Literal { private final String value; - public TimeLiteral(String value) { + public TimeLiteral(final String value) { this(Optional.empty(), value); } - public TimeLiteral(Optional location, String value) { + public TimeLiteral(final Optional location, final String value) { super(location); this.value = requireNonNull(value, "value"); } @@ -42,12 +42,12 @@ public String getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitTimeLiteral(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -55,7 +55,7 @@ public boolean equals(Object o) { return false; } - TimeLiteral that = (TimeLiteral) o; + final TimeLiteral that = (TimeLiteral) o; return Objects.equals(value, that.value); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TimestampLiteral.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TimestampLiteral.java index 023e7f5fb857..90c24de72f6f 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TimestampLiteral.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TimestampLiteral.java @@ -27,11 +27,11 @@ public class TimestampLiteral extends Literal { private final String value; - public TimestampLiteral(String value) { + public TimestampLiteral(final String value) { this(Optional.empty(), value); } - public TimestampLiteral(Optional location, String value) { + public TimestampLiteral(final Optional location, final String value) { super(location); this.value = requireNonNull(value, "value"); } @@ -42,12 +42,12 @@ public String getValue() { } @Override - public R accept(ExpressionVisitor visitor, C context) { + public R accept(final ExpressionVisitor visitor, final C context) { return visitor.visitTimestampLiteral(this, context); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -55,7 +55,7 @@ public boolean equals(Object o) { return false; } - TimestampLiteral that = (TimestampLiteral) o; + final TimestampLiteral that = (TimestampLiteral) o; return Objects.equals(value, that.value); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TraversalExpressionVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TraversalExpressionVisitor.java index 08ceef9e0098..d9f9091f5ce3 100644 --- 
a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TraversalExpressionVisitor.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/TraversalExpressionVisitor.java @@ -24,19 +24,19 @@ public abstract class TraversalExpressionVisitor implements ExpressionVisitor { @Override - public Void visitCast(Cast node, C context) { + public Void visitCast(final Cast node, final C context) { return process(node.getExpression(), context); } @Override - public Void visitArithmeticBinary(ArithmeticBinaryExpression node, C context) { + public Void visitArithmeticBinary(final ArithmeticBinaryExpression node, final C context) { process(node.getLeft(), context); process(node.getRight(), context); return null; } @Override - public Void visitBetweenPredicate(BetweenPredicate node, C context) { + public Void visitBetweenPredicate(final BetweenPredicate node, final C context) { process(node.getValue(), context); process(node.getMin(), context); process(node.getMax(), context); @@ -44,57 +44,57 @@ public Void visitBetweenPredicate(BetweenPredicate node, C context) { } @Override - public Void visitSubscriptExpression(SubscriptExpression node, C context) { + public Void visitSubscriptExpression(final SubscriptExpression node, final C context) { process(node.getBase(), context); process(node.getIndex(), context); return null; } @Override - public Void visitStructExpression(CreateStructExpression node, C context) { + public Void visitStructExpression(final CreateStructExpression node, final C context) { node.getFields().forEach(field -> process(field.getValue(), context)); return null; } @Override - public Void visitComparisonExpression(ComparisonExpression node, C context) { + public Void visitComparisonExpression(final ComparisonExpression node, final C context) { process(node.getLeft(), context); process(node.getRight(), context); return null; } @Override - public Void visitWhenClause(WhenClause node, C context) { + public Void visitWhenClause(final WhenClause node, final C context) { process(node.getOperand(), context); process(node.getResult(), context); return null; } @Override - public Void visitInPredicate(InPredicate node, C context) { + public Void visitInPredicate(final InPredicate node, final C context) { process(node.getValue(), context); process(node.getValueList(), context); return null; } @Override - public Void visitFunctionCall(FunctionCall node, C context) { - for (Expression argument : node.getArguments()) { + public Void visitFunctionCall(final FunctionCall node, final C context) { + for (final Expression argument : node.getArguments()) { process(argument, context); } return null; } @Override - public Void visitDereferenceExpression(DereferenceExpression node, C context) { + public Void visitDereferenceExpression(final DereferenceExpression node, final C context) { process(node.getBase(), context); return null; } @Override - public Void visitSimpleCaseExpression(SimpleCaseExpression node, C context) { + public Void visitSimpleCaseExpression(final SimpleCaseExpression node, final C context) { process(node.getOperand(), context); - for (WhenClause clause : node.getWhenClauses()) { + for (final WhenClause clause : node.getWhenClauses()) { process(clause, context); } node.getDefaultValue() @@ -103,26 +103,26 @@ public Void visitSimpleCaseExpression(SimpleCaseExpression node, C context) { } @Override - public Void visitInListExpression(InListExpression node, C context) { - for (Expression value : node.getValues()) { + public Void 
visitInListExpression(final InListExpression node, final C context) { + for (final Expression value : node.getValues()) { process(value, context); } return null; } @Override - public Void visitArithmeticUnary(ArithmeticUnaryExpression node, C context) { + public Void visitArithmeticUnary(final ArithmeticUnaryExpression node, final C context) { return process(node.getValue(), context); } @Override - public Void visitNotExpression(NotExpression node, C context) { + public Void visitNotExpression(final NotExpression node, final C context) { return process(node.getValue(), context); } @Override - public Void visitSearchedCaseExpression(SearchedCaseExpression node, C context) { - for (WhenClause clause : node.getWhenClauses()) { + public Void visitSearchedCaseExpression(final SearchedCaseExpression node, final C context) { + for (final WhenClause clause : node.getWhenClauses()) { process(clause, context); } node.getDefaultValue() @@ -131,81 +131,81 @@ public Void visitSearchedCaseExpression(SearchedCaseExpression node, C context) } @Override - public Void visitLikePredicate(LikePredicate node, C context) { + public Void visitLikePredicate(final LikePredicate node, final C context) { process(node.getValue(), context); process(node.getPattern(), context); return null; } @Override - public Void visitIsNotNullPredicate(IsNotNullPredicate node, C context) { + public Void visitIsNotNullPredicate(final IsNotNullPredicate node, final C context) { return process(node.getValue(), context); } @Override - public Void visitIsNullPredicate(IsNullPredicate node, C context) { + public Void visitIsNullPredicate(final IsNullPredicate node, final C context) { return process(node.getValue(), context); } @Override - public Void visitLogicalBinaryExpression(LogicalBinaryExpression node, C context) { + public Void visitLogicalBinaryExpression(final LogicalBinaryExpression node, final C context) { process(node.getLeft(), context); process(node.getRight(), context); return null; } @Override - public Void visitDoubleLiteral(DoubleLiteral node, C context) { + public Void visitDoubleLiteral(final DoubleLiteral node, final C context) { return null; } @Override - public Void visitDecimalLiteral(DecimalLiteral node, C context) { + public Void visitDecimalLiteral(final DecimalLiteral node, final C context) { return null; } @Override - public Void visitTimeLiteral(TimeLiteral node, C context) { + public Void visitTimeLiteral(final TimeLiteral node, final C context) { return null; } @Override - public Void visitTimestampLiteral(TimestampLiteral node, C context) { + public Void visitTimestampLiteral(final TimestampLiteral node, final C context) { return null; } @Override - public Void visitStringLiteral(StringLiteral node, C context) { + public Void visitStringLiteral(final StringLiteral node, final C context) { return null; } @Override - public Void visitBooleanLiteral(BooleanLiteral node, C context) { + public Void visitBooleanLiteral(final BooleanLiteral node, final C context) { return null; } @Override - public Void visitColumnReference(ColumnReferenceExp node, C context) { + public Void visitColumnReference(final ColumnReferenceExp node, final C context) { return null; } @Override - public Void visitNullLiteral(NullLiteral node, C context) { + public Void visitNullLiteral(final NullLiteral node, final C context) { return null; } @Override - public Void visitLongLiteral(LongLiteral node, C context) { + public Void visitLongLiteral(final LongLiteral node, final C context) { return null; } @Override - public Void 
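TraversalExpressionVisitor, whose hunks appear above, encodes a common visitor idiom: the base class knows how to descend into every composite node and returns null at the leaves, so a subclass overrides only the node types it cares about and inherits all of the walking logic. A miniature, self-contained version of the idiom on a two-node toy AST (all names hypothetical; the real class also threads a context argument through each visit, which this sketch drops for brevity):

    import java.util.ArrayList;
    import java.util.List;

    public final class TraversalSketch {

      abstract static class Expr {
        abstract void accept(Visitor v);
      }

      static final class Column extends Expr {
        final String name;
        Column(final String name) { this.name = name; }
        void accept(final Visitor v) { v.visitColumn(this); }
      }

      static final class Plus extends Expr {
        final Expr left;
        final Expr right;
        Plus(final Expr left, final Expr right) { this.left = left; this.right = right; }
        void accept(final Visitor v) { v.visitPlus(this); }
      }

      abstract static class Visitor {
        void visitColumn(final Column c) { /* leaf: nothing to descend into */ }
        void visitPlus(final Plus p) {    // traversal lives in the base class
          p.left.accept(this);
          p.right.accept(this);
        }
      }

      public static void main(final String[] args) {
        final List<String> names = new ArrayList<>();
        final Visitor collector = new Visitor() {
          @Override
          void visitColumn(final Column c) { // override only the leaf we care about
            names.add(c.name);
          }
        };
        new Plus(new Column("A"), new Plus(new Column("B"), new Column("C"))).accept(collector);
        System.out.println(names); // prints: [A, B, C]
      }
    }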
visitIntegerLiteral(IntegerLiteral node, C context) {
+  public Void visitIntegerLiteral(final IntegerLiteral node, final C context) {
     return null;
   }
 
   @Override
-  public Void visitType(Type node, C context) {
+  public Void visitType(final Type node, final C context) {
     return null;
   }
 }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Type.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Type.java
index 73543fd9a4e7..9995086bf580 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Type.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/Type.java
@@ -28,11 +28,11 @@ public final class Type extends Expression {
 
   private final SqlType sqlType;
 
-  public Type(SqlType sqlType) {
+  public Type(final SqlType sqlType) {
     this(Optional.empty(), sqlType);
   }
 
-  public Type(Optional<NodeLocation> location, SqlType sqlType) {
+  public Type(final Optional<NodeLocation> location, final SqlType sqlType) {
     super(location);
     this.sqlType = requireNonNull(sqlType, "sqlType");
   }
@@ -42,19 +42,19 @@ public SqlType getSqlType() {
   }
 
   @Override
-  public <R, C> R accept(ExpressionVisitor<R, C> visitor, C context) {
+  public <R, C> R accept(final ExpressionVisitor<R, C> visitor, final C context) {
     return visitor.visitType(this, context);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    Type type = (Type) o;
+    final Type type = (Type) o;
     return Objects.equals(sqlType, type.sqlType);
   }
 
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/VisitParentExpressionVisitor.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/VisitParentExpressionVisitor.java
index f5c48c28d46a..2fcbef3e7d0e 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/VisitParentExpressionVisitor.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/VisitParentExpressionVisitor.java
@@ -34,165 +34,165 @@ protected VisitParentExpressionVisitor() {
     this(null);
   }
 
-  protected VisitParentExpressionVisitor(R defaultValue) {
+  protected VisitParentExpressionVisitor(final R defaultValue) {
     this.defaultValue = defaultValue;
   }
 
-  protected R visitExpression(Expression node, C context) {
+  protected R visitExpression(final Expression node, final C context) {
     return defaultValue;
   }
 
   @Override
-  public R visitArithmeticBinary(ArithmeticBinaryExpression node, C context) {
+  public R visitArithmeticBinary(final ArithmeticBinaryExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitBetweenPredicate(BetweenPredicate node, C context) {
+  public R visitBetweenPredicate(final BetweenPredicate node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitComparisonExpression(ComparisonExpression node, C context) {
+  public R visitComparisonExpression(final ComparisonExpression node, final C context) {
     return visitExpression(node, context);
   }
 
-  protected R visitLiteral(Literal node, C context) {
+  protected R visitLiteral(final Literal node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitDoubleLiteral(DoubleLiteral node, C context) {
+  public R visitDoubleLiteral(final DoubleLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitDecimalLiteral(DecimalLiteral node, C context) {
+  public R visitDecimalLiteral(final DecimalLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitTimeLiteral(TimeLiteral node, C context) {
+  public R visitTimeLiteral(final TimeLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitTimestampLiteral(TimestampLiteral node, C context) {
+  public R visitTimestampLiteral(final TimestampLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitWhenClause(WhenClause node, C context) {
+  public R visitWhenClause(final WhenClause node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitInPredicate(InPredicate node, C context) {
+  public R visitInPredicate(final InPredicate node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitFunctionCall(FunctionCall node, C context) {
+  public R visitFunctionCall(final FunctionCall node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitSimpleCaseExpression(SimpleCaseExpression node, C context) {
+  public R visitSimpleCaseExpression(final SimpleCaseExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitStringLiteral(StringLiteral node, C context) {
+  public R visitStringLiteral(final StringLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitBooleanLiteral(BooleanLiteral node, C context) {
+  public R visitBooleanLiteral(final BooleanLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitInListExpression(InListExpression node, C context) {
+  public R visitInListExpression(final InListExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitColumnReference(ColumnReferenceExp node, C context) {
+  public R visitColumnReference(final ColumnReferenceExp node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitDereferenceExpression(DereferenceExpression node, C context) {
+  public R visitDereferenceExpression(final DereferenceExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitNullLiteral(NullLiteral node, C context) {
+  public R visitNullLiteral(final NullLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitArithmeticUnary(ArithmeticUnaryExpression node, C context) {
+  public R visitArithmeticUnary(final ArithmeticUnaryExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitNotExpression(NotExpression node, C context) {
+  public R visitNotExpression(final NotExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitSearchedCaseExpression(SearchedCaseExpression node, C context) {
+  public R visitSearchedCaseExpression(final SearchedCaseExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitLikePredicate(LikePredicate node, C context) {
+  public R visitLikePredicate(final LikePredicate node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitIsNotNullPredicate(IsNotNullPredicate node, C context) {
+  public R visitIsNotNullPredicate(final IsNotNullPredicate node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitIsNullPredicate(IsNullPredicate node, C context) {
+  public R visitIsNullPredicate(final IsNullPredicate node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitSubscriptExpression(SubscriptExpression node, C context) {
+  public R visitSubscriptExpression(final SubscriptExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitStructExpression(CreateStructExpression node, C context) {
+  public R visitStructExpression(final CreateStructExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitLongLiteral(LongLiteral node, C context) {
+  public R visitLongLiteral(final LongLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitIntegerLiteral(IntegerLiteral node, C context) {
+  public R visitIntegerLiteral(final IntegerLiteral node, final C context) {
     return visitLiteral(node, context);
   }
 
   @Override
-  public R visitLogicalBinaryExpression(LogicalBinaryExpression node, C context) {
+  public R visitLogicalBinaryExpression(final LogicalBinaryExpression node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitType(Type node, C context) {
+  public R visitType(final Type node, final C context) {
     return visitExpression(node, context);
   }
 
   @Override
-  public R visitCast(Cast node, C context) {
+  public R visitCast(final Cast node, final C context) {
     return visitExpression(node, context);
   }
 }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/WhenClause.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/WhenClause.java
index a55aa16ee87d..a3257b44035c 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/WhenClause.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/expression/tree/WhenClause.java
@@ -28,11 +28,15 @@ public class WhenClause extends Expression {
   private final Expression operand;
   private final Expression result;
 
-  public WhenClause(Expression operand, Expression result) {
+  public WhenClause(final Expression operand, final Expression result) {
     this(Optional.empty(), operand, result);
   }
 
-  public WhenClause(Optional<NodeLocation> location, Expression operand, Expression result) {
+  public WhenClause(
+      final Optional<NodeLocation> location,
+      final Expression operand,
+      final Expression result
+  ) {
     super(location);
     this.operand = requireNonNull(operand, "operand");
     this.result = requireNonNull(result, "result");
@@ -47,12 +51,12 @@ public Expression getResult() {
   }
 
   @Override
-  public <R, C> R accept(ExpressionVisitor<R, C> visitor, C context) {
+  public <R, C> R accept(final ExpressionVisitor<R, C> visitor, final C context) {
     return visitor.visitWhenClause(this, context);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
@@ -60,7 +64,7 @@ public boolean equals(Object o) {
       return false;
     }
 
-    WhenClause that = (WhenClause) o;
+    final WhenClause that = (WhenClause) o;
     return Objects.equals(operand, that.operand)
         && Objects.equals(result, that.result);
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdafUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdafUtil.java
index 3ed45fb3256f..412f691baea2 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdafUtil.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdafUtil.java
@@ -39,17 +39,19 @@ private UdafUtil() {
   }
 
   public static KsqlAggregateFunction<?, ?, ?> resolveAggregateFunction(
-      FunctionRegistry functionRegistry, FunctionCall functionCall, LogicalSchema schema
+      final FunctionRegistry functionRegistry,
+      final FunctionCall functionCall,
+      final LogicalSchema schema
   ) {
     try {
-      ExpressionTypeManager expressionTypeManager =
+      final ExpressionTypeManager expressionTypeManager =
           new ExpressionTypeManager(schema, functionRegistry);
 
-      SqlType argumentType =
+      final SqlType argumentType =
           expressionTypeManager.getExpressionSqlType(functionCall.getArguments().get(0));
 
       // UDAFs only support one non-constant argument, and that argument must be a column reference
-      Expression arg = functionCall.getArguments().get(0);
+      final Expression arg = functionCall.getArguments().get(0);
 
       final Optional<Column> possibleValueColumn = arg instanceof ColumnReferenceExp
           ? schema.findValueColumn(((ColumnReferenceExp) arg).getReference())
           : Optional.empty();
 
       final Column valueColumn = possibleValueColumn
           .orElseThrow(() -> new KsqlException("Could not find column for expression: " + arg));
 
-      AggregateFunctionInitArguments aggregateFunctionInitArguments =
+      final AggregateFunctionInitArguments aggregateFunctionInitArguments =
           createAggregateFunctionInitArgs(valueColumn.index(), functionCall);
 
       return functionRegistry.getAggregateFunction(
           argumentType,
           aggregateFunctionInitArguments
       );
-    } catch (Exception e) {
+    } catch (final Exception e) {
       throw new KsqlException("Failed to create aggregate function: " + functionCall, e);
     }
   }
 
   public static AggregateFunctionInitArguments createAggregateFunctionInitArgs(
-      int udafIndex, FunctionCall functionCall
+      final int udafIndex, final FunctionCall functionCall
   ) {
     // args from index > 0 are all literals
-    List<Object> args = functionCall.getArguments()
+    final List<Object> args = functionCall.getArguments()
         .stream()
         .skip(1)
         .map(expr -> {
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdfUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdfUtil.java
index 5c80bae23d5d..0a5ea9a76ab2 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdfUtil.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdfUtil.java
@@ -61,7 +61,7 @@ private UdfUtil() {
    * @param argTypes Expected argument types
    */
   public static void ensureCorrectArgs(
-      FunctionName functionName, Object[] args, Class<?>... argTypes
+      final FunctionName functionName, final Object[] args, final Class<?>... argTypes
   ) {
     if (args == null) {
       throw new KsqlFunctionException("Null argument list for " + functionName.name() + ".");
     }
@@ -90,7 +90,7 @@ public static void ensureCorrectArgs(
     }
   }
 
-  public static ParamType getSchemaFromType(Type type) {
+  public static ParamType getSchemaFromType(final Type type) {
     ParamType schema;
     if (type instanceof TypeVariable) {
       schema = GenericType.of(((TypeVariable) type).getName());
@@ -104,22 +104,22 @@
     return schema;
   }
 
-  private static ParamType handleParameterizedType(Type type) {
+  private static ParamType handleParameterizedType(final Type type) {
     if (type instanceof ParameterizedType) {
-      ParameterizedType parameterizedType = (ParameterizedType) type;
+      final ParameterizedType parameterizedType = (ParameterizedType) type;
       if (parameterizedType.getRawType() == Map.class) {
-        ParamType keyType = getSchemaFromType(parameterizedType.getActualTypeArguments()[0]);
+        final ParamType keyType = getSchemaFromType(parameterizedType.getActualTypeArguments()[0]);
         if (!(keyType instanceof StringType)) {
           throw new KsqlException("Maps only support STRING keys, got: " + keyType);
         }
-        Type valueType = ((ParameterizedType) type).getActualTypeArguments()[1];
+        final Type valueType = ((ParameterizedType) type).getActualTypeArguments()[1];
         if (valueType instanceof TypeVariable) {
           return MapType.of(GenericType.of(((TypeVariable) valueType).getName()));
         }
         return MapType.of(getSchemaFromType(valueType));
       } else if (parameterizedType.getRawType() == List.class) {
-        Type valueType = ((ParameterizedType) type).getActualTypeArguments()[0];
+        final Type valueType = ((ParameterizedType) type).getActualTypeArguments()[0];
         if (valueType instanceof TypeVariable) {
           return ArrayType.of(GenericType.of(((TypeVariable) valueType).getName()));
         }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdtfUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdtfUtil.java
index 282f9cdec0cb..95120fc6dcef 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdtfUtil.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdtfUtil.java
@@ -32,15 +32,20 @@ private UdtfUtil() {
   }
 
   public static KsqlTableFunction resolveTableFunction(
-      FunctionRegistry functionRegistry, FunctionCall functionCall, LogicalSchema schema
+      final FunctionRegistry functionRegistry,
+      final FunctionCall functionCall,
+      final LogicalSchema schema
   ) {
-    ExpressionTypeManager expressionTypeManager =
+    final ExpressionTypeManager expressionTypeManager =
         new ExpressionTypeManager(schema, functionRegistry);
-    List<Expression> functionArgs = functionCall.getArguments();
-    List<SqlType> argTypes = functionArgs.isEmpty()
+
+    final List<Expression> functionArgs = functionCall.getArguments();
+
+    final List<SqlType> argTypes = functionArgs.isEmpty()
         ? ImmutableList.of(FunctionRegistry.DEFAULT_FUNCTION_ARG_SCHEMA)
         : functionArgs.stream().map(expressionTypeManager::getExpressionSqlType)
             .collect(Collectors.toList());
+
     return functionRegistry.getTableFunction(
         functionCall.getName().name(),
         argTypes
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafAggregator.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafAggregator.java
index 421429e0e84e..33850b5a4f06 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafAggregator.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafAggregator.java
@@ -35,8 +35,8 @@ public class KudafAggregator<K> implements UdafAggregator<K> {
   private final int columnCount;
 
   public KudafAggregator(
-      List<Integer> nonAggColumnIndexes,
-      List<KsqlAggregateFunction<Object, Object, Object>> functions) {
+      final List<Integer> nonAggColumnIndexes,
+      final List<KsqlAggregateFunction<Object, Object, Object>> functions) {
     this.nonAggColumnIndexes =
         ImmutableList.copyOf(requireNonNull(nonAggColumnIndexes, "nonAggColumnIndexes"));
     this.aggregateFunctions = ImmutableList.copyOf(requireNonNull(functions, "functions"));
@@ -48,11 +48,11 @@ public KudafAggregator(
   }
 
   @Override
-  public GenericRow apply(K k, GenericRow rowValue, GenericRow aggRowValue) {
+  public GenericRow apply(final K k, final GenericRow rowValue, final GenericRow aggRowValue) {
     // copy over group-by and aggregate parameter columns into the output row
-    int initialUdafIndex = nonAggColumnIndexes.size();
+    final int initialUdafIndex = nonAggColumnIndexes.size();
     for (int idx = 0; idx < initialUdafIndex; idx++) {
-      int idxInRow = nonAggColumnIndexes.get(idx);
+      final int idxInRow = nonAggColumnIndexes.get(idx);
       aggRowValue.getColumns().set(idx, rowValue.getColumns().get(idxInRow));
     }
 
@@ -60,10 +60,10 @@ public GenericRow apply(K k, GenericRow rowValue, GenericRow aggRowValue) {
     // the columns written by this statement do not overlap with those written by
     // the above statement.
     for (int idx = initialUdafIndex; idx < columnCount; idx++) {
-      KsqlAggregateFunction<Object, Object, Object> function = aggregateFunctionForColumn(idx);
-      Object currentValue = rowValue.getColumns().get(function.getArgIndexInValue());
-      Object currentAggregate = aggRowValue.getColumns().get(idx);
-      Object newAggregate = function.aggregate(currentValue, currentAggregate);
+      final KsqlAggregateFunction<Object, Object, Object> func = aggregateFunctionForColumn(idx);
+      final Object currentValue = rowValue.getColumns().get(func.getArgIndexInValue());
+      final Object currentAggregate = aggRowValue.getColumns().get(idx);
+      final Object newAggregate = func.aggregate(currentValue, currentAggregate);
       aggRowValue.getColumns().set(idx, newAggregate);
     }
 
@@ -78,11 +78,11 @@ public KsqlTransformer<K, GenericRow> getResultMapper() {
   }
 
   public Merger<Struct, GenericRow> getMerger() {
     return (key, aggRowOne, aggRowTwo) -> {
-      List<Object> columns = new ArrayList<>(columnCount);
+      final List<Object> columns = new ArrayList<>(columnCount);
 
-      int initialUdafIndex = nonAggColumnIndexes.size();
+      final int initialUdafIndex = nonAggColumnIndexes.size();
       for (int idx = 0; idx < initialUdafIndex; idx++) {
-        int idxInRow = nonAggColumnIndexes.get(idx);
+        final int idxInRow = nonAggColumnIndexes.get(idx);
         if (aggRowOne.getColumns().get(idxInRow) == null) {
           columns.add(idx, aggRowTwo.getColumns().get(idxInRow));
         } else {
@@ -91,10 +91,10 @@
       }
 
       for (int idx = initialUdafIndex; idx < columnCount; idx++) {
-        KsqlAggregateFunction<Object, Object, Object> function = aggregateFunctionForColumn(idx);
-        Object aggOne = aggRowOne.getColumns().get(idx);
-        Object aggTwo = aggRowTwo.getColumns().get(idx);
-        Object merged = function.getMerger().apply(key, aggOne, aggTwo);
+        final KsqlAggregateFunction<Object, Object, Object> func = aggregateFunctionForColumn(idx);
+        final Object aggOne = aggRowOne.getColumns().get(idx);
+        final Object aggTwo = aggRowTwo.getColumns().get(idx);
+        final Object merged = func.getMerger().apply(key, aggOne, aggTwo);
         columns.add(idx, merged);
       }
 
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafInitializer.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafInitializer.java
index c7fe0b9ad84b..b014ba7a18da 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafInitializer.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafInitializer.java
@@ -29,7 +29,7 @@ public class KudafInitializer implements Initializer<GenericRow> {
   private final List<Supplier<Object>> initialValueSuppliers;
   private final int nonAggValSize;
 
-  public KudafInitializer(int nonAggValSize, List<Supplier<Object>> initialValueSuppliers) {
+  public KudafInitializer(final int nonAggValSize, final List<Supplier<Object>> initialValueSuppliers) {
     this.nonAggValSize = nonAggValSize;
     this.initialValueSuppliers = ImmutableList.copyOf(
         Objects.requireNonNull(initialValueSuppliers, "initialValueSuppliers")
@@ -38,7 +38,7 @@ public GenericRow apply() {
-    List<Object> values = IntStream.range(0, nonAggValSize)
+    final List<Object> values = IntStream.range(0, nonAggValSize)
         .mapToObj(value -> null)
         .collect(Collectors.toList());
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafUndoAggregator.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafUndoAggregator.java
index 96b9acbeb022..a22c122901f5 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafUndoAggregator.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udaf/KudafUndoAggregator.java
@@ -29,8 +29,8 @@ public class KudafUndoAggregator implements Aggregator<Struct, GenericRow, GenericRow> {
   private final List<TableAggregationFunction<?, ?, ?>> aggregateFunctions;
 
   public KudafUndoAggregator(
-      List<Integer> nonAggColumnIndexes,
-      List<TableAggregationFunction<?, ?, ?>> aggregateFunctions
+      final List<Integer> nonAggColumnIndexes,
+      final List<TableAggregationFunction<?, ?, ?>> aggregateFunctions
   ) {
     Objects.requireNonNull(aggregateFunctions, "aggregateFunctions");
     this.aggregateFunctions = ImmutableList.copyOf(aggregateFunctions);
@@ -39,16 +39,16 @@ public KudafUndoAggregator(
 
   @SuppressWarnings("unchecked")
   @Override
-  public GenericRow apply(Struct k, GenericRow rowValue, GenericRow aggRowValue) {
+  public GenericRow apply(final Struct k, final GenericRow rowValue, final GenericRow aggRowValue) {
     int idx = 0;
     for (; idx < nonAggColumnIndexes.size(); idx++) {
       final int idxInRow = nonAggColumnIndexes.get(idx);
       aggRowValue.getColumns().set(idx, rowValue.getColumns().get(idxInRow));
     }
 
-    for (TableAggregationFunction function : aggregateFunctions) {
-      Object argument = rowValue.getColumns().get(function.getArgIndexInValue());
-      Object previous = aggRowValue.getColumns().get(idx);
+    for (final TableAggregationFunction function : aggregateFunctions) {
+      final Object argument = rowValue.getColumns().get(function.getArgIndexInValue());
+      final Object previous = aggRowValue.getColumns().get(idx);
       aggRowValue.getColumns().set(idx, function.undo(argument, previous));
       idx++;
     }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udtf/KudtfFlatMapper.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udtf/KudtfFlatMapper.java
index 756f4935c4fa..106545719de9 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udtf/KudtfFlatMapper.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udtf/KudtfFlatMapper.java
@@ -34,7 +34,7 @@ public class KudtfFlatMapper<K> implements KsqlTransformer<K, Iterable<GenericRow>> {
   private final ImmutableList<TableFunctionApplier> tableFunctionAppliers;
 
-  public KudtfFlatMapper(List<TableFunctionApplier> tableFunctionAppliers) {
+  public KudtfFlatMapper(final List<TableFunctionApplier> tableFunctionAppliers) {
     this.tableFunctionAppliers = ImmutableList.copyOf(requireNonNull(tableFunctionAppliers));
   }
 
@@ -55,16 +55,16 @@ public Iterable<GenericRow> transform(
     final List<Iterator<?>> iters = new ArrayList<>(tableFunctionAppliers.size());
     int maxLength = 0;
-    for (TableFunctionApplier applier : tableFunctionAppliers) {
-      List<?> exploded = applier.apply(value);
+    for (final TableFunctionApplier applier : tableFunctionAppliers) {
+      final List<?> exploded = applier.apply(value);
       iters.add(exploded.iterator());
       maxLength = Math.max(maxLength, exploded.size());
     }
 
     final List<GenericRow> rows = new ArrayList<>(maxLength);
     for (int i = 0; i < maxLength; i++) {
-      List<Object> newRow = new ArrayList<>(value.getColumns());
-      for (Iterator<?> iter : iters) {
+      final List<Object> newRow = new ArrayList<>(value.getColumns());
+      for (final Iterator<?> iter : iters) {
         if (iter.hasNext()) {
           newRow.add(iter.next());
         } else {
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udtf/TableFunctionApplier.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udtf/TableFunctionApplier.java
index 9722165585fc..fa56f49099c2 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udtf/TableFunctionApplier.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/udtf/TableFunctionApplier.java
@@ -33,16 +33,16 @@ public class TableFunctionApplier {
   private final ImmutableList<ExpressionMetadata> expressionMetadataList;
 
   public TableFunctionApplier(
-      KsqlTableFunction tableFunction, List<ExpressionMetadata> expressionMetadataList
+      final KsqlTableFunction tableFunction, final List<ExpressionMetadata> expressionMetadataList
   ) {
     this.tableFunction = requireNonNull(tableFunction);
     this.expressionMetadataList = ImmutableList.copyOf(requireNonNull(expressionMetadataList));
   }
 
   List<?> apply(final GenericRow row) {
-    Object[] args = new Object[expressionMetadataList.size()];
+    final Object[] args = new Object[expressionMetadataList.size()];
     int i = 0;
-    for (ExpressionMetadata expressionMetadata : expressionMetadataList) {
+    for (final ExpressionMetadata expressionMetadata : expressionMetadataList) {
       args[i++] = expressionMetadata.evaluate(row);
     }
     return tableFunction.apply(args);
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/materialization/MaterializationInfo.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/materialization/MaterializationInfo.java
index ab927b699b57..fc0e38fd79db 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/materialization/MaterializationInfo.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/materialization/MaterializationInfo.java
@@ -59,8 +59,10 @@ public List<TransformInfo> getTransforms() {
   }
 
   private MaterializationInfo(
-      String stateStoreName, LogicalSchema stateStoreSchema, List<TransformInfo> transforms,
-      LogicalSchema schema
+      final String stateStoreName,
+      final LogicalSchema stateStoreSchema,
+      final List<TransformInfo> transforms,
+      final LogicalSchema schema
   ) {
     this.stateStoreName = requireNonNull(stateStoreName, "stateStoreName");
     this.stateStoreSchema = requireNonNull(stateStoreSchema, "stateStoreSchema");
@@ -75,7 +77,7 @@ private MaterializationInfo(
    * @param stateStoreSchema the schema of the data in the state store
    * @return builder instance.
    */
-  public static Builder builder(String stateStoreName, LogicalSchema stateStoreSchema) {
+  public static Builder builder(final String stateStoreName, final LogicalSchema stateStoreSchema) {
     return new Builder(stateStoreName, stateStoreSchema);
   }
 
@@ -86,7 +88,7 @@ public static final class Builder {
     private final List<TransformInfo> transforms;
     private LogicalSchema schema;
 
-    private Builder(String stateStoreName, LogicalSchema stateStoreSchema) {
+    private Builder(final String stateStoreName, final LogicalSchema stateStoreSchema) {
      this.stateStoreName = requireNonNull(stateStoreName, "stateStoreName");
      this.stateStoreSchema = requireNonNull(stateStoreSchema, "stateStoreSchema");
      this.transforms = new LinkedList<>();
@@ -173,7 +175,7 @@ public KsqlTransformer<Object, GenericRow> getMapper(
       return mapperFactory.apply(loggerFactory.apply(queryContext));
     }
 
-    public <R> R visit(TransformVisitor<R> visitor) {
+    public <R> R visit(final TransformVisitor<R> visitor) {
       return visitor.visit(this);
     }
   }
@@ -198,7 +200,7 @@ public KsqlTransformer<Object, Optional<GenericRow>> getPredicate(
     }
 
     @Override
-    public <R> R visit(TransformVisitor<R> visitor) {
+    public <R> R visit(final TransformVisitor<R> visitor) {
       return visitor.visit(this);
     }
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java
index 42a2a37024f8..042e071314e5 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java
@@ -35,19 +35,19 @@ public abstract class AbstractStreamSource<S> implements ExecutionStep<S> {
   private final SourceName alias;
 
   public static LogicalSchemaWithMetaAndKeyFields getSchemaWithMetaAndKeyFields(
-      SourceName alias,
-      LogicalSchema schema) {
+      final SourceName alias,
+      final LogicalSchema schema) {
     return LogicalSchemaWithMetaAndKeyFields.fromOriginal(alias, schema);
   }
 
   @VisibleForTesting
   public AbstractStreamSource(
-      ExecutionStepPropertiesV1 properties,
-      String topicName,
-      Formats formats,
-      Optional<TimestampColumn> timestampColumn,
-      LogicalSchema sourceSchema,
-      SourceName alias) {
+      final ExecutionStepPropertiesV1 properties,
+      final String topicName,
+      final Formats formats,
+      final Optional<TimestampColumn> timestampColumn,
+      final LogicalSchema sourceSchema,
+      final SourceName alias) {
     this.properties = Objects.requireNonNull(properties, "properties");
     this.topicName = Objects.requireNonNull(topicName, "topicName");
     this.formats = Objects.requireNonNull(formats, "formats");
@@ -87,14 +87,14 @@ public SourceName getAlias() {
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    AbstractStreamSource<?> that = (AbstractStreamSource<?>) o;
+    final AbstractStreamSource<?> that = (AbstractStreamSource<?>) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(topicName, that.topicName)
         && Objects.equals(formats, that.formats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/KGroupedStreamHolder.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/KGroupedStreamHolder.java
index 59fbb1078c9c..5371f1438501 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/KGroupedStreamHolder.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/KGroupedStreamHolder.java
@@ -29,15 +29,15 @@
   private final LogicalSchema schema;
 
   private KGroupedStreamHolder(
-      KGroupedStream<Struct, GenericRow> groupedStream,
-      LogicalSchema schema) {
+      final KGroupedStream<Struct, GenericRow> groupedStream,
+      final LogicalSchema schema) {
     this.groupedStream = Objects.requireNonNull(groupedStream, "groupedStream");
     this.schema = Objects.requireNonNull(schema, "schema");
   }
 
   public static KGroupedStreamHolder of(
-      KGroupedStream<Struct, GenericRow> groupedStream,
-      LogicalSchema schema) {
+      final KGroupedStream<Struct, GenericRow> groupedStream,
+      final LogicalSchema schema) {
     return new KGroupedStreamHolder(groupedStream, schema);
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/KGroupedTableHolder.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/KGroupedTableHolder.java
index 3b3313728267..c438429714b0 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/KGroupedTableHolder.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/KGroupedTableHolder.java
@@ -29,15 +29,15 @@
   private final LogicalSchema schema;
 
   private KGroupedTableHolder(
-      KGroupedTable<Struct, GenericRow> groupedTable,
-      LogicalSchema schema) {
+      final KGroupedTable<Struct, GenericRow> groupedTable,
+      final LogicalSchema schema) {
     this.groupedTable = Objects.requireNonNull(groupedTable, "groupedTable");
     this.schema = Objects.requireNonNull(schema, "schema");
   }
 
   public static KGroupedTableHolder of(
-      KGroupedTable<Struct, GenericRow> groupedTable,
-      LogicalSchema schema
+      final KGroupedTable<Struct, GenericRow> groupedTable,
+      final LogicalSchema schema
   ) {
     return new KGroupedTableHolder(groupedTable, schema);
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFields.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFields.java
index 0612b13ac350..e87f41131d49 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFields.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFields.java
@@ -23,13 +23,13 @@ public final class LogicalSchemaWithMetaAndKeyFields {
   private final LogicalSchema schema;
 
-  private LogicalSchemaWithMetaAndKeyFields(LogicalSchema schema) {
+  private LogicalSchemaWithMetaAndKeyFields(final LogicalSchema schema) {
     this.schema = schema;
   }
 
   static LogicalSchemaWithMetaAndKeyFields fromOriginal(
-      SourceName alias,
-      LogicalSchema schema) {
+      final SourceName alias,
+      final LogicalSchema schema) {
     return new LogicalSchemaWithMetaAndKeyFields(
         schema.withAlias(alias).withMetaAndKeyColsInValue());
   }
@@ -43,14 +43,14 @@ public LogicalSchema getOriginalSchema() {
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    LogicalSchemaWithMetaAndKeyFields that = (LogicalSchemaWithMetaAndKeyFields) o;
+    final LogicalSchemaWithMetaAndKeyFields that = (LogicalSchemaWithMetaAndKeyFields) o;
     return Objects.equals(schema, that.schema);
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/SelectExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/SelectExpression.java
index c60fdc54ecd6..e9e7875e5560 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/SelectExpression.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/SelectExpression.java
@@ -32,12 +32,12 @@ public final class SelectExpression {
   private final ColumnName alias;
   private final Expression expression;
 
-  private SelectExpression(ColumnName alias, Expression expression) {
+  private SelectExpression(final ColumnName alias, final Expression expression) {
     this.alias = Objects.requireNonNull(alias, "alias");
     this.expression = Objects.requireNonNull(expression, "expression");
   }
 
-  public static SelectExpression of(ColumnName name, Expression expression) {
+  public static SelectExpression of(final ColumnName name, final Expression expression) {
     return new SelectExpression(name, expression);
   }
 
@@ -50,14 +50,14 @@ public Expression getExpression() {
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    SelectExpression that = (SelectExpression) o;
+    final SelectExpression that = (SelectExpression) o;
     return Objects.equals(alias, that.alias)
         && Objects.equals(expression, that.expression);
   }
@@ -72,7 +72,7 @@ public String toString() {
     return format(FormatOptions.none());
   }
 
-  public String format(FormatOptions formatOptions) {
+  public String format(final FormatOptions formatOptions) {
     return String.format(
         FMT,
         ExpressionFormatter.formatExpression(expression, formatOptions),
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamAggregate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamAggregate.java
index 582beac72ee7..3fe096798841 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamAggregate.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamAggregate.java
@@ -36,14 +36,14 @@ public class StreamAggregate implements ExecutionStep<KTableHolder<Struct>> {
   private final ImmutableList<FunctionCall> aggregationFunctions;
 
   public StreamAggregate(
-      @JsonProperty(value = "properties", required = true)
-      ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true)
+      @JsonProperty(value = "properties", required = true) final
+      ExecutionStepPropertiesV1 properties,
+      @JsonProperty(value = "source", required = true) final
       ExecutionStep<KGroupedStreamHolder> source,
-      @JsonProperty(value = "internalFormats", required = true) Formats internalFormats,
-      @JsonProperty(value = "nonAggregateColumns", required = true)
+      @JsonProperty(value = "internalFormats", required = true) final Formats internalFormats,
+      @JsonProperty(value = "nonAggregateColumns", required = true) final
       List<ColumnRef> nonAggregateColumns,
-      @JsonProperty(value = "aggregationFunctions", required = true)
+      @JsonProperty(value = "aggregationFunctions", required = true) final
       List<FunctionCall> aggregationFunctions) {
     this.properties = requireNonNull(properties, "properties");
     this.source = requireNonNull(source, "source");
@@ -82,19 +82,19 @@ public ExecutionStep<KGroupedStreamHolder> getSource() {
   }
 
   @Override
-  public KTableHolder<Struct> build(PlanBuilder builder) {
+  public KTableHolder<Struct> build(final PlanBuilder builder) {
     return builder.visitStreamAggregate(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamAggregate that = (StreamAggregate) o;
+    final StreamAggregate that = (StreamAggregate) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(internalFormats, that.internalFormats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFilter.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFilter.java
index 447d9c044ec3..c3964b2e0f71 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFilter.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFilter.java
@@ -30,11 +30,11 @@ public class StreamFilter<K> implements ExecutionStep<KStreamHolder<K>> {
   private final Expression filterExpression;
 
   public StreamFilter(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KStreamHolder<K>> source,
-      @JsonProperty(value = "filterExpression", required = true) Expression filterExpression
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KStreamHolder<K>> source,
+      @JsonProperty(value = "filterExpression", required = true) final Expression filterExpression
   ) {
-    this.properties = Objects.requireNonNull(properties, "properties");
+    this.properties = Objects.requireNonNull(props, "props");
     this.source = Objects.requireNonNull(source, "source");
     this.filterExpression = Objects.requireNonNull(filterExpression, "filterExpression");
   }
@@ -59,19 +59,19 @@ public ExecutionStep<KStreamHolder<K>> getSource() {
   }
 
   @Override
-  public KStreamHolder<K> build(PlanBuilder builder) {
+  public KStreamHolder<K> build(final PlanBuilder builder) {
     return builder.visitStreamFilter(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamFilter<?> that = (StreamFilter<?>) o;
+    final StreamFilter<?> that = (StreamFilter<?>) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(filterExpression, that.filterExpression);
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFlatMap.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFlatMap.java
index e8e90abcfaa8..5442791f69c6 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFlatMap.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFlatMap.java
@@ -31,12 +31,12 @@ public class StreamFlatMap<K> implements ExecutionStep<KStreamHolder<K>> {
   private final ImmutableList<FunctionCall> tableFunctions;
 
   public StreamFlatMap(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KStreamHolder<K>> source,
-      @JsonProperty(value = "tableFunctions", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KStreamHolder<K>> source,
+      @JsonProperty(value = "tableFunctions", required = true) final
       List<FunctionCall> tableFunctions
   ) {
-    this.properties = Objects.requireNonNull(properties, "properties");
+    this.properties = Objects.requireNonNull(props, "props");
     this.source = Objects.requireNonNull(source, "source");
     this.tableFunctions = ImmutableList.copyOf(Objects.requireNonNull(tableFunctions));
   }
@@ -53,7 +53,7 @@ public List<ExecutionStep<?>> getSources() {
   }
 
   @Override
-  public KStreamHolder<K> build(PlanBuilder builder) {
+  public KStreamHolder<K> build(final PlanBuilder builder) {
     return builder.visitFlatMap(this);
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamGroupBy.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamGroupBy.java
index 332eeb942e65..c41ef794d30a 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamGroupBy.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamGroupBy.java
@@ -34,12 +34,12 @@ public class StreamGroupBy<K> implements ExecutionStep<KGroupedStreamHolder> {
   private final ImmutableList<Expression> groupByExpressions;
 
   public StreamGroupBy(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KStreamHolder<K>> source,
-      @JsonProperty(value = "internalFormats", required = true) Formats internalFormats,
-      @JsonProperty(value = "groupByExpressions", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KStreamHolder<K>> source,
+      @JsonProperty(value = "internalFormats", required = true) final Formats internalFormats,
+      @JsonProperty(value = "groupByExpressions", required = true) final
       List<Expression> groupByExpressions) {
-    this.properties = requireNonNull(properties, "properties");
+    this.properties = requireNonNull(props, "props");
     this.internalFormats = requireNonNull(internalFormats, "internalFormats");
     this.source = requireNonNull(source, "source");
     this.groupByExpressions = ImmutableList
@@ -70,19 +70,19 @@ public ExecutionStep<KStreamHolder<K>> getSource() {
   }
 
   @Override
-  public KGroupedStreamHolder build(PlanBuilder planVisitor) {
+  public KGroupedStreamHolder build(final PlanBuilder planVisitor) {
     return planVisitor.visitStreamGroupBy(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamGroupBy<?> that = (StreamGroupBy<?>) o;
+    final StreamGroupBy<?> that = (StreamGroupBy<?>) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(internalFormats, that.internalFormats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamGroupByKey.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamGroupByKey.java
index cef844bc45c4..21c0adc99a24 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamGroupByKey.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamGroupByKey.java
@@ -29,11 +29,11 @@ public class StreamGroupByKey implements ExecutionStep<KGroupedStreamHolder> {
   private final Formats internalFormats;
 
   public StreamGroupByKey(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final
       ExecutionStep<KStreamHolder<Struct>> source,
-      @JsonProperty(value = "internalFormats", required = true) Formats internalFormats) {
-    this.properties = Objects.requireNonNull(properties, "properties");
+      @JsonProperty(value = "internalFormats", required = true) final Formats internalFormats) {
+    this.properties = Objects.requireNonNull(props, "props");
     this.internalFormats = Objects.requireNonNull(internalFormats, "internalFormats");
     this.source = Objects.requireNonNull(source, "source");
   }
@@ -58,19 +58,19 @@ public Formats getInternalFormats() {
   }
 
   @Override
-  public KGroupedStreamHolder build(PlanBuilder builder) {
+  public KGroupedStreamHolder build(final PlanBuilder builder) {
     return builder.visitStreamGroupByKey(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamGroupByKey that = (StreamGroupByKey) o;
+    final StreamGroupByKey that = (StreamGroupByKey) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(internalFormats, that.internalFormats);
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSelect.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSelect.java
index 2fad08a1b3c7..2362f2eae7d7 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSelect.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSelect.java
@@ -32,12 +32,12 @@ public class StreamSelect<K> implements ExecutionStep<KStreamHolder<K>> {
   private final ImmutableList<SelectExpression> selectExpressions;
 
   public StreamSelect(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KStreamHolder<K>> source,
-      @JsonProperty(value = "selectExpressions", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KStreamHolder<K>> source,
+      @JsonProperty(value = "selectExpressions", required = true) final
       List<SelectExpression> selectExpressions
   ) {
-    this.properties = requireNonNull(properties, "properties");
+    this.properties = requireNonNull(props, "props");
     this.source = requireNonNull(source, "source");
     this.selectExpressions = ImmutableList.copyOf(selectExpressions);
   }
@@ -62,19 +62,19 @@ public ExecutionStep<KStreamHolder<K>> getSource() {
   }
 
   @Override
-  public KStreamHolder<K> build(PlanBuilder builder) {
+  public KStreamHolder<K> build(final PlanBuilder builder) {
     return builder.visitStreamSelect(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamSelect<?> that = (StreamSelect<?>) o;
+    final StreamSelect<?> that = (StreamSelect<?>) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(selectExpressions, that.selectExpressions);
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSelectKey.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSelectKey.java
index 0a4c70711da2..e6683265dd92 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSelectKey.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSelectKey.java
@@ -33,12 +33,12 @@ public class StreamSelectKey implements ExecutionStep<KStreamHolder<Struct>> {
   private final ExecutionStep<KStreamHolder<?>> source;
 
   public StreamSelectKey(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final
       ExecutionStep<KStreamHolder<?>> source,
-      @JsonProperty(value = "keyExpression", required = true) Expression keyExpression
+      @JsonProperty(value = "keyExpression", required = true) final Expression keyExpression
   ) {
-    this.properties = Objects.requireNonNull(properties, "properties");
+    this.properties = Objects.requireNonNull(props, "props");
     this.source = Objects.requireNonNull(source, "source");
     this.keyExpression = Objects.requireNonNull(keyExpression, "keyExpression");
   }
@@ -63,19 +63,19 @@ public ExecutionStep<KStreamHolder<?>> getSource() {
   }
 
   @Override
-  public KStreamHolder<Struct> build(PlanBuilder builder) {
+  public KStreamHolder<Struct> build(final PlanBuilder builder) {
     return builder.visitStreamSelectKey(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamSelectKey that = (StreamSelectKey) o;
+    final StreamSelectKey that = (StreamSelectKey) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source);
   }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSink.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSink.java
index 0692d392b1b3..66382321d624 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSink.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSink.java
@@ -28,11 +28,11 @@ public class StreamSink<K> implements ExecutionStep<KStreamHolder<K>> {
   private final String topicName;
 
   public StreamSink(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KStreamHolder<K>> source,
-      @JsonProperty(value = "formats", required = true) Formats formats,
-      @JsonProperty(value = "topicName", required = true) String topicName) {
-    this.properties = Objects.requireNonNull(properties, "properties");
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KStreamHolder<K>> source,
+      @JsonProperty(value = "formats", required = true) final Formats formats,
+      @JsonProperty(value = "topicName", required = true) final String topicName) {
+    this.properties = Objects.requireNonNull(props, "props");
     this.formats = Objects.requireNonNull(formats, "formats");
     this.source = Objects.requireNonNull(source, "source");
     this.topicName = Objects.requireNonNull(topicName, "topicName");
@@ -61,19 +61,19 @@ public ExecutionStep<KStreamHolder<K>> getSource() {
   }
 
   @Override
-  public KStreamHolder<K> build(PlanBuilder builder) {
+  public KStreamHolder<K> build(final PlanBuilder builder) {
     return builder.visitStreamSink(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamSink<?> that = (StreamSink<?>) o;
+    final StreamSink<?> that = (StreamSink<?>) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(formats, that.formats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSource.java
index 4a1432cc83c6..3a4e8eb9be47 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSource.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSource.java
@@ -25,14 +25,14 @@
 @Immutable
 public final class StreamSource extends AbstractStreamSource<KStreamHolder<Struct>> {
   public StreamSource(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "topicName", required = true) String topicName,
-      @JsonProperty(value = "formats", required = true) Formats formats,
-      @JsonProperty("timestampColumn") Optional<TimestampColumn> timestampColumn,
-      @JsonProperty(value = "sourceSchema", required = true) LogicalSchema sourceSchema,
-      @JsonProperty(value = "alias", required = true) SourceName alias) {
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "topicName", required = true) final String topicName,
+      @JsonProperty(value = "formats", required = true) final Formats formats,
+      @JsonProperty("timestampColumn") final Optional<TimestampColumn> timestampColumn,
+      @JsonProperty(value = "sourceSchema", required = true) final LogicalSchema sourceSchema,
+      @JsonProperty(value = "alias", required = true) final SourceName alias) {
     super(
-        properties,
+        props,
         topicName,
         formats,
         timestampColumn,
@@ -42,7 +42,7 @@ public StreamSource(
   }
 
   @Override
-  public KStreamHolder<Struct> build(PlanBuilder builder) {
+  public KStreamHolder<Struct> build(final PlanBuilder builder) {
     return builder.visitStreamSource(this);
   }
 }
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamStreamJoin.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamStreamJoin.java
index 4f177071830b..d7a8292f26f5 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamStreamJoin.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamStreamJoin.java
@@ -35,21 +35,19 @@ public class StreamStreamJoin<K> implements ExecutionStep<KStreamHolder<K>> {
   private final Duration afterMillis;
 
   public StreamStreamJoin(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "joinType", required = true) JoinType joinType,
-      @JsonProperty(value = "leftInternalFormats", required = true) Formats leftInternalFormats,
-      @JsonProperty(value = "rightInternalFormats", required = true) Formats rightInternalFormats,
-      @JsonProperty(value = "leftSource", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "joinType", required = true) final JoinType joinType,
+      @JsonProperty(value = "leftInternalFormats", required = true) final Formats leftIntFormats,
+      @JsonProperty(value = "rightInternalFormats", required = true) final Formats rightIntFormats,
+      @JsonProperty(value = "leftSource", required = true) final
       ExecutionStep<KStreamHolder<K>> leftSource,
-      @JsonProperty(value = "rightSource", required = true)
+      @JsonProperty(value = "rightSource", required = true) final
       ExecutionStep<KStreamHolder<K>> rightSource,
-      @JsonProperty(value = "beforeMillis", required = true) Duration beforeMillis,
-      @JsonProperty(value = "afterMillis", required = true) Duration afterMillis) {
-    this.properties = Objects.requireNonNull(properties, "properties");
-    this.leftInternalFormats =
-        Objects.requireNonNull(leftInternalFormats, "leftInternalFormats");
-    this.rightInternalFormats =
-        Objects.requireNonNull(rightInternalFormats, "rightInternalFormats");
+      @JsonProperty(value = "beforeMillis", required = true) final Duration beforeMillis,
+      @JsonProperty(value = "afterMillis", required = true) final Duration afterMillis) {
+    this.properties = Objects.requireNonNull(props, "props");
+    this.leftInternalFormats = Objects.requireNonNull(leftIntFormats, "leftIntFormats");
+    this.rightInternalFormats = Objects.requireNonNull(rightIntFormats, "rightIntFormats");
     this.joinType = Objects.requireNonNull(joinType, "joinType");
     this.leftSource = Objects.requireNonNull(leftSource, "leftSource");
     this.rightSource = Objects.requireNonNull(rightSource, "rightSource");
@@ -97,20 +95,20 @@ public Duration getBeforeMillis() {
   }
 
   @Override
-  public KStreamHolder<K> build(PlanBuilder builder) {
+  public KStreamHolder<K> build(final PlanBuilder builder) {
     return builder.visitStreamStreamJoin(this);
   }
 
   // CHECKSTYLE_RULES.OFF: CyclomaticComplexity
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamStreamJoin<?> that = (StreamStreamJoin<?>) o;
+    final StreamStreamJoin<?> that = (StreamStreamJoin<?>) o;
     return Objects.equals(properties, that.properties)
         && joinType == that.joinType
         && Objects.equals(leftInternalFormats, that.leftInternalFormats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamTableJoin.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamTableJoin.java
index c284595f90f0..de6044bf8cd3 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamTableJoin.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamTableJoin.java
@@ -31,14 +31,14 @@ public class StreamTableJoin<K> implements ExecutionStep<KStreamHolder<K>> {
   private final ExecutionStep<KTableHolder<K>> rightSource;
 
   public StreamTableJoin(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "joinType", required = true) JoinType joinType,
-      @JsonProperty(value = "internalFormats", required = true) Formats internalFormats,
-      @JsonProperty(value = "leftSource", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "joinType", required = true) final JoinType joinType,
+      @JsonProperty(value = "internalFormats", required = true) final Formats internalFormats,
+      @JsonProperty(value = "leftSource", required = true) final
       ExecutionStep<KStreamHolder<K>> leftSource,
-      @JsonProperty(value = "rightSource", required = true)
+      @JsonProperty(value = "rightSource", required = true) final
       ExecutionStep<KTableHolder<K>> rightSource) {
-    this.properties = Objects.requireNonNull(properties, "properties");
+    this.properties = Objects.requireNonNull(props, "props");
     this.internalFormats = Objects.requireNonNull(internalFormats, "internalFormats");
     this.joinType = Objects.requireNonNull(joinType, "joinType");
     this.leftSource = Objects.requireNonNull(leftSource, "leftSource");
@@ -73,19 +73,19 @@ public JoinType getJoinType() {
   }
 
   @Override
-  public KStreamHolder<K> build(PlanBuilder builder) {
+  public KStreamHolder<K> build(final PlanBuilder builder) {
     return builder.visitStreamTableJoin(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamTableJoin<?> that = (StreamTableJoin<?>) o;
+    final StreamTableJoin<?> that = (StreamTableJoin<?>) o;
     return Objects.equals(properties, that.properties)
         && joinType == that.joinType
         && Objects.equals(internalFormats, that.internalFormats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamWindowedAggregate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamWindowedAggregate.java
index d888a800a41d..4407092d7626 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamWindowedAggregate.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamWindowedAggregate.java
@@ -41,17 +41,17 @@ public class StreamWindowedAggregate
   private final KsqlWindowExpression windowExpression;
 
   public StreamWindowedAggregate(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final
       ExecutionStep<KGroupedStreamHolder> source,
-      @JsonProperty(value = "internalFormats", required = true) Formats internalFormats,
-      @JsonProperty(value = "nonAggregateColumns", required = true)
+      @JsonProperty(value = "internalFormats", required = true) final Formats internalFormats,
+      @JsonProperty(value = "nonAggregateColumns", required = true) final
       List<ColumnRef> nonAggregateColumns,
-      @JsonProperty(value = "aggregationFunctions", required = true)
+      @JsonProperty(value = "aggregationFunctions", required = true) final
       List<FunctionCall> aggregationFunctions,
-      @JsonProperty(value = "windowExpression", required = true)
+      @JsonProperty(value = "windowExpression", required = true) final
       KsqlWindowExpression windowExpression) {
-    this.properties = requireNonNull(properties, "properties");
+    this.properties = requireNonNull(props, "props");
     this.source = requireNonNull(source, "source");
     this.internalFormats = requireNonNull(internalFormats, "internalFormats");
     this.nonAggregateColumns
@@ -93,19 +93,19 @@ public ExecutionStep<KGroupedStreamHolder> getSource() {
   }
 
   @Override
-  public KTableHolder<Windowed<Struct>> build(PlanBuilder builder) {
+  public KTableHolder<Windowed<Struct>> build(final PlanBuilder builder) {
     return builder.visitStreamWindowedAggregate(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    StreamWindowedAggregate that = (StreamWindowedAggregate) o;
+    final StreamWindowedAggregate that = (StreamWindowedAggregate) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(internalFormats, that.internalFormats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableAggregate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableAggregate.java
index 3e4841648e59..f8fed59399bd 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableAggregate.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableAggregate.java
@@ -36,15 +36,15 @@ public class TableAggregate implements ExecutionStep<KTableHolder<Struct>> {
   private final ImmutableList<ColumnRef> nonAggregateColumns;
 
   public TableAggregate(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final
       ExecutionStep<KGroupedTableHolder> source,
-      @JsonProperty(value = "internalFormats", required = true) Formats internalFormats,
-      @JsonProperty(value = "nonAggregateColumns", required = true)
+      @JsonProperty(value = "internalFormats", required = true) final Formats internalFormats,
+      @JsonProperty(value = "nonAggregateColumns", required = true) final
       List<ColumnRef> nonAggregateColumns,
-      @JsonProperty(value = "aggregationFunctions", required = true)
+      @JsonProperty(value = "aggregationFunctions", required = true) final
       List<FunctionCall> aggregationFunctions) {
-    this.properties = requireNonNull(properties, "properties");
+    this.properties = requireNonNull(props, "props");
     this.source = requireNonNull(source, "source");
     this.internalFormats = requireNonNull(internalFormats, "internalFormats");
     this.nonAggregateColumns
@@ -81,19 +81,19 @@ public ExecutionStep<KGroupedTableHolder> getSource() {
   }
 
   @Override
-  public KTableHolder<Struct> build(PlanBuilder builder) {
+  public KTableHolder<Struct> build(final PlanBuilder builder) {
     return builder.visitTableAggregate(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    TableAggregate that = (TableAggregate) o;
+    final TableAggregate that = (TableAggregate) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(internalFormats, that.internalFormats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableFilter.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableFilter.java
index 6c7443799d73..970a720aba82 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableFilter.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableFilter.java
@@ -30,11 +30,11 @@ public class TableFilter<K> implements ExecutionStep<KTableHolder<K>> {
   private final Expression filterExpression;
 
   public TableFilter(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KTableHolder<K>> source,
-      @JsonProperty(value = "filterExpression", required = true) Expression filterExpression
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KTableHolder<K>> source,
+      @JsonProperty(value = "filterExpression", required = true) final Expression filterExpression
   ) {
-    this.properties = Objects.requireNonNull(properties, "properties");
+    this.properties = Objects.requireNonNull(props, "props");
     this.source = Objects.requireNonNull(source, "source");
     this.filterExpression = Objects.requireNonNull(filterExpression, "filterExpression");
   }
@@ -59,19 +59,19 @@ public ExecutionStep<KTableHolder<K>> getSource() {
   }
 
   @Override
-  public KTableHolder<K> build(PlanBuilder builder) {
+  public KTableHolder<K> build(final PlanBuilder builder) {
     return builder.visitTableFilter(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    TableFilter<?> that = (TableFilter<?>) o;
+    final TableFilter<?> that = (TableFilter<?>) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(filterExpression, that.filterExpression);
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableGroupBy.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableGroupBy.java
index 0d8c94f4e3f4..ac14900bbfd3 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableGroupBy.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableGroupBy.java
@@ -33,13 +33,13 @@ public class TableGroupBy<K> implements ExecutionStep<KGroupedTableHolder> {
   private final ImmutableList<Expression> groupByExpressions;
 
   public TableGroupBy(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KTableHolder<K>> source,
-      @JsonProperty(value = "internalFormats", required = true) Formats internalFormats,
-      @JsonProperty(value = "groupByExpressions", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KTableHolder<K>> source,
+      @JsonProperty(value = "internalFormats", required = true) final Formats internalFormats,
+      @JsonProperty(value = "groupByExpressions", required = true) final
       List<Expression> groupByExpressions
   ) {
-    this.properties = requireNonNull(properties, "properties");
+    this.properties = requireNonNull(props, "props");
     this.source = requireNonNull(source, "source");
     this.internalFormats = requireNonNull(internalFormats, "internalFormats");
     this.groupByExpressions = ImmutableList
@@ -70,19 +70,19 @@ public ExecutionStep<KTableHolder<K>> getSource() {
   }
 
   @Override
-  public KGroupedTableHolder build(PlanBuilder builder) {
+  public KGroupedTableHolder build(final PlanBuilder builder) {
     return builder.visitTableGroupBy(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    TableGroupBy<?> that = (TableGroupBy<?>) o;
+    final TableGroupBy<?> that = (TableGroupBy<?>) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(internalFormats, that.internalFormats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSelect.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSelect.java
index e551d41ff5c2..152f6ff5eaa5 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSelect.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSelect.java
@@ -32,12 +32,12 @@ public class TableSelect<K> implements ExecutionStep<KTableHolder<K>> {
   private final ImmutableList<SelectExpression> selectExpressions;
 
   public TableSelect(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KTableHolder<K>> source,
-      @JsonProperty(value = "selectExpressions", required = true)
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KTableHolder<K>> source,
+      @JsonProperty(value = "selectExpressions", required = true) final
       List<SelectExpression> selectExpressions
   ) {
-    this.properties = requireNonNull(properties, "properties");
+    this.properties = requireNonNull(props, "props");
     this.source = requireNonNull(source, "source");
     this.selectExpressions = ImmutableList.copyOf(selectExpressions);
   }
@@ -62,19 +62,19 @@ public ExecutionStep<KTableHolder<K>> getSource() {
   }
 
   @Override
-  public KTableHolder<K> build(PlanBuilder builder) {
+  public KTableHolder<K> build(final PlanBuilder builder) {
     return builder.visitTableSelect(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    TableSelect<?> that = (TableSelect<?>) o;
+    final TableSelect<?> that = (TableSelect<?>) o;
     return Objects.equals(properties, that.properties)
         && Objects.equals(source, that.source)
         && Objects.equals(selectExpressions, that.selectExpressions);
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSink.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSink.java
index 9e7d1ce850a4..9456f1e3e12e 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSink.java
+++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSink.java
@@ -29,12 +29,12 @@ public class TableSink<K> implements ExecutionStep<KTableHolder<K>> {
   private final String topicName;
 
   public TableSink(
-      @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties,
-      @JsonProperty(value = "source", required = true) ExecutionStep<KTableHolder<K>> source,
-      @JsonProperty(value = "formats", required = true) Formats formats,
-      @JsonProperty(value = "topicName", required = true) String topicName
+      @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props,
+      @JsonProperty(value = "source", required = true) final ExecutionStep<KTableHolder<K>> source,
+      @JsonProperty(value = "formats", required = true) final Formats formats,
+      @JsonProperty(value = "topicName", required = true) final String topicName
   ) {
-    this.properties = Objects.requireNonNull(properties, "properties");
+    this.properties = Objects.requireNonNull(props, "props");
     this.source = Objects.requireNonNull(source, "source");
     this.formats = Objects.requireNonNull(formats, "formats");
     this.topicName = Objects.requireNonNull(topicName, "topicName");
   }
@@ -64,19 +64,19 @@ public ExecutionStep<KTableHolder<K>> getSource() {
   }
 
   @Override
-  public KTableHolder<K> build(PlanBuilder builder) {
+  public KTableHolder<K> build(final PlanBuilder builder) {
     return builder.visitTableSink(this);
   }
 
   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    TableSink<?> tableSink = (TableSink<?>) o;
+    final TableSink<?> tableSink = (TableSink<?>) o;
     return Objects.equals(properties, tableSink.properties)
         && Objects.equals(source, tableSink.source)
         && Objects.equals(formats, tableSink.formats)
diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSource.java
index ec56b26518ed..213cbbcfface 100644
--- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSource.java
+++
b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSource.java @@ -46,7 +46,7 @@ public TableSource( } @Override - public KTableHolder build(PlanBuilder builder) { + public KTableHolder build(final PlanBuilder builder) { return builder.visitTableSource(this); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableTableJoin.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableTableJoin.java index 9ce53ac071ac..b366d81cd655 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableTableJoin.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableTableJoin.java @@ -29,13 +29,13 @@ public class TableTableJoin implements ExecutionStep> { private final ExecutionStep> rightSource; public TableTableJoin( - @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties, - @JsonProperty(value = "joinType", required = true) JoinType joinType, - @JsonProperty(value = "leftSource", required = true) + @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props, + @JsonProperty(value = "joinType", required = true) final JoinType joinType, + @JsonProperty(value = "leftSource", required = true) final ExecutionStep> leftSource, - @JsonProperty(value = "rightSource", required = true) + @JsonProperty(value = "rightSource", required = true) final ExecutionStep> rightSource) { - this.properties = Objects.requireNonNull(properties, "properties"); + this.properties = Objects.requireNonNull(props, "props"); this.joinType = Objects.requireNonNull(joinType, "joinType"); this.leftSource = Objects.requireNonNull(leftSource, "leftSource"); this.rightSource = Objects.requireNonNull(rightSource, "rightSource"); @@ -65,19 +65,19 @@ public JoinType getJoinType() { } @Override - public KTableHolder build(PlanBuilder builder) { + public KTableHolder build(final PlanBuilder builder) { return builder.visitTableTableJoin(this); } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - TableTableJoin that = (TableTableJoin) o; + final TableTableJoin that = (TableTableJoin) o; return Objects.equals(properties, that.properties) && joinType == that.joinType && Objects.equals(leftSource, that.leftSource) diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedStreamSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedStreamSource.java index 8dda34f06cc7..33040c5b23d8 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedStreamSource.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedStreamSource.java @@ -33,15 +33,15 @@ public final class WindowedStreamSource private final WindowInfo windowInfo; public WindowedStreamSource( - @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties, - @JsonProperty(value = "topicName", required = true) String topicName, - @JsonProperty(value = "formats", required = true) Formats formats, - @JsonProperty(value = "windowInfo", required = true) WindowInfo windowInfo, - @JsonProperty("timestampColumn") Optional timestampColumn, - @JsonProperty(value = "sourceSchema", required = true) LogicalSchema sourceSchema, - @JsonProperty(value = "alias", required = true) SourceName alias) { + @JsonProperty(value = "properties", required = true) final 
ExecutionStepPropertiesV1 props, + @JsonProperty(value = "topicName", required = true) final String topicName, + @JsonProperty(value = "formats", required = true) final Formats formats, + @JsonProperty(value = "windowInfo", required = true) final WindowInfo windowInfo, + @JsonProperty("timestampColumn") final Optional timestampColumn, + @JsonProperty(value = "sourceSchema", required = true) final LogicalSchema sourceSchema, + @JsonProperty(value = "alias", required = true) final SourceName alias) { super( - properties, + props, topicName, formats, timestampColumn, @@ -56,7 +56,7 @@ public WindowInfo getWindowInfo() { } @Override - public KStreamHolder> build(PlanBuilder builder) { + public KStreamHolder> build(final PlanBuilder builder) { return builder.visitWindowedStreamSource(this); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedTableSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedTableSource.java index 1aceee1ded18..b2d123036ebf 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedTableSource.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedTableSource.java @@ -31,16 +31,16 @@ public final class WindowedTableSource private final WindowInfo windowInfo; public WindowedTableSource( - @JsonProperty(value = "properties", required = true) ExecutionStepPropertiesV1 properties, - @JsonProperty(value = "topicName", required = true) String topicName, - @JsonProperty(value = "formats", required = true) Formats formats, - @JsonProperty(value = "windowInfo", required = true) WindowInfo windowInfo, + @JsonProperty(value = "properties", required = true) final ExecutionStepPropertiesV1 props, + @JsonProperty(value = "topicName", required = true) final String topicName, + @JsonProperty(value = "formats", required = true) final Formats formats, + @JsonProperty(value = "windowInfo", required = true) final WindowInfo windowInfo, @JsonProperty("timestampColumn") final Optional timestampColumn, - @JsonProperty(value = "sourceSchema", required = true) LogicalSchema sourceSchema, - @JsonProperty(value = "alias", required = true) SourceName alias + @JsonProperty(value = "sourceSchema", required = true) final LogicalSchema sourceSchema, + @JsonProperty(value = "alias", required = true) final SourceName alias ) { super( - properties, + props, topicName, formats, timestampColumn, @@ -55,7 +55,7 @@ public WindowInfo getWindowInfo() { } @Override - public KTableHolder> build(PlanBuilder builder) { + public KTableHolder> build(final PlanBuilder builder) { return builder.visitWindowedTableSource(this); } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/timestamp/TimestampColumn.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/timestamp/TimestampColumn.java index bda570bde288..db03607f03a3 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/timestamp/TimestampColumn.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/timestamp/TimestampColumn.java @@ -29,8 +29,8 @@ public final class TimestampColumn { @JsonCreator public TimestampColumn( - @JsonProperty(value = "column", required = true) ColumnRef column, - @JsonProperty("format") Optional format + @JsonProperty(value = "column", required = true) final ColumnRef column, + @JsonProperty("format") final Optional format ) { this.column = Objects.requireNonNull(column, "column"); this.format = Objects.requireNonNull(format, "format"); @@ -45,14 +45,14 @@ public 
Optional getFormat() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - TimestampColumn that = (TimestampColumn) o; + final TimestampColumn that = (TimestampColumn) o; return Objects.equals(column, that.column) && Objects.equals(format, that.format); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicate.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicate.java index 47fdcc1f38cc..f5f295ed2da1 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicate.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicate.java @@ -41,14 +41,14 @@ public final class SqlPredicate { private final CodeGenSpec spec; public SqlPredicate( - Expression filterExpression, - LogicalSchema schema, - KsqlConfig ksqlConfig, - FunctionRegistry functionRegistry + final Expression filterExpression, + final LogicalSchema schema, + final KsqlConfig ksqlConfig, + final FunctionRegistry functionRegistry ) { this.filterExpression = requireNonNull(filterExpression, "filterExpression"); - CodeGenRunner codeGenRunner = new CodeGenRunner(schema, ksqlConfig, functionRegistry); + final CodeGenRunner codeGenRunner = new CodeGenRunner(schema, ksqlConfig, functionRegistry); spec = codeGenRunner.getCodeGenSpec(this.filterExpression); try { @@ -58,14 +58,14 @@ public SqlPredicate( ee.setExpressionType(boolean.class); - String expressionStr = SqlToJavaVisitor.of( + final String expressionStr = SqlToJavaVisitor.of( schema, functionRegistry, spec ).process(this.filterExpression); ee.cook(expressionStr); - } catch (Exception e) { + } catch (final Exception e) { throw new KsqlException( "Failed to generate code for SqlPredicate." + " filterExpression: " + filterExpression @@ -104,14 +104,14 @@ public Optional transform( } try { - Object[] values = new Object[spec.arguments().size()]; + final Object[] values = new Object[spec.arguments().size()]; spec.resolve(value, values); final boolean result = (Boolean) ee.evaluate(values); return result ? 
Optional.of(value) : Optional.empty(); - } catch (Exception e) { + } catch (final Exception e) { logProcessingError(processingLogger, e, value); return Optional.empty(); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/transform/window/WindowSelectMapper.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/transform/window/WindowSelectMapper.java index fc4b39a0a992..c2b57109ba0b 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/transform/window/WindowSelectMapper.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/transform/window/WindowSelectMapper.java @@ -42,10 +42,13 @@ public final class WindowSelectMapper { private final Map windowSelects; - public WindowSelectMapper(int initialUdafIndex, List> functions) { - Builder selectsBuilder = new Builder<>(); + public WindowSelectMapper( + final int initialUdafIndex, + final List> functions + ) { + final Builder selectsBuilder = new Builder<>(); for (int i = 0; i < functions.size(); i++) { - String name = functions.get(i).name().name().toUpperCase(); + final String name = functions.get(i).name().name().toUpperCase(); if (WINDOW_FUNCTION_NAMES.containsKey(name)) { selectsBuilder.put(initialUdafIndex + i, WINDOW_FUNCTION_NAMES.get(name)); } @@ -87,7 +90,7 @@ private enum Type { private final Function mapper; - Type(Function mapper) { + Type(final Function mapper) { this.mapper = mapper; } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ComparisonUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ComparisonUtil.java index 207f040c04ea..a1ecc4e3fe2b 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ComparisonUtil.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ComparisonUtil.java @@ -27,7 +27,7 @@ private ComparisonUtil() { } static boolean isValidComparison( - SqlType left, ComparisonExpression.Type operator, SqlType right + final SqlType left, final ComparisonExpression.Type operator, final SqlType right ) { if (left.baseType().isNumber() && right.baseType().isNumber()) { return true; diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/EngineProcessingLogMessageFactory.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/EngineProcessingLogMessageFactory.java index ed8d515b1ced..1e13807501a3 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/EngineProcessingLogMessageFactory.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/EngineProcessingLogMessageFactory.java @@ -37,19 +37,19 @@ private EngineProcessingLogMessageFactory() { } public static Function recordProcessingError( - String errorMsg, Throwable exception, GenericRow record + final String errorMsg, final Throwable exception, final GenericRow record ) { return (config) -> { - Struct struct = new Struct(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA); + final Struct struct = new Struct(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA); struct.put(ProcessingLogMessageSchema.TYPE, MessageType.RECORD_PROCESSING_ERROR.getTypeId()); - Struct recordProcessingError = + final Struct recordProcessingError = new Struct(MessageType.RECORD_PROCESSING_ERROR.getSchema()); struct.put(ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR, recordProcessingError); recordProcessingError.put( ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR_FIELD_MESSAGE, errorMsg ); - List cause = ErrorMessageUtil.getErrorMessages(exception); + final List cause = 
ErrorMessageUtil.getErrorMessages(exception); cause.remove(0); recordProcessingError.put( ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR_FIELD_CAUSE, @@ -66,13 +66,13 @@ public static Function recordProcessingErro }; } - private static String serializeRow(ProcessingLogConfig config, GenericRow record) { + private static String serializeRow(final ProcessingLogConfig config, final GenericRow record) { if (!config.getBoolean(ProcessingLogConfig.INCLUDE_ROWS)) { return null; } try { return JsonMapper.INSTANCE.mapper.writeValueAsString(record.getColumns()); - } catch (Throwable t) { + } catch (final Throwable t) { LOGGER.error("error serializing record for processing log", t); return null; } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java index 3e2c117266ec..029f25773842 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java @@ -79,13 +79,16 @@ public class ExpressionTypeManager { private final LogicalSchema schema; private final FunctionRegistry functionRegistry; - public ExpressionTypeManager(LogicalSchema schema, FunctionRegistry functionRegistry) { + public ExpressionTypeManager( + final LogicalSchema schema, + final FunctionRegistry functionRegistry + ) { this.schema = Objects.requireNonNull(schema, "schema"); this.functionRegistry = Objects.requireNonNull(functionRegistry, "functionRegistry"); } - public SqlType getExpressionSqlType(Expression expression) { - ExpressionTypeContext expressionTypeContext = new ExpressionTypeContext(); + public SqlType getExpressionSqlType(final Expression expression) { + final ExpressionTypeContext expressionTypeContext = new ExpressionTypeContext(); new Visitor().process(expression, expressionTypeContext); return expressionTypeContext.getSqlType(); } @@ -98,7 +101,7 @@ SqlType getSqlType() { return sqlType; } - void setSqlType(SqlType sqlType) { + void setSqlType(final SqlType sqlType) { this.sqlType = sqlType; } } @@ -107,15 +110,15 @@ private class Visitor implements ExpressionVisitor @Override public Void visitArithmeticBinary( - ArithmeticBinaryExpression node, ExpressionTypeContext expressionTypeContext + final ArithmeticBinaryExpression node, final ExpressionTypeContext expressionTypeContext ) { process(node.getLeft(), expressionTypeContext); - SqlType leftType = expressionTypeContext.getSqlType(); + final SqlType leftType = expressionTypeContext.getSqlType(); process(node.getRight(), expressionTypeContext); - SqlType rightType = expressionTypeContext.getSqlType(); + final SqlType rightType = expressionTypeContext.getSqlType(); - SqlType resultType = node.getOperator().resultType(leftType, rightType); + final SqlType resultType = node.getOperator().resultType(leftType, rightType); expressionTypeContext.setSqlType(resultType); return null; @@ -123,7 +126,7 @@ public Void visitArithmeticBinary( @Override public Void visitArithmeticUnary( - ArithmeticUnaryExpression node, ExpressionTypeContext context + final ArithmeticUnaryExpression node, final ExpressionTypeContext context ) { process(node.getValue(), context); return null; @@ -131,15 +134,15 @@ public Void visitArithmeticUnary( @Override public Void visitNotExpression( - NotExpression node, ExpressionTypeContext expressionTypeContext + final NotExpression node, final ExpressionTypeContext expressionTypeContext ) { 
expressionTypeContext.setSqlType(SqlTypes.BOOLEAN); return null; } @Override - public Void visitCast(Cast node, ExpressionTypeContext expressionTypeContext) { - SqlType sqlType = node.getType().getSqlType(); + public Void visitCast(final Cast node, final ExpressionTypeContext expressionTypeContext) { + final SqlType sqlType = node.getType().getSqlType(); if (!sqlType.supportsCast()) { throw new KsqlFunctionException("Only casts to primitive types or decimals " + "are supported: " + sqlType); @@ -151,28 +154,31 @@ public Void visitCast(Cast node, ExpressionTypeContext expressionTypeContext) { @Override public Void visitComparisonExpression( - ComparisonExpression node, ExpressionTypeContext expressionTypeContext + final ComparisonExpression node, final ExpressionTypeContext expressionTypeContext ) { process(node.getLeft(), expressionTypeContext); - SqlType leftSchema = expressionTypeContext.getSqlType(); + final SqlType leftSchema = expressionTypeContext.getSqlType(); process(node.getRight(), expressionTypeContext); - SqlType rightSchema = expressionTypeContext.getSqlType(); + final SqlType rightSchema = expressionTypeContext.getSqlType(); ComparisonUtil.isValidComparison(leftSchema, node.getType(), rightSchema); expressionTypeContext.setSqlType(SqlTypes.BOOLEAN); return null; } @Override - public Void visitBetweenPredicate(BetweenPredicate node, ExpressionTypeContext context) { + public Void visitBetweenPredicate( + final BetweenPredicate node, + final ExpressionTypeContext context + ) { context.setSqlType(SqlTypes.BOOLEAN); return null; } @Override public Void visitColumnReference( - ColumnReferenceExp node, ExpressionTypeContext expressionTypeContext + final ColumnReferenceExp node, final ExpressionTypeContext expressionTypeContext ) { - Column schemaColumn = schema.findColumn(node.getReference()) + final Column schemaColumn = schema.findColumn(node.getReference()) .orElseThrow(() -> new KsqlException(String.format("Invalid Expression %s.", node.toString()))); @@ -182,18 +188,18 @@ public Void visitColumnReference( @Override public Void visitDereferenceExpression( - DereferenceExpression node, ExpressionTypeContext expressionTypeContext + final DereferenceExpression node, final ExpressionTypeContext expressionTypeContext ) { process(node.getBase(), expressionTypeContext); - SqlType sqlType = expressionTypeContext.getSqlType(); + final SqlType sqlType = expressionTypeContext.getSqlType(); if (!(sqlType instanceof SqlStruct)) { throw new IllegalStateException("Expected STRUCT type, got: " + sqlType); } - SqlStruct structType = (SqlStruct) sqlType; - String fieldName = node.getFieldName(); + final SqlStruct structType = (SqlStruct) sqlType; + final String fieldName = node.getFieldName(); - Field structField = structType + final Field structField = structType .field(fieldName) .orElseThrow(() -> new KsqlException( "Could not find field '" + fieldName + "' in '" + node.getBase() + "'.") @@ -205,7 +211,7 @@ public Void visitDereferenceExpression( @Override public Void visitStringLiteral( - StringLiteral node, ExpressionTypeContext expressionTypeContext + final StringLiteral node, final ExpressionTypeContext expressionTypeContext ) { expressionTypeContext.setSqlType(SqlTypes.STRING); return null; @@ -213,21 +219,24 @@ public Void visitStringLiteral( @Override public Void visitBooleanLiteral( - BooleanLiteral node, ExpressionTypeContext expressionTypeContext + final BooleanLiteral node, final ExpressionTypeContext expressionTypeContext ) { expressionTypeContext.setSqlType(SqlTypes.BOOLEAN); 
return null; } @Override - public Void visitLongLiteral(LongLiteral node, ExpressionTypeContext expressionTypeContext) { + public Void visitLongLiteral( + final LongLiteral node, + final ExpressionTypeContext expressionTypeContext + ) { expressionTypeContext.setSqlType(SqlTypes.BIGINT); return null; } @Override public Void visitIntegerLiteral( - IntegerLiteral node, ExpressionTypeContext expressionTypeContext + final IntegerLiteral node, final ExpressionTypeContext expressionTypeContext ) { expressionTypeContext.setSqlType(SqlTypes.INTEGER); return null; @@ -235,21 +244,21 @@ public Void visitIntegerLiteral( @Override public Void visitDoubleLiteral( - DoubleLiteral node, ExpressionTypeContext expressionTypeContext + final DoubleLiteral node, final ExpressionTypeContext expressionTypeContext ) { expressionTypeContext.setSqlType(SqlTypes.DOUBLE); return null; } @Override - public Void visitNullLiteral(NullLiteral node, ExpressionTypeContext context) { + public Void visitNullLiteral(final NullLiteral node, final ExpressionTypeContext context) { context.setSqlType(null); return null; } @Override public Void visitLikePredicate( - LikePredicate node, ExpressionTypeContext expressionTypeContext + final LikePredicate node, final ExpressionTypeContext expressionTypeContext ) { expressionTypeContext.setSqlType(SqlTypes.BOOLEAN); return null; @@ -257,7 +266,7 @@ public Void visitLikePredicate( @Override public Void visitIsNotNullPredicate( - IsNotNullPredicate node, ExpressionTypeContext expressionTypeContext + final IsNotNullPredicate node, final ExpressionTypeContext expressionTypeContext ) { expressionTypeContext.setSqlType(SqlTypes.BOOLEAN); return null; @@ -265,7 +274,7 @@ public Void visitIsNotNullPredicate( @Override public Void visitIsNullPredicate( - IsNullPredicate node, ExpressionTypeContext expressionTypeContext + final IsNullPredicate node, final ExpressionTypeContext expressionTypeContext ) { expressionTypeContext.setSqlType(SqlTypes.BOOLEAN); return null; @@ -273,11 +282,11 @@ public Void visitIsNullPredicate( @Override public Void visitSearchedCaseExpression( - SearchedCaseExpression node, ExpressionTypeContext context + final SearchedCaseExpression node, final ExpressionTypeContext context ) { - Optional<SqlType> whenType = validateWhenClauses(node.getWhenClauses(), context); + final Optional<SqlType> whenType = validateWhenClauses(node.getWhenClauses(), context); - Optional<SqlType> defaultType = node.getDefaultValue() + final Optional<SqlType> defaultType = node.getDefaultValue() .map(ExpressionTypeManager.this::getExpressionSqlType); if (whenType.isPresent() && defaultType.isPresent()) { @@ -304,12 +313,12 @@ public Void visitSearchedCaseExpression( @Override public Void visitSubscriptExpression( - SubscriptExpression node, ExpressionTypeContext expressionTypeContext + final SubscriptExpression node, final ExpressionTypeContext expressionTypeContext ) { process(node.getBase(), expressionTypeContext); - SqlType arrayMapType = expressionTypeContext.getSqlType(); + final SqlType arrayMapType = expressionTypeContext.getSqlType(); - SqlType valueType; + final SqlType valueType; if (arrayMapType instanceof SqlMap) { valueType = ((SqlMap) arrayMapType).getValueType(); } else if (arrayMapType instanceof SqlArray) { @@ -323,10 +332,13 @@ public Void visitSubscriptExpression( } @Override - public Void visitStructExpression(CreateStructExpression exp, ExpressionTypeContext context) { + public Void visitStructExpression( + final CreateStructExpression exp, + final ExpressionTypeContext context + ) { final Builder builder =
SqlStruct.builder(); - for (CreateStructExpression.Field field : exp.getFields()) { + for (final CreateStructExpression.Field field : exp.getFields()) { process(field.getValue(), context); builder.field(field.getName(), context.getSqlType()); } @@ -336,16 +348,19 @@ public Void visitStructExpression(CreateStructExpression exp, ExpressionTypeCont } @Override - public Void visitFunctionCall(FunctionCall node, ExpressionTypeContext expressionTypeContext) { + public Void visitFunctionCall( + final FunctionCall node, + final ExpressionTypeContext expressionTypeContext + ) { if (functionRegistry.isAggregate(node.getName().name())) { - SqlType schema = node.getArguments().isEmpty() + final SqlType schema = node.getArguments().isEmpty() ? FunctionRegistry.DEFAULT_FUNCTION_ARG_SCHEMA : getExpressionSqlType(node.getArguments().get(0)); - AggregateFunctionInitArguments args = + final AggregateFunctionInitArguments args = UdafUtil.createAggregateFunctionInitArgs(0, node); - KsqlAggregateFunction aggFunc = functionRegistry + final KsqlAggregateFunction aggFunc = functionRegistry .getAggregateFunction(node.getName().name(), schema, args); expressionTypeContext.setSqlType(aggFunc.returnType()); @@ -353,34 +368,34 @@ public Void visitFunctionCall(FunctionCall node, ExpressionTypeContext expressio } if (functionRegistry.isTableFunction(node.getName().name())) { - List<SqlType> argumentTypes = node.getArguments().isEmpty() + final List<SqlType> argumentTypes = node.getArguments().isEmpty() ? ImmutableList.of(FunctionRegistry.DEFAULT_FUNCTION_ARG_SCHEMA) : node.getArguments().stream().map(ExpressionTypeManager.this::getExpressionSqlType) .collect(Collectors.toList()); - KsqlTableFunction tableFunction = functionRegistry + final KsqlTableFunction tableFunction = functionRegistry .getTableFunction(node.getName().name(), argumentTypes); expressionTypeContext.setSqlType(tableFunction.getReturnType(argumentTypes)); return null; } - UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName().name()); + final UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName().name()); - List<SqlType> argTypes = new ArrayList<>(); - for (Expression expression : node.getArguments()) { + final List<SqlType> argTypes = new ArrayList<>(); + for (final Expression expression : node.getArguments()) { process(expression, expressionTypeContext); argTypes.add(expressionTypeContext.getSqlType()); } - SqlType returnSchema = udfFactory.getFunction(argTypes).getReturnType(argTypes); + final SqlType returnSchema = udfFactory.getFunction(argTypes).getReturnType(argTypes); expressionTypeContext.setSqlType(returnSchema); return null; } @Override public Void visitLogicalBinaryExpression( - LogicalBinaryExpression node, ExpressionTypeContext context + final LogicalBinaryExpression node, final ExpressionTypeContext context ) { process(node.getLeft(), context); process(node.getRight(), context); @@ -388,67 +403,68 @@ public Void visitLogicalBinaryExpression( }
@Override - public Void visitType(Type type, ExpressionTypeContext expressionTypeContext) { + public Void visitType(final Type type, final ExpressionTypeContext expressionTypeContext) { throw VisitorUtil.illegalState(this, type); } @Override public Void visitTimeLiteral( - TimeLiteral timeLiteral, ExpressionTypeContext expressionTypeContext + final TimeLiteral timeLiteral, final ExpressionTypeContext expressionTypeContext ) { throw VisitorUtil.unsupportedOperation(this, timeLiteral); } @Override public Void visitTimestampLiteral( - TimestampLiteral timestampLiteral, ExpressionTypeContext expressionTypeContext + final TimestampLiteral timestampLiteral, final ExpressionTypeContext expressionTypeContext ) { throw VisitorUtil.unsupportedOperation(this, timestampLiteral); } @Override public Void visitDecimalLiteral( - DecimalLiteral decimalLiteral, ExpressionTypeContext expressionTypeContext + final DecimalLiteral decimalLiteral, final ExpressionTypeContext expressionTypeContext ) { throw VisitorUtil.unsupportedOperation(this, decimalLiteral); } @Override public Void visitSimpleCaseExpression( - SimpleCaseExpression simpleCaseExpression, ExpressionTypeContext expressionTypeContext + final SimpleCaseExpression simpleCaseExpression, + final ExpressionTypeContext expressionTypeContext ) { throw VisitorUtil.unsupportedOperation(this, simpleCaseExpression); } @Override public Void visitInListExpression( - InListExpression inListExpression, ExpressionTypeContext expressionTypeContext + final InListExpression inListExpression, final ExpressionTypeContext expressionTypeContext ) { throw VisitorUtil.unsupportedOperation(this, inListExpression); } @Override public Void visitInPredicate( - InPredicate inPredicate, ExpressionTypeContext expressionTypeContext + final InPredicate inPredicate, final ExpressionTypeContext expressionTypeContext ) { throw VisitorUtil.unsupportedOperation(this, inPredicate); } @Override public Void visitWhenClause( - WhenClause whenClause, ExpressionTypeContext expressionTypeContext + final WhenClause whenClause, final ExpressionTypeContext expressionTypeContext ) { throw VisitorUtil.illegalState(this, whenClause); } private Optional<SqlType> validateWhenClauses( - List<WhenClause> whenClauses, ExpressionTypeContext context + final List<WhenClause> whenClauses, final ExpressionTypeContext context ) { Optional<SqlType> previousResult = Optional.empty(); - for (WhenClause whenClause : whenClauses) { + for (final WhenClause whenClause : whenClauses) { process(whenClause.getOperand(), context); - SqlType operandType = context.getSqlType(); + final SqlType operandType = context.getSqlType(); if (operandType.baseType() != SqlBaseType.BOOLEAN) { throw new KsqlException("WHEN operand type should be boolean."
@@ -459,7 +475,7 @@ private Optional<SqlType> validateWhenClauses( process(whenClause.getResult(), context); - SqlType resultType = context.getSqlType(); + final SqlType resultType = context.getSqlType(); if (resultType == null) { continue; // `null` type } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/HoppingWindowExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/HoppingWindowExpression.java index 51bb748f89d8..01f4ce062884 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/HoppingWindowExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/HoppingWindowExpression.java @@ -35,14 +35,20 @@ public class HoppingWindowExpression extends KsqlWindowExpression { private final TimeUnit advanceByUnit; public HoppingWindowExpression( - long size, TimeUnit sizeUnit, long advanceBy, TimeUnit advanceByUnit + final long size, + final TimeUnit sizeUnit, + final long advanceBy, + final TimeUnit advanceByUnit ) { this(Optional.empty(), size, sizeUnit, advanceBy, advanceByUnit); } public HoppingWindowExpression( - Optional<NodeLocation> location, long size, TimeUnit sizeUnit, long advanceBy, - TimeUnit advanceByUnit + final Optional<NodeLocation> location, + final long size, + final TimeUnit sizeUnit, + final long advanceBy, + final TimeUnit advanceByUnit ) { super(location); this.size = size; @@ -76,7 +82,7 @@ public long getAdvanceBy() { } @Override - public <R, C> R accept(WindowVisitor<R, C> visitor, C context) { + public <R, C> R accept(final WindowVisitor<R, C> visitor, final C context) { return visitor.visitHoppingWindowExpression(this, context); } @@ -92,14 +98,14 @@ public int hashCode() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - HoppingWindowExpression hoppingWindowExpression = (HoppingWindowExpression) o; + final HoppingWindowExpression hoppingWindowExpression = (HoppingWindowExpression) o; return hoppingWindowExpression.size == size && hoppingWindowExpression.sizeUnit == sizeUnit && hoppingWindowExpression.advanceBy == advanceBy && hoppingWindowExpression .advanceByUnit == advanceByUnit; diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/KsqlWindowExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/KsqlWindowExpression.java index 0d817c38cb70..f5de6ccee405 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/KsqlWindowExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/KsqlWindowExpression.java @@ -24,7 +24,7 @@ @Immutable public abstract class KsqlWindowExpression extends Node { - KsqlWindowExpression(Optional<NodeLocation> nodeLocation) { + KsqlWindowExpression(final Optional<NodeLocation> nodeLocation) { super(nodeLocation); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/SessionWindowExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/SessionWindowExpression.java index fd378c115fae..ed22f3f4d06e 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/SessionWindowExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/SessionWindowExpression.java @@ -31,11 +31,15 @@ public class SessionWindowExpression extends KsqlWindowExpression { private final long gap; private final TimeUnit sizeUnit; - public SessionWindowExpression(long gap, TimeUnit sizeUnit) { + public
SessionWindowExpression(final long gap, final TimeUnit sizeUnit) { this(Optional.empty(), gap, sizeUnit); } - public SessionWindowExpression(Optional<NodeLocation> location, long gap, TimeUnit sizeUnit) { + public SessionWindowExpression( + final Optional<NodeLocation> location, + final long gap, + final TimeUnit sizeUnit + ) { super(location); this.gap = gap; this.sizeUnit = requireNonNull(sizeUnit, "sizeUnit"); @@ -55,7 +59,7 @@ public WindowInfo getWindowInfo() { } @Override - public <R, C> R accept(WindowVisitor<R, C> visitor, C context) { + public <R, C> R accept(final WindowVisitor<R, C> visitor, final C context) { return visitor.visitSessionWindowExpression(this, context); } @@ -70,14 +74,14 @@ public int hashCode() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - SessionWindowExpression sessionWindowExpression = (SessionWindowExpression) o; + final SessionWindowExpression sessionWindowExpression = (SessionWindowExpression) o; return sessionWindowExpression.gap == gap && sessionWindowExpression.sizeUnit == sizeUnit; } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/TumblingWindowExpression.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/TumblingWindowExpression.java index 3866d66da7c6..45b5602dcad2 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/TumblingWindowExpression.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/windows/TumblingWindowExpression.java @@ -32,11 +32,15 @@ public class TumblingWindowExpression extends KsqlWindowExpression { private final long size; private final TimeUnit sizeUnit; - public TumblingWindowExpression(long size, TimeUnit sizeUnit) { + public TumblingWindowExpression(final long size, final TimeUnit sizeUnit) { this(Optional.empty(), size, sizeUnit); } - public TumblingWindowExpression(Optional<NodeLocation> location, long size, TimeUnit sizeUnit) { + public TumblingWindowExpression( + final Optional<NodeLocation> location, + final long size, + final TimeUnit sizeUnit + ) { super(location); this.size = size; this.sizeUnit = requireNonNull(sizeUnit, "sizeUnit"); @@ -59,7 +63,7 @@ public long getSize() { } @Override - public <R, C> R accept(WindowVisitor<R, C> visitor, C context) { + public <R, C> R accept(final WindowVisitor<R, C> visitor, final C context) { return visitor.visitTumblingWindowExpression(this, context); } @@ -74,14 +78,14 @@ public int hashCode() { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - TumblingWindowExpression tumblingWindowExpression = (TumblingWindowExpression) o; + final TumblingWindowExpression tumblingWindowExpression = (TumblingWindowExpression) o; return tumblingWindowExpression.size == size && tumblingWindowExpression.sizeUnit == sizeUnit; } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/services/ConnectClient.java b/ksql-execution/src/main/java/io/confluent/ksql/services/ConnectClient.java index 4eb2176c715a..52f572c26a6d 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/services/ConnectClient.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/services/ConnectClient.java @@ -75,15 +75,15 @@ class ConnectResponse<T> { private final Optional<String> error; private final int httpCode; - public static <T> ConnectResponse<T> success(T datum, int code) { + public static <T> ConnectResponse<T> success(final T datum, final int code) { return new
ConnectResponse<>(datum, null, code); } - public static <T> ConnectResponse<T> failure(String error, int code) { + public static <T> ConnectResponse<T> failure(final String error, final int code) { return new ConnectResponse<>(null, error, code); } - private ConnectResponse(T datum, String error, int code) { + private ConnectResponse(final T datum, final String error, final int code) { KsqlPreconditions.checkArgument( datum != null ^ error != null, "expected exactly one of datum or error to be null" diff --git a/ksql-execution/src/main/java/io/confluent/ksql/services/DisabledKsqlClient.java b/ksql-execution/src/main/java/io/confluent/ksql/services/DisabledKsqlClient.java index 5a9d6fd11710..f6a41cfb5807 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/services/DisabledKsqlClient.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/services/DisabledKsqlClient.java @@ -34,7 +34,7 @@ private DisabledKsqlClient() { } @Override - public RestResponse<KsqlEntityList> makeKsqlRequest(URI serverEndPoint, String sql) { + public RestResponse<KsqlEntityList> makeKsqlRequest(final URI serverEndPoint, final String sql) { throw new UnsupportedOperationException("KSQL client is disabled"); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/services/KafkaTopicClient.java b/ksql-execution/src/main/java/io/confluent/ksql/services/KafkaTopicClient.java index 9e18fdde03e4..54df75227c3a 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/services/KafkaTopicClient.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/services/KafkaTopicClient.java @@ -36,15 +36,19 @@ enum TopicCleanupPolicy { COMPACT_DELETE } - default void validateCreateTopic(String topic, int numPartitions, short replicationFactor) { + default void validateCreateTopic( + final String topic, + final int numPartitions, + final short replicationFactor + ) { validateCreateTopic(topic, numPartitions, replicationFactor, Collections.emptyMap()); } default void validateCreateTopic( - String topic, - int numPartitions, - short replicationFactor, - Map<String, ?> configs + final String topic, + final int numPartitions, + final short replicationFactor, + final Map<String, ?> configs ) { createTopic( topic, @@ -66,7 +70,7 @@ default void validateCreateTopic( * @param replicationFactor the rf of the topic. * @param numPartitions the partition count of the topic. */ - default void createTopic(String topic, int numPartitions, short replicationFactor) { + default void createTopic( + final String topic, + final int numPartitions, + final short replicationFactor + ) { createTopic(topic, numPartitions, replicationFactor, Collections.emptyMap()); } @@ -84,10 +92,10 @@ default void createTopic(String topic, int numPartitions, short replicationFacto * @param configs any additional topic configs to use */ default void createTopic( - String topic, - int numPartitions, - short replicationFactor, - Map<String, ?> configs + final String topic, + final int numPartitions, + final short replicationFactor, + final Map<String, ?> configs ) { createTopic( topic, @@ -155,7 +163,7 @@ void createTopic( * @return the description if the topic * @throws KafkaTopicExistsException if the topic does not exist.
*/ - default TopicDescription describeTopic(String topicName) { + default TopicDescription describeTopic(final String topicName) { return describeTopics(ImmutableList.of(topicName)).get(topicName); } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/builder/KsqlQueryBuilderTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/builder/KsqlQueryBuilderTest.java index b7007e0593e5..ed02d6fc2a72 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/builder/KsqlQueryBuilderTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/builder/KsqlQueryBuilderTest.java @@ -144,7 +144,7 @@ public void shouldThrowNPEOnConstruction() { @Test public void shouldBuildNodeContext() { // When: - Stacker result = ksqlQueryBuilder.buildNodeContext("some-id"); + final Stacker result = ksqlQueryBuilder.buildNodeContext("some-id"); // Then: assertThat(result, is(new Stacker().push("some-id"))); @@ -153,10 +153,10 @@ public void shouldBuildNodeContext() { @Test public void shouldSwapInKsqlConfig() { // Given: - KsqlConfig other = mock(KsqlConfig.class); + final KsqlConfig other = mock(KsqlConfig.class); // When: - KsqlQueryBuilder result = ksqlQueryBuilder.withKsqlConfig(other); + final KsqlQueryBuilder result = ksqlQueryBuilder.withKsqlConfig(other); // Then: assertThat(ksqlQueryBuilder.getKsqlConfig(), is(ksqlConfig)); @@ -243,7 +243,7 @@ public void shouldTrackSchemasUsed() { @Test public void shouldTrackSchemasTakingIntoAccountSerdeOptions() { // Given: - PhysicalSchema schema = PhysicalSchema.from( + final PhysicalSchema schema = PhysicalSchema.from( SOME_SCHEMA.logicalSchema(), SerdeOption.of(SerdeOption.UNWRAP_SINGLE_VALUES) ); diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/ExpressionMetadataTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/ExpressionMetadataTest.java index 5aa640dc1330..ed5326fe8949 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/ExpressionMetadataTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/ExpressionMetadataTest.java @@ -70,7 +70,7 @@ public void shouldEvaluateExpressionWithValueColumnSpecs() throws Exception { ); // When: - Object result = expressionMetadata.evaluate(new GenericRow(123, 456)); + final Object result = expressionMetadata.evaluate(new GenericRow(123, 456)); // Then: assertThat(result, equalTo(RETURN_VALUE)); @@ -98,7 +98,7 @@ public void shouldEvaluateExpressionWithUdfsSpecs() throws Exception { ); // When: - Object result = expressionMetadata.evaluate(new GenericRow(123)); + final Object result = expressionMetadata.evaluate(new GenericRow(123)); // Then: assertThat(result, equalTo(RETURN_VALUE)); @@ -119,8 +119,8 @@ public void shouldPerformThreadSafeParameterEvaluation() throws Exception { 1 ); - CountDownLatch threadLatch = new CountDownLatch(1); - CountDownLatch mainLatch = new CountDownLatch(1); + final CountDownLatch threadLatch = new CountDownLatch(1); + final CountDownLatch mainLatch = new CountDownLatch(1); when(expressionEvaluator.evaluate(new Object[]{123, 456})) .thenAnswer( @@ -137,7 +137,7 @@ public void shouldPerformThreadSafeParameterEvaluation() throws Exception { expression ); - Thread thread = new Thread( + final Thread thread = new Thread( () -> expressionMetadata.evaluate(new GenericRow(123, 456)) ); diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java 
b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java index d9e552db1455..c7c709fd5a74 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java @@ -94,8 +94,8 @@ public class SqlToJavaVisitorTest { @Before public void init() { - AtomicInteger funCounter = new AtomicInteger(); - AtomicInteger structCounter = new AtomicInteger(); + final AtomicInteger funCounter = new AtomicInteger(); + final AtomicInteger structCounter = new AtomicInteger(); sqlToJavaVisitor = new SqlToJavaVisitor( SCHEMA, functionRegistry, @@ -108,10 +108,10 @@ public void init() { @Test public void shouldProcessBasicJavaMath() { // Given: - Expression expression = new ArithmeticBinaryExpression(Operator.ADD, COL0, COL3); + final Expression expression = new ArithmeticBinaryExpression(Operator.ADD, COL0, COL3); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("(TEST1_COL0 + TEST1_COL3)")); @@ -120,10 +120,10 @@ public void shouldProcessBasicJavaMath() { @Test public void shouldProcessArrayExpressionCorrectly() { // Given: - Expression expression = new SubscriptExpression(ARRAYCOL, literal(0)); + final Expression expression = new SubscriptExpression(ARRAYCOL, literal(0)); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat( @@ -135,10 +135,10 @@ public void shouldProcessArrayExpressionCorrectly() { @Test public void shouldProcessMapExpressionCorrectly() { // Given: - Expression expression = new SubscriptExpression(MAPCOL, new StringLiteral("key1")); + final Expression expression = new SubscriptExpression(MAPCOL, new StringLiteral("key1")); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("((Double) ((java.util.Map)TEST1_COL5).get(\"key1\"))")); @@ -147,7 +147,7 @@ public void shouldProcessMapExpressionCorrectly() { @Test public void shouldProcessStructExpressionCorrectly() { // Given: - Expression expression = new CreateStructExpression( + final Expression expression = new CreateStructExpression( ImmutableList.of( new Field("col1", new StringLiteral("foo")), new Field("col2", new SubscriptExpression(MAPCOL, new StringLiteral("key1"))) @@ -155,7 +155,7 @@ public void shouldProcessStructExpressionCorrectly() { ); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat( @@ -166,15 +166,15 @@ public void shouldProcessStructExpressionCorrectly() { @Test public void shouldCreateCorrectCastJavaExpression() { // Given: - Expression castBigintInteger = new Cast( + final Expression castBigintInteger = new Cast( COL0, new io.confluent.ksql.execution.expression.tree.Type(SqlPrimitiveType.of("INTEGER")) ); - Expression castDoubleBigint = new Cast( + final Expression castDoubleBigint = new Cast( COL3, new io.confluent.ksql.execution.expression.tree.Type(SqlPrimitiveType.of("BIGINT")) ); - Expression castDoubleString = new Cast( + final Expression castDoubleString = new Cast( COL3, new io.confluent.ksql.execution.expression.tree.Type(SqlPrimitiveType.of("VARCHAR")) ); @@ 
-197,33 +197,33 @@ public void shouldCreateCorrectCastJavaExpression() { @Test public void shouldPostfixFunctionInstancesWithUniqueId() { // Given: - UdfFactory ssFactory = mock(UdfFactory.class); - KsqlScalarFunction ssFunction = mock(KsqlScalarFunction.class); - UdfFactory catFactory = mock(UdfFactory.class); - KsqlScalarFunction catFunction = mock(KsqlScalarFunction.class); + final UdfFactory ssFactory = mock(UdfFactory.class); + final KsqlScalarFunction ssFunction = mock(KsqlScalarFunction.class); + final UdfFactory catFactory = mock(UdfFactory.class); + final KsqlScalarFunction catFunction = mock(KsqlScalarFunction.class); givenUdf("SUBSTRING", ssFactory, ssFunction); givenUdf("CONCAT", catFactory, catFunction); - FunctionName ssName = FunctionName.of("SUBSTRING"); - FunctionName catName = FunctionName.of("CONCAT"); - FunctionCall substring1 = new FunctionCall( + final FunctionName ssName = FunctionName.of("SUBSTRING"); + final FunctionName catName = FunctionName.of("CONCAT"); + final FunctionCall substring1 = new FunctionCall( ssName, ImmutableList.of(COL1, new IntegerLiteral(1), new IntegerLiteral(3)) ); - FunctionCall substring2 = new FunctionCall( + final FunctionCall substring2 = new FunctionCall( ssName, ImmutableList.of(COL1, new IntegerLiteral(4), new IntegerLiteral(5)) ); - FunctionCall concat = new FunctionCall( + final FunctionCall concat = new FunctionCall( catName, ImmutableList.of(new StringLiteral("-"), substring2) ); - Expression expression = new FunctionCall( + final Expression expression = new FunctionCall( catName, ImmutableList.of(substring1, concat) ); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, is( @@ -236,10 +236,10 @@ public void shouldPostfixFunctionInstancesWithUniqueId() { @Test public void shouldEscapeQuotesInStringLiteral() { // Given: - Expression expression = new StringLiteral("\"foo\""); + final Expression expression = new StringLiteral("\"foo\""); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("\"\\\"foo\\\"\"")); @@ -248,10 +248,10 @@ public void shouldEscapeQuotesInStringLiteral() { @Test public void shouldEscapeQuotesInStringLiteralQuote() { // Given: - Expression expression = new StringLiteral("\\\""); + final Expression expression = new StringLiteral("\\\""); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("\"\\\\\\\"\"")); @@ -260,14 +260,14 @@ public void shouldEscapeQuotesInStringLiteralQuote() { @Test public void shouldGenerateCorrectCodeForComparisonWithNegativeNumbers() { // Given: - Expression expression = new ComparisonExpression( + final Expression expression = new ComparisonExpression( ComparisonExpression.Type.GREATER_THAN, COL3, new DoubleLiteral(-10.0) ); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat( @@ -278,10 +278,10 @@ javaExpression, equalTo( @Test public void shouldGenerateCorrectCodeForLikePatternWithLeadingWildcard() { // Given: - Expression expression = new LikePredicate(COL1, new StringLiteral("%foo")); + final Expression expression = new LikePredicate(COL1, new StringLiteral("%foo")); // When: - String 
javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("(TEST1_COL1).endsWith(\"foo\")")); @@ -290,10 +290,10 @@ public void shouldGenerateCorrectCodeForLikePatternWithLeadingWildcard() { @Test public void shouldGenerateCorrectCodeForLikePatternWithTrailingWildcard() { // Given: - Expression expression = new LikePredicate(COL1, new StringLiteral("foo%")); + final Expression expression = new LikePredicate(COL1, new StringLiteral("foo%")); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("(TEST1_COL1).startsWith(\"foo\")")); @@ -302,10 +302,10 @@ public void shouldGenerateCorrectCodeForLikePatternWithTrailingWildcard() { @Test public void shouldGenerateCorrectCodeForLikePatternWithLeadingAndTrailingWildcards() { // Given: - Expression expression = new LikePredicate(COL1, new StringLiteral("%foo%")); + final Expression expression = new LikePredicate(COL1, new StringLiteral("%foo%")); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("(TEST1_COL1).contains(\"foo\")")); @@ -314,10 +314,10 @@ public void shouldGenerateCorrectCodeForLikePatternWithLeadingAndTrailingWildcar @Test public void shouldGenerateCorrectCodeForLikePatternWithoutWildcards() { // Given: - Expression expression = new LikePredicate(COL1, new StringLiteral("foo")); + final Expression expression = new LikePredicate(COL1, new StringLiteral("foo")); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("(TEST1_COL1).equals(\"foo\")")); @@ -326,7 +326,7 @@ public void shouldGenerateCorrectCodeForLikePatternWithoutWildcards() { @Test public void shouldGenerateCorrectCodeForCaseStatement() { // Given: - Expression expression = new SearchedCaseExpression( + final Expression expression = new SearchedCaseExpression( ImmutableList.of( new WhenClause( new ComparisonExpression( @@ -343,7 +343,7 @@ ComparisonExpression.Type.LESS_THAN, COL7, new IntegerLiteral(100)), ); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat( @@ -354,7 +354,7 @@ javaExpression, equalTo( @Test public void shouldGenerateCorrectCodeForCaseStatementWithNoElse() { // Given: - Expression expression = new SearchedCaseExpression( + final Expression expression = new SearchedCaseExpression( ImmutableList.of( new WhenClause( new ComparisonExpression( @@ -371,7 +371,7 @@ ComparisonExpression.Type.LESS_THAN, COL7, new IntegerLiteral(100)), ); // When: - String javaExpression = sqlToJavaVisitor.process(expression); + final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat( @@ -382,14 +382,14 @@ javaExpression, equalTo( @Test public void shouldGenerateCorrectCodeForDecimalAdd() { // Given: - ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( + final ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( Operator.ADD, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))) ); // When: - String java = 
sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat( @@ -401,14 +401,14 @@ public void shouldGenerateCorrectCodeForDecimalAdd() { @Test public void shouldGenerateCastLongToDecimalInBinaryExpression() { // Given: - ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( + final ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( Operator.ADD, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL0"))) ); // When: - String java = sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat(java, containsString("DecimalUtil.cast(TEST1_COL0, 19, 0)")); @@ -417,14 +417,14 @@ public void shouldGenerateCastLongToDecimalInBinaryExpression() { @Test public void shouldGenerateCastDecimalToDoubleInBinaryExpression() { // Given: - ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( + final ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( Operator.ADD, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL3"))) ); // When: - String java = sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat(java, containsString("(TEST1_COL8).doubleValue()")); @@ -433,14 +433,14 @@ public void shouldGenerateCastDecimalToDoubleInBinaryExpression() { @Test public void shouldGenerateCorrectCodeForDecimalSubtract() { // Given: - ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( + final ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( Operator.SUBTRACT, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))) ); // When: - String java = sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat( @@ -452,14 +452,14 @@ public void shouldGenerateCorrectCodeForDecimalSubtract() { @Test public void shouldGenerateCorrectCodeForDecimalMultiply() { // Given: - ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( + final ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( Operator.MULTIPLY, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))) ); // When: - String java = sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat( @@ -471,14 +471,14 @@ public void shouldGenerateCorrectCodeForDecimalMultiply() { @Test public void shouldGenerateCorrectCodeForDecimalDivide() { // Given: - ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( + final ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( Operator.DIVIDE, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))) ); // When: - String java = sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat( @@ -490,14 +490,14 @@ public void shouldGenerateCorrectCodeForDecimalDivide() { @Test public void shouldGenerateCorrectCodeForDecimalMod() { // Given: - ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( + final ArithmeticBinaryExpression binExp = new ArithmeticBinaryExpression( Operator.MODULUS, new 
ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))) ); // When: - String java = sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat( @@ -509,14 +509,14 @@ public void shouldGenerateCorrectCodeForDecimalMod() { @Test public void shouldGenerateCorrectCodeForDecimalDecimalEQ() { // Given: - ComparisonExpression compExp = new ComparisonExpression( + final ComparisonExpression compExp = new ComparisonExpression( ComparisonExpression.Type.EQUAL, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL9"))) ); // When: - String java = sqlToJavaVisitor.process(compExp); + final String java = sqlToJavaVisitor.process(compExp); // Then: assertThat(java, containsString("(TEST1_COL8.compareTo(TEST1_COL9) == 0))")); @@ -525,14 +525,14 @@ public void shouldGenerateCorrectCodeForDecimalDecimalEQ() { @Test public void shouldGenerateCorrectCodeForDecimalDecimalGT() { // Given: - ComparisonExpression compExp = new ComparisonExpression( + final ComparisonExpression compExp = new ComparisonExpression( ComparisonExpression.Type.GREATER_THAN, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL9"))) ); // When: - String java = sqlToJavaVisitor.process(compExp); + final String java = sqlToJavaVisitor.process(compExp); // Then: assertThat(java, containsString("(TEST1_COL8.compareTo(TEST1_COL9) > 0))")); @@ -541,14 +541,14 @@ public void shouldGenerateCorrectCodeForDecimalDecimalGT() { @Test public void shouldGenerateCorrectCodeForDecimalDecimalGEQ() { // Given: - ComparisonExpression compExp = new ComparisonExpression( + final ComparisonExpression compExp = new ComparisonExpression( ComparisonExpression.Type.GREATER_THAN_OR_EQUAL, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL9"))) ); // When: - String java = sqlToJavaVisitor.process(compExp); + final String java = sqlToJavaVisitor.process(compExp); // Then: assertThat(java, containsString("(TEST1_COL8.compareTo(TEST1_COL9) >= 0))")); @@ -557,14 +557,14 @@ public void shouldGenerateCorrectCodeForDecimalDecimalGEQ() { @Test public void shouldGenerateCorrectCodeForDecimalDecimalLT() { // Given: - ComparisonExpression compExp = new ComparisonExpression( + final ComparisonExpression compExp = new ComparisonExpression( ComparisonExpression.Type.LESS_THAN, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL9"))) ); // When: - String java = sqlToJavaVisitor.process(compExp); + final String java = sqlToJavaVisitor.process(compExp); // Then: assertThat(java, containsString("(TEST1_COL8.compareTo(TEST1_COL9) < 0))")); @@ -573,14 +573,14 @@ public void shouldGenerateCorrectCodeForDecimalDecimalLT() { @Test public void shouldGenerateCorrectCodeForDecimalDecimalLEQ() { // Given: - ComparisonExpression compExp = new ComparisonExpression( + final ComparisonExpression compExp = new ComparisonExpression( ComparisonExpression.Type.LESS_THAN_OR_EQUAL, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL9"))) ); // When: - String java = sqlToJavaVisitor.process(compExp); + final String java = sqlToJavaVisitor.process(compExp); // Then: assertThat(java, 
containsString("(TEST1_COL8.compareTo(TEST1_COL9) <= 0))")); @@ -589,14 +589,14 @@ public void shouldGenerateCorrectCodeForDecimalDecimalLEQ() { @Test public void shouldGenerateCorrectCodeForDecimalDecimalIsDistinct() { // Given: - ComparisonExpression compExp = new ComparisonExpression( + final ComparisonExpression compExp = new ComparisonExpression( ComparisonExpression.Type.IS_DISTINCT_FROM, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL9"))) ); // When: - String java = sqlToJavaVisitor.process(compExp); + final String java = sqlToJavaVisitor.process(compExp); // Then: assertThat(java, containsString("(TEST1_COL8.compareTo(TEST1_COL9) != 0))")); @@ -605,14 +605,14 @@ public void shouldGenerateCorrectCodeForDecimalDecimalIsDistinct() { @Test public void shouldGenerateCorrectCodeForDecimalDoubleEQ() { // Given: - ComparisonExpression compExp = new ComparisonExpression( + final ComparisonExpression compExp = new ComparisonExpression( ComparisonExpression.Type.EQUAL, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL3"))) ); // When: - String java = sqlToJavaVisitor.process(compExp); + final String java = sqlToJavaVisitor.process(compExp); // Then: assertThat(java, containsString("(TEST1_COL8.compareTo(new BigDecimal(TEST1_COL3)) == 0))")); @@ -621,14 +621,14 @@ public void shouldGenerateCorrectCodeForDecimalDoubleEQ() { @Test public void shouldGenerateCorrectCodeForDoubleDecimalEQ() { // Given: - ComparisonExpression compExp = new ComparisonExpression( + final ComparisonExpression compExp = new ComparisonExpression( ComparisonExpression.Type.EQUAL, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL3"))), new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))) ); // When: - String java = sqlToJavaVisitor.process(compExp); + final String java = sqlToJavaVisitor.process(compExp); // Then: assertThat(java, containsString("(new BigDecimal(TEST1_COL3).compareTo(TEST1_COL8) == 0))")); @@ -637,14 +637,14 @@ public void shouldGenerateCorrectCodeForDoubleDecimalEQ() { @Test public void shouldGenerateCorrectCodeForDecimalNegation() { // Given: - ArithmeticUnaryExpression binExp = new ArithmeticUnaryExpression( + final ArithmeticUnaryExpression binExp = new ArithmeticUnaryExpression( Optional.empty(), Sign.MINUS, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))) ); // When: - String java = sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat(java, is("(TEST1_COL8.negate(new MathContext(2, RoundingMode.UNNECESSARY)))")); @@ -653,14 +653,14 @@ public void shouldGenerateCorrectCodeForDecimalNegation() { @Test public void shouldGenerateCorrectCodeForDecimalUnaryPlus() { // Given: - ArithmeticUnaryExpression binExp = new ArithmeticUnaryExpression( + final ArithmeticUnaryExpression binExp = new ArithmeticUnaryExpression( Optional.empty(), Sign.PLUS, new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))) ); // When: - String java = sqlToJavaVisitor.process(binExp); + final String java = sqlToJavaVisitor.process(binExp); // Then: assertThat(java, is("(TEST1_COL8.plus(new MathContext(2, RoundingMode.UNNECESSARY)))")); @@ -669,13 +669,13 @@ public void shouldGenerateCorrectCodeForDecimalUnaryPlus() { @Test public void shouldGenerateCorrectCodeForDecimalCast() { // Given: - Cast cast = new Cast( + final Cast cast = new Cast( new 
ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL3"))), new Type(SqlDecimal.of(2, 1)) ); // When: - String java = sqlToJavaVisitor.process(cast); + final String java = sqlToJavaVisitor.process(cast); // Then: assertThat(java, is("(DecimalUtil.cast(TEST1_COL3, 2, 1))")); @@ -684,13 +684,13 @@ public void shouldGenerateCorrectCodeForDecimalCast() { @Test public void shouldGenerateCorrectCodeForDecimalCastNoOp() { // Given: - Cast cast = new Cast( + final Cast cast = new Cast( new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new Type(SqlDecimal.of(2, 1)) ); // When: - String java = sqlToJavaVisitor.process(cast); + final String java = sqlToJavaVisitor.process(cast); // Then: assertThat(java, is("TEST1_COL8")); @@ -699,13 +699,13 @@ public void shouldGenerateCorrectCodeForDecimalCastNoOp() { @Test public void shouldGenerateCorrectCodeForDecimalToIntCast() { // Given: - Cast cast = new Cast( + final Cast cast = new Cast( new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new Type(SqlTypes.INTEGER) ); // When: - String java = sqlToJavaVisitor.process(cast); + final String java = sqlToJavaVisitor.process(cast); // Then: assertThat(java, is("((TEST1_COL8).intValue())")); @@ -714,13 +714,13 @@ public void shouldGenerateCorrectCodeForDecimalToIntCast() { @Test public void shouldGenerateCorrectCodeForDecimalToLongCast() { // Given: - Cast cast = new Cast( + final Cast cast = new Cast( new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new Type(SqlTypes.BIGINT) ); // When: - String java = sqlToJavaVisitor.process(cast); + final String java = sqlToJavaVisitor.process(cast); // Then: assertThat(java, is("((TEST1_COL8).longValue())")); @@ -729,13 +729,13 @@ public void shouldGenerateCorrectCodeForDecimalToLongCast() { @Test public void shouldGenerateCorrectCodeForDecimalToDoubleCast() { // Given: - Cast cast = new Cast( + final Cast cast = new Cast( new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new Type(SqlTypes.DOUBLE) ); // When: - String java = sqlToJavaVisitor.process(cast); + final String java = sqlToJavaVisitor.process(cast); // Then: assertThat(java, is("((TEST1_COL8).doubleValue())")); @@ -744,13 +744,13 @@ public void shouldGenerateCorrectCodeForDecimalToDoubleCast() { @Test public void shouldGenerateCorrectCodeForDecimalToStringCast() { // Given: - Cast cast = new Cast( + final Cast cast = new Cast( new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL8"))), new Type(SqlTypes.STRING) ); // When: - String java = sqlToJavaVisitor.process(cast); + final String java = sqlToJavaVisitor.process(cast); // Then: assertThat(java, is("DecimalUtil.format(2, 1, TEST1_COL8)")); @@ -759,7 +759,7 @@ public void shouldGenerateCorrectCodeForDecimalToStringCast() { @Test public void shouldThrowOnIn() { // Given: - Expression expression = new InPredicate( + final Expression expression = new InPredicate( COL0, new InListExpression(ImmutableList.of(new IntegerLiteral(1), new IntegerLiteral(2))) ); @@ -774,7 +774,7 @@ public void shouldThrowOnIn() { @Test public void shouldThrowOnSimpleCase() { // Given: - Expression expression = new SimpleCaseExpression( + final Expression expression = new SimpleCaseExpression( COL0, ImmutableList.of(new WhenClause(new IntegerLiteral(10), new StringLiteral("ten"))), Optional.empty() @@ -806,13 +806,13 @@ public void shouldThrowOnTimestampLiteral() { } private void givenUdf( - String name, UdfFactory factory, KsqlScalarFunction function + final String name, final UdfFactory factory, final 
KsqlScalarFunction function ) { when(functionRegistry.isAggregate(name)).thenReturn(false); when(functionRegistry.getUdfFactory(name)).thenReturn(factory); when(factory.getFunction(anyList())).thenReturn(function); when(function.getReturnType(anyList())).thenReturn(SqlTypes.STRING); - UdfMetadata metadata = mock(UdfMetadata.class); + final UdfMetadata metadata = mock(UdfMetadata.class); when(factory.getMetadata()).thenReturn(metadata); } } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/helpers/ArrayAccessTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/helpers/ArrayAccessTest.java index aee9addbd25a..f6c3eed43ffa 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/helpers/ArrayAccessTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/helpers/ArrayAccessTest.java @@ -16,7 +16,8 @@ package io.confluent.ksql.execution.codegen.helpers; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import com.google.common.collect.ImmutableList; import java.util.List; @@ -27,10 +28,10 @@ public class ArrayAccessTest { @Test public void shouldBeOneIndexed() { // Given: - List<Integer> list = ImmutableList.of(1, 2); + final List<Integer> list = ImmutableList.of(1, 2); // When: - Integer access = ArrayAccess.arrayAccess(list, 1); + final Integer access = ArrayAccess.arrayAccess(list, 1); // Then: assertThat(access, is(1)); @@ -39,10 +40,10 @@ public void shouldBeOneIndexed() { @Test public void shouldSupportNegativeIndex() { // Given: - List<Integer> list = ImmutableList.of(1, 2); + final List<Integer> list = ImmutableList.of(1, 2); // When: - Integer access = ArrayAccess.arrayAccess(list, -1); + final Integer access = ArrayAccess.arrayAccess(list, -1); // Then: assertThat(access, is(2)); @@ -51,10 +52,10 @@ public void shouldSupportNegativeIndex() { @Test public void shouldReturnNullOnOutOfBoundsIndex() { // Given: - List<Integer> list = ImmutableList.of(1, 2); + final List<Integer> list = ImmutableList.of(1, 2); // When: - Integer access = ArrayAccess.arrayAccess(list, 3); + final Integer access = ArrayAccess.arrayAccess(list, 3); // Then: assertThat(access, nullValue()); @@ -63,10 +64,10 @@ public void shouldReturnNullOnOutOfBoundsIndex() { @Test public void shouldReturnNullOnNegativeOutOfBoundsIndex() { // Given: - List<Integer> list = ImmutableList.of(1, 2); + final List<Integer> list = ImmutableList.of(1, 2); // When: - Integer access = ArrayAccess.arrayAccess(list, -3); + final Integer access = ArrayAccess.arrayAccess(list, -3); // Then: assertThat(access, nullValue()); diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/helpers/SearchedCaseFunctionTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/helpers/SearchedCaseFunctionTest.java index 9d8ea9597526..a3b255e3c302 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/helpers/SearchedCaseFunctionTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/helpers/SearchedCaseFunctionTest.java @@ -36,14 +36,14 @@ public class SearchedCaseFunctionTest { @Test public void shouldWorkForBooleanValues() { // Given: - List<LazyWhenClause<Boolean>> lazyWhenClauses = ImmutableList.of( + final List<LazyWhenClause<Boolean>> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> Boolean.TRUE), SearchedCaseFunction.whenClause(() -> false, () -> Boolean.FALSE), SearchedCaseFunction.whenClause(() -> true, () -> 
Boolean.TRUE) ); // When: - Boolean result = SearchedCaseFunction.searchedCaseFunction( + final Boolean result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> null ); @@ -55,7 +55,7 @@ public void shouldWorkForBooleanValues() { @Test public void shouldWorkForIntegerValues() { // Given: - List<LazyWhenClause<Integer>> lazyWhenClauses = ImmutableList.of( + final List<LazyWhenClause<Integer>> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> 1), SearchedCaseFunction.whenClause(() -> false, () -> 2), SearchedCaseFunction.whenClause(() -> true, () -> 3), @@ -63,7 +63,7 @@ public void shouldWorkForIntegerValues() { ); // When: - Integer result = SearchedCaseFunction.searchedCaseFunction( + final Integer result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> null ); @@ -75,7 +75,7 @@ public void shouldWorkForIntegerValues() { @Test public void shouldWorkForBigIntValues() { // Given: - List<LazyWhenClause<Long>> lazyWhenClauses = ImmutableList.of( + final List<LazyWhenClause<Long>> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> 1L), SearchedCaseFunction.whenClause(() -> false, () -> 2L), SearchedCaseFunction.whenClause(() -> false, () -> 3L), @@ -83,7 +83,7 @@ public void shouldWorkForBigIntValues() { ); // When: - Long result = SearchedCaseFunction.searchedCaseFunction( + final Long result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> null ); @@ -95,7 +95,7 @@ public void shouldWorkForBigIntValues() { @Test public void shouldWorkForDoubleValues() { // Given: - List<LazyWhenClause<Double>> lazyWhenClauses = ImmutableList.of( + final List<LazyWhenClause<Double>> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> 1.0), SearchedCaseFunction.whenClause(() -> false, () -> 2.0), SearchedCaseFunction.whenClause(() -> false, () -> 3.0), @@ -103,7 +103,7 @@ public void shouldWorkForDoubleValues() { ); // When: - Double result = SearchedCaseFunction.searchedCaseFunction( + final Double result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> null ); @@ -115,7 +115,7 @@ public void shouldWorkForDoubleValues() { @Test public void shouldWorkForStringValues() { // Given: - List<LazyWhenClause<String>> lazyWhenClauses = ImmutableList.of( + final List<LazyWhenClause<String>> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> "foo"), SearchedCaseFunction.whenClause(() -> false, () -> "bar"), SearchedCaseFunction.whenClause(() -> false, () -> "tab"), @@ -123,7 +123,7 @@ public void shouldWorkForStringValues() { ); // When: - String result = SearchedCaseFunction.searchedCaseFunction( + final String result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> null ); @@ -135,13 +135,13 @@ public void shouldWorkForStringValues() { @Test public void shouldWorkForArrayValues() { // Given: - List<LazyWhenClause<List<String>>> lazyWhenClauses = ImmutableList.of( + final List<LazyWhenClause<List<String>>> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> ImmutableList.of("foo", "bar")), SearchedCaseFunction.whenClause(() -> true, () -> ImmutableList.of("tab", "ksql")) ); // When: - List<String> result = SearchedCaseFunction.searchedCaseFunction( + final List<String> result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> null ); @@ -153,13 +153,13 @@ public void shouldWorkForArrayValues() { @Test public void shouldWorkForMapValues() { // Given: - List<LazyWhenClause<Map<String, Double>>> lazyWhenClauses = ImmutableList.of( + final List<LazyWhenClause<Map<String, Double>>> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> ImmutableMap.of("foo", 1.0)), SearchedCaseFunction.whenClause(() -> true, () -> 
ImmutableMap.of("tab", 2.0)) ); // When: - Map result = SearchedCaseFunction.searchedCaseFunction( + final Map result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> null ); @@ -171,13 +171,13 @@ public void shouldWorkForMapValues() { @Test public void shouldWorkForStructValues() { // Given: - List>> lazyWhenClauses = ImmutableList.of( + final List>> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> ImmutableMap.of("foo", 1.0)), SearchedCaseFunction.whenClause(() -> true, () -> ImmutableMap.of("tab", "ksql")) ); // When: - Map result = SearchedCaseFunction.searchedCaseFunction( + final Map result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> null ); @@ -189,7 +189,7 @@ public void shouldWorkForStructValues() { @Test public void shouldReturnDefaultIfNoMatch() { // Given: - List> lazyWhenClauses = ImmutableList.of( + final List> lazyWhenClauses = ImmutableList.of( SearchedCaseFunction.whenClause(() -> false, () -> 1), SearchedCaseFunction.whenClause(() -> false, () -> 2), SearchedCaseFunction.whenClause(() -> false, () -> 3), @@ -197,7 +197,7 @@ public void shouldReturnDefaultIfNoMatch() { ); // When: - Integer result = SearchedCaseFunction.searchedCaseFunction( + final Integer result = SearchedCaseFunction.searchedCaseFunction( lazyWhenClauses, () -> 10 ); diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatterTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatterTest.java index c44e684d622b..cc243a917806 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatterTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/expression/formatter/ExpressionFormatterTest.java @@ -136,14 +136,14 @@ public void shouldFormatQualifiedNameReference() { @Test public void shouldFormatDereferenceExpression() { // Given: - DereferenceExpression expression = new DereferenceExpression( + final DereferenceExpression expression = new DereferenceExpression( Optional.of(LOCATION), new StringLiteral("foo"), "name" ); // When: - String text = ExpressionFormatter.formatExpression(expression); + final String text = ExpressionFormatter.formatExpression(expression); // Then: assertThat(text, equalTo("'foo'->name")); @@ -151,7 +151,7 @@ public void shouldFormatDereferenceExpression() { @Test public void shouldFormatFunctionCallWithCount() { - FunctionCall functionCall = new FunctionCall(FunctionName.of("COUNT"), + final FunctionCall functionCall = new FunctionCall(FunctionName.of("COUNT"), Collections.singletonList(new StringLiteral("name"))); assertThat(ExpressionFormatter.formatExpression(functionCall), equalTo("COUNT('name')")); @@ -159,13 +159,13 @@ public void shouldFormatFunctionCallWithCount() { @Test public void shouldFormatFunctionCountStar() { - FunctionCall functionCall = new FunctionCall(FunctionName.of("COUNT"), Collections.emptyList()); + final FunctionCall functionCall = new FunctionCall(FunctionName.of("COUNT"), Collections.emptyList()); assertThat(ExpressionFormatter.formatExpression(functionCall), equalTo("COUNT(*)")); } @Test public void shouldFormatFunctionWithDistinct() { - FunctionCall functionCall = new FunctionCall( + final FunctionCall functionCall = new FunctionCall( FunctionName.of("COUNT"), Collections.singletonList(new StringLiteral("name"))); assertThat(ExpressionFormatter.formatExpression(functionCall), equalTo("COUNT('name')")); @@ 
-173,7 +173,7 @@ public void shouldFormatFunctionWithDistinct() { @Test public void shouldFormatLogicalBinaryExpression() { - LogicalBinaryExpression expression = new LogicalBinaryExpression(LogicalBinaryExpression.Type.AND, + final LogicalBinaryExpression expression = new LogicalBinaryExpression(LogicalBinaryExpression.Type.AND, new StringLiteral("a"), new StringLiteral("b")); assertThat(ExpressionFormatter.formatExpression(expression), equalTo("('a' AND 'b')")); @@ -221,19 +221,19 @@ public void shouldFormatArithmeticBinary() { @Test public void shouldFormatLikePredicate() { - LikePredicate predicate = new LikePredicate(new StringLiteral("string"), new StringLiteral("*")); + final LikePredicate predicate = new LikePredicate(new StringLiteral("string"), new StringLiteral("*")); assertThat(ExpressionFormatter.formatExpression(predicate), equalTo("('string' LIKE '*')")); } @Test public void shouldFormatCast() { // Given: - Cast cast = new Cast( + final Cast cast = new Cast( new LongLiteral(1), new Type(SqlTypes.DOUBLE)); // When: - String result = ExpressionFormatter.formatExpression(cast); + final String result = ExpressionFormatter.formatExpression(cast); // Then: assertThat(result, equalTo("CAST(1 AS DOUBLE)")); @@ -241,7 +241,7 @@ public void shouldFormatCast() { @Test public void shouldFormatSearchedCaseExpression() { - SearchedCaseExpression expression = new SearchedCaseExpression( + final SearchedCaseExpression expression = new SearchedCaseExpression( Collections.singletonList( new WhenClause(new StringLiteral("foo"), new LongLiteral(1))), @@ -251,7 +251,7 @@ public void shouldFormatSearchedCaseExpression() { @Test public void shouldFormatSearchedCaseExpressionWithDefaultValue() { - SearchedCaseExpression expression = new SearchedCaseExpression( + final SearchedCaseExpression expression = new SearchedCaseExpression( Collections.singletonList( new WhenClause(new StringLiteral("foo"), new LongLiteral(1))), @@ -261,7 +261,7 @@ public void shouldFormatSearchedCaseExpressionWithDefaultValue() { @Test public void shouldFormatSimpleCaseExpressionWithDefaultValue() { - SimpleCaseExpression expression = new SimpleCaseExpression( + final SimpleCaseExpression expression = new SimpleCaseExpression( new StringLiteral("operand"), Collections.singletonList( new WhenClause(new StringLiteral("foo"), @@ -272,7 +272,7 @@ public void shouldFormatSimpleCaseExpressionWithDefaultValue() { @Test public void shouldFormatSimpleCaseExpression() { - SimpleCaseExpression expression = new SimpleCaseExpression( + final SimpleCaseExpression expression = new SimpleCaseExpression( new StringLiteral("operand"), Collections.singletonList( new WhenClause(new StringLiteral("foo"), @@ -288,13 +288,13 @@ public void shouldFormatWhen() { @Test public void shouldFormatBetweenPredicate() { - BetweenPredicate predicate = new BetweenPredicate(new StringLiteral("blah"), new LongLiteral(5), new LongLiteral(10)); + final BetweenPredicate predicate = new BetweenPredicate(new StringLiteral("blah"), new LongLiteral(5), new LongLiteral(10)); assertThat(ExpressionFormatter.formatExpression(predicate), equalTo("('blah' BETWEEN 5 AND 10)")); } @Test public void shouldFormatInPredicate() { - InPredicate predicate = new InPredicate( + final InPredicate predicate = new InPredicate( new StringLiteral("foo"), new InListExpression(ImmutableList.of(new StringLiteral("a")))); @@ -308,7 +308,7 @@ public void shouldFormatInListExpression() { @Test public void shouldFormatStruct() { - SqlStruct struct = SqlStruct.builder() + final SqlStruct struct 
= SqlStruct.builder() .field("field1", SqlTypes.INTEGER) .field("field2", SqlTypes.STRING) .build(); @@ -320,7 +320,7 @@ public void shouldFormatStruct() { @Test public void shouldFormatStructWithColumnWithReservedWordName() { - SqlStruct struct = SqlStruct.builder() + final SqlStruct struct = SqlStruct.builder() .field("RESERVED", SqlTypes.INTEGER) .build(); @@ -331,14 +331,14 @@ public void shouldFormatStructWithColumnWithReservedWordName() { @Test public void shouldFormatMap() { - SqlMap map = SqlTypes.map(SqlTypes.BIGINT); + final SqlMap map = SqlTypes.map(SqlTypes.BIGINT); assertThat(ExpressionFormatter.formatExpression(new Type(map)), equalTo("MAP<BIGINT>")); } @Test public void shouldFormatArray() { - SqlArray array = SqlTypes.array(SqlTypes.BOOLEAN); + final SqlArray array = SqlTypes.array(SqlTypes.BOOLEAN); assertThat(ExpressionFormatter.formatExpression(new Type(array)), equalTo("ARRAY<BOOLEAN>")); } } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdafUtilTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdafUtilTest.java index 89a1633409b9..e3b8622b9e42 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdafUtilTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdafUtilTest.java @@ -68,7 +68,7 @@ public void init() { @Test public void shouldResolveUDAF() { // When: - KsqlAggregateFunction returned = + final KsqlAggregateFunction returned = UdafUtil.resolveAggregateFunction(functionRegistry, FUNCTION_CALL, SCHEMA); // Then: diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdfUtilTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdfUtilTest.java index 7a6a6db34885..b8a1a8c7dcd9 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdfUtilTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdfUtilTest.java @@ -41,37 +41,37 @@ public class UdfUtilTest { @Test public void shouldPassIfArgsAreCorrect() { - Object[] args = new Object[]{"TtestArg1", 10L}; + final Object[] args = new Object[]{"TtestArg1", 10L}; UdfUtil.ensureCorrectArgs(FUNCTION_NAME, args, String.class, Long.class); } @Test(expected = KsqlException.class) public void shouldFailIfTypeIsIncorrect() { - Object[] args = new Object[]{"TtestArg1", 10L}; + final Object[] args = new Object[]{"TtestArg1", 10L}; UdfUtil.ensureCorrectArgs(FUNCTION_NAME, args, String.class, Boolean.class); } @Test(expected = KsqlException.class) public void shouldFailIfArgCountIsTooFew() { - Object[] args = new Object[]{"TtestArg1", 10L}; + final Object[] args = new Object[]{"TtestArg1", 10L}; UdfUtil.ensureCorrectArgs(FUNCTION_NAME, args, String.class, Boolean.class, String.class); } @Test(expected = KsqlException.class) public void shouldFailIfArgCountIsTooMany() { - Object[] args = new Object[]{"TtestArg1", 10L}; + final Object[] args = new Object[]{"TtestArg1", 10L}; UdfUtil.ensureCorrectArgs(FUNCTION_NAME, args, String.class); } @Test public void shouldPassWithNullArgs() { - Object[] args = new Object[]{"TtestArg1", null}; + final Object[] args = new Object[]{"TtestArg1", null}; UdfUtil.ensureCorrectArgs(FUNCTION_NAME, args, String.class, Long.class); } @Test public void shouldHandleSubTypes() { - Object[] args = new Object[]{1.345, 55}; + final Object[] args = new Object[]{1.345, 55}; UdfUtil.ensureCorrectArgs(FUNCTION_NAME, args, Number.class, Number.class); } @@ -149,18 +149,18 @@ public void 
shouldGetDecimalSchemaForBigDecimalClass() { @Test public void shouldGetMapSchemaFromMapClass() throws NoSuchMethodException { - Type type = getClass().getDeclaredMethod("mapType", Map.class) + final Type type = getClass().getDeclaredMethod("mapType", Map.class) .getGenericParameterTypes()[0]; - ParamType schema = UdfUtil.getSchemaFromType(type); + final ParamType schema = UdfUtil.getSchemaFromType(type); assertThat(schema, instanceOf(MapType.class)); assertThat(((MapType) schema).value(), equalTo(ParamTypes.INTEGER)); } @Test public void shouldGetArraySchemaFromListClass() throws NoSuchMethodException { - Type type = getClass().getDeclaredMethod("listType", List.class) + final Type type = getClass().getDeclaredMethod("listType", List.class) .getGenericParameterTypes()[0]; - ParamType schema = UdfUtil.getSchemaFromType(type); + final ParamType schema = UdfUtil.getSchemaFromType(type); assertThat(schema, instanceOf(ArrayType.class)); assertThat(((ArrayType) schema).element(), equalTo(ParamTypes.DOUBLE)); } @@ -181,10 +181,10 @@ public void shouldThrowExceptionIfClassDoesntMapToSchema() { @Test public void shouldGetGenericSchemaFromType() throws NoSuchMethodException { // Given: - Type genericType = getClass().getMethod("genericType").getGenericReturnType(); + final Type genericType = getClass().getMethod("genericType").getGenericReturnType(); // When: - ParamType returnType = UdfUtil.getSchemaFromType(genericType); + final ParamType returnType = UdfUtil.getSchemaFromType(genericType); // Then: MatcherAssert.assertThat(returnType, CoreMatchers.is(GenericType.of("T"))); @@ -193,10 +193,10 @@ public void shouldGetGenericSchemaFromType() throws NoSuchMethodException { @Test public void shouldGetGenericSchemaFromParameterizedType() throws NoSuchMethodException { // Given: - Type genericType = getClass().getMethod("genericMapType").getGenericReturnType(); + final Type genericType = getClass().getMethod("genericMapType").getGenericReturnType(); // When: - ParamType returnType = UdfUtil.getSchemaFromType(genericType); + final ParamType returnType = UdfUtil.getSchemaFromType(genericType); // Then: assertThat(returnType, is(MapType.of(GenericType.of("T")))); @@ -219,10 +219,10 @@ public Map genericMapType() { } @SuppressWarnings("unused") - private void mapType(Map<String, Integer> map) { + private void mapType(final Map<String, Integer> map) { } @SuppressWarnings("unused") - private void listType(List<Double> list) { + private void listType(final List<Double> list) { } } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/function/udtf/KudtfFlatMapperTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/function/udtf/KudtfFlatMapperTest.java index aa5987d16cfb..c6c9ce41bc3a 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/function/udtf/KudtfFlatMapperTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/function/udtf/KudtfFlatMapperTest.java @@ -44,14 +44,14 @@ public class KudtfFlatMapperTest { @Test public void shouldFlatMapOneFunction() { // Given: - TableFunctionApplier applier = createApplier(Arrays.asList(10, 10, 10)); - KudtfFlatMapper flatMapper = new KudtfFlatMapper<>(ImmutableList.of(applier)); + final TableFunctionApplier applier = createApplier(Arrays.asList(10, 10, 10)); + final KudtfFlatMapper flatMapper = new KudtfFlatMapper<>(ImmutableList.of(applier)); // When: - Iterable<GenericRow> iterable = flatMapper.transform(KEY, VALUE, ctx); + final Iterable<GenericRow> iterable = flatMapper.transform(KEY, VALUE, ctx); // Then: - Iterator<GenericRow> iter = iterable.iterator(); + final Iterator<GenericRow> iter = 
iterable.iterator(); assertThat(iter.next().getColumns(), is(Arrays.asList(1, 2, 3, 10))); assertThat(iter.next().getColumns(), is(Arrays.asList(1, 2, 3, 10))); assertThat(iter.next().getColumns(), is(Arrays.asList(1, 2, 3, 10))); @@ -61,23 +61,23 @@ public void shouldFlatMapOneFunction() { @Test public void shouldZipTwoFunctions() { // Given: - TableFunctionApplier applier1 = createApplier(Arrays.asList(10, 10, 10)); - TableFunctionApplier applier2 = createApplier(Arrays.asList(20, 20)); - KudtfFlatMapper flatMapper = new KudtfFlatMapper<>(ImmutableList.of(applier1, applier2)); + final TableFunctionApplier applier1 = createApplier(Arrays.asList(10, 10, 10)); + final TableFunctionApplier applier2 = createApplier(Arrays.asList(20, 20)); + final KudtfFlatMapper flatMapper = new KudtfFlatMapper<>(ImmutableList.of(applier1, applier2)); // When: - Iterable<GenericRow> iterable = flatMapper.transform(KEY, VALUE, ctx); + final Iterable<GenericRow> iterable = flatMapper.transform(KEY, VALUE, ctx); // Then: - Iterator<GenericRow> iter = iterable.iterator(); + final Iterator<GenericRow> iter = iterable.iterator(); assertThat(iter.next().getColumns(), is(Arrays.asList(1, 2, 3, 10, 20))); assertThat(iter.next().getColumns(), is(Arrays.asList(1, 2, 3, 10, 20))); assertThat(iter.next().getColumns(), is(Arrays.asList(1, 2, 3, 10, null))); assertThat(iter.hasNext(), is(false)); } - private static TableFunctionApplier createApplier(List<Integer> list) { - TableFunctionApplier applier = mock(TableFunctionApplier.class); + private static TableFunctionApplier createApplier(final List<Integer> list) { + final TableFunctionApplier applier = mock(TableFunctionApplier.class); doReturn(list).when(applier).apply(any()); return applier; } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/testutil/TestExpressions.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/testutil/TestExpressions.java index 6e942d607b76..d02dbbd173e4 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/testutil/TestExpressions.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/testutil/TestExpressions.java @@ -46,11 +46,11 @@ private TestExpressions() { public static final ColumnReferenceExp MAPCOL = columnRef(TEST1, "COL5"); public static final ColumnReferenceExp COL7 = columnRef(TEST1, "COL7"); - private static ColumnReferenceExp columnRef(String source, String name) { + private static ColumnReferenceExp columnRef(final String source, final String name) { return new ColumnReferenceExp(ColumnRef.of(SourceName.of(source), ColumnName.of(name))); } - public static Expression literal(int value) { + public static Expression literal(final int value) { return new IntegerLiteral(value); } } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicateTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicateTest.java index f8f3f7630d8f..3989add8d7b6 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicateTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicateTest.java @@ -135,7 +135,7 @@ public void shouldNotPassFilter() { @Test public void shouldIgnoreNullRows() { // Given: - KsqlTransformer<Object, Optional<GenericRow>> predicate = givenSqlPredicateFor( + final KsqlTransformer<Object, Optional<GenericRow>> predicate = givenSqlPredicateFor( new ComparisonExpression(Type.GREATER_THAN, COL0, new IntegerLiteral(100))); // When/Then: @@ -145,7 +145,7 @@ public void 
shouldWriteProcessingLogOnError() { // Given: - KsqlTransformer<Object, Optional<GenericRow>> predicate = givenSqlPredicateFor( + final KsqlTransformer<Object, Optional<GenericRow>> predicate = givenSqlPredicateFor( new ComparisonExpression(Type.GREATER_THAN, COL0, new IntegerLiteral(100))); // When: @@ -156,17 +156,17 @@ ); // Then: - ArgumentCaptor<Function<ProcessingLogConfig, SchemaAndValue>> captor + final ArgumentCaptor<Function<ProcessingLogConfig, SchemaAndValue>> captor = ArgumentCaptor.forClass(Function.class); verify(processingLogger).error(captor.capture()); - SchemaAndValue schemaAndValue = captor.getValue().apply(processingLogConfig); + final SchemaAndValue schemaAndValue = captor.getValue().apply(processingLogConfig); assertThat(schemaAndValue.schema(), equalTo(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA)); - Struct struct = (Struct) schemaAndValue.value(); + final Struct struct = (Struct) schemaAndValue.value(); assertThat( struct.get(ProcessingLogMessageSchema.TYPE), equalTo(MessageType.RECORD_PROCESSING_ERROR.ordinal()) ); - Struct errorStruct + final Struct errorStruct = struct.getStruct(ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR); assertThat( errorStruct.get(ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR_FIELD_MESSAGE), @@ -190,7 +190,7 @@ private KsqlTransformer<Object, Optional<GenericRow>> givenSqlPredicateFor( public static class LenDummy implements Kudf { @Override - public Object evaluate(Object... args) { + public Object evaluate(final Object... args) { throw new IllegalStateException(); } } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/window/WindowSelectMapperTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/window/WindowSelectMapperTest.java index 65496afa159a..c69f10185fbd 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/window/WindowSelectMapperTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/window/WindowSelectMapperTest.java @@ -82,11 +82,11 @@ public void shouldUpdateRowWithWindowBounds() { ImmutableList.of(otherFunc, windowStartFunc, windowEndFunc, windowStartFunc) ).getTransformer(); - Window window = new SessionWindow(12345L, 54321L); - GenericRow row = new GenericRow(Arrays.asList(0, 1, 2, 3, 4, 5)); + final Window window = new SessionWindow(12345L, 54321L); + final GenericRow row = new GenericRow(Arrays.asList(0, 1, 2, 3, 4, 5)); // When: - GenericRow result = mapper.transform(new Windowed<>("k", window), row, ctx); + final GenericRow result = mapper.transform(new Windowed<>("k", window), row, ctx); // Then: assertThat(result, is(sameInstance(row))); @@ -101,8 +101,8 @@ public void shouldThrowIfRowNotBigEnough() { ImmutableList.of(windowStartFunc) ).getTransformer(); - Window window = new SessionWindow(12345L, 54321L); - GenericRow row = new GenericRow(new ArrayList<>()); + final Window window = new SessionWindow(12345L, 54321L); + final GenericRow row = new GenericRow(new ArrayList<>()); // When: mapper.transform(new Windowed<>("k", window), row, ctx); diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ComparisonUtilTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ComparisonUtilTest.java index 1aad05eae94a..6b774fa101a1 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ComparisonUtilTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ComparisonUtilTest.java @@ -70,8 +70,8 @@ public void shouldAssertTrueForValidComparisons() { // When: int i = 0; int j = 0; - for (SqlType leftType: typesTable) { - for (SqlType rightType: typesTable) { + 
for (final SqlType leftType: typesTable) { + for (final SqlType rightType: typesTable) { if (expectedResults.get(i).get(j)) { assertThat( ComparisonUtil.isValidComparison(leftType, ComparisonExpression.Type.EQUAL, rightType) @@ -90,13 +90,13 @@ public void shouldThrowForInvalidComparisons() { // When: int i = 0; int j = 0; - for (SqlType leftType: typesTable) { - for (SqlType rightType: typesTable) { + for (final SqlType leftType: typesTable) { + for (final SqlType rightType: typesTable) { if (!expectedResults.get(i).get(j)) { try { ComparisonUtil.isValidComparison(leftType, ComparisonExpression.Type.EQUAL, rightType); assertThat("fail", false); - } catch (KsqlException e) { + } catch (final KsqlException e) { assertThat(e.getMessage(), is("Operator EQUAL cannot be used to compare " + SCHEMA_TO_SQL_NAME[i] + " and " diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/EngineProcessingLogMessageFactoryTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/EngineProcessingLogMessageFactoryTest.java index 87fb2f08051a..8d86aaa1b88a 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/EngineProcessingLogMessageFactoryTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/EngineProcessingLogMessageFactoryTest.java @@ -33,18 +33,18 @@ public class EngineProcessingLogMessageFactoryTest { @SuppressWarnings("unchecked") public void shouldBuildRecordProcessingErrorCorrectly() throws IOException { // When: - SchemaAndValue msgAndSchema = EngineProcessingLogMessageFactory.recordProcessingError( + final SchemaAndValue msgAndSchema = EngineProcessingLogMessageFactory.recordProcessingError( errorMsg, error, new GenericRow(123, "data") ).apply(config); // Then: assertThat(msgAndSchema.schema(), equalTo(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA)); - Struct msg = (Struct) msgAndSchema.value(); + final Struct msg = (Struct) msgAndSchema.value(); assertThat( msg.get(ProcessingLogMessageSchema.TYPE), equalTo(MessageType.RECORD_PROCESSING_ERROR.getTypeId())); assertThat(msg.get(ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR), notNullValue()); - Struct recordProcessingError = + final Struct recordProcessingError = msg.getStruct(ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR); assertThat( recordProcessingError.get( @@ -54,7 +54,7 @@ errorMsg, error, new GenericRow(123, "data") recordProcessingError.get( ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR_FIELD_CAUSE), equalTo(ErrorMessageUtil.getErrorMessages(cause))); - List<Object> rowAsList = + final List<Object> rowAsList = OBJECT_MAPPER.readValue( recordProcessingError.getString( ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR_FIELD_RECORD), @@ -66,13 +66,13 @@ errorMsg, error, new GenericRow(123, "data") @Test public void shouldBuildRecordProcessingErrorCorrectlyIfRowNull() { // When: - SchemaAndValue msgAndSchema = EngineProcessingLogMessageFactory.recordProcessingError( + final SchemaAndValue msgAndSchema = EngineProcessingLogMessageFactory.recordProcessingError( errorMsg, error, null ).apply(config); // Then: - Struct msg = (Struct) msgAndSchema.value(); - Struct recordProcessingError = + final Struct msg = (Struct) msgAndSchema.value(); + final Struct recordProcessingError = msg.getStruct(ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR); assertThat( recordProcessingError.get( @@ -83,18 +83,18 @@ public void shouldBuildRecordProcessingErrorWithNullRowIfIncludeRowsFalse() { // Given: - 
ProcessingLogConfig config = new ProcessingLogConfig( + final ProcessingLogConfig config = new ProcessingLogConfig( Collections.singletonMap(ProcessingLogConfig.INCLUDE_ROWS, false) ); // When: - SchemaAndValue msgAndSchema = EngineProcessingLogMessageFactory.recordProcessingError( + final SchemaAndValue msgAndSchema = EngineProcessingLogMessageFactory.recordProcessingError( errorMsg, error, new GenericRow(123, "data") ).apply(config); // Then: - Struct msg = (Struct) msgAndSchema.value(); - Struct recordProcessingError = + final Struct msg = (Struct) msgAndSchema.value(); + final Struct recordProcessingError = msg.getStruct(ProcessingLogMessageSchema.RECORD_PROCESSING_ERROR); assertThat( recordProcessingError.get( diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java index 52937d157dd6..37b41aab9cc3 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java @@ -104,8 +104,8 @@ public class ExpressionTypeManagerTest { public void init() { expressionTypeManager = new ExpressionTypeManager(SCHEMA, functionRegistry); - UdfFactory internalFactory = mock(UdfFactory.class); - UdfMetadata metadata = mock(UdfMetadata.class); + final UdfFactory internalFactory = mock(UdfFactory.class); + final UdfMetadata metadata = mock(UdfMetadata.class); when(internalFactory.getMetadata()).thenReturn(metadata); when(functionRegistry.getUdfFactory(anyString())) @@ -114,52 +114,52 @@ public void init() { @Test public void shouldResolveTypeForAddBigIntDouble() { - Expression expression = new ArithmeticBinaryExpression(Operator.ADD, TestExpressions.COL0, + final Expression expression = new ArithmeticBinaryExpression(Operator.ADD, TestExpressions.COL0, COL3 ); - SqlType type = expressionTypeManager.getExpressionSqlType(expression); + final SqlType type = expressionTypeManager.getExpressionSqlType(expression); assertThat(type, is(SqlTypes.DOUBLE)); } @Test public void shouldResolveTypeForAddDoubleIntegerLiteral() { - Expression expression = new ArithmeticBinaryExpression(Operator.ADD, COL3, literal(10)); + final Expression expression = new ArithmeticBinaryExpression(Operator.ADD, COL3, literal(10)); - SqlType type = expressionTypeManager.getExpressionSqlType(expression); + final SqlType type = expressionTypeManager.getExpressionSqlType(expression); assertThat(type, is(SqlTypes.DOUBLE)); } @Test public void shouldResolveTypeForAddBigintIntegerLiteral() { - Expression expression = new ArithmeticBinaryExpression(Operator.ADD, TestExpressions.COL0, + final Expression expression = new ArithmeticBinaryExpression(Operator.ADD, TestExpressions.COL0, literal(10) ); - SqlType type = expressionTypeManager.getExpressionSqlType(expression); + final SqlType type = expressionTypeManager.getExpressionSqlType(expression); assertThat(type, is(SqlTypes.BIGINT)); } @Test public void shouldResolveTypeForMultiplyBigintIntegerLiteral() { - Expression expression = + final Expression expression = new ArithmeticBinaryExpression(Operator.MULTIPLY, TestExpressions.COL0, literal(10)); - SqlType type = expressionTypeManager.getExpressionSqlType(expression); + final SqlType type = expressionTypeManager.getExpressionSqlType(expression); assertThat(type, is(SqlTypes.BIGINT)); } @Test public void testComparisonExpr() { - Expression expression = new 
ComparisonExpression(Type.GREATER_THAN, TestExpressions.COL0,
+    final Expression expression = new ComparisonExpression(Type.GREATER_THAN, TestExpressions.COL0,
         COL3
     );
 
-    SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);
 
     assertThat(exprType, is(SqlTypes.BOOLEAN));
   }
 
@@ -167,7 +167,7 @@ public void testComparisonExpr() {
   @Test
   public void shouldFailIfComparisonOperandsAreIncompatible() {
     // Given:
-    ComparisonExpression expr = new ComparisonExpression(Type.GREATER_THAN,
+    final ComparisonExpression expr = new ComparisonExpression(Type.GREATER_THAN,
         TestExpressions.COL0, COL1
     );
     expectedException.expect(KsqlException.class);
@@ -182,7 +182,7 @@ public void shouldFailIfComparisonOperandsAreIncompatible() {
   @Test
   public void shouldFailIfOperatorCannotBeAppiled() {
     // Given:
-    ComparisonExpression expr = new ComparisonExpression(
+    final ComparisonExpression expr = new ComparisonExpression(
         Type.GREATER_THAN,
         new BooleanLiteral("true"),
         new BooleanLiteral("false")
@@ -198,7 +198,7 @@ public void shouldFailIfOperatorCannotBeAppiled() {
   @Test
   public void shouldFailForComplexTypeComparison() {
     // Given:
-    Expression expression = new ComparisonExpression(Type.GREATER_THAN, MAPCOL, ADDRESS);
+    final Expression expression = new ComparisonExpression(Type.GREATER_THAN, MAPCOL, ADDRESS);
     expectedException.expect(KsqlException.class);
     expectedException
         .expectMessage("Operator GREATER_THAN cannot be used to compare MAP and STRUCT");
@@ -210,7 +210,7 @@ public void shouldFailForComplexTypeComparison() {
   @Test
   public void shouldFailForCheckingComplexTypeEquality() {
     // Given:
-    Expression expression = new ComparisonExpression(Type.EQUAL, MAPCOL, ADDRESS);
+    final Expression expression = new ComparisonExpression(Type.EQUAL, MAPCOL, ADDRESS);
     expectedException.expect(KsqlException.class);
     expectedException.expectMessage("Operator EQUAL cannot be used to compare MAP and STRUCT");
 
@@ -221,18 +221,18 @@
   @Test
   public void shouldEvaluateBooleanSchemaForLikeExpression() {
-    Expression expression = new LikePredicate(COL1, new StringLiteral("%foo"));
+    final Expression expression = new LikePredicate(COL1, new StringLiteral("%foo"));
 
-    SqlType exprType0 = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType exprType0 = expressionTypeManager.getExpressionSqlType(expression);
 
     assertThat(exprType0, is(SqlTypes.BOOLEAN));
   }
 
   @Test
   public void shouldEvaluateBooleanSchemaForNotLikeExpression() {
-    Expression expression =
+    final Expression expression =
         new NotExpression(new LikePredicate(COL1, new StringLiteral("%foo")));
 
-    SqlType exprType0 = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType exprType0 = expressionTypeManager.getExpressionSqlType(expression);
 
     assertThat(exprType0, is(SqlTypes.BOOLEAN));
   }
 
@@ -240,11 +240,11 @@ public void shouldEvaluateBooleanSchemaForNotLikeExpression() {
   public void shouldEvaluateTypeForUDF() {
     // Given:
     givenUdfWithNameAndReturnType("FLOOR", SqlTypes.DOUBLE);
-    Expression expression =
+    final Expression expression =
         new FunctionCall(FunctionName.of("FLOOR"), ImmutableList.of(COL3));
 
     // When:
-    SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);
 
     // Then:
     assertThat(exprType, is(SqlTypes.DOUBLE));
@@ -256,11 +256,11 @@ public void shouldEvaluateTypeForStringUDF() {
     // Given:
     givenUdfWithNameAndReturnType("LCASE", SqlTypes.STRING);
-    Expression expression =
+    final Expression expression =
         new FunctionCall(FunctionName.of("LCASE"), ImmutableList.of(COL2));
 
     // When:
-    SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);
 
     // Then:
     assertThat(exprType, is(SqlTypes.STRING));
@@ -272,14 +272,14 @@ public void shouldEvaluateTypeForStringUDF() {
   public void shouldHandleNestedUdfs() {
     // Given:
     givenUdfWithNameAndReturnType("EXTRACTJSONFIELD", SqlTypes.STRING);
-    UdfFactory outerFactory = mock(UdfFactory.class);
-    KsqlScalarFunction function = mock(KsqlScalarFunction.class);
+    final UdfFactory outerFactory = mock(UdfFactory.class);
+    final KsqlScalarFunction function = mock(KsqlScalarFunction.class);
     givenUdfWithNameAndReturnType("LCASE", SqlTypes.STRING, outerFactory, function);
-    Expression inner = new FunctionCall(
+    final Expression inner = new FunctionCall(
         FunctionName.of("EXTRACTJSONFIELD"),
         ImmutableList.of(COL1, new StringLiteral("$.name)"))
     );
-    Expression expression =
+    final Expression expression =
         new FunctionCall(FunctionName.of("LCASE"), ImmutableList.of(inner));
 
     // When/Then:
@@ -289,14 +289,14 @@ public void shouldHandleNestedUdfs() {
   @Test
   public void shouldHandleStructFieldDereference() {
     // Given:
-    Expression expression = new DereferenceExpression(
+    final Expression expression = new DereferenceExpression(
         Optional.empty(),
         new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL6"))),
         "STREET"
     );
 
     // When:
-    SqlType result = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType result = expressionTypeManager.getExpressionSqlType(expression);
 
     assertThat(result, is(SqlTypes.STRING));
   }
@@ -304,7 +304,7 @@ public void shouldHandleStructFieldDereference() {
   @Test
   public void shouldFailIfThereIsInvalidFieldNameInStructCall() {
     // Given:
-    Expression expression = new DereferenceExpression(
+    final Expression expression = new DereferenceExpression(
         Optional.empty(),
         new ColumnReferenceExp(ColumnRef.of(TEST1, ColumnName.of("COL6"))),
         "ZIP"
@@ -321,12 +321,12 @@ public void shouldFailIfThereIsInvalidFieldNameInStructCall() {
   @Test
   public void shouldEvaluateTypeForStructExpression() {
     // Given:
-    LogicalSchema schema = LogicalSchema.builder()
+    final LogicalSchema schema = LogicalSchema.builder()
         .valueColumn(TEST1, COL0, SqlTypes.array(SqlTypes.INTEGER))
         .build();
 
     expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry);
-    Expression exp = new CreateStructExpression(ImmutableList.of(
+    final Expression exp = new CreateStructExpression(ImmutableList.of(
         new Field("field1", new StringLiteral("foo")),
         new Field("field2", new ColumnReferenceExp(ColumnRef.of(TEST1, COL0))),
         new Field("field3", new CreateStructExpression(ImmutableList.of()))
@@ -347,22 +347,22 @@ public void shouldEvaluateTypeForStructExpression() {
   @Test
   public void shouldEvaluateTypeForStructDereferenceInArray() {
     // Given:
-    SqlStruct inner = SqlTypes.struct().field("IN0", SqlTypes.INTEGER).build();
+    final SqlStruct inner = SqlTypes.struct().field("IN0", SqlTypes.INTEGER).build();
 
-    LogicalSchema schema = LogicalSchema.builder()
+    final LogicalSchema schema = LogicalSchema.builder()
         .valueColumn(TEST1, COL0, SqlTypes.array(inner))
         .build();
 
     expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry);
 
-    Expression expression = new DereferenceExpression(
+    final Expression expression = new DereferenceExpression(
         Optional.empty(),
         new SubscriptExpression(TestExpressions.COL0, new IntegerLiteral(1)),
         "IN0"
     );
 
     // When:
-    SqlType result = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType result = expressionTypeManager.getExpressionSqlType(expression);
 
     // Then:
     assertThat(result, is(SqlTypes.INTEGER));
@@ -371,27 +371,27 @@ public void shouldEvaluateTypeForStructDereferenceInArray() {
   @Test
   public void shouldEvaluateTypeForArrayReferenceInStruct() {
     // Given:
-    SqlStruct inner = SqlTypes
+    final SqlStruct inner = SqlTypes
         .struct()
         .field("IN0", SqlTypes.array(SqlTypes.INTEGER))
         .build();
 
-    LogicalSchema schema = LogicalSchema.builder()
+    final LogicalSchema schema = LogicalSchema.builder()
         .valueColumn(TEST1, COL0, inner)
         .build();
 
     expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry);
 
-    Expression structRef = new DereferenceExpression(
+    final Expression structRef = new DereferenceExpression(
         Optional.empty(),
         new ColumnReferenceExp(ColumnRef.of(TEST1, COL0)),
         "IN0"
     );
 
-    Expression expression = new SubscriptExpression(structRef, new IntegerLiteral(1));
+    final Expression expression = new SubscriptExpression(structRef, new IntegerLiteral(1));
 
     // When:
-    SqlType result = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType result = expressionTypeManager.getExpressionSqlType(expression);
 
     // Then:
     assertThat(result, is(SqlTypes.INTEGER));
@@ -400,7 +400,7 @@ public void shouldEvaluateTypeForArrayReferenceInStruct() {
   @Test
   public void shouldGetCorrectSchemaForSearchedCase() {
     // Given:
-    Expression expression = new SearchedCaseExpression(
+    final Expression expression = new SearchedCaseExpression(
         ImmutableList.of(
             new WhenClause(
                 new ComparisonExpression(Type.LESS_THAN, COL7, new IntegerLiteral(10)),
@@ -415,7 +415,7 @@
     );
 
     // When:
-    SqlType result =
+    final SqlType result =
         expressionTypeManager.getExpressionSqlType(expression);
 
     // Then:
@@ -426,7 +426,7 @@
   @Test
   public void shouldGetCorrectSchemaForSearchedCaseWhenStruct() {
     // Given:
-    Expression expression = new SearchedCaseExpression(
+    final Expression expression = new SearchedCaseExpression(
         ImmutableList.of(
             new WhenClause(
                 new ComparisonExpression(Type.EQUAL, TestExpressions.COL0, new IntegerLiteral(10)),
@@ -437,17 +437,17 @@
     );
 
     // When:
-    SqlType result = expressionTypeManager.getExpressionSqlType(expression);
+    final SqlType result = expressionTypeManager.getExpressionSqlType(expression);
 
     // Then:
-    SqlType sqlType = SCHEMA.findColumn(ADDRESS.getReference()).get().type();
+    final SqlType sqlType = SCHEMA.findColumn(ADDRESS.getReference()).get().type();
     assertThat(result, is(sqlType));
   }
 
   @Test
   public void shouldFailIfWhenIsNotBoolean() {
     // Given:
-    Expression expression = new SearchedCaseExpression(
+    final Expression expression = new SearchedCaseExpression(
         ImmutableList.of(
             new WhenClause(
                 new ArithmeticBinaryExpression(Operator.ADD, TestExpressions.COL0,
@@ -472,7 +472,7 @@
   @Test
   public void shouldFailOnInconsistentWhenResultType() {
     // Given:
-    Expression expression = new SearchedCaseExpression(
+    final Expression expression = new SearchedCaseExpression(
         ImmutableList.of(
             new WhenClause(
                 new ComparisonExpression(Type.EQUAL, TestExpressions.COL0, new IntegerLiteral(100)),
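// Illustrative sketch, not part of this patch: every hunk in this file applies
// the same final-local-variable style rule -- a local that is assigned exactly
// once is declared final, so the compiler enforces that it is never reassigned.
// In isolation:
class FinalLocalsSketch {
  static java.util.List<String> greetings() {
    final java.util.List<String> greetings = new java.util.ArrayList<>();
    greetings.add("hello");           // mutating the referenced object is still allowed
    // greetings = new ArrayList<>(); // reassignment would now fail to compile
    return greetings;
  }
}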
@@ -502,7 +502,7 @@ public void shouldFailOnInconsistentWhenResultType() {
   @Test
   public void shouldFailIfDefaultHasDifferentTypeToWhen() {
     // Given:
-    Expression expression = new SearchedCaseExpression(
+    final Expression expression = new SearchedCaseExpression(
         ImmutableList.of(
             new WhenClause(
                 new ComparisonExpression(Type.EQUAL, TestExpressions.COL0, new IntegerLiteral(10)),
@@ -545,7 +545,7 @@ public void shouldThrowOnTimestampLiteral() {
   @Test
   public void shouldThrowOnIn() {
     // Given:
-    Expression expression = new InPredicate(
+    final Expression expression = new InPredicate(
         TestExpressions.COL0,
         new InListExpression(ImmutableList.of(new IntegerLiteral(1), new IntegerLiteral(2)))
     );
@@ -559,7 +559,7 @@ public void shouldThrowOnIn() {
   @Test
   public void shouldThrowOnSimpleCase() {
-    Expression expression = new SimpleCaseExpression(
+    final Expression expression = new SimpleCaseExpression(
         TestExpressions.COL0,
         ImmutableList.of(new WhenClause(new IntegerLiteral(10), new StringLiteral("ten"))),
         Optional.empty()
     );
@@ -572,18 +572,18 @@
     expressionTypeManager.getExpressionSqlType(expression);
   }
 
-  private void givenUdfWithNameAndReturnType(String name, SqlType returnType) {
+  private void givenUdfWithNameAndReturnType(final String name, final SqlType returnType) {
     givenUdfWithNameAndReturnType(name, returnType, udfFactory, function);
   }
 
   private void givenUdfWithNameAndReturnType(
-      String name, SqlType returnType, UdfFactory factory, KsqlScalarFunction function
+      final String name, final SqlType returnType, final UdfFactory factory, final KsqlScalarFunction function
   ) {
     when(functionRegistry.isAggregate(name)).thenReturn(false);
     when(functionRegistry.getUdfFactory(name)).thenReturn(factory);
     when(factory.getFunction(anyList())).thenReturn(function);
     when(function.getReturnType(anyList())).thenReturn(returnType);
-    UdfMetadata metadata = mock(UdfMetadata.class);
+    final UdfMetadata metadata = mock(UdfMetadata.class);
     when(factory.getMetadata()).thenReturn(metadata);
   }
 }
diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/json/ValueSpecJsonSerdeSupplier.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/json/ValueSpecJsonSerdeSupplier.java
index dfc099b342e1..71fa0faf18e4 100644
--- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/json/ValueSpecJsonSerdeSupplier.java
+++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/json/ValueSpecJsonSerdeSupplier.java
@@ -155,7 +155,7 @@ private JsonNode convert(final Object o) {
 
   private static JsonNode handleCollection(final Collection collection) {
     final ArrayNode list = JsonNodeFactory.instance.arrayNode();
-    for (Object element : collection) {
+    for (final Object element : collection) {
       list.add(toJsonNode(element));
     }
     return list;
   }
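// Illustrative sketch, not part of this patch: the handleCollection change above
// iterates a Collection with a final loop variable while building a Jackson
// array node. A minimal standalone version, assuming jackson-databind is on the
// classpath:
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.Arrays;
import java.util.Collection;

class ArrayNodeSketch {
  static ArrayNode toArrayNode(final Collection<String> values) {
    final ArrayNode list = JsonNodeFactory.instance.arrayNode();
    for (final String element : values) {  // final guards against reassignment in the loop body
      list.add(TextNode.valueOf(element));
    }
    return list;
  }

  public static void main(final String[] args) {
    System.out.println(toArrayNode(Arrays.asList("a", "b")));  // prints ["a","b"]
  }
}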
diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/KafkaStreamsInternalTopicsAccessor.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/KafkaStreamsInternalTopicsAccessor.java
index f51fb7c2f1a1..7d9ac83470f9 100644
--- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/KafkaStreamsInternalTopicsAccessor.java
+++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/KafkaStreamsInternalTopicsAccessor.java
@@ -36,7 +36,7 @@ static Set getInternalTopics(
   ) {
     try {
       return (Set) INTERNAL_TOPICS_FIELD.get(topologyTestDriver);
-    } catch (IllegalAccessException e) {
+    } catch (final IllegalAccessException e) {
       throw new AssertionError("Failed to get internal topic names", e);
     }
   }
diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java
index d4bcda5c6e27..49f774fc5099 100644
--- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java
+++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java
@@ -396,7 +396,7 @@ private Deserializer getKeyDeserializer(final String topicName) {
     final SerdeSupplier keySerdeSupplier = SerdeUtil
         .getKeySerdeSupplier(topicInfo.getKeyFormat(), topicInfo::getSchema);
 
-    Deserializer deserializer = keySerdeSupplier.getDeserializer(
+    final Deserializer deserializer = keySerdeSupplier.getDeserializer(
         serviceContext.getSchemaRegistryClient()
     );
 
@@ -546,7 +546,7 @@ private void processRecordsForTopic(
       final TopologyTestDriver topologyTestDriver,
       final Topic sinkTopic
   ) {
-    int idx = 0;
+    final int idx = 0;
     while (true) {
       final ProducerRecord producerRecord = readOutput(topologyTestDriver, sinkTopic, idx);
       if (producerRecord == null) {
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/SchemaTranslationTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/SchemaTranslationTest.java
index d35e870519a6..2396c139cfc0 100644
--- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/SchemaTranslationTest.java
+++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/SchemaTranslationTest.java
@@ -79,11 +79,11 @@ public static Collection data() {
   private static List generateInputRecords(
       final Topic topic, final org.apache.avro.Schema avroSchema) {
     final Generator generator = new Generator(avroSchema, new Random());
-    List list = new ArrayList<>();
+    final List list = new ArrayList<>();
     for (int i = 0; i < 100; i++) {
       final Object avro = generator.generate();
       final JsonNode spec = avroToJson(avro, avroSchema, true);
-      Record record = new Record(
+      final Record record = new Record(
           topic,
           "test-key",
           avroToValueSpec(avro, avroSchema, true),
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java
index 312b9d9e4cdc..2fa7498cea03 100644
--- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java
+++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java
@@ -33,7 +33,7 @@ public class PlannedTestGenerator {
 
   private static final ObjectMapper MAPPER = PlanJsonMapper.create();
 
-  public static void generatePlans(Stream testCases) {
+  public static void generatePlans(final Stream testCases) {
     testCases
         .filter(PlannedTestUtils::isPlannedTestCase)
         .forEach(PlannedTestGenerator::maybeGenerateTestCase);
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java
index f8c740512986..fc85300b13eb 100644
--- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java
+++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java
@@ -465,7 +465,7 @@ private static void threadYield() {
     try {
       // More reliable than Thread.yield
       Thread.sleep(1);
-    } catch (InterruptedException e) {
+    } catch (final InterruptedException e) {
       // ignore
     }
   }
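// Illustrative sketch, not part of this patch: RestTestExecutor's threadYield
// helper above prefers a 1 ms sleep over Thread.yield(), since yield() is only
// a scheduler hint. The same idiom in isolation:
class YieldSketch {
  static void threadYield() {
    try {
      Thread.sleep(1);  // reliably cedes the CPU so other test threads can run
    } catch (final InterruptedException e) {
      // the helper above ignores the interrupt; restoring the flag is the stricter option:
      Thread.currentThread().interrupt();
    }
  }
}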
diff --git a/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java b/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java
index b1eb61a74d3a..1c0807e98720 100644
--- a/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java
+++ b/ksql-parser/src/main/java/io/confluent/ksql/parser/AstBuilder.java
@@ -928,8 +928,8 @@ public Node visitCast(final SqlBaseParser.CastContext context) {
   }
 
   @Override
-  public Node visitStructConstructor(SqlBaseParser.StructConstructorContext context) {
-    ImmutableList.Builder fields = ImmutableList.builder();
+  public Node visitStructConstructor(final SqlBaseParser.StructConstructorContext context) {
+    final ImmutableList.Builder fields = ImmutableList.builder();
 
     for (int i = 0; i < context.identifier().size(); i++) {
       fields.add(new Field(
diff --git a/ksql-parser/src/main/java/io/confluent/ksql/parser/json/KsqlTypesDeserializationModule.java b/ksql-parser/src/main/java/io/confluent/ksql/parser/json/KsqlTypesDeserializationModule.java
index b1fc161fe2ab..673214a5c975 100644
--- a/ksql-parser/src/main/java/io/confluent/ksql/parser/json/KsqlTypesDeserializationModule.java
+++ b/ksql-parser/src/main/java/io/confluent/ksql/parser/json/KsqlTypesDeserializationModule.java
@@ -20,7 +20,7 @@
 public class KsqlTypesDeserializationModule extends SimpleModule {
 
-  public KsqlTypesDeserializationModule(boolean withImplicitColumns) {
+  public KsqlTypesDeserializationModule(final boolean withImplicitColumns) {
     addDeserializer(LogicalSchema.class, new LogicalSchemaDeserializer(withImplicitColumns));
   }
 }
diff --git a/ksql-parser/src/main/java/io/confluent/ksql/parser/tree/WindowExpression.java b/ksql-parser/src/main/java/io/confluent/ksql/parser/tree/WindowExpression.java
index 32bdaae39da2..c449217299f3 100644
--- a/ksql-parser/src/main/java/io/confluent/ksql/parser/tree/WindowExpression.java
+++ b/ksql-parser/src/main/java/io/confluent/ksql/parser/tree/WindowExpression.java
@@ -89,7 +89,7 @@ public static TimeUnit getWindowUnit(final String windowUnitString) {
         return TimeUnit.valueOf(windowUnitString + "S");
       }
       return TimeUnit.valueOf(windowUnitString);
-    } catch (IllegalArgumentException e) {
+    } catch (final IllegalArgumentException e) {
       return null;
     }
   }
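// Illustrative sketch, not part of this patch: getWindowUnit above parses a
// window unit such as "SECOND" or "SECONDS" into a TimeUnit, returning null on
// bad input rather than throwing. A standalone equivalent:
import java.util.concurrent.TimeUnit;

class WindowUnitSketch {
  static TimeUnit parseWindowUnit(final String raw) {
    try {
      final String name = raw.toUpperCase();
      return name.endsWith("S")
          ? TimeUnit.valueOf(name)
          : TimeUnit.valueOf(name + "S");  // accept the singular form, e.g. "SECOND"
    } catch (final IllegalArgumentException e) {
      return null;                         // callers treat null as "not a time unit"
    }
  }
}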
diff --git a/ksql-parser/src/main/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercer.java b/ksql-parser/src/main/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercer.java
index 37c408fada11..396483c5e104 100644
--- a/ksql-parser/src/main/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercer.java
+++ b/ksql-parser/src/main/java/io/confluent/ksql/schema/ksql/DefaultSqlValueCoercer.java
@@ -80,7 +80,7 @@ private static Optional doCoerce(final Object value, final SqlType targetType
     return Optional.of(result);
   }
 
-  private static Optional coerceStruct(Object value, SqlStruct targetType) {
+  private static Optional coerceStruct(final Object value, final SqlStruct targetType) {
     if (!(value instanceof Struct)) {
       return Optional.empty();
     }
@@ -90,8 +90,10 @@ private static Optional coerceStruct(Object value, SqlStruct targetType) {
         SchemaConverters.sqlToConnectConverter().toConnectSchema(targetType)
     );
 
-    for (Field field : coerced.schema().fields()) {
-      Optional sqlField = targetType.field(field.name());
+    for (final Field field : coerced.schema().fields()) {
+      final Optional sqlField =
+          targetType.field(field.name());
+
       if (!sqlField.isPresent()) {
         // if there was a field in the struct that wasn't in the schema
         // we cannot coerce
@@ -101,7 +103,7 @@ private static Optional coerceStruct(Object value, SqlStruct targetType) {
         continue;
       }
 
-      Optional val = doCoerce(struct.get(field), sqlField.get().type());
+      final Optional val = doCoerce(struct.get(field), sqlField.get().type());
       val.ifPresent(v -> coerced.put(field.name(), v));
     }
 
diff --git a/ksql-parser/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java b/ksql-parser/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java
index 72eb11aa8915..8363ef10e2fb 100644
--- a/ksql-parser/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java
+++ b/ksql-parser/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java
@@ -1287,18 +1287,18 @@ private static Matcher equalToColumn(
       final Optional alias) {
     return new TypeSafeMatcher() {
       @Override
-      protected boolean matchesSafely(SelectItem item) {
+      protected boolean matchesSafely(final SelectItem item) {
         if (!(item instanceof SingleColumn)) {
           return false;
         }
 
-        SingleColumn column = (SingleColumn) item;
+        final SingleColumn column = (SingleColumn) item;
         return Objects.equals(column.getExpression().toString(), expression)
             && Objects.equals(column.getAlias(), alias);
       }
 
       @Override
-      public void describeTo(Description description) {
+      public void describeTo(final Description description) {
         description.appendText(
             String.format("Expression: %s, Alias: %s",
                 expression,
diff --git a/ksql-parser/src/test/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializerTest.java b/ksql-parser/src/test/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializerTest.java
index 65f4e904bcf0..2293987af957 100644
--- a/ksql-parser/src/test/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializerTest.java
+++ b/ksql-parser/src/test/java/io/confluent/ksql/parser/json/LogicalSchemaDeserializerTest.java
@@ -101,7 +101,7 @@ public void shouldAddImplicitColumns() throws Exception {
 
   private static class TestModule extends SimpleModule {
 
-    private TestModule(boolean withImplicitColumns) {
+    private TestModule(final boolean withImplicitColumns) {
       addDeserializer(
           LogicalSchema.class,
           new LogicalSchemaDeserializer(withImplicitColumns)
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/ExecutableServer.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/ExecutableServer.java
index a8ec8b81d08f..a0b673b62a8a 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/ExecutableServer.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/ExecutableServer.java
@@ -30,7 +30,10 @@ public class ExecutableServer implements Executable {
   private final ApplicationServer server;
   private final List> apps;
 
-  public ExecutableServer(ApplicationServer server, List> apps) {
+  public ExecutableServer(
+      final ApplicationServer server,
+      final List> apps
+  ) {
     this.server = server;
     this.apps = apps;
   }
@@ -40,7 +43,7 @@ public void startAsync() throws Exception {
     apps.forEach(server::registerApplication);
     server.start();
 
-    for (ExecutableApplication app : apps) {
+    for (final ExecutableApplication app : apps) {
       app.startAsync();
     }
   }
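// Illustrative sketch, not part of this patch: the DefaultSqlValueCoercer
// change above threads Optional through per-field coercion so that a field
// which cannot be coerced aborts the whole struct without throwing. The shape
// of that pattern, reduced to plain Java:
import java.util.Optional;

class CoercionSketch {
  static Optional<Integer> coerceToInt(final Object value) {
    if (value instanceof Integer) {
      return Optional.of((Integer) value);
    }
    if (value instanceof String) {
      try {
        return Optional.of(Integer.parseInt((String) value));
      } catch (final NumberFormatException e) {
        return Optional.empty();  // signal "cannot coerce" without an exception
      }
    }
    return Optional.empty();
  }
}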
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java
index 549293a091c2..69edf8f894f8 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java
@@ -353,12 +353,12 @@ List getListeners() {
           .map(port -> {
             try {
               return new URL(url.getProtocol(), url.getHost(), port, url.getFile());
-            } catch (MalformedURLException e) {
+            } catch (final MalformedURLException e) {
               throw new KsqlServerException("Malformed URL specified in '"
                   + LISTENERS_CONFIG + "' config: " + listener, e);
             }
           });
-    } catch (MalformedURLException e) {
+    } catch (final MalformedURLException e) {
       throw new KsqlServerException("Malformed URL specified in '"
           + LISTENERS_CONFIG + "' config: " + listener, e);
     }
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java
index 398e23bb000b..ad021684623f 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java
@@ -396,7 +396,7 @@ private static Optional parseInetAddress(final String address) {
 
   private static String getLocalHostName() {
     try {
       return InetAddress.getLocalHost().getCanonicalHostName();
-    } catch (UnknownHostException e) {
+    } catch (final UnknownHostException e) {
       throw new KsqlServerException("Failed to obtain local host info", e);
     }
   }
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
index 56f6a29adf3d..b74f44e91be4 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/StandaloneExecutor.java
@@ -219,7 +219,7 @@ private static String readQueriesFile(final String queryFilePath) {
       return new String(java.nio.file.Files.readAllBytes(
           Paths.get(queryFilePath)), StandardCharsets.UTF_8);
 
-    } catch (IOException e) {
+    } catch (final IOException e) {
       throw new KsqlException(
           String.format("Could not read the query file: %s. Details: %s",
               queryFilePath, e.getMessage()),
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutor.java
index aa412a3b0852..fa1385c27b37 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutor.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutor.java
@@ -120,7 +120,7 @@ public Optional execute(
     try {
       topics = getTopics(serviceContext.getAdminClient(), connector);
       warnings = ImmutableList.of();
-    } catch (Exception e) {
+    } catch (final Exception e) {
       topics = ImmutableList.of();
       warnings = ImmutableList.of(
          new KsqlWarning("Could not list related topics due to " + e.getMessage())
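// Illustrative sketch, not part of this patch: DescribeConnectorExecutor above
// catches a failure to list topics and degrades it to a warning instead of
// failing the whole DESCRIBE. The control flow, with hypothetical names:
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class WarnDontFailSketch {
  static List<String> topicsOrWarning(final List<String> warnings) {
    try {
      return fetchTopics();                 // may throw on a slow or broken cluster
    } catch (final Exception e) {
      warnings.add("Could not list related topics due to " + e.getMessage());
      return Collections.emptyList();       // empty result, statement still succeeds
    }
  }

  private static List<String> fetchTopics() {
    throw new IllegalStateException("admin client unavailable");  // stand-in failure
  }
}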
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java
index e02baf1ffb6c..e0d3aa69ac11 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutor.java
@@ -71,7 +71,7 @@ public static Optional execute(
   }
 
   private static List mergedProperties(
-      ConfiguredStatement statement) {
+      final ConfiguredStatement statement) {
     final List mergedProperties = new ArrayList<>();
 
     statement.getConfig()
@@ -87,18 +87,18 @@ private static List mergedProperties(
   }
 
   private static Map embeddedConnectWorkerProperties(
-      ConfiguredStatement statement) {
-    String configFile = statement.getConfig()
+      final ConfiguredStatement statement) {
+    final String configFile = statement.getConfig()
         .getString(KsqlConfig.CONNECT_WORKER_CONFIG_FILE_PROPERTY);
     return !configFile.isEmpty()
         ? Utils.propsToStringMap(getWorkerProps(configFile))
         : Collections.emptyMap();
   }
 
-  private static Properties getWorkerProps(String configFile) {
+  private static Properties getWorkerProps(final String configFile) {
     try {
       return Utils.loadProps(configFile);
-    } catch (IOException e) {
+    } catch (final IOException e) {
       return new Properties();
     }
   }
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java
index d473db0e53f4..1f5c20c9f0de 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java
@@ -73,7 +73,7 @@ public KsqlEntityList execute(
   ) {
     final Map scopedPropertyOverrides = new HashMap<>(propertyOverrides);
     final KsqlEntityList entities = new KsqlEntityList();
-    for (ParsedStatement parsed : statements) {
+    for (final ParsedStatement parsed : statements) {
       final PreparedStatement prepared = ksqlEngine.prepare(parsed);
       final ConfiguredStatement configured = ConfiguredStatement.of(
           prepared, scopedPropertyOverrides, ksqlConfig);
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/filters/KsqlAuthorizationFilter.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/filters/KsqlAuthorizationFilter.java
index abfef499f58e..306e45ca74fd 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/filters/KsqlAuthorizationFilter.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/filters/KsqlAuthorizationFilter.java
@@ -87,7 +87,7 @@ private static Set getPathsFrom(final Class ...resourceClass) {
     );
 
     paths.add(mainPath);
-    for (Method m : clazz.getMethods()) {
+    for (final Method m : clazz.getMethods()) {
       if (m.isAnnotationPresent(Path.class)) {
         paths.add(mainPath + "/"
             + StringUtils.strip(m.getAnnotation(Path.class).value(), "/"));
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java
index 5258424d9793..e41b8a89a14e 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java
@@ -101,7 +101,7 @@ public class KsqlResource implements KsqlConfigurable {
   private final Optional authorizationValidator;
   private RequestValidator validator;
   private RequestHandler handler;
-  private Errors errorHandler;
+  private final Errors errorHandler;
 
   public KsqlResource(
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java
index 57f1df807025..cc2558cc23ed 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java
@@ -285,7 +285,7 @@ private Response handlePushQuery(
   private String writeValueAsString(final Object object) {
     try {
       return objectMapper.writeValueAsString(object);
-    } catch (JsonProcessingException e) {
+    } catch (final JsonProcessingException e) {
       throw new RuntimeException(e);
     }
   }
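// Illustrative sketch, not part of this patch: getWorkerProps above loads the
// connect worker file via Kafka's Utils.loadProps and falls back to empty
// Properties on I/O failure. The same behaviour with only JDK classes:
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

class WorkerPropsSketch {
  static Properties loadOrEmpty(final String configFile) {
    final Properties props = new Properties();
    try (InputStream in = new FileInputStream(configFile)) {
      props.load(in);
    } catch (final IOException e) {
      // mirror the executor: an unreadable file simply yields no extra properties
    }
    return props;
  }
}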
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java
index 5e8aed556cad..752846811284 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/validation/RequestValidator.java
@@ -103,7 +103,7 @@ public int validate(
     final Injector injector = injectorFactory.apply(ctx, serviceContext);
     int numPersistentQueries = 0;
-    for (ParsedStatement parsed : statements) {
+    for (final ParsedStatement parsed : statements) {
       final PreparedStatement prepared = ctx.prepare(parsed);
       final ConfiguredStatement configured = ConfiguredStatement.of(
           prepared, scopedPropertyOverrides, ksqlConfig);
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/RocksDBConfigSetterHandler.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/RocksDBConfigSetterHandler.java
index ef01ba3a3fed..55e7cfb576ad 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/RocksDBConfigSetterHandler.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/util/RocksDBConfigSetterHandler.java
@@ -35,7 +35,7 @@ public static void maybeConfigureRocksDBConfigSetter(final KsqlConfig ksqlConfig
     try {
       ((org.apache.kafka.common.Configurable) Utils.newInstance(clazz))
           .configure(ksqlConfig.originals());
-    } catch (Exception e) {
+    } catch (final Exception e) {
       throw new ConfigException(
           "Failed to configure Configurable RocksDBConfigSetter. "
               + StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG + ": " + clazz.getName(),
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/parser/ParserMatchers.java b/ksql-rest-app/src/test/java/io/confluent/ksql/parser/ParserMatchers.java
index 0cc8f2b05eaf..88a05ca4aeb8 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/parser/ParserMatchers.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/parser/ParserMatchers.java
@@ -140,7 +140,7 @@ public void describeTo(final Description description) {
   public static final class StatementTextMatcher
       extends FeatureMatcher, String> {
 
-    public StatementTextMatcher(Matcher textMatcher) {
+    public StatementTextMatcher(final Matcher textMatcher) {
       super(textMatcher, "a prepared statement with text", "statement text");
     }
 
@@ -168,7 +168,7 @@ public static Matcher> statementWithT
   public static final class StatementMatcher
       extends FeatureMatcher, Statement> {
 
-    public StatementMatcher(Matcher statementMatcher) {
+    public StatementMatcher(final Matcher statementMatcher) {
       super(statementMatcher, "a prepared statement", "statement");
     }
 
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/client/KsqlRestClientFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/client/KsqlRestClientFunctionalTest.java
index 754c5580bd56..6a021e656d01 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/client/KsqlRestClientFunctionalTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/client/KsqlRestClientFunctionalTest.java
@@ -105,7 +105,7 @@ public void init() throws Exception {
   public void cleanUp() {
     try {
       mockServer.triggerShutdown();
-    } catch (Exception e) {
+    } catch (final Exception e) {
       System.err.println("Failed to stop app");
       e.printStackTrace(System.err);
     }
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityFactoryTest.java
index e7a41469e992..97d3f94d2211 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityFactoryTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/TableRowsEntityFactoryTest.java
@@ -127,7 +127,7 @@ public void shouldSupportNullColumns() {
     newColumns.add(null);
     newColumns.add(null);
     newColumns.add(null);
-    GenericRow row = new GenericRow(newColumns);
+    final GenericRow row = new GenericRow(newColumns);
 
     final Builder builder = ImmutableList.builder();
     builder.add(Row.of(SCHEMA_NULL, STRING_KEY_BUILDER.build("k"), row, ROWTIME));
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java
index d194a4f4793f..7439c7929ba2 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/healthcheck/HealthCheckAgentTest.java
@@ -49,7 +49,7 @@ public class HealthCheckAgentTest {
   static {
     try {
       SERVER_URI = new URL(SERVER_ADDRESS).toURI();
-    } catch (Exception e) {
+    } catch (final Exception e) {
       fail("failed to create server URI");
     }
   }
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/MockKsqlSecurityExtension.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/MockKsqlSecurityExtension.java
index 99a0010f84eb..7c2c1a0d3e16 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/MockKsqlSecurityExtension.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/MockKsqlSecurityExtension.java
@@ -17,7 +17,7 @@ public static void setAuthorizationProvider(final KsqlAuthorizationProvider prov
   }
 
   @Override
-  public void initialize(KsqlConfig ksqlConfig) {
+  public void initialize(final KsqlConfig ksqlConfig) {
   }
 
   @Override
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationFunctionalTest.java
index 6f44fb7f4e38..12e5693e813f 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationFunctionalTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationFunctionalTest.java
@@ -100,7 +100,7 @@ private void givenAppStartedWith(final Map config) {
 
     try {
       app.start();
-    } catch (Exception e) {
+    } catch (final Exception e) {
       throw new AssertionError("Failed to start", e);
     }
   }
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java
index cedc84acb5e2..577387493b88 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java
@@ -317,8 +317,8 @@ public void shouldCheckPreconditionsBeforeUsingServiceContext() {
   @Test
   public void shouldNotInitializeUntilPreconditionsChecked() {
     // Given:
-    KsqlErrorMessage error1 = new KsqlErrorMessage(50000, "error1");
-    KsqlErrorMessage error2 = new KsqlErrorMessage(50000, "error2");
+    final KsqlErrorMessage error1 = new KsqlErrorMessage(50000, "error1");
+    final KsqlErrorMessage error2 = new KsqlErrorMessage(50000, "error2");
     final Queue errors = new LinkedList<>();
     errors.add(error1);
     errors.add(error2);
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java
index f16e20d6133e..4580a4b470da 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestConfigTest.java
@@ -520,7 +520,7 @@ private static URL url(final String address) {
   private static String getLocalHostName() {
     try {
       return InetAddress.getLocalHost().getCanonicalHostName();
-    } catch (UnknownHostException e) {
+    } catch (final UnknownHostException e) {
       throw new AssertionError("Failed to obtain local host info", e);
     }
   }
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/ServerUtilTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/ServerUtilTest.java
index ff19fd76622c..26ea3a316465 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/ServerUtilTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/ServerUtilTest.java
@@ -32,7 +32,7 @@ public void setUp() {
   }
 
   @Test(expected = ConfigException.class)
   public void shouldThrowConfigExceptionIfInvalidServerAddress() {
     // Given:
-    KsqlRestConfig restConfig =
+    final KsqlRestConfig restConfig =
         new KsqlRestConfig(
             Collections.singletonMap(RestConfig.LISTENERS_CONFIG, "invalid"));
 
@@ -44,7 +44,7 @@ public void shouldThrowConfigExceptionIfInvalidServerAddress() {
   @Test
   public void shouldReturnServerAddress() {
     // Given:
-    KsqlRestConfig restConfig =
+    final KsqlRestConfig restConfig =
         new KsqlRestConfig(
             Collections.singletonMap(RestConfig.LISTENERS_CONFIG,
                 "http://localhost:8088, http://localhost:9099"));
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java
index 4574f2f2bca4..4be35d382a66 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/StandaloneExecutorFunctionalTest.java
@@ -313,7 +313,7 @@ private static void givenIncompatibleSchemaExists(
   private void givenScript(final String contents) {
     try {
       Files.write(queryFile, contents.getBytes(StandardCharsets.UTF_8));
-    } catch (IOException e) {
+    } catch (final IOException e) {
       throw new AssertionError("Failed to save query file", e);
     }
   }
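// Illustrative sketch, not part of this patch: givenScript above writes test
// input to a file and wraps IOException in AssertionError, so JUnit reports a
// fixture-setup failure rather than an unhandled checked exception. Standalone:
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

class ScriptFixtureSketch {
  static void writeScript(final Path queryFile, final String contents) {
    try {
      Files.write(queryFile, contents.getBytes(StandardCharsets.UTF_8));
    } catch (final IOException e) {
      throw new AssertionError("Failed to save query file", e);
    }
  }
}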
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TemporaryEngine.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TemporaryEngine.java
index 634d916a20f3..738a2646311a 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TemporaryEngine.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TemporaryEngine.java
@@ -95,7 +95,7 @@ protected void before() {
     );
 
     final SqlTypeParser typeParser = SqlTypeParser.create(TypeRegistry.EMPTY);
-    UdtfLoader udtfLoader = new UdtfLoader(functionRegistry, Optional.empty(),
+    final UdtfLoader udtfLoader = new UdtfLoader(functionRegistry, Optional.empty(),
         typeParser, true
     );
     udtfLoader.loadUdtfFromClass(TestUdtf1.class, "whatever");
@@ -191,12 +191,12 @@ public ServiceContext getServiceContext() {
   public static class TestUdtf1 {
 
     @Udtf
-    public List foo1(@UdfParameter(value = "foo") int foo) {
+    public List foo1(@UdfParameter(value = "foo") final int foo) {
       return ImmutableList.of(1);
     }
 
     @Udtf
-    public List foo2(@UdfParameter(value = "foo") double foo) {
+    public List foo2(@UdfParameter(value = "foo") final double foo) {
       return ImmutableList.of(1.0d);
     }
   }
@@ -206,12 +206,12 @@ public List foo2(@UdfParameter(value = "foo") double foo) {
   public static class TestUdtf2 {
 
     @Udtf
-    public List foo1(@UdfParameter(value = "foo") int foo) {
+    public List foo1(@UdfParameter(value = "foo") final int foo) {
       return ImmutableList.of(1);
     }
 
     @Udtf
-    public List foo2(@UdfParameter(value = "foo") double foo) {
+    public List foo2(@UdfParameter(value = "foo") final double foo) {
       return ImmutableList.of(1.0d);
     }
   }
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java
index bd7088b603f0..d06d26809edf 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java
@@ -149,7 +149,7 @@ public URI getHttpsListener() {
   public URI getWsListener() {
     try {
       return WSURI.toWebsocket(getHttpListener());
-    } catch (URISyntaxException e) {
+    } catch (final URISyntaxException e) {
       throw new RuntimeException("Invalid WS listener", e);
     }
   }
@@ -158,7 +158,7 @@ public URI getWsListener() {
   public URI getWssListener() {
     try {
       return WSURI.toWebsocket(getHttpsListener());
-    } catch (URISyntaxException e) {
+    } catch (final URISyntaxException e) {
       throw new RuntimeException("Invalid WS listener", e);
     }
   }
@@ -258,7 +258,7 @@ protected void before() {
     try {
       restServer.startAsync();
       listeners.addAll(ksqlRestApplication.getListeners());
-    } catch (Exception var2) {
+    } catch (final Exception var2) {
       throw new RuntimeException("Failed to start Ksql rest server", var2);
     }
   }
@@ -272,7 +272,7 @@ protected void after() {
     listeners.clear();
 
     try {
       restServer.triggerShutdown();
-    } catch (Exception e) {
+    } catch (final Exception e) {
       throw new RuntimeException(e);
     }
     restServer = null;
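// Illustrative sketch, not part of this patch: the TestUdtf classes above show
// ksqlDB's table-function pattern -- overloads under one name, one row in, many
// rows out. A minimal hypothetical UDTF; the import paths and the
// @UdtfDescription attributes are assumptions of this sketch, not taken from
// the patch:
import io.confluent.ksql.function.udf.UdfParameter;
import io.confluent.ksql.function.udtf.Udtf;
import io.confluent.ksql.function.udtf.UdtfDescription;
import java.util.Arrays;
import java.util.List;

@UdtfDescription(name = "split_words", description = "Splits text into one row per word")
public class SplitWordsUdtf {
  @Udtf
  public List<String> split(@UdfParameter(value = "text") final String text) {
    return Arrays.asList(text.split("\\s+"));  // each element becomes an output row
  }
}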
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandRunnerTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandRunnerTest.java
index 6f710c9d11a9..c50da1676442 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandRunnerTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/CommandRunnerTest.java
@@ -15,16 +15,14 @@
 
 package io.confluent.ksql.rest.server.computation;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.notNullValue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isNotNull;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.inOrder;
@@ -33,23 +31,19 @@
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 import static org.mockito.hamcrest.MockitoHamcrest.argThat;
-import static io.confluent.ksql.test.util.AssertEventually.assertThatEventually;
 
 import io.confluent.ksql.engine.KsqlEngine;
 import io.confluent.ksql.metrics.MetricCollectors;
 import io.confluent.ksql.rest.server.state.ServerState;
 import io.confluent.ksql.rest.util.ClusterTerminator;
 import io.confluent.ksql.rest.util.TerminateCluster;
-
 import java.time.Clock;
 import java.time.Duration;
 import java.time.Instant;
 import java.util.Arrays;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
-
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -62,7 +56,7 @@
 @RunWith(MockitoJUnitRunner.class)
 public class CommandRunnerTest {
 
-  private static long COMMAND_RUNNER_HEALTH_TIMEOUT = 1000;
+  private static final long COMMAND_RUNNER_HEALTH_TIMEOUT = 1000;
 
   @Rule
   public final ExpectedException expectedException = ExpectedException.none();
@@ -246,11 +240,11 @@ public void shouldTransitionFromRunningToError() throws InterruptedException {
     }).when(statementExecutor).handleStatement(queuedCommand1);
 
     // When:
-    AtomicReference expectedException = new AtomicReference<>(null);
+    final AtomicReference expectedException = new AtomicReference<>(null);
     final Thread commandRunnerThread = (new Thread(() -> {
       try {
         commandRunner.fetchAndRunCommands();
-      } catch (Exception e) {
+      } catch (final Exception e) {
         expectedException.set(e);
       }
     }));
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java
index f4f133caae6d..f95abe7d0a94 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java
@@ -156,7 +156,7 @@ public void shouldEnqueueSuccessfulCommandTransactionally() {
     distributor.execute(CONFIGURED_STATEMENT, executionContext, serviceContext);
 
     // Then:
-    InOrder inOrder = Mockito.inOrder(transactionalProducer, queue, validatedCommandFactory);
+    final InOrder inOrder = Mockito.inOrder(transactionalProducer, queue, validatedCommandFactory);
     inOrder.verify(transactionalProducer).initTransactions();
     inOrder.verify(transactionalProducer).beginTransaction();
     inOrder.verify(queue).waitForCommandConsumer();
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java
index 44bd43a9933e..10b8acd5de7c 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/InteractiveStatementExecutorTest.java
@@ -321,13 +321,13 @@ public void shouldCompleteFutureOnSuccess() {
     handleStatement(command, COMMAND_ID, Optional.of(status), 0L);
 
     // Then:
-    InOrder inOrder = Mockito.inOrder(status);
-    ArgumentCaptor argCommandStatus = ArgumentCaptor.forClass(CommandStatus.class);
-    ArgumentCaptor argFinalCommandStatus = ArgumentCaptor.forClass(CommandStatus.class);
+    final InOrder inOrder = Mockito.inOrder(status);
+    final ArgumentCaptor argCommandStatus = ArgumentCaptor.forClass(CommandStatus.class);
+    final ArgumentCaptor argFinalCommandStatus = ArgumentCaptor.forClass(CommandStatus.class);
     inOrder.verify(status, times(2)).setStatus(argCommandStatus.capture());
     inOrder.verify(status, times(1)).setFinalStatus(argFinalCommandStatus.capture());
 
-    List commandStatusList = argCommandStatus.getAllValues();
+    final List commandStatusList = argCommandStatus.getAllValues();
     assertEquals(CommandStatus.Status.PARSING, commandStatusList.get(0).getStatus());
     assertEquals(CommandStatus.Status.EXECUTING, commandStatusList.get(1).getStatus());
     assertEquals(CommandStatus.Status.SUCCESS, argFinalCommandStatus.getValue().getStatus());
@@ -454,17 +454,17 @@ public void shouldThrowExceptionIfCommandFails() {
     // When:
     try {
       handleStatement(command, COMMAND_ID, Optional.of(status), 0L);
-    } catch (KsqlStatementException e) {
+    } catch (final KsqlStatementException e) {
       // Then:
       assertEquals("Cannot add stream 'FOO': A stream with the same name already exists\n"
          + "Statement: " + CREATE_STREAM_FOO_STATMENT, e.getMessage());
     }
 
-    InOrder inOrder = Mockito.inOrder(status);
-    ArgumentCaptor argCommandStatus = ArgumentCaptor.forClass(CommandStatus.class);
+    final InOrder inOrder = Mockito.inOrder(status);
+    final ArgumentCaptor argCommandStatus = ArgumentCaptor.forClass(CommandStatus.class);
     inOrder.verify(status, times(3)).setStatus(argCommandStatus.capture());
 
-    List commandStatusList = argCommandStatus.getAllValues();
+    final List commandStatusList = argCommandStatus.getAllValues();
     assertEquals(CommandStatus.Status.PARSING, commandStatusList.get(0).getStatus());
     assertEquals(CommandStatus.Status.EXECUTING, commandStatusList.get(1).getStatus());
     assertEquals(CommandStatus.Status.ERROR, commandStatusList.get(2).getStatus());
@@ -623,7 +623,8 @@ public void shouldSkipStartWhenReplayingLog() {
     verify(mockQuery, times(0)).start();
   }
 
-  private ConfiguredStatement eqConfiguredStatement(PreparedStatement preparedStatement) {
+  private ConfiguredStatement eqConfiguredStatement(
+      final PreparedStatement preparedStatement) {
     return argThat(new ConfiguredStatementMatcher<>(preparedStatement));
   }
 
@@ -633,28 +634,28 @@ private ConfiguredKsqlPlan eqConfiguredPlan(final KsqlPlan plan) {
 
   private class ConfiguredKsqlPlanMatcher implements ArgumentMatcher {
 
-    private ConfiguredKsqlPlan plan;
+    private final ConfiguredKsqlPlan plan;
 
-    ConfiguredKsqlPlanMatcher(KsqlPlan ksqlPlan) {
+    ConfiguredKsqlPlanMatcher(final KsqlPlan ksqlPlan) {
       plan = ConfiguredKsqlPlan.of(ksqlPlan, Collections.emptyMap(), ksqlConfig);
     }
 
     @Override
-    public boolean matches(ConfiguredKsqlPlan configuredKsqlPlan) {
+    public boolean matches(final ConfiguredKsqlPlan configuredKsqlPlan) {
      return plan.getPlan().equals(configuredKsqlPlan.getPlan());
     }
   }
 
   private class ConfiguredStatementMatcher implements ArgumentMatcher> {
 
-    private ConfiguredStatement statement;
+    private final ConfiguredStatement statement;
 
-    ConfiguredStatementMatcher(PreparedStatement preparedStatement) {
+    ConfiguredStatementMatcher(final PreparedStatement preparedStatement) {
       statement = ConfiguredStatement.of(preparedStatement, Collections.emptyMap(), ksqlConfig);
     }
 
     @Override
-    public boolean matches(ConfiguredStatement matchStatement) {
+    public boolean matches(final ConfiguredStatement matchStatement) {
       return statement.getStatementText().equals(matchStatement.getStatementText())
           && statement.getStatement().equals(matchStatement.getStatement());
     }
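// Illustrative sketch, not part of this patch: the assertions above combine
// Mockito's InOrder with ArgumentCaptor to check both call order and the
// captured values. The pattern in isolation, with a hypothetical Listener type:
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;

import java.util.Arrays;
import java.util.List;
import org.mockito.ArgumentCaptor;
import org.mockito.InOrder;

class CaptorSketch {
  interface Listener {
    void onStatus(String status);
  }

  public static void main(final String[] args) {
    final Listener listener = mock(Listener.class);
    listener.onStatus("PARSING");
    listener.onStatus("EXECUTING");

    final InOrder inOrder = inOrder(listener);
    final ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    inOrder.verify(listener, times(2)).onStatus(captor.capture());

    final List<String> statuses = captor.getAllValues();
    if (!statuses.equals(Arrays.asList("PARSING", "EXECUTING"))) {
      throw new AssertionError("unexpected order: " + statuses);
    }
  }
}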
@@ -695,7 +696,7 @@ public void shouldTerminateAll() {
         .thenReturn(PreparedStatement.of(queryStatement, terminateAll));
 
     final PersistentQueryMetadata query0 = mock(PersistentQueryMetadata.class);
-    PersistentQueryMetadata query1 = mock(PersistentQueryMetadata.class);
+    final PersistentQueryMetadata query1 = mock(PersistentQueryMetadata.class);
 
     when(mockEngine.getPersistentQueries()).thenReturn(ImmutableList.of(query0, query1));
 
@@ -928,7 +929,7 @@ private void terminateQueries() {
         getCommandStatus(terminateCommandId2).getStatus(), equalTo(CommandStatus.Status.SUCCESS));
   }
 
-  private CommandStatus getCommandStatus(CommandId commandId) {
+  private CommandStatus getCommandStatus(final CommandId commandId) {
     final Optional commandStatus = statementExecutor.getStatus(commandId);
     assertThat("command not registered: " + commandId,
         commandStatus,
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java
index 82f90254d469..6cad7478961c 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java
@@ -90,7 +90,7 @@ public class RecoveryTest {
 
   @Mock
   @SuppressWarnings("unchecked")
-  private Producer transactionalProducer = (Producer) mock(Producer.class);
+  private final Producer transactionalProducer = (Producer) mock(Producer.class);
 
   private final KsqlServer server1 = new KsqlServer(commands);
   private final KsqlServer server2 = new KsqlServer(commands);
@@ -118,7 +118,7 @@ private KsqlEngine createKsqlEngine(final QueryIdGenerator queryIdGenerator) {
   private static class FakeCommandQueue implements CommandQueue {
     private final List commandLog;
     private int offset;
-    private Producer transactionalProducer;
+    private final Producer transactionalProducer;
 
     FakeCommandQueue(final List commandLog, final Producer transactionalProducer) {
       this.commandLog = commandLog;
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/SequenceNumberFutureStoreTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/SequenceNumberFutureStoreTest.java
index bf8bfbc04868..a3e0a5ad1f29 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/SequenceNumberFutureStoreTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/SequenceNumberFutureStoreTest.java
@@ -104,13 +104,13 @@ public void shouldBeThreadSafe() {
     assertThat(futures.stream().allMatch(CompletableFuture::isDone), is(true));
   }
 
-  private static void assertFutureIsCompleted(CompletableFuture future) {
+  private static void assertFutureIsCompleted(final CompletableFuture future) {
     assertThat(future.isDone(), is(true));
     assertThat(future.isCancelled(), is(false));
     assertThat(future.isCompletedExceptionally(), is(false));
   }
 
-  private static void assertFutureIsNotCompleted(CompletableFuture future) {
+  private static void assertFutureIsNotCompleted(final CompletableFuture future) {
     assertThat(future.isDone(), is(false));
   }
 }
\ No newline at end of file
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutorTest.java
index 103957867e7a..c0250c765164 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutorTest.java
@@ -181,7 +181,7 @@ public void shouldDescribeKnownConnector() {
   @Test
   public void shouldDescribeKnownConnectorIfTopicListFails() {
     // Given:
-    KafkaFuture> fut = new KafkaFutureImpl<>();
+    final KafkaFuture> fut = new KafkaFutureImpl<>();
     fut.cancel(true);
     when(topics.names()).thenReturn(fut);
 
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeFunctionExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeFunctionExecutorTest.java
index 212938fa3b08..1bbb65f2a0ed 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeFunctionExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/DescribeFunctionExecutorTest.java
@@ -47,13 +47,13 @@ public void shouldDescribeUDF() {
     // Then:
     assertThat(functionList, new TypeSafeMatcher() {
       @Override
-      protected boolean matchesSafely(FunctionDescriptionList item) {
+      protected boolean matchesSafely(final FunctionDescriptionList item) {
         return functionList.getName().equals("CONCAT")
             && functionList.getType().equals(FunctionType.SCALAR);
       }
 
       @Override
-      public void describeTo(Description description) {
+      public void describeTo(final Description description) {
         description.appendText(functionList.getName());
       }
     });
@@ -73,13 +73,13 @@ public void shouldDescribeUDAF() {
     // Then:
     assertThat(functionList, new TypeSafeMatcher() {
       @Override
-      protected boolean matchesSafely(FunctionDescriptionList item) {
+      protected boolean matchesSafely(final FunctionDescriptionList item) {
         return functionList.getName().equals("MAX")
             && functionList.getType().equals(FunctionType.AGGREGATE);
       }
 
       @Override
-      public void describeTo(Description description) {
+      public void describeTo(final Description description) {
         description.appendText(functionList.getName());
       }
     });
@@ -99,13 +99,13 @@ public void shouldDescribeUDTF() {
     // Then:
     assertThat(functionList, new TypeSafeMatcher() {
       @Override
-      protected boolean matchesSafely(FunctionDescriptionList item) {
+      protected boolean matchesSafely(final FunctionDescriptionList item) {
         return functionList.getName().equals("TEST_UDTF1")
             && functionList.getType().equals(FunctionType.TABLE);
       }
 
       @Override
-      public void describeTo(Description description) {
+      public void describeTo(final Description description) {
         description.appendText(functionList.getName());
       }
     });
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ExplainExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ExplainExecutorTest.java
index 2da255335abe..a4bf771c84f7 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ExplainExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ExplainExecutorTest.java
@@ -56,7 +56,7 @@ public void shouldExplainQueryId() {
     final PersistentQueryMetadata metadata = givenPersistentQuery("id");
     when(metadata.getState()).thenReturn("Running");
 
-    KsqlEngine engine = mock(KsqlEngine.class);
+    final KsqlEngine engine = mock(KsqlEngine.class);
     when(engine.getPersistentQuery(metadata.getQueryId())).thenReturn(Optional.of(metadata));
 
     // When:
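// Illustrative sketch, not part of this patch: the tests above build anonymous
// Hamcrest TypeSafeMatchers, and the patch adds final to their
// matchesSafely/describeTo parameters. A standalone named matcher with the
// same shape (the function-name domain is hypothetical):
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeMatcher;

class FunctionNameMatcher extends TypeSafeMatcher<String> {
  private final String expected;

  FunctionNameMatcher(final String expected) {
    this.expected = expected;
  }

  @Override
  protected boolean matchesSafely(final String item) {
    return expected.equalsIgnoreCase(item);  // type-safe: only invoked for non-null Strings
  }

  @Override
  public void describeTo(final Description description) {
    description.appendText("a function named ").appendValue(expected);
  }

  static Matcher<String> namedFunction(final String name) {
    return new FunctionNameMatcher(name);
  }
}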
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListFunctionsExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListFunctionsExecutorTest.java
index a377cf0c7bb1..e906017acd4d 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListFunctionsExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListFunctionsExecutorTest.java
@@ -48,7 +48,7 @@ public void shouldListFunctions() {
     ).orElseThrow(IllegalStateException::new);
 
     // Then:
-    Collection functions = functionList.getFunctions();
+    final Collection functions = functionList.getFunctions();
 
     assertThat(functions, hasItems(
         new SimpleFunctionInfo("CONCAT", FunctionType.SCALAR),
         new SimpleFunctionInfo("TOPK", FunctionType.AGGREGATE),
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java
index d6c517d519ad..12243ec9290b 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListPropertiesExecutorTest.java
@@ -58,9 +58,9 @@ public void shouldListProperties() {
     assertThat(properties.getOverwrittenProperties(), is(empty()));
   }
 
-  private Map toMap(PropertiesList properties) {
-    Map map = new HashMap<>();
-    for (Property property : properties.getProperties()) {
+  private Map toMap(final PropertiesList properties) {
+    final Map map = new HashMap<>();
+    for (final Property property : properties.getProperties()) {
       map.put(property.getName(), property.getValue());
     }
     return map;
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java
index cb5e539da48f..407dba9999ed 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java
@@ -17,7 +17,6 @@
 
 import static io.confluent.ksql.parser.ParserMatchers.configured;
 import static io.confluent.ksql.parser.ParserMatchers.preparedStatement;
-import static io.confluent.ksql.parser.ParserMatchers.preparedStatementText;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.is;
@@ -45,7 +44,6 @@
 import io.confluent.ksql.rest.server.computation.DistributingExecutor;
 import io.confluent.ksql.services.ServiceContext;
 import io.confluent.ksql.util.KsqlConfig;
-import io.confluent.ksql.util.KsqlConstants;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
@@ -223,7 +221,7 @@ private StatementExecutor givenReturningExecutor(
 
   private static Matcher hasItems(final KsqlEntity... items) {
     return new TypeSafeMatcher() {
       @Override
-      protected boolean matchesSafely(KsqlEntityList actual) {
+      protected boolean matchesSafely(final KsqlEntityList actual) {
         if (items.length != actual.size()) {
           return false;
         }
@@ -237,7 +235,7 @@ protected boolean matchesSafely(KsqlEntityList actual) {
       }
 
       @Override
-      public void describeTo(Description description) {
+      public void describeTo(final Description description) {
         description.appendText(Arrays.toString(items));
       }
     };
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/filters/KsqlAuthorizationFilterTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/filters/KsqlAuthorizationFilterTest.java
index 0160ef4c5b69..6b33f9f14fc5 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/filters/KsqlAuthorizationFilterTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/filters/KsqlAuthorizationFilterTest.java
@@ -62,7 +62,7 @@ public void setUp() {
   @Test
   public void filterShouldContinueIfAuthorizationIsAllowed() {
     // Given:
-    ContainerRequest request = givenRequestContext(userPrincipal, "GET", "query");
+    final ContainerRequest request = givenRequestContext(userPrincipal, "GET", "query");
 
     // When:
     authorizationFilter.filter(request);
@@ -74,7 +74,7 @@ public void filterShouldContinueIfAuthorizationIsAllowed() {
   @Test
   public void filterShouldAbortIfAuthorizationIsDenied() {
     // Given:
-    ContainerRequest request = givenRequestContext(userPrincipal, "GET", "query");
+    final ContainerRequest request = givenRequestContext(userPrincipal, "GET", "query");
     doThrow(new KsqlException("access denied"))
         .when(authorizationProvider).checkEndpointAccess(userPrincipal, "GET", "/query");
 
@@ -90,7 +90,7 @@ public void filterShouldAbortIfAuthorizationIsDenied() {
   @Test
   public void filterShouldContinueOnUnauthorizedMetadataPath() {
     // Given:
-    ContainerRequest request = givenRequestContext(userPrincipal, "GET", "v1/metadata");
+    final ContainerRequest request = givenRequestContext(userPrincipal, "GET", "v1/metadata");
 
     // When:
     authorizationFilter.filter(request);
@@ -103,7 +103,7 @@ public void filterShouldContinueOnUnauthorizedMetadataPath() {
   @Test
   public void filterShouldContinueOnUnauthorizedMetadataIdPath() {
     // Given:
-    ContainerRequest request = givenRequestContext(userPrincipal, "GET", "v1/metadata/id");
+    final ContainerRequest request = givenRequestContext(userPrincipal, "GET", "v1/metadata/id");
 
     // When:
     authorizationFilter.filter(request);
@@ -116,7 +116,7 @@ public void filterShouldContinueOnUnauthorizedMetadataIdPath() {
   @Test
   public void filterShouldContinueOnUnauthorizedHealthCheckPath() {
     // Given:
-    ContainerRequest request = givenRequestContext(userPrincipal, "GET", "healthcheck");
+    final ContainerRequest request = givenRequestContext(userPrincipal, "GET", "healthcheck");
 
     // When:
     authorizationFilter.filter(request);
@@ -133,7 +133,7 @@ private ContainerRequest givenRequestContext(
   ) {
     when(securityContext.getUserPrincipal()).thenReturn(principal);
 
-    ContainerRequest requestContext = new ContainerRequest(
+    final ContainerRequest requestContext = new ContainerRequest(
         URI.create(""),
         URI.create(path),
         method,
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/mock/MockApplication.java @@ -50,7 +50,7 @@ public void startAsync() { @Override public void triggerShutdown() { - for (TestStreamWriter testStreamWriter : streamedQueryResource.getWriters()) { + for (final TestStreamWriter testStreamWriter : streamedQueryResource.getWriters()) { try { testStreamWriter.finished(); } catch (final Exception e) { diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java index 4b13e92df8df..16870b922376 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -340,8 +340,8 @@ public void setUp() throws IOException, RestClientException { when(errorsHandler.generateResponse(any(), any())).thenAnswer(new Answer() { @Override - public Response answer(InvocationOnMock invocation) throws Throwable { - Object[] args = invocation.getArguments(); + public Response answer(final InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); return (Response) args[1]; } }); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/PollingSubscriptionTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/PollingSubscriptionTest.java index 8eeae20f932b..5ccd9b91f048 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/PollingSubscriptionTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/PollingSubscriptionTest.java @@ -33,7 +33,6 @@ import java.util.Queue; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; import org.junit.Test; @@ -100,13 +99,13 @@ public void testBasicFlow() throws Exception { final TestPublisher testPublisher = new TestPublisher(); testPublisher.subscribe(testSubscriber); - assertTrue(testSubscriber.done.await(1000, TimeUnit.MILLISECONDS)); + assertTrue(testSubscriber.await()); assertTrue(exec.shutdownNow().isEmpty()); assertTrue(testPublisher.subscription.closed); - assertNull(testSubscriber.error); - assertNotNull(testSubscriber.schema); - assertEquals(ELEMENTS, testSubscriber.elements); + assertNull(testSubscriber.getError()); + assertNotNull(testSubscriber.getSchema()); + assertEquals(ELEMENTS, testSubscriber.getElements()); } @Test @@ -134,12 +133,12 @@ String poll() { testPublisher.subscribe(testSubscriber); - assertTrue(testSubscriber.done.await(1000, TimeUnit.MILLISECONDS)); + assertTrue(testSubscriber.await()); assertTrue(exec.shutdownNow().isEmpty()); assertTrue(testPublisher.subscription.closed); - assertNotNull(testSubscriber.error); - assertEquals(ImmutableList.of("a", "b"), testSubscriber.elements); + assertNotNull(testSubscriber.getError()); + assertEquals(ImmutableList.of("a", "b"), testSubscriber.getElements()); } @Test @@ -155,13 +154,13 @@ TestPollingSubscription createSubscription( }; testPublisher.subscribe(testSubscriber); - assertTrue(testSubscriber.done.await(1000, TimeUnit.MILLISECONDS)); + assertTrue(testSubscriber.await()); assertTrue(multithreadedExec.shutdownNow().isEmpty()); assertTrue(testPublisher.subscription.closed); - assertNull(testSubscriber.error); - assertNotNull(testSubscriber.schema); - assertEquals(ELEMENTS, 
testSubscriber.elements); + assertNull(testSubscriber.getError()); + assertNotNull(testSubscriber.getSchema()); + assertEquals(ELEMENTS, testSubscriber.getElements()); } @Test @@ -192,12 +191,12 @@ String poll() { testPublisher.subscribe(testSubscriber); - assertTrue(testSubscriber.done.await(1000, TimeUnit.MILLISECONDS)); + assertTrue(testSubscriber.await()); assertTrue(multithreadedExec.shutdownNow().isEmpty()); assertTrue(testPublisher.subscription.closed); - assertNotNull(testSubscriber.error); - assertEquals(ImmutableList.of("a"), testSubscriber.elements); + assertNotNull(testSubscriber.getError()); + assertEquals(ImmutableList.of("a"), testSubscriber.getElements()); } @Test @@ -220,12 +219,12 @@ String poll() { testPublisher.subscribe(testSubscriber); - assertTrue(testSubscriber.done.await(1000, TimeUnit.MILLISECONDS)); + assertTrue(testSubscriber.await()); assertTrue(exec.shutdownNow().isEmpty()); assertTrue(testPublisher.subscription.closed); - assertNull(testSubscriber.error); - assertEquals(ImmutableList.of(), testSubscriber.elements); + assertNull(testSubscriber.getError()); + assertEquals(ImmutableList.of(), testSubscriber.getElements()); } @Test(expected = IllegalArgumentException.class) diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/PrintSubscriptionTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/PrintSubscriptionTest.java index 618c08c90c55..3bf1f887239d 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/PrintSubscriptionTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/PrintSubscriptionTest.java @@ -50,8 +50,8 @@ public void setup() { @Test public void testPrintPublisher() { // Given: - TestSubscriber> subscriber = new TestSubscriber<>(); - PrintSubscription subscription = new PrintSubscription( + final TestSubscriber> subscriber = new TestSubscriber<>(); + final PrintSubscription subscription = new PrintSubscription( MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), StreamingTestUtils.printTopic("topic", true, null, null), subscriber, @@ -60,7 +60,7 @@ public void testPrintPublisher() { ); // When: - Collection results = subscription.poll(); + final Collection results = subscription.poll(); // Then: assertThat(results, contains(Lists.newArrayList( @@ -73,8 +73,8 @@ public void testPrintPublisher() { @Test public void testPrintPublisherLimit() { // Given: - TestSubscriber> subscriber = new TestSubscriber<>(); - PrintSubscription subscription = new PrintSubscription( + final TestSubscriber> subscriber = new TestSubscriber<>(); + final PrintSubscription subscription = new PrintSubscription( MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), StreamingTestUtils.printTopic("topic", true, null, 2), subscriber, @@ -83,8 +83,8 @@ public void testPrintPublisherLimit() { ); // When: - Collection results = subscription.poll(); - Collection results2 = subscription.poll(); + final Collection results = subscription.poll(); + final Collection results2 = subscription.poll(); // Then: assertThat(results, contains(Lists.newArrayList( @@ -97,8 +97,8 @@ public void testPrintPublisherLimit() { @Test public void testPrintPublisherLimitTwoBatches() { // Given: - TestSubscriber> subscriber = new TestSubscriber<>(); - PrintSubscription subscription = new PrintSubscription( + final TestSubscriber> subscriber = new TestSubscriber<>(); + final PrintSubscription subscription = new 
PrintSubscription( MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), StreamingTestUtils.printTopic("topic", true, null, 5), subscriber, @@ -107,8 +107,8 @@ public void testPrintPublisherLimitTwoBatches() { ); // When: - Collection results = subscription.poll(); - Collection results2 = subscription.poll(); + final Collection results = subscription.poll(); + final Collection results2 = subscription.poll(); // Then: assertThat(results, contains(Lists.newArrayList( @@ -125,8 +125,8 @@ public void testPrintPublisherLimitTwoBatches() { @Test public void testPrintPublisherIntervalNoLimit() { // Given: - TestSubscriber> subscriber = new TestSubscriber<>(); - PrintSubscription subscription = new PrintSubscription( + final TestSubscriber> subscriber = new TestSubscriber<>(); + final PrintSubscription subscription = new PrintSubscription( MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), StreamingTestUtils.printTopic("topic", true, 2, null), subscriber, @@ -135,8 +135,8 @@ public void testPrintPublisherIntervalNoLimit() { ); // When: - Collection results = subscription.poll(); - Collection results2 = subscription.poll(); + final Collection results = subscription.poll(); + final Collection results2 = subscription.poll(); // Then: assertThat(results, contains(Lists.newArrayList( @@ -151,8 +151,8 @@ public void testPrintPublisherIntervalNoLimit() { @Test public void testPrintPublisherIntervalAndLimit() { // Given: - TestSubscriber> subscriber = new TestSubscriber<>(); - PrintSubscription subscription = new PrintSubscription( + final TestSubscriber> subscriber = new TestSubscriber<>(); + final PrintSubscription subscription = new PrintSubscription( MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), StreamingTestUtils.printTopic("topic", true, 2, 4), subscriber, @@ -161,10 +161,10 @@ public void testPrintPublisherIntervalAndLimit() { ); // When: - Collection results = subscription.poll(); - Collection results2 = subscription.poll(); - Collection results3 = subscription.poll(); - Collection results4 = subscription.poll(); + final Collection results = subscription.poll(); + final Collection results2 = subscription.poll(); + final Collection results3 = subscription.poll(); + final Collection results4 = subscription.poll(); // Then: assertThat(results, contains(Lists.newArrayList( diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index fb4a23ffe9c4..2b31f5ef51fb 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -113,7 +113,7 @@ public class StreamedQueryResourceTest { )); private static final Long closeTimeout = KsqlConfig.KSQL_SHUTDOWN_TIMEOUT_MS_DEFAULT; - private static Response AUTHORIZATION_ERROR_RESPONSE = Response + private static final Response AUTHORIZATION_ERROR_RESPONSE = Response .status(FORBIDDEN) .entity(new KsqlErrorMessage(ERROR_CODE_FORBIDDEN_KAFKA_ACCESS, "some error")) .build(); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamingTestUtils.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamingTestUtils.java index 69f60fb2a3b3..35c8f3432349 100644 --- 
a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamingTestUtils.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamingTestUtils.java @@ -31,6 +31,7 @@ import java.util.Optional; import java.util.OptionalInt; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.IntStream; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -38,7 +39,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.utils.Bytes; -class StreamingTestUtils { +final class StreamingTestUtils { private StreamingTestUtils() { /* private constructor for utility class */ } @@ -65,10 +66,10 @@ static Iterator> partition( .iterator(); } - private static ConsumerRecords combine(List> recordsList) { + private static ConsumerRecords combine(final List> recordsList) { final Map>> recordMap = new HashMap<>(); - for (ConsumerRecords records : recordsList) { - for (TopicPartition tp : records.partitions()) { + for (final ConsumerRecords records : recordsList) { + for (final TopicPartition tp : records.partitions()) { recordMap.computeIfAbsent(tp, ignored -> new ArrayList<>()).addAll(records.records(tp)); } } @@ -91,11 +92,11 @@ static PrintTopic printTopic( static class TestSubscriber implements Subscriber { - CountDownLatch done = new CountDownLatch(1); - Throwable error = null; - List elements = Lists.newLinkedList(); - LogicalSchema schema = null; - Subscription subscription; + private final CountDownLatch done = new CountDownLatch(1); + private final List elements = Lists.newLinkedList(); + private Throwable error = null; + private LogicalSchema schema = null; + private Subscription subscription; @Override public void onNext(final T item) { @@ -136,5 +137,25 @@ public void onSubscribe(final Subscription subscription) { this.subscription = subscription; subscription.request(1); } + + public Throwable getError() { + return error; + } + + public LogicalSchema getSchema() { + return schema; + } + + public List getElements() { + return elements; + } + + public boolean await() { + try { + return done.await(10, TimeUnit.SECONDS); + } catch (final InterruptedException e) { + throw new AssertionError("Test interrupted", e); + } + } } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamWriterTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamWriterTest.java index 4d014ea0e4e6..73921aeca073 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamWriterTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamWriterTest.java @@ -94,7 +94,7 @@ public void testIntervalTwoAndLimitTwo() { ); // When: - ValidatingOutputStream out = new ValidatingOutputStream(); + final ValidatingOutputStream out = new ValidatingOutputStream(); writer.write(out); // Then: diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/state/ServerStateTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/state/ServerStateTest.java index 3b652070f1de..c2d262104f59 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/state/ServerStateTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/state/ServerStateTest.java @@ -31,7 +31,7 @@ public class ServerStateTest { @Test public void shouldReturnErrorWhenInitializing() { 
// Given: - KsqlErrorMessage error = new KsqlErrorMessage(12345, "bad stuff is happening"); + final KsqlErrorMessage error = new KsqlErrorMessage(12345, "bad stuff is happening"); serverState.setInitializingReason(error); // When: diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java index 4a53092733a9..1930b9876ee3 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/validation/RequestValidatorTest.java @@ -277,7 +277,7 @@ private List givenParsed(final String sql) { } private void givenRequestValidator( - Map, StatementValidator> customValidators + final Map, StatementValidator> customValidators ) { validator = new RequestValidator( customValidators, diff --git a/ksql-rest-client/src/test/java/io/confluent/ksql/rest/client/KsqlTargetTest.java b/ksql-rest-client/src/test/java/io/confluent/ksql/rest/client/KsqlTargetTest.java index b35d2841dcef..5ac65fb12f12 100644 --- a/ksql-rest-client/src/test/java/io/confluent/ksql/rest/client/KsqlTargetTest.java +++ b/ksql-rest-client/src/test/java/io/confluent/ksql/rest/client/KsqlTargetTest.java @@ -334,7 +334,7 @@ private Entity jsonKsqlRequest( } private static MultivaluedMap authHeaders(final String value) { - MultivaluedMap headers = new MultivaluedHashMap<>(); + final MultivaluedMap headers = new MultivaluedHashMap<>(); headers.add(HttpHeaders.AUTHORIZATION, value); return headers; } diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java index 501457dbd05b..0e33f821e63c 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/PropertiesList.java @@ -54,14 +54,14 @@ public String getValue() { } @Override - public boolean equals(Object object) { + public boolean equals(final Object object) { if (this == object) { return true; } if (object == null || getClass() != object.getClass()) { return false; } - Property that = (Property) object; + final Property that = (Property) object; return Objects.equals(name, that.name) && Objects.equals(scope, that.scope) && Objects.equals(value, that.value); diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/StreamedRow.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/StreamedRow.java index 05f2adca66c8..60feb9f2b52a 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/StreamedRow.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/StreamedRow.java @@ -140,7 +140,7 @@ public int hashCode() { public String toString() { try { return JsonMapper.INSTANCE.mapper.writeValueAsString(this); - } catch (JsonProcessingException e) { + } catch (final JsonProcessingException e) { return super.toString(); } } @@ -205,7 +205,7 @@ public int hashCode() { public String toString() { try { return JsonMapper.INSTANCE.mapper.writeValueAsString(this); - } catch (JsonProcessingException e) { + } catch (final JsonProcessingException e) { return super.toString(); } } diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java index de70d02321ee..e6d38bb66fb8 100644 --- 
a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/ErrorsTest.java @@ -35,8 +35,8 @@ @RunWith(MockitoJUnitRunner.class) public class ErrorsTest { - private static String SOME_ERROR = "error string"; - private static Response KAFKA_DENIED_ERROR = Response + private static final String SOME_ERROR = "error string"; + private static final Response KAFKA_DENIED_ERROR = Response .status(FORBIDDEN) .entity(new KsqlErrorMessage(Errors.ERROR_CODE_FORBIDDEN_KAFKA_ACCESS, SOME_ERROR)) .build(); diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/KsqlErrorMessageTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/KsqlErrorMessageTest.java index c1c5e1227f35..5627eff1564a 100644 --- a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/KsqlErrorMessageTest.java +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/KsqlErrorMessageTest.java @@ -54,7 +54,7 @@ public void shouldDeserializeFromJson() { private static String serialize(final KsqlErrorMessage errorMessage) { try { return OBJECT_MAPPER.writeValueAsString(errorMessage); - } catch (IOException e) { + } catch (final IOException e) { throw new RuntimeException("test invalid", e); } } @@ -62,7 +62,7 @@ private static String serialize(final KsqlErrorMessage errorMessage) { private static KsqlErrorMessage deserialize(final String json) { try { return OBJECT_MAPPER.readValue(json, KsqlErrorMessage.class); - } catch (IOException e) { + } catch (final IOException e) { if (e.getCause() instanceof RuntimeException) { throw (RuntimeException) e.getCause(); } diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/KsqlRequestTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/KsqlRequestTest.java index ec50a3f3027c..47aa60f32731 100644 --- a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/KsqlRequestTest.java +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/KsqlRequestTest.java @@ -228,7 +228,7 @@ public void shouldHandleOverridesOfTypeList() { private static String serialize(final KsqlRequest request) { try { return OBJECT_MAPPER.writeValueAsString(request); - } catch (IOException e) { + } catch (final IOException e) { throw new RuntimeException("test invalid", e); } } @@ -236,7 +236,7 @@ private static String serialize(final KsqlRequest request) { private static KsqlRequest deserialize(final String json) { try { return OBJECT_MAPPER.readValue(json, KsqlRequest.class); - } catch (IOException e) { + } catch (final IOException e) { if (e.getCause() instanceof RuntimeException) { throw (RuntimeException) e.getCause(); } diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/ServerInfoTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/ServerInfoTest.java index 641709fee6ce..e84127078b1f 100644 --- a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/ServerInfoTest.java +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/ServerInfoTest.java @@ -28,7 +28,7 @@ public class ServerInfoTest { private static final String KAFKA_CLUSTER_ID = "test-kafka-cluster"; private static final String KSQL_SERVICE_ID = "test-ksql-service"; - private ServerInfo serverInfo = new ServerInfo(VERSION, KAFKA_CLUSTER_ID, KSQL_SERVICE_ID); + private final ServerInfo serverInfo = new ServerInfo(VERSION, KAFKA_CLUSTER_ID, KSQL_SERVICE_ID); @Test public void testSerializeDeserialize() throws IOException { diff --git 
a/ksql-rocksdb-config-setter/src/main/java/io/confluent/ksql/rocksdb/KsqlBoundedMemoryRocksDBConfigSetter.java b/ksql-rocksdb-config-setter/src/main/java/io/confluent/ksql/rocksdb/KsqlBoundedMemoryRocksDBConfigSetter.java index 9ab95220bd4f..3062d0ca3659 100644 --- a/ksql-rocksdb-config-setter/src/main/java/io/confluent/ksql/rocksdb/KsqlBoundedMemoryRocksDBConfigSetter.java +++ b/ksql-rocksdb-config-setter/src/main/java/io/confluent/ksql/rocksdb/KsqlBoundedMemoryRocksDBConfigSetter.java @@ -55,7 +55,7 @@ static void configure(final Map config, final Options options) { limitTotalMemory(pluginConfig); configureNumThreads(pluginConfig, options); - } catch (IllegalArgumentException e) { + } catch (final IllegalArgumentException e) { reset(); throw e; } diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/KsqlSerdeFactory.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/KsqlSerdeFactory.java index 9231b9d5a8a2..5d706ca50b13 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/KsqlSerdeFactory.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/KsqlSerdeFactory.java @@ -48,10 +48,10 @@ Serde createSerde( @SuppressWarnings("unchecked") default Serde createSerde( - PersistenceSchema schema, - KsqlConfig ksqlConfig, - Supplier schemaRegistryClientFactory, - Class type + final PersistenceSchema schema, + final KsqlConfig ksqlConfig, + final Supplier schemaRegistryClientFactory, + final Class type ) { final Class actualType = SchemaUtil.getJavaType(schema.serializedSchema()); diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java index 5d027db5970e..39a62b458c2b 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java @@ -107,7 +107,7 @@ private Object deserialize(final byte[] bytes) { final JsonNode value = MAPPER.readTree(bytes); return enforceFieldType(this, physicalSchema.serializedSchema(), value); - } catch (IOException e) { + } catch (final IOException e) { throw new SerializationException(e); } } @@ -137,7 +137,7 @@ private static String processString(final JsonValueContext context) { if (context.val instanceof ObjectNode) { try { return MAPPER.writeValueAsString(MAPPER.treeToValue(context.val, Object.class)); - } catch (JsonProcessingException e) { + } catch (final JsonProcessingException e) { throw new KsqlException("Unexpected inability to write value as string: " + context.val); } } @@ -186,7 +186,7 @@ private static Map enforceKeyAndValueTypeForMap(final JsonValueC final ObjectNode map = (ObjectNode) context.val; final Map ksqlMap = new HashMap<>(map.size()); - for (Iterator> it = map.fields(); it.hasNext(); ) { + for (final Iterator> it = map.fields(); it.hasNext(); ) { final Entry e = it.next(); ksqlMap.put( enforceFieldType( @@ -210,7 +210,7 @@ private static Struct enforceFieldTypesForStruct(final JsonValueContext context) final ObjectNode jsonFields = (ObjectNode) context.val; final Map upperCasedFields = upperCaseKeys(jsonFields); - for (Field ksqlField : context.schema.fields()) { + for (final Field ksqlField : context.schema.fields()) { // the "case insensitive" strategy leverages that all KSQL fields are internally // case sensitive - if they were specified without quotes, then they are upper-cased // during parsing. 
any ksql fields that are case insensitive, therefore, will be matched @@ -235,7 +235,7 @@ private static Struct enforceFieldTypesForStruct(final JsonValueContext context) private static Map upperCaseKeys(final ObjectNode map) { final Map result = new HashMap<>(map.size()); - for (Iterator> it = map.fields(); it.hasNext(); ) { + for (final Iterator> it = map.fields(); it.hasNext(); ) { final Entry entry = it.next(); // what happens if we have two fields with the same name and different case? result.put(entry.getKey().toUpperCase(), entry.getValue()); diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/AvroDataTranslatorTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/AvroDataTranslatorTest.java index b836991a539c..4f59ad557f6b 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/AvroDataTranslatorTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/AvroDataTranslatorTest.java @@ -215,7 +215,7 @@ public void shouldUseExplicitSchemaName() { .optional() .build(); - String schemaFullName = "com.custom.schema"; + final String schemaFullName = "com.custom.schema"; final AvroDataTranslator dataTranslator = new AvroDataTranslator(schema, schemaFullName); final Struct ksqlRow = new Struct(schema) diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/KsqlAvroSerializerTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/KsqlAvroSerializerTest.java index 9022098e8166..b54ddfd109b8 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/KsqlAvroSerializerTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/KsqlAvroSerializerTest.java @@ -155,7 +155,7 @@ public class KsqlAvroSerializerTest { private final SchemaRegistryClient schemaRegistryClient = new MockSchemaRegistryClient(); - private KsqlConfig ksqlConfig = new KsqlConfig(ImmutableMap.of()); + private final KsqlConfig ksqlConfig = new KsqlConfig(ImmutableMap.of()); private Serializer serializer; private Deserializer deserializer; diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/delimited/KsqlDelimitedDeserializerTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/delimited/KsqlDelimitedDeserializerTest.java index e71fc30a12c8..b8abe7cafed7 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/delimited/KsqlDelimitedDeserializerTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/delimited/KsqlDelimitedDeserializerTest.java @@ -214,7 +214,7 @@ public void shouldDeserializeDelimitedCorrectlyWithBarDelimiter() { shouldDeserializeDelimitedCorrectlyWithNonDefaultDelimiter('|'); } - private void shouldDeserializeDelimitedCorrectlyWithNonDefaultDelimiter(char delimiter) { + private void shouldDeserializeDelimitedCorrectlyWithNonDefaultDelimiter(final char delimiter) { // Given: final byte[] bytes = "1511897796092\t1\titem_1\t10.0\t10.10\r\n".getBytes(StandardCharsets.UTF_8); @@ -324,7 +324,7 @@ private static PersistenceSchema persistenceSchema(final Schema connectSchema) { return PersistenceSchema.from((ConnectSchema) connectSchema, false); } - private static KsqlDelimitedDeserializer createDeserializer(PersistenceSchema schema) { + private static KsqlDelimitedDeserializer createDeserializer(final PersistenceSchema schema) { return new KsqlDelimitedDeserializer(schema, CSVFormat.DEFAULT.withDelimiter(',')); } diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/json/KsqlJsonDeserializerTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/json/KsqlJsonDeserializerTest.java index 
0a3cc4811fe8..adf3ad624d56 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/json/KsqlJsonDeserializerTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/json/KsqlJsonDeserializerTest.java @@ -127,7 +127,7 @@ public void shouldDeserializeJsonObjectCorrectly() { @Test public void shouldIgnoreDeserializeJsonObjectCaseMismatch() { // Given: - Map anOrder = ImmutableMap.builder() + final Map anOrder = ImmutableMap.builder() .put("CASEFIELD", 1L) .build(); final byte[] bytes = serializeJson(anOrder); @@ -701,7 +701,7 @@ private void givenDeserializerForSchema(final Schema serializedSchema) { private static byte[] serializeJson(final Object expected) { try { return OBJECT_MAPPER.writeValueAsBytes(expected); - } catch (JsonProcessingException e) { + } catch (final JsonProcessingException e) { throw new RuntimeException(e); } } diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/tls/ThreadLocalCloseableTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/tls/ThreadLocalCloseableTest.java index 2c1d96c46b23..7c771cdb80bc 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/tls/ThreadLocalCloseableTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/tls/ThreadLocalCloseableTest.java @@ -15,14 +15,6 @@ package io.confluent.ksql.serde.tls; -import org.easymock.EasyMock; -import org.junit.Test; - -import java.io.Closeable; -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; - import static org.easymock.EasyMock.expectLastCall; import static org.easymock.EasyMock.mock; import static org.easymock.EasyMock.replay; @@ -30,6 +22,13 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; +import java.io.Closeable; +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import org.easymock.EasyMock; +import org.junit.Test; + public class ThreadLocalCloseableTest { @Test public void shouldCloseAllInstances() { @@ -43,7 +42,7 @@ public void shouldCloseAllInstances() { closeables.add(closeable); try { closeable.close(); - } catch (IOException e) { + } catch (final IOException e) { throw new RuntimeException(e); } expectLastCall(); @@ -64,7 +63,7 @@ public void shouldCloseAllInstances() { t -> { try { t.join(); - } catch (InterruptedException e) { + } catch (final InterruptedException e) { throw new RuntimeException(e); } }); diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/SourceBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/SourceBuilder.java index f3c7d585b7c3..8775eef0cc77 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/SourceBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/SourceBuilder.java @@ -269,7 +269,7 @@ private static KStream buildKStream( final Consumed consumed, final Function rowKeyGenerator ) { - KStream stream = queryBuilder.getStreamsBuilder() + final KStream stream = queryBuilder.getStreamsBuilder() .stream(streamSource.getTopicName(), consumed); return stream diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamFlatMapBuilder.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamFlatMapBuilder.java index 0d220a0e6ab5..ffc59f2b092b 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamFlatMapBuilder.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StreamFlatMapBuilder.java @@ -57,10 +57,10 @@ public static KStreamHolder build( 
final CodeGenRunner codeGenRunner = new CodeGenRunner(schema, queryBuilder.getKsqlConfig(), queryBuilder.getFunctionRegistry()); - for (FunctionCall functionCall: tableFunctions) { + for (final FunctionCall functionCall: tableFunctions) { final List expressionMetadataList = new ArrayList<>( functionCall.getArguments().size()); - for (Expression expression : functionCall.getArguments()) { + for (final Expression expression : functionCall.getArguments()) { final ExpressionMetadata expressionMetadata = codeGenRunner.buildCodeGenFromParseTree(expression, "Table function"); expressionMetadataList.add(expressionMetadata); @@ -102,7 +102,7 @@ public static LogicalSchema buildSchema( // We copy all the original columns to the output schema schemaBuilder.keyColumns(inputSchema.key()); - for (Column col : cols) { + for (final Column col : cols) { schemaBuilder.valueColumn(col); } diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializationFactory.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializationFactory.java index 135d550978a4..81082e7da557 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializationFactory.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/materialization/ks/KsMaterializationFactory.java @@ -115,7 +115,7 @@ private static URL buildLocalHost(final Object appServer) { try { return new URL((String) appServer); - } catch (MalformedURLException e) { + } catch (final MalformedURLException e) { throw new IllegalArgumentException(StreamsConfig.APPLICATION_SERVER_CONFIG + " malformed: " + "'" + appServer + "'"); } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsLocatorTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsLocatorTest.java index 193f41a5cc89..a84897f1d3ba 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsLocatorTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/materialization/ks/KsLocatorTest.java @@ -200,7 +200,7 @@ private void givenOwnerMetadata(final Optional hostInfo) { private static URL localHost() { try { return new URL("http://somehost:1234"); - } catch (MalformedURLException e) { + } catch (final MalformedURLException e) { throw new AssertionError("Failed to build URL", e); } } diff --git a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/KafkaEmbedded.java b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/KafkaEmbedded.java index f6fb47cc5f56..0fcac933c528 100644 --- a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/KafkaEmbedded.java +++ b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/KafkaEmbedded.java @@ -119,7 +119,7 @@ void stop() { log.debug("Deleting logs.dir at {} ...", logDir()); try { Files.delete(Paths.get(logDir())); - } catch (IOException e) { + } catch (final IOException e) { log.error("Failed to delete log dir {}", logDir()); } log.debug("Shutdown of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...", diff --git a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java index 02f9edabf10e..f68173234c2c 100644 --- a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java +++ 
b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java @@ -181,9 +181,9 @@ private TestCase(final Method method, final Object[] args) { public void invokeMethod(final T instanceUnderTest) throws Throwable { try { method.invoke(instanceUnderTest, args); - } catch (IllegalAccessException e) { + } catch (final IllegalAccessException e) { throw new AssertionError("Invoke failed", e); - } catch (InvocationTargetException e) { + } catch (final InvocationTargetException e) { throw e.getCause(); } }

From e41d8ee626dc023e89aa2033c48424279a206f03 Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Thu, 2 Jan 2020 16:53:52 +0000
Subject: [PATCH 062/123] test: enhance test framework to support primitive keys (#4201)

* test: enhance test framework to support primitive keys

Enhanced the test framework to work with test cases that use `BIGINT` and `DOUBLE` keys. Such cases previously failed because Jackson deserialized the JSON describing the expected values into `INT` and `DECIMAL`.

To achieve this I've started to tidy up the mess in the test framework. Specifically, we no longer track the serde to use in the `Topic` class, as this is constructed when parsing the test cases. Doing so was problematic as it was hard to know which serde should be used just from the test case text. The serde to use is now determined by looking at the format of sources in the metastore once the SQL statements have been executed.

Also includes some additional test cases for pull queries working with primitive keys, and pulls out the code that determines the format / serde to use for a topic into a `TopicInfoCache`, as this is now shared by `QTT` and `RQTT` tests.
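For a concrete sense of the number-type mismatch described above, here is a minimal sketch — not code from this patch — assuming a Jackson `ObjectMapper` configured to read floating-point numbers as `BigDecimal` (which would account for the `DECIMAL` mentioned above); the class name `NumberTypeMismatch` is purely illustrative:

import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

public final class NumberTypeMismatch {

  public static void main(final String[] args) throws Exception {
    final ObjectMapper mapper = new ObjectMapper()
        .enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS);

    // Jackson picks the narrowest matching Java type: integral JSON
    // numbers become Integer, floating-point ones become BigDecimal here.
    final Object intKey = mapper.readValue("10", Object.class);
    final Object floatKey = mapper.readValue("10.1", Object.class);

    System.out.println(intKey.getClass().getSimpleName());   // Integer
    System.out.println(floatKey.getClass().getSimpleName()); // BigDecimal

    // A BIGINT key materialized by the engine is a Long, so an equality
    // check against the expected value fails even though the numbers match:
    System.out.println(Long.valueOf(10L).equals(intKey));    // false
  }
}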
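The lazy per-topic caching this commit describes can likewise be pictured with a minimal sketch, assuming Guava's `LoadingCache`. This is not the `TopicInfoCache` added by the commit, which resolves key and value deserializers by consulting the engine's metastore; the `loader` function and the class name `LazyTopicInfoCache` are stand-ins:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.function.Function;

final class LazyTopicInfoCache<T> {

  private final LoadingCache<String, T> cache;

  LazyTopicInfoCache(final Function<String, T> loader) {
    // Each topic's info is computed on first access and then reused.
    this.cache = CacheBuilder.newBuilder()
        .build(CacheLoader.from(loader::apply));
  }

  T get(final String topicName) {
    return cache.getUnchecked(topicName);
  }

  void clear() {
    // Invalidate between test cases so stale serde info is never reused.
    cache.invalidateAll();
  }
}

Deferring the lookup like this means serde resolution can safely happen after the statements have run, which is exactly when the metastore knows each source's format.
---
 .../ksql/engine/StubInsertValuesExecutor.java | 76 +++--
 .../confluent/ksql/test/model/RecordNode.java | 11 +-
 .../confluent/ksql/test/model/TopicNode.java  | 29 +-
 .../json/ValueSpecJsonSerdeSupplier.java      |  2 +
 .../test/serde/kafka/KafkaSerdeSupplier.java  | 29 +-
 .../io/confluent/ksql/test/tools/Record.java  | 43 +--
 .../ksql/test/tools/TestCaseBuilder.java      |  1 -
 .../ksql/test/tools/TestCaseBuilderUtil.java  | 69 +----
 .../ksql/test/tools/TestExecutor.java         | 272 ++----------------
 .../ksql/test/tools/TestExecutorUtil.java     | 81 +++---
 .../io/confluent/ksql/test/tools/Topic.java   | 59 +---
 .../ksql/test/tools/TopicInfoCache.java       | 272 ++++++++++++++++++
 .../confluent/ksql/test/utils/SerdeUtil.java  | 12 +-
 .../engine/StubInsertValuesExecutorTest.java  | 59 ++--
 .../ksql/test/SchemaTranslationTest.java      | 15 +-
 .../test/rest/RestQueryTranslationTest.java   |  1 +
 .../ksql/test/rest/RestTestCaseBuilder.java   |  1 -
 .../ksql/test/rest/RestTestExecutor.java      | 160 +++++++----
 .../confluent/ksql/test/tools/RecordTest.java | 122 +-------
 .../ksql/test/tools/StubKafkaServiceTest.java | 10 +-
 .../ksql/test/tools/TestExecutorTest.java     |  9 +
 .../ksql/test/tools/TestExecutorUtilTest.java | 17 +-
 .../query-validation-tests/serdes.json        | 54 ++--
 ...eries-against-materialized-aggregates.json | 53 ++++
 .../ksql/rest/server/KsqlRestApplication.java |  5 +
 .../ksql/rest/server/TestKsqlRestApp.java     |  8 +
 26 files changed, 641 insertions(+), 829 deletions(-)
 create mode 100644 ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopicInfoCache.java

diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/engine/StubInsertValuesExecutor.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/engine/StubInsertValuesExecutor.java
index b241454de837..14e202c35ef6 100644
--- 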
a/ksql-functional-tests/src/main/java/io/confluent/ksql/engine/StubInsertValuesExecutor.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/engine/StubInsertValuesExecutor.java @@ -15,17 +15,17 @@ package io.confluent.ksql.engine; -import com.fasterxml.jackson.databind.ObjectMapper; +import static java.util.Objects.requireNonNull; + import com.google.common.annotations.VisibleForTesting; -import io.confluent.ksql.test.serde.string.StringSerdeSupplier; +import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.test.tools.Record; import io.confluent.ksql.test.tools.Topic; +import io.confluent.ksql.test.tools.TopicInfoCache; +import io.confluent.ksql.test.tools.TopicInfoCache.TopicInfo; import io.confluent.ksql.test.tools.exceptions.InvalidFieldException; import io.confluent.ksql.test.tools.stubs.StubKafkaRecord; import io.confluent.ksql.test.tools.stubs.StubKafkaService; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Objects; import java.util.Optional; import org.apache.kafka.clients.producer.ProducerRecord; @@ -34,54 +34,78 @@ public final class StubInsertValuesExecutor { private StubInsertValuesExecutor() { } - public static InsertValuesExecutor of(final StubKafkaService stubKafkaService) { - final StubProducer stubProducer = new StubProducer(stubKafkaService); + public static InsertValuesExecutor of( + final StubKafkaService stubKafkaService, + final KsqlExecutionContext executionContext + ) { + final TopicInfoCache topicInfoCache = new TopicInfoCache( + executionContext, + executionContext.getServiceContext().getSchemaRegistryClient() + ); + + final StubProducer stubProducer = new StubProducer(stubKafkaService, topicInfoCache); return new InsertValuesExecutor( false, - (record, ignored1, ingnored2) -> stubProducer.sendRecord(record)); + (record, ignored1, ignored2) -> stubProducer.sendRecord(record) + ); } @VisibleForTesting static class StubProducer { private final StubKafkaService stubKafkaService; + private final TopicInfoCache topicInfoCache; - StubProducer(final StubKafkaService stubKafkaService) { - this.stubKafkaService = Objects.requireNonNull(stubKafkaService, "stubKafkaService"); + StubProducer( + final StubKafkaService stubKafkaService, + final TopicInfoCache topicInfoCache + ) { + this.stubKafkaService = requireNonNull(stubKafkaService, "stubKafkaService"); + this.topicInfoCache = requireNonNull(topicInfoCache, "topicInfoCache"); } void sendRecord(final ProducerRecord record) { - final Object value = getValue(record); + final Topic topic = stubKafkaService.getTopic(record.topic()); + + final Object key = deserializeKey(record); + + final Object value = deserializeValue(record); + + final Optional timestamp = Optional.of(record.timestamp()); this.stubKafkaService.writeRecord(record.topic(), StubKafkaRecord.of( new Record( - stubKafkaService.getTopic(record.topic()), - new String(record.key(), StandardCharsets.UTF_8), + topic, + key, value, null, - Optional.of(record.timestamp()), + timestamp, null ), null) ); } - private Object getValue(final ProducerRecord record) { - final Topic topic = stubKafkaService.getTopic(record.topic()); + private Object deserializeKey(final ProducerRecord record) { + try { + final TopicInfo topicInfo = topicInfoCache.get(record.topic()); + return topicInfo.getKeyDeserializer() + .deserialize(record.topic(), record.key()); + } catch (final Exception e) { + throw new InvalidFieldException("key", "failed to parse", e); + } + } - final Object value; - if (topic.getValueSerdeSupplier() 
instanceof StringSerdeSupplier) { - value = new String(record.value(), StandardCharsets.UTF_8); - } else { - try { - value = new ObjectMapper().readValue(record.value(), Object.class); - } catch (final IOException e) { - throw new InvalidFieldException("value", "failed to parse", e); - } + private Object deserializeValue(final ProducerRecord record) { + try { + final TopicInfo topicInfo = topicInfoCache.get(record.topic()); + return topicInfo.getValueDeserializer() + .deserialize(record.topic(), record.value()); + } catch (final Exception e) { + throw new InvalidFieldException("value", "failed to parse", e); } - return value; } } } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/RecordNode.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/RecordNode.java index a8e5d37b2a3c..c4b3d8bc3eeb 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/RecordNode.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/RecordNode.java @@ -23,7 +23,8 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; -import io.confluent.ksql.test.serde.string.StringSerdeSupplier; +import com.fasterxml.jackson.databind.node.NullNode; +import com.fasterxml.jackson.databind.node.TextNode; import io.confluent.ksql.test.tools.Record; import io.confluent.ksql.test.tools.Topic; import io.confluent.ksql.test.tools.exceptions.InvalidFieldException; @@ -69,7 +70,7 @@ public String topicName() { public Record build(final Map topics) { final Topic topic = topics.get(topicName); - final Object recordValue = buildValue(topic); + final Object recordValue = buildValue(); return new Record( topic, @@ -85,12 +86,12 @@ public Optional getWindow() { return window; } - private Object buildValue(final Topic topic) { - if (value.asText().equals("null")) { + private Object buildValue() { + if (value instanceof NullNode) { return null; } - if (topic.getValueSerdeSupplier() instanceof StringSerdeSupplier) { + if (value instanceof TextNode) { return value.asText(); } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/TopicNode.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/TopicNode.java index 4e367acdfa4b..d696be93db7d 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/TopicNode.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/TopicNode.java @@ -27,14 +27,9 @@ import io.confluent.ksql.schema.ksql.LogicalSchema.Builder; import io.confluent.ksql.schema.ksql.SchemaConverters; import io.confluent.ksql.schema.ksql.types.SqlStruct; -import io.confluent.ksql.serde.Format; -import io.confluent.ksql.serde.FormatInfo; -import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.test.TestFrameworkException; -import io.confluent.ksql.test.serde.SerdeSupplier; import io.confluent.ksql.test.tools.Topic; import io.confluent.ksql.test.tools.exceptions.InvalidFieldException; -import io.confluent.ksql.test.utils.SerdeUtil; import java.util.Optional; import org.apache.avro.Schema; @@ -68,28 +63,8 @@ public final class TopicNode { } } - public Topic build(final Optional defaultFormat) { - final String formatToUse = format - .replace("{FORMAT}", defaultFormat.orElse(FORMAT_REPLACE_ERROR)); - - final SerdeSupplier keySerdeSupplier = SerdeUtil.getKeySerdeSupplier( - KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA)), - 
LogicalSchema.builder()::build // Assume default STRING key for now. - ); - - final SerdeSupplier valueSerdeSupplier = SerdeUtil.getSerdeSupplier( - Format.of(formatToUse), - this::logicalSchema - ); - - return new Topic( - name, - avroSchema, - keySerdeSupplier, - valueSerdeSupplier, - numPartitions, - replicas - ); + public Topic build() { + return new Topic(name, numPartitions, replicas, avroSchema); } private LogicalSchema logicalSchema() { diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/json/ValueSpecJsonSerdeSupplier.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/json/ValueSpecJsonSerdeSupplier.java index 71fa0faf18e4..9a73a9c7a0be 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/json/ValueSpecJsonSerdeSupplier.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/json/ValueSpecJsonSerdeSupplier.java @@ -24,6 +24,7 @@ import com.google.common.collect.ImmutableList; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.test.serde.SerdeSupplier; +import java.math.BigDecimal; import java.util.Collection; import java.util.List; import java.util.Map; @@ -101,6 +102,7 @@ private static final class Converter { Converter.converter(Long.class, JsonNodeFactory.instance::numberNode), Converter.converter(Float.class, JsonNodeFactory.instance::numberNode), Converter.converter(Double.class, JsonNodeFactory.instance::numberNode), + Converter.converter(BigDecimal.class, JsonNodeFactory.instance::numberNode), Converter.converter(String.class, JsonNodeFactory.instance::textNode), Converter.converter(Collection.class, Converter::handleCollection), Converter.converter(Map.class, Converter::handleMap) diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/kafka/KafkaSerdeSupplier.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/kafka/KafkaSerdeSupplier.java index 6a23853317fd..6e985c566070 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/kafka/KafkaSerdeSupplier.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/serde/kafka/KafkaSerdeSupplier.java @@ -15,6 +15,8 @@ package io.confluent.ksql.test.serde.kafka; +import static java.util.Objects.requireNonNull; + import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.schema.ksql.Column; import io.confluent.ksql.schema.ksql.LogicalSchema; @@ -24,8 +26,6 @@ import io.confluent.ksql.test.serde.SerdeSupplier; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.function.Supplier; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serdes; @@ -34,10 +34,10 @@ public class KafkaSerdeSupplier implements SerdeSupplier { - private final Supplier schemaSupplier; + private final LogicalSchema schema; - public KafkaSerdeSupplier(final Supplier schemaSupplier) { - this.schemaSupplier = Objects.requireNonNull(schemaSupplier, "schema"); + public KafkaSerdeSupplier(final LogicalSchema schema) { + this.schema = requireNonNull(schema, "schema"); } @Override @@ -51,7 +51,6 @@ public Deserializer getDeserializer(final SchemaRegistryClient schemaReg } private SqlType getColumnType(final boolean isKey) { - final LogicalSchema schema = schemaSupplier.get(); final List columns = isKey ? 
schema.key() : schema.value(); if (columns.isEmpty()) { throw new IllegalStateException("No columns in schema"); @@ -64,23 +63,20 @@ private SqlType getColumnType(final boolean isKey) { return columns.get(0).type(); } - @SuppressWarnings("unchecked") - private Serde getSerde( - final SqlType sqlType - ) { + private static Serde getSerde(final SqlType sqlType) { final Type connectType = SchemaConverters.sqlToConnectConverter() .toConnectSchema(sqlType) .type(); switch (connectType) { case INT32: - return (Serde) Serdes.Integer(); + return Serdes.Integer(); case INT64: - return (Serde) Serdes.Long(); + return Serdes.Long(); case FLOAT64: - return (Serde) Serdes.Double(); + return Serdes.Double(); case STRING: - return (Serde) Serdes.String(); + return Serdes.String(); default: throw new IllegalStateException("Unsupported type for KAFKA format"); } @@ -90,10 +86,11 @@ private final class RowSerializer implements Serializer { private Serializer delegate; + @SuppressWarnings({"unchecked", "rawtypes"}) @Override public void configure(final Map configs, final boolean isKey) { final SqlType sqlType = getColumnType(isKey); - delegate = getSerde(sqlType).serializer(); + delegate = (Serializer)getSerde(sqlType).serializer(); delegate.configure(configs, isKey); } @@ -105,7 +102,7 @@ public byte[] serialize(final String topic, final Object value) { private final class RowDeserializer implements Deserializer { - private Deserializer delegate; + private Deserializer delegate; private String type; @Override diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Record.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Record.java index 711babda4711..a9a66480705b 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Record.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Record.java @@ -19,13 +19,6 @@ import io.confluent.ksql.test.model.WindowData; import java.util.Objects; import java.util.Optional; -import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.serialization.Serializer; -import org.apache.kafka.streams.kstream.SessionWindowedDeserializer; -import org.apache.kafka.streams.kstream.SessionWindowedSerializer; -import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; -import org.apache.kafka.streams.kstream.TimeWindowedSerializer; import org.apache.kafka.streams.kstream.Window; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.SessionWindow; @@ -33,24 +26,13 @@ public class Record { - final Topic topic; + private final Topic topic; private final Object key; private final Object value; private final Optional timestamp; private final WindowData window; private final Optional jsonValue; - public Record( - final Topic topic, - final Object key, - final Object value, - final JsonNode jsonValue, - final long timestamp, - final WindowData window - ) { - this(topic, key, value, jsonValue, Optional.of(timestamp), window); - } - public Record( final Topic topic, final Object key, @@ -67,27 +49,8 @@ public Record( this.window = window; } - Serializer keySerializer() { - final Serializer stringDe = Serdes.String().serializer(); - if (window == null) { - return stringDe; - } - - return window.type == WindowData.Type.SESSION - ? 
new SessionWindowedSerializer<>(stringDe) - : new TimeWindowedSerializer<>(stringDe); - } - - @SuppressWarnings("rawtypes") - Deserializer keyDeserializer() { - if (window == null) { - return Serdes.String().deserializer(); - } - - final Deserializer inner = Serdes.String().deserializer(); - return window.type == WindowData.Type.SESSION - ? new SessionWindowedDeserializer<>(inner) - : new TimeWindowedDeserializer<>(inner, window.size()); + public Topic getTopic() { + return topic; } public Object rawKey() { diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCaseBuilder.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCaseBuilder.java index 46b82fcdd622..7ffb42babdea 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCaseBuilder.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCaseBuilder.java @@ -81,7 +81,6 @@ private TestCase createTest( test.topics(), test.outputs(), test.inputs(), - explicitFormat, ee.isPresent(), functionRegistry ); diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCaseBuilderUtil.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCaseBuilderUtil.java index 96420378137a..8dc8d97bc3e3 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCaseBuilderUtil.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCaseBuilderUtil.java @@ -23,32 +23,22 @@ import io.confluent.ksql.execution.ddl.commands.KsqlTopic; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.metastore.MetaStoreImpl; -import io.confluent.ksql.model.WindowType; import io.confluent.ksql.parser.DefaultKsqlParser; import io.confluent.ksql.parser.KsqlParser; import io.confluent.ksql.parser.KsqlParser.ParsedStatement; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.parser.SqlBaseParser; import io.confluent.ksql.parser.tree.CreateSource; -import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.SchemaConverters; import io.confluent.ksql.serde.Format; -import io.confluent.ksql.serde.FormatInfo; -import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.serde.ValueFormat; -import io.confluent.ksql.serde.WindowInfo; import io.confluent.ksql.test.model.RecordNode; import io.confluent.ksql.test.model.TopicNode; -import io.confluent.ksql.test.model.WindowData; -import io.confluent.ksql.test.model.WindowData.Type; -import io.confluent.ksql.test.serde.SerdeSupplier; import io.confluent.ksql.test.tools.exceptions.InvalidFieldException; -import io.confluent.ksql.test.utils.SerdeUtil; import io.confluent.ksql.topic.TopicFactory; import io.confluent.ksql.util.DecimalUtil; import io.confluent.ksql.util.KsqlConstants; import java.nio.file.Path; -import java.time.Duration; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -57,7 +47,6 @@ import java.util.UUID; import java.util.function.Function; import java.util.function.Predicate; -import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.kafka.connect.data.Field; import org.apache.kafka.connect.data.Schema; @@ -105,7 +94,6 @@ public static Map getTopicsByName( final List topics, final List outputs, final List inputs, - final Optional defaultFormat, final boolean expectsException, final FunctionRegistry functionRegistry ) { @@ -113,7 +101,7 @@ public static Map getTopicsByName( // Add all 
topics from topic nodes to the map: topics.stream() - .map(node -> node.build(defaultFormat)) + .map(TopicNode::build) .forEach(topic -> allTopics.put(topic.getName(), topic)); // Infer topics if not added already: @@ -129,19 +117,9 @@ public static Map getTopicsByName( throw new InvalidFieldException("statements/topics", "The test does not define any topics"); } - final SerdeSupplier defaultValueSerdeSupplier = - allTopics.values().iterator().next().getValueSerdeSupplier(); - // Get topics from inputs and outputs fields: Streams.concat(inputs.stream(), outputs.stream()) - .map(recordNode -> new Topic( - recordNode.topicName(), - Optional.empty(), - getKeySerdeSupplier(recordNode.getWindow()), - defaultValueSerdeSupplier, - 4, - 1 - )) + .map(recordNode -> new Topic(recordNode.topicName(), 4, 1, Optional.empty())) .forEach(topic -> allTopics.putIfAbsent(topic.getName(), topic)); return allTopics; @@ -163,14 +141,6 @@ private static Topic createTopicFromStatement( final KsqlTopic ksqlTopic = TopicFactory.create(statement.getProperties()); - final KeyFormat keyFormat = ksqlTopic.getKeyFormat(); - - final Supplier logicalSchemaSupplier = - () -> statement.getElements().toLogicalSchema(true); - - final SerdeSupplier keySerdeSupplier = - SerdeUtil.getKeySerdeSupplier(keyFormat, logicalSchemaSupplier); - final ValueFormat valueFormat = ksqlTopic.getValueFormat(); final Optional avroSchema; if (valueFormat.getFormat() == Format.AVRO) { @@ -186,18 +156,11 @@ private static Topic createTopicFromStatement( avroSchema = Optional.empty(); } - final SerdeSupplier valueSerdeSupplier = SerdeUtil.getSerdeSupplier( - valueFormat.getFormat(), - logicalSchemaSupplier - ); - return new Topic( ksqlTopic.getKafkaTopicName(), - avroSchema, - keySerdeSupplier, - valueSerdeSupplier, KsqlConstants.legacyDefaultSinkPartitionCount, - KsqlConstants.legacyDefaultSinkReplicaCount + KsqlConstants.legacyDefaultSinkReplicaCount, + avroSchema ); }; @@ -222,30 +185,6 @@ private static Topic createTopicFromStatement( } } - private static SerdeSupplier getKeySerdeSupplier(final Optional windowDataInfo) { - if (windowDataInfo.isPresent()) { - final WindowData windowData = windowDataInfo.get(); - final WindowType windowType = WindowType.of((windowData.type == Type.SESSION) - ? WindowType.SESSION.name() - : WindowType.TUMBLING.name()); - final KeyFormat windowKeyFormat = KeyFormat.windowed( - FormatInfo.of( - Format.KAFKA, - Optional.empty(), - Optional.empty() - ), - WindowInfo.of( - windowType, - windowType == WindowType.SESSION - ? 
Optional.empty() : Optional.of(Duration.ofMillis(windowData.size()))) - ); - return SerdeUtil.getKeySerdeSupplier(windowKeyFormat, () -> LogicalSchema.builder().build()); - } - return SerdeUtil.getKeySerdeSupplier( - KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA)), - () -> LogicalSchema.builder().build()); - } - private static Schema addNames(final Schema schema) { final SchemaBuilder builder; switch (schema.type()) { diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java index 49f774fc5099..810e68d6195c 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java @@ -26,42 +26,27 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Iterables; -import com.google.errorprone.annotations.Immutable; import io.confluent.common.utils.TestUtils; import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; -import io.confluent.kafka.serializers.KafkaAvroSerializerConfig; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.internal.KsqlEngineMetrics; import io.confluent.ksql.logging.processing.ProcessingLogContext; import io.confluent.ksql.metastore.MetaStoreImpl; import io.confluent.ksql.metastore.MutableMetaStore; -import io.confluent.ksql.parser.DurationParser; -import io.confluent.ksql.query.QueryId; import io.confluent.ksql.query.id.SequentialQueryIdGenerator; -import io.confluent.ksql.schema.ksql.DefaultSqlValueCoercer; -import io.confluent.ksql.schema.ksql.LogicalSchema; -import io.confluent.ksql.schema.ksql.SchemaConverters; -import io.confluent.ksql.schema.ksql.types.SqlType; -import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.services.DefaultConnectClient; import io.confluent.ksql.services.DefaultServiceContext; import io.confluent.ksql.services.DisabledKsqlClient; import io.confluent.ksql.services.ServiceContext; -import io.confluent.ksql.test.TestFrameworkException; -import io.confluent.ksql.test.serde.SerdeSupplier; -import io.confluent.ksql.test.serde.avro.AvroSerdeSupplier; -import io.confluent.ksql.test.serde.avro.ValueSpecAvroSerdeSupplier; +import io.confluent.ksql.test.tools.TopicInfoCache.TopicInfo; import io.confluent.ksql.test.tools.stubs.StubKafkaClientSupplier; import io.confluent.ksql.test.tools.stubs.StubKafkaRecord; import io.confluent.ksql.test.tools.stubs.StubKafkaService; import io.confluent.ksql.test.tools.stubs.StubKafkaTopicClient; -import io.confluent.ksql.test.utils.SerdeUtil; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.KsqlServerException; -import io.confluent.ksql.util.PersistentQueryMetadata; import java.io.Closeable; import java.util.Collections; import java.util.HashSet; @@ -69,19 +54,14 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.OptionalLong; import java.util.Set; import java.util.function.Function; -import java.util.regex.Pattern; import java.util.stream.Collectors; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.producer.ProducerRecord; -import 
org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.TopologyTestDriver; -import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; import org.hamcrest.Matcher; import org.hamcrest.StringDescription; @@ -99,21 +79,13 @@ public class TestExecutor implements Closeable { .put(KsqlConfig.KSQL_SERVICE_ID_CONFIG, "some.ksql.service.id") .build(); - private static final Pattern INTERNAL_TOPIC_PATTERN = Pattern - .compile("_confluent.*query_(.*_\\d+)-.*-(changelog|repartition)"); - - private static final Pattern WINDOWED_JOIN_PATTERN = Pattern - .compile( - "CREATE .* JOIN .* WITHIN (\\d+ \\w+) ON .*", - Pattern.CASE_INSENSITIVE | Pattern.DOTALL - ); - private final ServiceContext serviceContext; private final KsqlEngine ksqlEngine; private final Map config = baseConfig(); private final StubKafkaService stubKafkaService; private final TopologyBuilder topologyBuilder; private final Function> internalTopicsAccessor; + private final TopicInfoCache topicInfoCache; public TestExecutor() { this(StubKafkaService.create(), getServiceContext()); @@ -132,6 +104,7 @@ public TestExecutor() { this.ksqlEngine = requireNonNull(ksqlEngine, "ksqlEngine"); this.topologyBuilder = requireNonNull(topologyBuilder, "topologyBuilder"); this.internalTopicsAccessor = requireNonNull(internalTopicsAccessor, "internalTopicsAccessor"); + this.topicInfoCache = new TopicInfoCache(ksqlEngine, serviceContext.getSchemaRegistryClient()); } private TestExecutor( @@ -149,6 +122,8 @@ private TestExecutor( public void buildAndExecuteQuery(final TestCase testCase) { + topicInfoCache.clear(); + final KsqlConfig currentConfigs = new KsqlConfig(config); final Map persistedConfigs = testCase.persistedProperties(); final KsqlConfig ksqlConfig = persistedConfigs.isEmpty() ? currentConfigs : @@ -171,7 +146,7 @@ public void buildAndExecuteQuery(final TestCase testCase) { writeInputIntoTopics(testCase.getInputRecords(), stubKafkaService); final Set inputTopics = testCase.getInputRecords() .stream() - .map(record -> record.topic.getName()) + .map(record -> record.getTopic().getName()) .collect(Collectors.toSet()); final Set allTopicNames = new HashSet<>(); @@ -251,10 +226,10 @@ private void validateTopicData( + "> records but it was <" + actual.size() + ">\n" + getActualsForErrorMessage(actual)); } - final Function keyCoercer = keyCoercerForTopic(topicName); + final TopicInfo topicInfo = topicInfoCache.get(topicName); for (int i = 0; i < expected.size(); i++) { - final Record expectedRecord = coerceRecordKey(expected.get(i), i, keyCoercer); + final Record expectedRecord = topicInfo.coerceRecordKey(expected.get(i), i); final ProducerRecord actualProducerRecord = actual.get(i).getProducerRecord(); validateCreatedMessage( @@ -267,153 +242,6 @@ private void validateTopicData( } } - /** - * Coerce the key value to the correct type. - * - *
p>
The type of the key loaded from the JSON test case file may not be the exact match on type, - * e.g. JSON will load a small number as an integer, but the key type of the source might be a - * long. - * - * @param record the record to coerce - * @param msgIndex the index of the message, displayed in the error message - * @param keyCoercer keyCoercer to use - * @return a new Record with the correct key type. - */ - private static Record coerceRecordKey( - final Record record, - final int msgIndex, - final Function keyCoercer - ) { - try { - final Object coerced = keyCoercer.apply(record.rawKey()); - return record.withKey(coerced); - } catch (final Exception e) { - throw new AssertionError( - "Topic '" + record.topic.getName() + "', message " + msgIndex - + ": Invalid test-case: could not coerce key in test case to required type. " - + e.getMessage(), - e); - } - } - - private Function keyCoercerForTopic(final String topicName) { - final TopicInfo topicInfo = getTopicInfo(topicName); - - final SqlType keyType = topicInfo - .getSchema() - .key() - .get(0) - .type(); - - return key -> { - if (key == null) { - return null; - } - - return DefaultSqlValueCoercer.INSTANCE - .coerce(key, keyType) - .orElseThrow(() -> new AssertionError("Invalid key value for topic " + topicName + "." - + System.lineSeparator() - + "Expected KeyType: " + keyType - + System.lineSeparator() - + "Actual KeyType: " + SchemaConverters.javaToSqlConverter().toSqlType(key.getClass()) - + ", key: " + key + "." - + System.lineSeparator() - + "This is likely caused by the key type in the test-case not matching the schema.")); - }; - } - - private TopicInfo getTopicInfo(final String topicName) { - try { - final java.util.regex.Matcher matcher = INTERNAL_TOPIC_PATTERN.matcher(topicName); - if (matcher.matches()) { - // Internal topic: - final QueryId queryId = new QueryId(matcher.group(1)); - final PersistentQueryMetadata query = ksqlEngine - .getPersistentQuery(queryId) - .orElseThrow(() -> new TestFrameworkException("Unknown queryId for internal topic: " - + queryId)); - - final java.util.regex.Matcher windowedJoinMatcher = WINDOWED_JOIN_PATTERN - .matcher(query.getStatementString()); - - final OptionalLong changeLogWindowSize = topicName.endsWith("-changelog") - && windowedJoinMatcher.matches() - ? 
OptionalLong.of(DurationParser.parse(windowedJoinMatcher.group(1)).toMillis()) - : OptionalLong.empty(); - - return new TopicInfo( - query.getLogicalSchema(), - query.getResultTopic().getKeyFormat(), - changeLogWindowSize - ); - } - - // Source / sink topic: - final Set keyTypes = ksqlEngine.getMetaStore().getAllDataSources().values() - .stream() - .filter(source -> source.getKafkaTopicName().equals(topicName)) - .map(source -> new TopicInfo( - source.getSchema(), - source.getKsqlTopic().getKeyFormat(), - OptionalLong.empty() - )) - .collect(Collectors.toSet()); - - if (keyTypes.isEmpty()) { - throw new TestFrameworkException("no source found for topic"); - } - - return Iterables.get(keyTypes, 0); - } catch (final Exception e) { - throw new TestFrameworkException("Failed to determine key type for" - + System.lineSeparator() + "topic: " + topicName - + System.lineSeparator() + "reason: " + e.getMessage(), e); - } - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - private Serializer getKeySerializer(final String topicName) { - final TopicInfo topicInfo = getTopicInfo(topicName); - - final SerdeSupplier keySerdeSupplier = SerdeUtil - .getKeySerdeSupplier(topicInfo.getKeyFormat(), topicInfo::getSchema); - - final Serializer serializer = keySerdeSupplier.getSerializer( - serviceContext.getSchemaRegistryClient() - ); - - serializer.configure(ImmutableMap.of( - KafkaAvroSerializerConfig.SCHEMA_REGISTRY_URL_CONFIG, "something" - ), true); - - return (Serializer) serializer; - } - - private Deserializer getKeyDeserializer(final String topicName) { - final TopicInfo topicInfo = getTopicInfo(topicName); - - final SerdeSupplier keySerdeSupplier = SerdeUtil - .getKeySerdeSupplier(topicInfo.getKeyFormat(), topicInfo::getSchema); - - final Deserializer deserializer = keySerdeSupplier.getDeserializer( - serviceContext.getSchemaRegistryClient() - ); - - deserializer.configure(ImmutableMap.of(), true); - - if (!topicInfo.getChangeLogWindowSize().isPresent()) { - return deserializer; - } - - final TimeWindowedDeserializer changeLogDeserializer = new TimeWindowedDeserializer<>( - deserializer, topicInfo.getChangeLogWindowSize().getAsLong()); - - changeLogDeserializer.setIsChangelogTopic(true); - - return changeLogDeserializer; - } - private static String getActualsForErrorMessage(final List actual) { final StringBuilder stringBuilder = new StringBuilder("Actual records: \n"); for (final StubKafkaRecord stubKafkaRecord : actual) { @@ -462,13 +290,11 @@ private void pipeRecordsFromProvidedInput( int inputRecordIndex = 0; for (final Record record : testCase.getInputRecords()) { - if (topologyTestDriverContainer.getSourceTopicNames().contains(record.topic.getName())) { + if (topologyTestDriverContainer.getSourceTopicNames().contains(record.getTopic().getName())) { - final Record coerced = coerceRecordKey( - record, - inputRecordIndex, - keyCoercerForTopic(record.topic.getName()) - ); + final TopicInfo topicInfo = topicInfoCache.get(record.getTopic().getName()); + + final Record coerced = topicInfo.coerceRecordKey(record, inputRecordIndex); processSingleRecord( StubKafkaRecord.of(coerced, null), @@ -493,35 +319,28 @@ private void pipeRecordsFromKafka( } } - @SuppressWarnings("unchecked") private void processSingleRecord( final StubKafkaRecord inputRecord, final TopologyTestDriverContainer testDriver, final Set possibleSinkTopics ) { final Topic recordTopic = stubKafkaService - .getTopic(inputRecord.getTestRecord().topic.getName()); - - final SchemaRegistryClient schemaRegistryClient = 
serviceContext.getSchemaRegistryClient(); - - final Serializer keySerializer = getKeySerializer(recordTopic.getName()); + .getTopic(inputRecord.getTestRecord().getTopic().getName()); - final Serializer valueSerializer = - recordTopic.getValueSerdeSupplier() instanceof AvroSerdeSupplier - ? new ValueSpecAvroSerdeSupplier().getSerializer(schemaRegistryClient) - : recordTopic.getValueSerializer(schemaRegistryClient); + final TopicInfo topicInfo = topicInfoCache.get(recordTopic.getName()); final Object key = getKey(inputRecord); final ConsumerRecord consumerRecord = new org.apache.kafka.streams.test.ConsumerRecordFactory<>( - keySerializer, - valueSerializer + topicInfo.getKeySerializer(), + topicInfo.getValueSerializer() ).create( recordTopic.getName(), key, inputRecord.getTestRecord().value(), inputRecord.getTestRecord().timestamp().orElse(0L) ); + testDriver.getTopologyTestDriver().pipeInput(consumerRecord); final Topic sinkTopic = testDriver.getSinkTopic(); @@ -562,17 +381,18 @@ private void processRecordsForTopic( } } - @SuppressWarnings("unchecked") private ProducerRecord readOutput( final TopologyTestDriver topologyTestDriver, final Topic sinkTopic, final int idx ) { try { + final TopicInfo topicInfo = topicInfoCache.get(sinkTopic.getName()); + return topologyTestDriver.readOutput( sinkTopic.getName(), - getKeyDeserializer(sinkTopic.getName()), - sinkTopic.getValueDeserializer(serviceContext.getSchemaRegistryClient()) + topicInfo.getKeyDeserializer(), + topicInfo.getValueDeserializer() ); } catch (final Exception e) { throw new AssertionError("Topic " + sinkTopic.getName() @@ -631,7 +451,7 @@ private static void writeInputIntoTopics( ) { inputRecords.forEach( record -> stubKafkaService.writeRecord( - record.topic.getName(), + record.getTopic().getName(), StubKafkaRecord.of(record, null)) ); } @@ -683,52 +503,4 @@ List buildStreamsTopologyTestDrivers( StubKafkaService stubKafkaService ); } - - @Immutable - private static final class TopicInfo { - - private final LogicalSchema schema; - private final KeyFormat keyFormat; - private final OptionalLong changeLogWindowSize; - - TopicInfo( - final LogicalSchema schema, - final KeyFormat keyFormat, - final OptionalLong changeLogWindowSize - ) { - this.schema = requireNonNull(schema, "schema"); - this.keyFormat = requireNonNull(keyFormat, "keyFormat"); - this.changeLogWindowSize = requireNonNull(changeLogWindowSize, "changeLogWindowSize"); - } - - public KeyFormat getKeyFormat() { - return keyFormat; - } - - public LogicalSchema getSchema() { - return schema; - } - - public OptionalLong getChangeLogWindowSize() { - return changeLogWindowSize; - } - - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - final TopicInfo topicInfo = (TopicInfo) o; - return Objects.equals(schema, topicInfo.schema) - && Objects.equals(keyFormat, topicInfo.keyFormat); - } - - @Override - public int hashCode() { - return Objects.hash(schema, keyFormat); - } - } } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java index c60e89eeb4fc..935ce292ef84 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java @@ -15,6 +15,7 @@ package io.confluent.ksql.test.tools; +import static 
java.util.Objects.requireNonNull; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; @@ -29,7 +30,6 @@ import io.confluent.ksql.KsqlExecutionContext.ExecuteResult; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.engine.KsqlPlan; -import io.confluent.ksql.engine.QueryPlan; import io.confluent.ksql.engine.SqlFormatInjector; import io.confluent.ksql.engine.StubInsertValuesExecutor; import io.confluent.ksql.execution.json.PlanJsonMapper; @@ -46,9 +46,7 @@ import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; -import io.confluent.ksql.test.serde.SerdeSupplier; import io.confluent.ksql.test.tools.stubs.StubKafkaService; -import io.confluent.ksql.test.utils.SerdeUtil; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlConstants; import io.confluent.ksql.util.KsqlException; @@ -61,7 +59,6 @@ import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import java.util.Objects; import java.util.Optional; import java.util.Properties; import java.util.stream.Collectors; @@ -157,23 +154,11 @@ private static Topic buildSinkTopic( final Optional avroSchema = getAvroSchema(sinkDataSource, schemaRegistryClient); - final SerdeSupplier keySerdeFactory = SerdeUtil.getKeySerdeSupplier( - sinkDataSource.getKsqlTopic().getKeyFormat(), - sinkDataSource::getSchema - ); - - final SerdeSupplier valueSerdeSupplier = SerdeUtil.getSerdeSupplier( - sinkDataSource.getKsqlTopic().getValueFormat().getFormat(), - sinkDataSource::getSchema - ); - final Topic sinkTopic = new Topic( kafkaTopicName, - avroSchema, - keySerdeFactory, - valueSerdeSupplier, KsqlConstants.legacyDefaultSinkPartitionCount, - KsqlConstants.legacyDefaultSinkReplicaCount + KsqlConstants.legacyDefaultSinkReplicaCount, + avroSchema ); if (stubKafkaService.topicExists(sinkTopic)) { @@ -233,7 +218,7 @@ private static void initializeTopics( topic.getNumPartitions(), topic.getReplicas()); - topic.getSchema().ifPresent(schema -> { + topic.getAvroSchema().ifPresent(schema -> { try { srClient .register(topic.getName() + KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX, schema); @@ -247,6 +232,7 @@ private static void initializeTopics( /** * @param srClient if supplied, then schemas can be inferred from the schema registry. 
*/ + @SuppressWarnings("OptionalGetWithoutIsPresent") private static List execute( final KsqlEngine engine, final TestCase testCase, @@ -258,18 +244,17 @@ private static List execute( for (final ConfiguredKsqlPlan plan : planTestCase(engine, testCase, ksqlConfig, srClient, stubKafkaService)) { final ExecuteResultAndSources result = executePlan(engine, plan); - if (result.getSources() == null) { + if (!result.getSources().isPresent()) { continue; } queriesBuilder.add(new PersistentQueryAndSources( (PersistentQueryMetadata) result.getExecuteResult().getQuery().get(), - result.getSources() + result.getSources().get() )); } return queriesBuilder.build(); } - @SuppressWarnings({"rawtypes", "unchecked"}) private static ExecuteResultAndSources executePlan( final KsqlExecutionContext executionContext, final ConfiguredKsqlPlan plan @@ -278,17 +263,11 @@ private static ExecuteResultAndSources executePlan( executionContext.getServiceContext(), plan ); - final Optional maybeQueryPlan = plan.getPlan().getQueryPlan(); - if (maybeQueryPlan.isPresent()) { - return new ExecuteResultAndSources( - executeResult, - getSources(maybeQueryPlan.get().getSources(), executionContext.getMetaStore()) - ); - } - return new ExecuteResultAndSources( - executeResult, - null - ); + + final Optional>> dataSources = plan.getPlan().getQueryPlan() + .map(queryPlan -> getSources(queryPlan.getSources(), executionContext.getMetaStore())); + + return new ExecuteResultAndSources(executeResult, dataSources); } private static List> getSources( @@ -322,12 +301,12 @@ private PlannedStatementIterator( final StubKafkaService stubKafkaService, final Optional schemaInjector ) { - this.statements = Objects.requireNonNull(statements, "statements"); - this.executionContext = Objects.requireNonNull(executionContext, "executionContext"); - this.overrides = Objects.requireNonNull(overrides, "overrides"); - this.ksqlConfig = Objects.requireNonNull(ksqlConfig, "ksqlConfig"); - this.stubKafkaService = Objects.requireNonNull(stubKafkaService, "stubKafkaService"); - this.schemaInjector = Objects.requireNonNull(schemaInjector, "schemaInjector"); + this.statements = requireNonNull(statements, "statements"); + this.executionContext = requireNonNull(executionContext, "executionContext"); + this.overrides = requireNonNull(overrides, "overrides"); + this.ksqlConfig = requireNonNull(ksqlConfig, "ksqlConfig"); + this.stubKafkaService = requireNonNull(stubKafkaService, "stubKafkaService"); + this.schemaInjector = requireNonNull(schemaInjector, "schemaInjector"); } public static PlannedStatementIterator of( @@ -361,6 +340,7 @@ public boolean hasNext() { return next.isPresent(); } + @SuppressWarnings("ResultOfMethodCallIgnored") @Override public ConfiguredKsqlPlan next() { hasNext(); @@ -369,19 +349,20 @@ public ConfiguredKsqlPlan next() { return current; } + @SuppressWarnings("NullableProblems") @Override public Iterator iterator() { return this; } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) private Optional planStatement(final ParsedStatement stmt) { final PreparedStatement prepared = executionContext.prepare(stmt); final ConfiguredStatement configured = ConfiguredStatement.of( prepared, overrides, ksqlConfig); if (prepared.getStatement() instanceof InsertValues) { - StubInsertValuesExecutor.of(stubKafkaService).execute( + StubInsertValuesExecutor.of(stubKafkaService, executionContext).execute( (ConfiguredStatement) configured, overrides, executionContext, @@ -413,7 +394,7 @@ private Optional planStatement(final 
ParsedStatement stmt) { } } - private KsqlPlan rewritePlan(final KsqlPlan plan) { + private static KsqlPlan rewritePlan(final KsqlPlan plan) { try { final String serialized = PLAN_MAPPER.writeValueAsString(plan); return PLAN_MAPPER.readValue(serialized, KsqlPlan.class); @@ -426,20 +407,21 @@ private KsqlPlan rewritePlan(final KsqlPlan plan) { private static final class ExecuteResultAndSources { private final ExecuteResult executeResult; - private final List> sources; + private final Optional>> sources; ExecuteResultAndSources( final ExecuteResult executeResult, - final List> sources) { - this.executeResult = executeResult; - this.sources = sources; + final Optional>> sources + ) { + this.executeResult = requireNonNull(executeResult, "executeResult"); + this.sources = requireNonNull(sources, "sources"); } ExecuteResult getExecuteResult() { return executeResult; } - List> getSources() { + Optional>> getSources() { return sources; } } @@ -453,8 +435,9 @@ private static final class PersistentQueryAndSources { final PersistentQueryMetadata persistentQueryMetadata, final List> sources ) { - this.persistentQueryMetadata = persistentQueryMetadata; - this.sources = sources; + this.persistentQueryMetadata = + requireNonNull(persistentQueryMetadata, "persistentQueryMetadata"); + this.sources = requireNonNull(sources, "sources"); } PersistentQueryMetadata getPersistentQueryMetadata() { diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Topic.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Topic.java index 16da97c458b0..795cc66ce71a 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Topic.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/Topic.java @@ -17,37 +17,24 @@ import static java.util.Objects.requireNonNull; -import com.google.common.collect.ImmutableMap; -import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; -import io.confluent.kafka.serializers.KafkaAvroSerializerConfig; -import io.confluent.ksql.test.serde.SerdeSupplier; import java.util.Optional; import org.apache.avro.Schema; -import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.serialization.Serializer; -@SuppressWarnings("rawtypes") public class Topic { private final String name; - private final Optional schema; - private final SerdeSupplier keySerdeFactory; - private final SerdeSupplier valueSerdeSupplier; private final int numPartitions; private final short replicas; + private final Optional avroSchema; public Topic( final String name, - final Optional schema, - final SerdeSupplier keySerdeFactory, - final SerdeSupplier valueSerdeSupplier, final int numPartitions, - final int replicas + final int replicas, + final Optional avroSchema ) { this.name = requireNonNull(name, "name"); - this.schema = requireNonNull(schema, "schema"); - this.keySerdeFactory = requireNonNull(keySerdeFactory, "keySerdeFactory"); - this.valueSerdeSupplier = requireNonNull(valueSerdeSupplier, "valueSerdeSupplier"); + this.avroSchema = requireNonNull(avroSchema, "schema"); this.numPartitions = numPartitions; this.replicas = (short) replicas; } @@ -56,8 +43,8 @@ public String getName() { return name; } - public Optional getSchema() { - return schema; + public Optional getAvroSchema() { + return avroSchema; } public int getNumPartitions() { @@ -67,38 +54,4 @@ public int getNumPartitions() { public short getReplicas() { return replicas; } - - public SerdeSupplier getValueSerdeSupplier() { - return 
valueSerdeSupplier; - } - - public Serializer getValueSerializer(final SchemaRegistryClient schemaRegistryClient) { - final Serializer serializer = valueSerdeSupplier.getSerializer(schemaRegistryClient); - serializer.configure(ImmutableMap.of( - KafkaAvroSerializerConfig.SCHEMA_REGISTRY_URL_CONFIG, "something" - ), false); - return serializer; - } - - public Deserializer getValueDeserializer(final SchemaRegistryClient schemaRegistryClient) { - final Deserializer deserializer = valueSerdeSupplier.getDeserializer(schemaRegistryClient); - deserializer.configure(ImmutableMap.of( - KafkaAvroSerializerConfig.SCHEMA_REGISTRY_URL_CONFIG, "foo" - ), false); - return deserializer; - } - - public Serializer getKeySerializer(final SchemaRegistryClient schemaRegistryClient) { - final Serializer serializer = keySerdeFactory.getSerializer(schemaRegistryClient); - serializer.configure(ImmutableMap.of( - KafkaAvroSerializerConfig.SCHEMA_REGISTRY_URL_CONFIG, "something" - ), true); - return serializer; - } - - public Deserializer getKeyDeserializer(final SchemaRegistryClient schemaRegistryClient) { - final Deserializer deserializer = keySerdeFactory.getDeserializer(schemaRegistryClient); - deserializer.configure(ImmutableMap.of(), true); - return deserializer; - } } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopicInfoCache.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopicInfoCache.java new file mode 100644 index 000000000000..525eca869e4e --- /dev/null +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopicInfoCache.java @@ -0,0 +1,272 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.test.tools; + +import static java.util.Objects.requireNonNull; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; +import io.confluent.kafka.serializers.KafkaAvroSerializerConfig; +import io.confluent.ksql.KsqlExecutionContext; +import io.confluent.ksql.parser.DurationParser; +import io.confluent.ksql.query.QueryId; +import io.confluent.ksql.schema.ksql.DefaultSqlValueCoercer; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.SchemaConverters; +import io.confluent.ksql.schema.ksql.types.SqlType; +import io.confluent.ksql.serde.KeyFormat; +import io.confluent.ksql.serde.ValueFormat; +import io.confluent.ksql.test.TestFrameworkException; +import io.confluent.ksql.test.serde.SerdeSupplier; +import io.confluent.ksql.test.utils.SerdeUtil; +import io.confluent.ksql.util.PersistentQueryMetadata; +import java.util.OptionalLong; +import java.util.Set; +import java.util.function.Function; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serializer; +import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; + +/** + * Cache of info known about topics in use in the test. + */ +public class TopicInfoCache { + + private static final Pattern INTERNAL_TOPIC_PATTERN = Pattern + .compile("_confluent.*query_(.*_\\d+)-.*-(changelog|repartition)"); + + private static final Pattern WINDOWED_JOIN_PATTERN = Pattern + .compile( + "CREATE .* JOIN .* WITHIN (\\d+ \\w+) ON .*", + Pattern.CASE_INSENSITIVE | Pattern.DOTALL + ); + + private final KsqlExecutionContext ksqlEngine; + private final SchemaRegistryClient srClient; + private final LoadingCache cache; + + public TopicInfoCache( + final KsqlExecutionContext ksqlEngine, + final SchemaRegistryClient srClient + ) { + this.ksqlEngine = requireNonNull(ksqlEngine, "ksqlEngine"); + this.srClient = requireNonNull(srClient, "srClient"); + this.cache = CacheBuilder.newBuilder() + .build(CacheLoader.from(this::load)); + } + + public TopicInfo get(final String topicName) { + return cache.getUnchecked(topicName); + } + + public void clear() { + cache.invalidateAll(); + } + + private TopicInfo load(final String topicName) { + try { + final java.util.regex.Matcher matcher = INTERNAL_TOPIC_PATTERN.matcher(topicName); + if (matcher.matches()) { + // Internal topic: + final QueryId queryId = new QueryId(matcher.group(1)); + final PersistentQueryMetadata query = ksqlEngine + .getPersistentQuery(queryId) + .orElseThrow(() -> new TestFrameworkException("Unknown queryId for internal topic: " + + queryId)); + + final java.util.regex.Matcher windowedJoinMatcher = WINDOWED_JOIN_PATTERN + .matcher(query.getStatementString()); + + final OptionalLong changeLogWindowSize = topicName.endsWith("-changelog") + && windowedJoinMatcher.matches() + ? 
OptionalLong.of(DurationParser.parse(windowedJoinMatcher.group(1)).toMillis()) + : OptionalLong.empty(); + + return new TopicInfo( + topicName, + query.getLogicalSchema(), + query.getResultTopic().getKeyFormat(), + query.getResultTopic().getValueFormat(), + changeLogWindowSize + ); + } + + // Source / sink topic: + final Set keyTypes = ksqlEngine.getMetaStore().getAllDataSources().values() + .stream() + .filter(source -> source.getKafkaTopicName().equals(topicName)) + .map(source -> new TopicInfo( + topicName, + source.getSchema(), + source.getKsqlTopic().getKeyFormat(), + source.getKsqlTopic().getValueFormat(), + OptionalLong.empty() + )) + .collect(Collectors.toSet()); + + if (keyTypes.isEmpty()) { + throw new TestFrameworkException("no source found for topic"); + } + + return Iterables.get(keyTypes, 0); + } catch (final Exception e) { + throw new TestFrameworkException("Failed to determine key type for" + + System.lineSeparator() + "topic: " + topicName + + System.lineSeparator() + "reason: " + e.getMessage(), e); + } + } + + public final class TopicInfo { + + private final String topicName; + private final LogicalSchema schema; + private final KeyFormat keyFormat; + private final ValueFormat valueFormat; + private final OptionalLong changeLogWindowSize; + + private TopicInfo( + final String topicName, + final LogicalSchema schema, + final KeyFormat keyFormat, + final ValueFormat valueFormat, + final OptionalLong changeLogWindowSize + ) { + this.topicName = requireNonNull(topicName, "topicName"); + this.schema = requireNonNull(schema, "schema"); + this.keyFormat = requireNonNull(keyFormat, "keyFormat"); + this.valueFormat = requireNonNull(valueFormat, "valueFormat"); + this.changeLogWindowSize = requireNonNull(changeLogWindowSize, "changeLogWindowSize"); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + public Serializer getKeySerializer() { + final SerdeSupplier keySerdeSupplier = SerdeUtil + .getKeySerdeSupplier(keyFormat, schema); + + final Serializer serializer = keySerdeSupplier.getSerializer(srClient); + + serializer.configure(ImmutableMap.of( + KafkaAvroSerializerConfig.SCHEMA_REGISTRY_URL_CONFIG, "something" + ), true); + + return (Serializer) serializer; + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + public Serializer getValueSerializer() { + final SerdeSupplier valueSerdeSupplier = SerdeUtil + .getSerdeSupplier(valueFormat.getFormat(), schema); + + final Serializer serializer = valueSerdeSupplier.getSerializer(srClient); + + serializer.configure(ImmutableMap.of( + KafkaAvroSerializerConfig.SCHEMA_REGISTRY_URL_CONFIG, "something" + ), false); + + return (Serializer) serializer; + } + + public Deserializer getKeyDeserializer() { + final SerdeSupplier keySerdeSupplier = SerdeUtil + .getKeySerdeSupplier(keyFormat, schema); + + final Deserializer deserializer = keySerdeSupplier.getDeserializer(srClient); + + deserializer.configure(ImmutableMap.of(), true); + + if (!changeLogWindowSize.isPresent()) { + return deserializer; + } + + final TimeWindowedDeserializer changeLogDeserializer = + new TimeWindowedDeserializer<>(deserializer, changeLogWindowSize.getAsLong()); + + changeLogDeserializer.setIsChangelogTopic(true); + + return changeLogDeserializer; + } + + public Deserializer getValueDeserializer() { + final SerdeSupplier valueSerdeSupplier = SerdeUtil + .getSerdeSupplier(valueFormat.getFormat(), schema); + + final Deserializer deserializer = valueSerdeSupplier.getDeserializer(srClient); + + deserializer.configure(ImmutableMap.of(), false); + + return deserializer; + } 
+ + /** + * Coerce the key value to the correct type. + * + *
p>
The type of the key loaded from the JSON test case file may not be the exact match on + * type, e.g. JSON will load a small number as an integer, but the key type of the source might + * be a long. + * + * @param record the record to coerce + * @param msgIndex the index of the message, displayed in the error message + * @return a new Record with the correct key type. + */ + public Record coerceRecordKey( + final Record record, + final int msgIndex + ) { + try { + final Object coerced = keyCoercer().apply(record.rawKey()); + return record.withKey(coerced); + } catch (final Exception e) { + throw new AssertionError( + "Topic '" + record.getTopic().getName() + "', message " + msgIndex + + ": Invalid test-case: could not coerce key in test case to required type. " + + e.getMessage(), + e); + } + } + + private Function keyCoercer() { + final SqlType keyType = schema + .key() + .get(0) + .type(); + + return key -> { + if (key == null) { + return null; + } + + return DefaultSqlValueCoercer.INSTANCE + .coerce(key, keyType) + .orElseThrow(() -> new AssertionError("Invalid key value for topic " + topicName + "." + + System.lineSeparator() + + "Expected KeyType: " + keyType + + System.lineSeparator() + + "Actual KeyType: " + SchemaConverters.javaToSqlConverter() + .toSqlType(key.getClass()) + + ", key: " + key + "." + + System.lineSeparator() + + "This is likely caused by the key type in the test-case not matching the schema." + )); + }; + } + } +} diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/utils/SerdeUtil.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/utils/SerdeUtil.java index 441677bbeb21..0507fa367055 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/utils/SerdeUtil.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/utils/SerdeUtil.java @@ -28,7 +28,6 @@ import io.confluent.ksql.test.serde.kafka.KafkaSerdeSupplier; import io.confluent.ksql.test.serde.string.StringSerdeSupplier; import io.confluent.ksql.test.tools.exceptions.InvalidFieldException; -import java.util.function.Supplier; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serializer; import org.apache.kafka.streams.kstream.SessionWindowedDeserializer; @@ -46,7 +45,7 @@ private SerdeUtil() { public static SerdeSupplier getSerdeSupplier( final Format format, - final Supplier schemaSupplier + final LogicalSchema schema ) { switch (format) { case AVRO: @@ -56,7 +55,7 @@ public static SerdeSupplier getSerdeSupplier( case DELIMITED: return new StringSerdeSupplier(); case KAFKA: - return new KafkaSerdeSupplier(schemaSupplier); + return new KafkaSerdeSupplier(schema); default: throw new InvalidFieldException("format", "unsupported value: " + format); } @@ -65,11 +64,11 @@ public static SerdeSupplier getSerdeSupplier( @SuppressWarnings("unchecked") public static SerdeSupplier getKeySerdeSupplier( final KeyFormat keyFormat, - final Supplier logicalSchemaSupplier + final LogicalSchema schema ) { - final SerdeSupplier inner = (SerdeSupplier) SerdeUtil.getSerdeSupplier( + final SerdeSupplier inner = (SerdeSupplier) getSerdeSupplier( keyFormat.getFormat(), - logicalSchemaSupplier + schema ); if (!keyFormat.getWindowType().isPresent()) { @@ -107,6 +106,7 @@ public Serializer> getSerializer(final SchemaRegistryClient srClient return new TimeWindowedSerializer<>(serializer); } + @SuppressWarnings("OptionalGetWithoutIsPresent") @Override public Deserializer> getDeserializer(final SchemaRegistryClient srClient) { 
final Deserializer deserializer = inner.getDeserializer(srClient); diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/engine/StubInsertValuesExecutorTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/engine/StubInsertValuesExecutorTest.java index b0542664a485..4f2796e48246 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/engine/StubInsertValuesExecutorTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/engine/StubInsertValuesExecutorTest.java @@ -22,17 +22,18 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import com.google.common.collect.ImmutableMap; import io.confluent.ksql.engine.StubInsertValuesExecutor.StubProducer; -import io.confluent.ksql.test.serde.avro.AvroSerdeSupplier; -import io.confluent.ksql.test.serde.string.StringSerdeSupplier; import io.confluent.ksql.test.tools.Record; import io.confluent.ksql.test.tools.Topic; +import io.confluent.ksql.test.tools.TopicInfoCache; +import io.confluent.ksql.test.tools.TopicInfoCache.TopicInfo; import io.confluent.ksql.test.tools.stubs.StubKafkaRecord; import io.confluent.ksql.test.tools.stubs.StubKafkaService; import java.nio.charset.StandardCharsets; import java.util.Optional; import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.StringDeserializer; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -49,22 +50,29 @@ public final class StubInsertValuesExecutorTest { @Mock private StubKafkaService stubKafkaService; + @Mock + private TopicInfoCache topicInfoCache; + @Mock + private TopicInfo topicInfo; @Captor private ArgumentCaptor recordCaptor; private StubProducer stubProducer; + @SuppressWarnings({"unchecked", "rawtypes"}) @Before public void setUp() { when(stubKafkaService.getTopic(SOME_TOPIC)).thenReturn(new Topic( SOME_TOPIC, - Optional.empty(), - new StringSerdeSupplier(), - new StringSerdeSupplier(), 1, - 1 + 1, + Optional.empty() )); - stubProducer = new StubProducer(stubKafkaService); + when(topicInfoCache.get(SOME_TOPIC)).thenReturn(topicInfo); + when(topicInfo.getKeyDeserializer()).thenReturn((Deserializer) new StringDeserializer()); + when(topicInfo.getValueDeserializer()).thenReturn((Deserializer) new StringDeserializer()); + + stubProducer = new StubProducer(stubKafkaService, topicInfoCache); } @Test @@ -75,7 +83,7 @@ public void shouldWriteRecordKeyAndMetadata() { SOME_TOPIC, null, timestamp, - "the-key".getBytes(StandardCharsets.UTF_8), + KEY_BYTES, new byte[]{0} ); @@ -114,37 +122,4 @@ public void shouldWriteRecordStringValue() { final Record actual = recordCaptor.getValue().getTestRecord(); assertThat(actual.value(), is("the-value")); } - - @Test - public void shouldWriteRecordJsonValue() { - // Given: - final byte[] value = "{\"this\": 1}".getBytes(StandardCharsets.UTF_8); - - when(stubKafkaService.getTopic(SOME_TOPIC)).thenReturn(new Topic( - SOME_TOPIC, - Optional.empty(), - new StringSerdeSupplier(), - new AvroSerdeSupplier(), - 1, - 1 - )); - - final long timestamp = 22L; - final ProducerRecord record = new ProducerRecord<>( - SOME_TOPIC, - null, - timestamp, - KEY_BYTES, - value - ); - - // When: - stubProducer.sendRecord(record); - - // Then: - verify(stubKafkaService).writeRecord(eq(SOME_TOPIC), recordCaptor.capture()); - - final Record actual = recordCaptor.getValue().getTestRecord(); - assertThat(actual.value(), is(ImmutableMap.of("this", 1))); - } } \ No newline at 
end of file diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/SchemaTranslationTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/SchemaTranslationTest.java index 2396c139cfc0..b7b33e20316a 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/SchemaTranslationTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/SchemaTranslationTest.java @@ -13,9 +13,6 @@ import io.confluent.avro.random.generator.Generator; import io.confluent.ksql.test.loader.JsonTestLoader; import io.confluent.ksql.test.loader.TestFile; -import io.confluent.ksql.test.serde.avro.AvroSerdeSupplier; -import io.confluent.ksql.test.serde.avro.ValueSpecAvroSerdeSupplier; -import io.confluent.ksql.test.serde.string.StringSerdeSupplier; import io.confluent.ksql.test.tools.Record; import io.confluent.ksql.test.tools.TestCase; import io.confluent.ksql.test.tools.Topic; @@ -49,11 +46,9 @@ public class SchemaTranslationTest { private static final Topic OUTPUT_TOPIC = new Topic( OUTPUT_TOPIC_NAME, - Optional.empty(), - new StringSerdeSupplier(), - new ValueSpecAvroSerdeSupplier(), 1, - 1 + 1, + Optional.empty() ); private final TestCase testCase; @@ -160,11 +155,9 @@ Stream buildTests(final Path testPath) { try { final Topic srcTopic = new Topic( TOPIC_NAME, - Optional.of(schema), - new StringSerdeSupplier(), - new AvroSerdeSupplier(), 1, - 1 + 1, + Optional.of(schema) ); final List inputRecords = generateInputRecords(srcTopic, schema); diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestQueryTranslationTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestQueryTranslationTest.java index 141b9820761b..e29bbe6968c7 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestQueryTranslationTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestQueryTranslationTest.java @@ -122,6 +122,7 @@ public void shouldBuildAndExecuteQueries() { private static RestTestExecutor testExecutor() { return new RestTestExecutor( + REST_APP.getEngine(), REST_APP.getListeners().get(0), TEST_HARNESS.getKafkaCluster(), TEST_HARNESS.getServiceContext() diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestCaseBuilder.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestCaseBuilder.java index 4de1de088668..97062125e12d 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestCaseBuilder.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestCaseBuilder.java @@ -81,7 +81,6 @@ private RestTestCase createTest( test.topics(), test.outputs(), test.inputs(), - explicitFormat, ee.isPresent(), functionRegistry ); diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java index fc85300b13eb..d55148b45024 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java @@ -29,6 +29,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList.Builder; import com.google.common.collect.ImmutableMap; +import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.json.JsonMapper; import io.confluent.ksql.rest.client.KsqlRestClient; import io.confluent.ksql.rest.client.QueryStream; 
@@ -43,6 +44,8 @@ import io.confluent.ksql.test.tools.ExpectedRecordComparator; import io.confluent.ksql.test.tools.Record; import io.confluent.ksql.test.tools.Topic; +import io.confluent.ksql.test.tools.TopicInfoCache; +import io.confluent.ksql.test.tools.TopicInfoCache.TopicInfo; import io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster; import io.confluent.ksql.util.KsqlConstants; import io.confluent.ksql.util.KsqlServerException; @@ -62,7 +65,6 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.common.serialization.Deserializer; import org.hamcrest.Matcher; import org.hamcrest.StringDescription; import org.slf4j.Logger; @@ -78,8 +80,10 @@ public class RestTestExecutor implements Closeable { private final KsqlRestClient restClient; private final EmbeddedSingleNodeKafkaCluster kafkaCluster; private final ServiceContext serviceContext; + private final TopicInfoCache topicInfoCache; RestTestExecutor( + final KsqlExecutionContext engine, final URL url, final EmbeddedSingleNodeKafkaCluster kafkaCluster, final ServiceContext serviceContext @@ -92,20 +96,45 @@ public class RestTestExecutor implements Closeable { ); this.kafkaCluster = requireNonNull(kafkaCluster, "kafkaCluster"); this.serviceContext = requireNonNull(serviceContext, "serviceContext"); + this.topicInfoCache = new TopicInfoCache(engine, serviceContext.getSchemaRegistryClient()); } void buildAndExecuteQuery(final RestTestCase testCase) { + topicInfoCache.clear(); + initializeTopics(testCase.getTopics()); - produceInputs(testCase.getInputsByTopic()); + final StatementSplit statements = splitStatements(testCase); - final Optional> responses = sendStatements(testCase); - if (!responses.isPresent()) { + final Optional> adminResults = sendAdminStatements(testCase, + statements.admin); + if (!adminResults.isPresent()) { return; } + produceInputs(testCase.getInputsByTopic()); + + if (!testCase.expectedError().isPresent() + && testCase.getExpectedResponses().size() > statements.admin.size()) { + waitForWarmStateStores( + statements.queries, + testCase.getExpectedResponses() + .subList(statements.admin.size(), testCase.getExpectedResponses().size()) + ); + } + + final List queryResults = sendQueryStatements(testCase, statements.queries); + if (!queryResults.isEmpty()) { + failIfExpectingError(testCase); + } + + final List responses = ImmutableList.builder() + .addAll(adminResults.orElseGet(ImmutableList::of)) + .addAll(queryResults) + .build(); + verifyOutput(testCase); - verifyResponses(responses.get(), testCase.getExpectedResponses(), testCase.getStatements()); + verifyResponses(responses, testCase.getExpectedResponses(), testCase.getStatements()); } public void close() { @@ -129,7 +158,7 @@ private void initializeTopics(final List topics) { createJob ); - topic.getSchema().ifPresent(schema -> { + topic.getAvroSchema().ifPresent(schema -> { try { serviceContext.getSchemaRegistryClient() .register(topic.getName() + KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX, schema); @@ -140,80 +169,69 @@ private void initializeTopics(final List topics) { }); } - @SuppressWarnings("unchecked") private void produceInputs(final Map> inputs) { inputs.forEach((topic, records) -> { + final TopicInfo topicInfo = topicInfoCache.get(topic.getName()); + try (KafkaProducer producer = new KafkaProducer<>( kafkaCluster.producerConfig(), - topic.getKeySerializer(serviceContext.getSchemaRegistryClient()), - 
topic.getValueSerializer(serviceContext.getSchemaRegistryClient()) + topicInfo.getKeySerializer(), + topicInfo.getValueSerializer() )) { + for (int idx = 0; idx < records.size(); idx++) { + final Record record = records.get(idx); - records.forEach(record -> producer.send(new ProducerRecord<>( - topic.getName(), - null, - record.timestamp().orElse(0L), - record.key(), - record.value() - )) - ); + final Record coerced = topicInfo.coerceRecordKey(record, idx); + + producer.send(new ProducerRecord<>( + topic.getName(), + null, + coerced.timestamp().orElse(0L), + coerced.key(), + coerced.value() + )); + } } catch (final Exception e) { throw new RuntimeException("Failed to send record to " + topic.getName(), e); } }); } - private Optional> sendStatements(final RestTestCase testCase) { + private static StatementSplit splitStatements(final RestTestCase testCase) { final List allStatements = testCase.getStatements(); - int firstQuery = 0; - for (; firstQuery < allStatements.size(); firstQuery++) { - final boolean isQuery = allStatements.get(firstQuery).startsWith("SELECT "); + Integer firstQuery = null; + for (int idx = 0; idx < allStatements.size(); idx++) { + final boolean isQuery = allStatements.get(idx).startsWith("SELECT "); if (isQuery) { - break; + if (firstQuery == null) { + firstQuery = idx; + } + } else { + if (firstQuery != null) { + throw new AssertionError("Invalid test case: statement " + idx + + " follows queries, but is not a query. " + + "All queries should be at the end of the statement list" + ); + } } } - final List nonQuery = IntStream.range(0, firstQuery) + if (firstQuery == null) { + firstQuery = allStatements.size(); + } + + final List admin = IntStream.range(0, firstQuery) .mapToObj(allStatements::get) .collect(Collectors.toList()); - final Optional> adminResults = sendAdminStatements(testCase, nonQuery); - if (!adminResults.isPresent()) { - return Optional.empty(); - } - final List queries = IntStream.range(firstQuery, allStatements.size()) .mapToObj(allStatements::get) .collect(Collectors.toList()); - if (queries.isEmpty()) { - failIfExpectingError(testCase); - return adminResults; - } - - if (!testCase.expectedError().isPresent()) { - for (int idx = firstQuery; testCase.getExpectedResponses().size() > idx; ++idx) { - final String queryStatement = allStatements.get(idx); - final Response queryResponse = testCase.getExpectedResponses().get(idx); - - waitForWarmStateStores(queryStatement, queryResponse); - } - } - - final List moreResults = sendQueryStatements(testCase, queries); - if (moreResults.isEmpty()) { - return Optional.empty(); - } - - failIfExpectingError(testCase); - - return Optional.of(ImmutableList.builder() - .addAll(adminResults.get()) - .addAll(moreResults) - .build()); + return StatementSplit.of(admin, queries); } private Optional> sendAdminStatements( @@ -262,18 +280,15 @@ private Optional sendQueryStatement( private void verifyOutput(final RestTestCase testCase) { testCase.getOutputsByTopic().forEach((topic, records) -> { - final Deserializer keyDeserializer = - topic.getKeyDeserializer(serviceContext.getSchemaRegistryClient()); - final Deserializer valueDeserializer = - topic.getValueDeserializer(serviceContext.getSchemaRegistryClient()); + final TopicInfo topicInfo = topicInfoCache.get(topic.getName()); final List> received = kafkaCluster .verifyAvailableRecords( topic.getName(), records.size(), - keyDeserializer, - valueDeserializer + topicInfo.getKeyDeserializer(), + topicInfo.getValueDeserializer() ); for (int idx = 0; idx < records.size(); idx++) { 
@@ -423,6 +438,18 @@ private static T asJson(final Object response, final TypeReference type) } private void waitForWarmStateStores( + final List queries, + final List expectedResponses + ) { + for (int i = 0; i != expectedResponses.size(); ++i) { + final String queryStatement = queries.get(i); + final Response queryResponse = expectedResponses.get(i); + + waitForWarmStateStore(queryStatement, queryResponse); + } + } + + private void waitForWarmStateStore( final String querySql, final Response queryResponse ) { @@ -609,4 +636,19 @@ public void verify( } } } + + private static final class StatementSplit { + + final List admin; + final List queries; + + static StatementSplit of(final List admin, final List queries) { + return new StatementSplit(admin, queries); + } + + private StatementSplit(final List admin, final List queries) { + this.admin = ImmutableList.copyOf(admin); + this.queries = ImmutableList.copyOf(queries); + } + } } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/RecordTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/RecordTest.java index 146d241a3f86..70520185aa2b 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/RecordTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/RecordTest.java @@ -21,13 +21,6 @@ import io.confluent.ksql.test.model.WindowData; import java.util.Optional; -import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.serialization.Serializer; -import org.apache.kafka.streams.kstream.SessionWindowedDeserializer; -import org.apache.kafka.streams.kstream.SessionWindowedSerializer; -import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; -import org.apache.kafka.streams.kstream.TimeWindowedSerializer; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.internals.SessionWindow; import org.apache.kafka.streams.kstream.internals.TimeWindow; @@ -44,117 +37,10 @@ public class RecordTest { private Topic topic; @Test - public void shouldGetCorrectStringKeySerializer() { - // Given: - final Record record = new Record( - topic, - "foo", - "bar", - null, - Optional.of(1000L), - null - ); - - // When: - final Serializer serializer = record.keySerializer(); - - // Then: - assertThat(serializer, instanceOf(Serdes.String().serializer().getClass())); - } - - @Test - public void shouldGetCorrectTimeWondowedKeySerializer() { - // Given: - final Record record = new Record(topic, - "foo", - "bar", - null, - Optional.of(1000L), - new WindowData(100L, 1000L, "TIME")); - - // When: - final Serializer serializer = record.keySerializer(); - - // Then: - assertThat(serializer, instanceOf(TimeWindowedSerializer.class)); - } - - @Test - public void shouldGetCorrectSessionWindowedKeySerializer() { - // Given: - final Record record = new Record(topic, - "foo", - "bar", - null, - Optional.of(1000L), - new WindowData(100L, 1000L, "SESSION")); - - // When: - final Serializer serializer = record.keySerializer(); - - // Then: - assertThat(serializer, instanceOf(SessionWindowedSerializer.class)); - } - - @Test - public void shouldGetCorrectStringKeyDeserializer() { - // Given: - final Record record = new Record(topic, - "foo", - "bar", - null, - Optional.of(1000L), - null); - - // When: - final Deserializer deserializer = record.keyDeserializer(); - - // Then: - assertThat(deserializer, instanceOf(Serdes.String().deserializer().getClass())); - - } - 
- @Test - public void shouldGetCorrectTimedWindowKeyDeserializer() { - // Given: - final Record record = new Record(topic, - "foo", - "bar", - null, - Optional.of(1000L), - new WindowData(100L, 1000L, "TIME")); - - // When: - final Deserializer deserializer = record.keyDeserializer(); - - // Then: - assertThat(deserializer, instanceOf(TimeWindowedDeserializer.class)); - - } - - @Test - public void shouldGetCorrectSessionedWindowKeyDeserializer() { - // Given: - final Record record = new Record(topic, - "foo", - "bar", - null, - Optional.of(1000L), - new WindowData(100L, 1000L, "SESSION")); - - // When: - final Deserializer deserializer = record.keyDeserializer(); - - // Then: - assertThat(deserializer, instanceOf(SessionWindowedDeserializer.class)); - - } - - @Test - public void shouldGetStringKey() { + public void shouldGetKey() { // Given: final Record record = new Record(topic, - "foo", + 10, "bar", null, Optional.of(1000L), @@ -164,10 +50,9 @@ public void shouldGetStringKey() { final Object key = record.key(); // Then: - assertThat(key, equalTo("foo")); + assertThat(key, equalTo(10)); } - @Test public void shouldGetTimeWindowKey() { // Given: @@ -209,5 +94,4 @@ public void shouldGetSessionWindowKey() { assertThat(windowed.window().start(), equalTo(100L)); assertThat(windowed.window().end(), equalTo(1000L)); } - } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/StubKafkaServiceTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/StubKafkaServiceTest.java index 4689423da296..a5b5f0ff2cb4 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/StubKafkaServiceTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/StubKafkaServiceTest.java @@ -18,7 +18,6 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; -import io.confluent.ksql.test.serde.string.StringSerdeSupplier; import io.confluent.ksql.test.tools.stubs.StubKafkaRecord; import io.confluent.ksql.test.tools.stubs.StubKafkaService; import java.util.List; @@ -50,14 +49,7 @@ public class StubKafkaServiceTest { public void setUp() { stubKafkaRecord = StubKafkaRecord.of(record, producerRecord); stubKafkaService = StubKafkaService.create(); - topic = new Topic( - "foo", - Optional.of(avroSchema), - new StringSerdeSupplier(), - new StringSerdeSupplier(), - 1, - 1 - ); + topic = new Topic("foo", 1, 1, Optional.of(avroSchema)); } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java index 3d8621552b99..ec8e3cb85f2e 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorTest.java @@ -27,6 +27,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.execution.ddl.commands.KsqlTopic; import io.confluent.ksql.metastore.MetaStore; @@ -38,6 +39,7 @@ import io.confluent.ksql.serde.Format; import io.confluent.ksql.serde.FormatInfo; import io.confluent.ksql.serde.KeyFormat; +import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.services.ServiceContext; import 
io.confluent.ksql.test.tools.TestExecutor.TopologyBuilder; import io.confluent.ksql.test.tools.conditions.PostConditions; @@ -98,6 +100,8 @@ public class TestExecutorTest { private MetaStore metaStore; @Mock private Function> internalTopicsAccessor; + @Mock + private SchemaRegistryClient srClient; private TestExecutor executor; private final Map> allSources = new HashMap<>(); @@ -106,6 +110,8 @@ public class TestExecutorTest { public void setUp() { allSources.clear(); + when(serviceContext.getSchemaRegistryClient()).thenReturn(srClient); + executor = new TestExecutor( kafkaService, serviceContext, @@ -351,6 +357,9 @@ private void givenDataSourceTopic(final LogicalSchema schema) { final KsqlTopic topic = mock(KsqlTopic.class); when(topic.getKeyFormat()) .thenReturn(KeyFormat.of(FormatInfo.of(Format.KAFKA), Optional.empty())); + when(topic.getValueFormat()) + .thenReturn(ValueFormat.of(FormatInfo.of(Format.JSON))); + final DataSource dataSource = mock(DataSource.class); when(dataSource.getKsqlTopic()).thenReturn(topic); when(dataSource.getSchema()).thenReturn(schema); diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorUtilTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorUtilTest.java index 03d721a56029..eb02401e516d 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorUtilTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/tools/TestExecutorUtilTest.java @@ -27,7 +27,6 @@ import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.test.model.QttTestFile; import io.confluent.ksql.test.model.TestCaseNode; -import io.confluent.ksql.test.serde.string.StringSerdeSupplier; import io.confluent.ksql.test.tools.stubs.StubKafkaService; import io.confluent.ksql.util.KsqlConfig; import java.io.File; @@ -74,14 +73,7 @@ public void tearDown() { @Test public void shouldPlanTestCase() { // Given: - final Topic sourceTopic = new Topic( - "test_topic", - Optional.empty(), - new StringSerdeSupplier(), - new StringSerdeSupplier(), - 1, - 1 - ); + final Topic sourceTopic = new Topic("test_topic", 1, 1, Optional.empty()); stubKafkaService.createTopic(sourceTopic); @@ -113,15 +105,12 @@ public void shouldPlanTestCase() { @Test public void shouldBuildStreamsTopologyTestDrivers() { - // Given: final Topic sourceTopic = new Topic( "test_topic", - Optional.empty(), - new StringSerdeSupplier(), - new StringSerdeSupplier(), 1, - 1 + 1, + Optional.empty() ); stubKafkaService.createTopic(sourceTopic); diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json b/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json index b78aa46b724c..5b79961ad44b 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json @@ -39,8 +39,7 @@ "topics": [ { "name": "input_topic", - "schema": "int", - "format": "{FORMAT}" + "schema": "int" } ], "inputs": [ @@ -62,8 +61,7 @@ "topics": [ { "name": "input_topic", - "schema": "int", - "format": "{FORMAT}" + "schema": "int" } ], "inputs": [ @@ -83,8 +81,7 @@ "topics": [ { "name": "input_topic", - "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": "int"}]}, - "format": "{FORMAT}" + "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": "int"}]} } ], "inputs": [ @@ -109,8 +106,7 @@ "topics": [ { "name": "input_topic", - "schema": 
{"type": "array", "items": ["null", "string"]}, - "format": "{FORMAT}" + "schema": {"type": "array", "items": ["null", "string"]} } ], "inputs": [ @@ -134,8 +130,7 @@ "topics": [ { "name": "input_topic", - "schema": {"type": "array", "items": ["null", "string"]}, - "format": "{FORMAT}" + "schema": {"type": "array", "items": ["null", "string"]} } ], "inputs": [ @@ -155,8 +150,7 @@ "topics": [ { "name": "input_topic", - "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null", {"type": "array", "items": ["null", "string"]}]}]}, - "format": "{FORMAT}" + "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null", {"type": "array", "items": ["null", "string"]}]}]} } ], "inputs": [ @@ -202,13 +196,11 @@ "topics": [ { "name": "input_topic", - "schema": {"type": "map", "values": ["null", "int"]}, - "format": "{FORMAT}" + "schema": {"type": "map", "values": ["null", "int"]} }, { "name": "OUTPUT", - "schema": {"type": "record", "name": "ignored", "fields": [{"name": "FOO", "type": ["null",{"type":"array","items":{"type":"record","name":"test","fields":[{"name":"key","type":["null","string"],"default":null},{"name":"value","type":["null","int"],"default":null}]}}]}]}, - "format": "{FORMAT}" + "schema": {"type": "record", "name": "ignored", "fields": [{"name": "FOO", "type": ["null",{"type":"array","items":{"type":"record","name":"test","fields":[{"name":"key","type":["null","string"],"default":null},{"name":"value","type":["null","int"],"default":null}]}}]}]} } ], "inputs": [ @@ -275,13 +267,11 @@ "topics": [ { "name": "input_topic", - "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null",{"type": "map", "values": ["null", "int"]}]}]}, - "format": "{FORMAT}" + "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null",{"type": "map", "values": ["null", "int"]}]}]} }, { "name": "OUTPUT", - "schema": {"type": "record", "name": "ignored", "fields": [{"name": "FOO", "type": ["null",{"type":"array","items":{"type":"record","name":"test","fields":[{"name":"key","type":["null","string"],"default":null},{"name":"value","type":["null","int"],"default":null}]}}]}]}, - "format": "{FORMAT}" + "schema": {"type": "record", "name": "ignored", "fields": [{"name": "FOO", "type": ["null",{"type":"array","items":{"type":"record","name":"test","fields":[{"name":"key","type":["null","string"],"default":null},{"name":"value","type":["null","int"],"default":null}]}}]}]} } ], "inputs": [ @@ -396,8 +386,7 @@ "topics": [ { "name": "OUTPUT", - "schema": "boolean", - "format": "{FORMAT}" + "schema": "boolean" } ], "inputs": [ @@ -421,8 +410,7 @@ "topics": [ { "name": "OUTPUT", - "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null","boolean"]}]}, - "format": "{FORMAT}" + "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null","boolean"]}]} } ], "inputs": [ @@ -446,8 +434,7 @@ "topics": [ { "name": "OUTPUT", - "schema": {"type": "array", "items": ["null", "long"]}, - "format": "{FORMAT}" + "schema": {"type": "array", "items": ["null", "long"]} } ], "inputs": [ @@ -473,8 +460,7 @@ "topics": [ { "name": "OUTPUT", - "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null",{"type": "array", "items": ["null", "long"]}]}]}, - "format": "{FORMAT}" + "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null",{"type": "array", "items": ["null", "long"]}]}]} } ], 
"inputs": [ @@ -500,8 +486,7 @@ "topics": [ { "name": "OUTPUT", - "schema": {"type":"array","items":{"type":"record","name":"test","fields":[{"name":"key","type":["null","string"],"default":null},{"name":"value","type":["null","double"],"default":null}]}}, - "format": "{FORMAT}" + "schema": {"type":"array","items":{"type":"record","name":"test","fields":[{"name":"key","type":["null","string"],"default":null},{"name":"value","type":["null","double"],"default":null}]}} } ], "inputs": [ @@ -527,8 +512,7 @@ "topics": [ { "name": "OUTPUT", - "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null",{"type":"array","items":{"type":"record","name":"test","fields":[{"name":"key","type":["null","string"],"default":null},{"name":"value","type":["null","double"],"default":null}]}}]}]}, - "format": "{FORMAT}" + "schema": {"name": "ignored", "type": "record", "fields": [{"name": "FOO", "type": ["null",{"type":"array","items":{"type":"record","name":"test","fields":[{"name":"key","type":["null","string"],"default":null},{"name":"value","type":["null","double"],"default":null}]}}]}]} } ], "inputs": [ @@ -554,8 +538,7 @@ "topics": [ { "name": "OUTPUT", - "schema": {"type": "record", "name": "ignored", "fields": [{"name": "F0", "type": ["null", "int"]}]}, - "format": "{FORMAT}" + "schema": {"type": "record", "name": "ignored", "fields": [{"name": "F0", "type": ["null", "int"]}]} } ], "inputs": [ @@ -583,8 +566,7 @@ "name": "OUTPUT", "schema": {"name": "ignored", "type": "record", "fields": [ {"name": "FOO", "type": ["null", {"name": "ignored2", "type": "record", "fields": [{"name": "F0", "type": ["null", "int"]}]}]} - ]}, - "format": "{FORMAT}" + ]} } ], "inputs": [ diff --git a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/pull-queries-against-materialized-aggregates.json b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/pull-queries-against-materialized-aggregates.json index af6e91052c37..53c776f0da07 100644 --- a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/pull-queries-against-materialized-aggregates.json +++ b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/pull-queries-against-materialized-aggregates.json @@ -51,6 +51,59 @@ ]} ] }, + { + "name": "non-windowed single key lookup - BIGINT", + "statements": [ + "CREATE STREAM INPUT (ROWKEY BIGINT KEY, IGNORED INT) WITH (kafka_topic='test_topic', value_format='JSON');", + "CREATE TABLE AGGREGATE AS SELECT COUNT(1) AS COUNT FROM INPUT GROUP BY ROWKEY;", + "SELECT * FROM AGGREGATE WHERE ROWKEY=10;", + "SELECT * FROM AGGREGATE WHERE ROWKEY=123369;" + ], + "inputs": [ + {"topic": "test_topic", "timestamp": 12345, "key": 11, "value": {}}, + {"topic": "test_topic", "timestamp": 12365, "key": 10, "value": {}} + ], + "responses": [ + {"admin": {"@type": "currentStatus"}}, + {"admin": {"@type": "currentStatus"}}, + {"query": [ + {"header":{"schema":"`ROWKEY` BIGINT KEY, `ROWTIME` BIGINT, `COUNT` BIGINT"}}, + {"row":{"columns":[10, 12365, 1]}} + ]}, + {"query": [ + {"header":{"schema":"`ROWKEY` BIGINT KEY, `ROWTIME` BIGINT, `COUNT` BIGINT"}} + ]} + ] + }, + { + "name": "non-windowed single key lookup - DOUBLE", + "statements": [ + "CREATE STREAM INPUT (ROWKEY DOUBLE KEY, IGNORED INT) WITH (kafka_topic='test_topic', value_format='JSON');", + "CREATE TABLE AGGREGATE AS SELECT COUNT(1) AS COUNT FROM INPUT GROUP BY ROWKEY;", + "SELECT * FROM AGGREGATE WHERE ROWKEY=10;", + "SELECT * FROM AGGREGATE WHERE ROWKEY=10.0;", + "SELECT * FROM AGGREGATE WHERE 
ROWKEY=123369;" + ], + "inputs": [ + {"topic": "test_topic", "timestamp": 12345, "key": 11.0, "value": {}}, + {"topic": "test_topic", "timestamp": 12365, "key": 10.0, "value": {}} + ], + "responses": [ + {"admin": {"@type": "currentStatus"}}, + {"admin": {"@type": "currentStatus"}}, + {"query": [ + {"header":{"schema":"`ROWKEY` DOUBLE KEY, `ROWTIME` BIGINT, `COUNT` BIGINT"}}, + {"row":{"columns":[10.0, 12365, 1]}} + ]}, + {"query": [ + {"header":{"schema":"`ROWKEY` DOUBLE KEY, `ROWTIME` BIGINT, `COUNT` BIGINT"}}, + {"row":{"columns":[10.0, 12365, 1]}} + ]}, + {"query": [ + {"header":{"schema":"`ROWKEY` DOUBLE KEY, `ROWTIME` BIGINT, `COUNT` BIGINT"}} + ]} + ] + }, { "name": "lookup on wrong type type", "statements": [ diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java index 69edf8f894f8..8bc4574c9dd0 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java @@ -236,6 +236,11 @@ void startKsql() { initialize(); } + @VisibleForTesting + KsqlEngine getEngine() { + return ksqlEngine; + } + private static final class KsqlFailedPrecondition extends RuntimeException { private KsqlFailedPrecondition(final String message) { diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java index d06d26809edf..e6cfd89a8c82 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java @@ -23,6 +23,7 @@ import com.fasterxml.jackson.datatype.jdk8.Jdk8Module; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.json.JsonMapper; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.rest.client.BasicCredentials; @@ -100,6 +101,7 @@ public class TestKsqlRestApp extends ExternalResource { private final List listeners = new ArrayList<>(); private final Optional credentials; private ExecutableServer restServer; + private KsqlExecutionContext ksqlEngine; private TestKsqlRestApp( final Supplier bootstrapServers, @@ -116,6 +118,10 @@ private TestKsqlRestApp( this.credentials = requireNonNull(credentials, "credentials"); } + public KsqlExecutionContext getEngine() { + return ksqlEngine; + } + public List getListeners() { return this.listeners; } @@ -261,6 +267,8 @@ protected void before() { } catch (final Exception var2) { throw new RuntimeException("Failed to start Ksql rest server", var2); } + + ksqlEngine = ksqlRestApplication.getEngine(); } @Override From b038f885f512f1aefc7dacf225c44b883a6d5a19 Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Thu, 2 Jan 2020 12:37:12 -0800 Subject: [PATCH 063/123] docs: unpin mkdocs-macros-plugin version (DOCS-3100) (#4208) --- docs-md/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-md/requirements.txt b/docs-md/requirements.txt index 7208b92fb755..46ad69a14b59 100644 --- a/docs-md/requirements.txt +++ b/docs-md/requirements.txt @@ -1,6 +1,6 @@ mkdocs==1.0.4 mdx_gh_links>=0.2 -mkdocs-macros-plugin==0.2.4 +mkdocs-macros-plugin mkdocs-git-revision-date-plugin pymdown-extensions mkdocs-material From 9b48f4e775cbb8acfd435ad6e56ae85918618f46 Mon Sep 
17 00:00:00 2001 From: Jim Galasyn Date: Thu, 2 Jan 2020 15:24:13 -0800 Subject: [PATCH 064/123] docs: add security note linking to processing log settings (DOCS-3076) (#4211) --- docs/developer-guide/processing-log.rst | 6 ++++++ docs/installation/server-config/config-reference.rst | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/docs/developer-guide/processing-log.rst b/docs/developer-guide/processing-log.rst index 6b90bea3f830..d83f9affb5c4 100644 --- a/docs/developer-guide/processing-log.rst +++ b/docs/developer-guide/processing-log.rst @@ -90,6 +90,12 @@ You can disable the log completely by setting the level to OFF: log4j.logger.processing=OFF +.. note:: + + To enable security for the KSQL Processing Log, assign log4j properties + as shown in + `log4j-secure.properties <https://github.com/confluentinc/cp-demo/blob/master/scripts/security/log4j-secure.properties>`__. + Log Schema ========== diff --git a/docs/installation/server-config/config-reference.rst b/docs/installation/server-config/config-reference.rst index 85f2a4a80deb..175bf5466c87 100644 --- a/docs/installation/server-config/config-reference.rst +++ b/docs/installation/server-config/config-reference.rst @@ -428,6 +428,12 @@ KSQL Processing Log Settings The following configuration settings control the behavior of the :ref:`KSQL processing log `. +.. note:: + + To enable security for the KSQL Processing Log, assign log4j properties + as shown in + `log4j-secure.properties <https://github.com/confluentinc/cp-demo/blob/master/scripts/security/log4j-secure.properties>`__. + .. _ksql-processing-log-topic-auto-create: ----------------------------------------- From 869f7ac933cc060457338dae6435d9b2f28bfec9 Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Thu, 2 Jan 2020 16:27:36 -0800 Subject: [PATCH 065/123] docs: add note to ksqlDB processing log settings for security (#4213) --- docs-md/developer-guide/test-and-debug/processing-log.md | 5 +++++ .../installation/server-config/config-reference.md | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/docs-md/developer-guide/test-and-debug/processing-log.md b/docs-md/developer-guide/test-and-debug/processing-log.md index 0a76a9b017e0..68a1c1c3e118 100644 --- a/docs-md/developer-guide/test-and-debug/processing-log.md +++ b/docs-md/developer-guide/test-and-debug/processing-log.md @@ -100,6 +100,11 @@ You can disable the log completely by setting the level to OFF: log4j.logger.processing=OFF ``` +!!! note + To enable security for the KSQL Processing Log, assign log4j properties + as shown in + [log4j-secure.properties](https://github.com/confluentinc/cp-demo/blob/master/scripts/security/log4j-secure.properties). + Log Schema ---------- diff --git a/docs-md/operate-and-deploy/installation/server-config/config-reference.md b/docs-md/operate-and-deploy/installation/server-config/config-reference.md index 87b8f77fbab7..55e147c21f7b 100644 --- a/docs-md/operate-and-deploy/installation/server-config/config-reference.md +++ b/docs-md/operate-and-deploy/installation/server-config/config-reference.md @@ -422,6 +422,11 @@ ksqlDB Processing Log Settings The following configuration settings control the behavior of the [ksqlDB Processing Log](../../../developer-guide/test-and-debug/processing-log.md). +!!! note + To enable security for the KSQL Processing Log, assign log4j properties + as shown in + [log4j-secure.properties](https://github.com/confluentinc/cp-demo/blob/master/scripts/security/log4j-secure.properties). + ### ksql.logging.processing.topic.auto.create Toggles automatic processing log topic creation.
If set to true, ksqlDB From cec0efe137dd79ae77ff19c5d06141e7f06817e2 Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Thu, 2 Jan 2020 17:21:45 -0800 Subject: [PATCH 066/123] docs: add section for ksql.streams.state.dir to md docs (#4214) --- .../installation/server-config/config-reference.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs-md/operate-and-deploy/installation/server-config/config-reference.md b/docs-md/operate-and-deploy/installation/server-config/config-reference.md index 55e147c21f7b..ef1cc89cc63f 100644 --- a/docs-md/operate-and-deploy/installation/server-config/config-reference.md +++ b/docs-md/operate-and-deploy/installation/server-config/config-reference.md @@ -388,6 +388,20 @@ A list of tags to be included with emitted a string of `key:value` pairs separated by commas. For example, `key1:value1,key2:value2`. +### ksql.streams.state.dir + +Sets the storage directory for stateful operations, like aggregations and +joins, to a durable location. By default, state is stored in the +`/tmp/kafka-streams` directory. + +!!! note + The state storage directory must be unique for every server running on the + machine. Otherwise, servers may appear to be stuck and not doing any work. + +The corresponding environment variable in the +[ksqlDB Server image](https://hub.docker.com/r/confluentinc/ksqldb-server/) +is `KSQL_KSQL_STREAMS_STATE_DIR`. + Confluent Control Center Settings --------------------------------- From 607b53920fc77941737cda75d29aa5a4c33f59c2 Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Fri, 3 Jan 2020 10:59:19 -0800 Subject: [PATCH 067/123] docs: add section for ksql.streams.state.dir (DOCS-3153) (#4210) * docs: add section for ksql.streams.state.dir (DOCS-3153) * docs: add note about uniqueness of state store dir * docs: update default state store dir per feedback --- .../server-config/config-reference.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/installation/server-config/config-reference.rst b/docs/installation/server-config/config-reference.rst index 175bf5466c87..31d6f8f6db9f 100644 --- a/docs/installation/server-config/config-reference.rst +++ b/docs/installation/server-config/config-reference.rst @@ -389,6 +389,25 @@ A list of tags to be included with emitted :ref:`JMX metrics ` +.. _ksql-streams-state-dir: + +---------------------- +ksql.streams.state.dir +---------------------- + +Sets the storage directory for stateful operations, like aggregations and +joins, to a durable location. By default, state is stored in the +``/tmp/kafka-streams`` directory. + +.. note:: + + The state storage directory must be unique for every server running on the + machine. Otherwise, servers may appear to be stuck and not doing any work. + +The corresponding environment variable in the +`KSQL Server image <https://hub.docker.com/r/confluentinc/cp-ksql-server/>`__ is +``KSQL_KSQL_STREAMS_STATE_DIR``. + .. _ksql-c3-settings: |c3| Settings From 80191ac23c3ae75466e67bb1a3e8df577b408bff Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Mon, 6 Jan 2020 10:29:43 +0000 Subject: [PATCH 068/123] chore: drop square brackets from string representation of logical schema (MINOR) (#4203) * chore: drop square brackets from string representation of logical schema They are not needed and stop the string representation being easily used, e.g. to build the schema in a CS/CT statement. Invariably, code has to strip or replace the square brackets.
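The effect of the change is easiest to see in isolation. Below is a minimal, self-contained sketch of the join logic, using plain strings in place of ksqlDB's `Column` type (the class name `SchemaToStringSketch` is invented for illustration): the old `toString` wrapped the column list in square brackets, while the new one emits a bare comma-separated list that can be pasted directly into a CREATE STREAM or CREATE TABLE statement.

```java
import java.util.List;
import java.util.stream.Collectors;

public class SchemaToStringSketch {

  public static void main(final String[] args) {
    // Stand-ins for the formatted columns a LogicalSchema would produce.
    final List<String> columns = List.of("`k0` BIGINT KEY", "`f0` BOOLEAN");

    // Before this patch: the column list was wrapped in square brackets.
    final String before = columns.stream()
        .collect(Collectors.joining(", ", "[", "]"));

    // After this patch: the same join, minus the bracket prefix/suffix.
    final String after = columns.stream()
        .collect(Collectors.joining(", "));

    System.out.println(before); // [`k0` BIGINT KEY, `f0` BOOLEAN]
    System.out.println(after);  // `k0` BIGINT KEY, `f0` BOOLEAN
  }
}
```

The test updates below simply drop the brackets from the expected execution-plan and schema strings.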
--- .../ksql/json/LogicalSchemaSerializer.java | 7 +-- .../ksql/schema/ksql/LogicalSchema.java | 2 +- .../ksql/schema/ksql/LogicalSchemaTest.java | 20 +++----- .../physical/PhysicalPlanBuilderTest.java | 50 +++++++++---------- .../confluent/ksql/util/PlanSummaryTest.java | 20 ++++---- ...MetadataTimestampExtractionPolicyTest.java | 1 + 6 files changed, 46 insertions(+), 54 deletions(-) diff --git a/ksql-common/src/main/java/io/confluent/ksql/json/LogicalSchemaSerializer.java b/ksql-common/src/main/java/io/confluent/ksql/json/LogicalSchemaSerializer.java index d8a1751b6630..555a72663732 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/json/LogicalSchemaSerializer.java +++ b/ksql-common/src/main/java/io/confluent/ksql/json/LogicalSchemaSerializer.java @@ -34,11 +34,6 @@ public void serialize( final JsonGenerator gen, final SerializerProvider serializerProvider ) throws IOException { - final String text = schema.toString(); - gen.writeString(trimArrayBrackets(text)); - } - - private static String trimArrayBrackets(final String text) { - return text.substring(1, text.length() - 1); + gen.writeString(schema.toString()); } } diff --git a/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/LogicalSchema.java b/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/LogicalSchema.java index 3bb6807be809..201d4808e014 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/LogicalSchema.java +++ b/ksql-common/src/main/java/io/confluent/ksql/schema/ksql/LogicalSchema.java @@ -219,7 +219,7 @@ public String toString(final FormatOptions formatOptions) { return columns.stream() .filter(withNamespace(Namespace.META).negate()) .map(c -> c.toString(formatOptions)) - .collect(Collectors.joining(", ", "[", "]")); + .collect(Collectors.joining(", ")); } private Optional findColumnMatching(final Predicate predicate) { diff --git a/ksql-common/src/test/java/io/confluent/ksql/schema/ksql/LogicalSchemaTest.java b/ksql-common/src/test/java/io/confluent/ksql/schema/ksql/LogicalSchemaTest.java index a7cee9051bcc..5605db6f8e47 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/schema/ksql/LogicalSchemaTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/schema/ksql/LogicalSchemaTest.java @@ -520,8 +520,7 @@ public void shouldConvertSchemaToString() { // Then: assertThat(s, is( - "[" - + "`k0` BIGINT KEY, " + "`k0` BIGINT KEY, " + "`k1` DOUBLE KEY, " + "`f0` BOOLEAN, " + "`f1` INTEGER, " @@ -531,7 +530,7 @@ public void shouldConvertSchemaToString() { + "`f6` STRUCT<`a` BIGINT>, " + "`f7` ARRAY, " + "`f8` MAP" - + "]")); + )); } @Test @@ -550,13 +549,12 @@ public void shouldSupportKeyInterleavedWithValueColumns() { // Then: assertThat(s, is( - "[" - + "`f0` BOOLEAN, " + "`f0` BOOLEAN, " + "`k0` BIGINT KEY, " + "`v0` INTEGER, " + "`k1` DOUBLE KEY, " + "`v1` BOOLEAN" - + "]")); + )); } @Test @@ -578,11 +576,10 @@ public void shouldConvertSchemaToStringWithReservedWords() { // Then: assertThat(s, is( - "[" - + "ROWKEY STRING KEY, " + "ROWKEY STRING KEY, " + "`f0` BOOLEAN, " + "f1 STRUCT<`f0` BIGINT, f1 BIGINT>" - + "]")); + )); } @Test @@ -598,10 +595,9 @@ public void shouldConvertAliasedSchemaToString() { // Then: assertThat(s, is( - "[" - + "`bob`.`ROWKEY` STRING KEY, " + "`bob`.`ROWKEY` STRING KEY, " + "`bob`.`f0` BOOLEAN" - + "]")); + )); } @Test diff --git a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java index fad0339c4198..6ef10d5f24d1 100644 --- 
a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java @@ -203,29 +203,29 @@ public void shouldCreateExecutionPlan() { final String[] lines = planText.split("\n"); assertThat(lines[0], startsWith( - " > [ PROJECT ] | Schema: [ROWKEY BIGINT KEY, COL0 BIGINT, KSQL_COL_1 DOUBLE, " - + "KSQL_COL_2 BIGINT] |")); + " > [ PROJECT ] | Schema: ROWKEY BIGINT KEY, COL0 BIGINT, KSQL_COL_1 DOUBLE, " + + "KSQL_COL_2 BIGINT |")); assertThat(lines[1], startsWith( - "\t\t > [ AGGREGATE ] | Schema: [ROWKEY BIGINT KEY, KSQL_INTERNAL_COL_0 BIGINT, " + "\t\t > [ AGGREGATE ] | Schema: ROWKEY BIGINT KEY, KSQL_INTERNAL_COL_0 BIGINT, " + "KSQL_INTERNAL_COL_1 DOUBLE, KSQL_AGG_VARIABLE_0 DOUBLE, " - + "KSQL_AGG_VARIABLE_1 BIGINT] |")); + + "KSQL_AGG_VARIABLE_1 BIGINT |")); assertThat(lines[2], startsWith( - "\t\t\t\t > [ GROUP_BY ] | Schema: [ROWKEY BIGINT KEY, KSQL_INTERNAL_COL_0 BIGINT, " - + "KSQL_INTERNAL_COL_1 DOUBLE] |" + "\t\t\t\t > [ GROUP_BY ] | Schema: ROWKEY BIGINT KEY, KSQL_INTERNAL_COL_0 BIGINT, " + + "KSQL_INTERNAL_COL_1 DOUBLE |" )); assertThat(lines[3], startsWith( - "\t\t\t\t\t\t > [ PROJECT ] | Schema: [ROWKEY STRING KEY, KSQL_INTERNAL_COL_0 BIGINT, " - + "KSQL_INTERNAL_COL_1 DOUBLE] |")); + "\t\t\t\t\t\t > [ PROJECT ] | Schema: ROWKEY STRING KEY, KSQL_INTERNAL_COL_0 BIGINT, " + + "KSQL_INTERNAL_COL_1 DOUBLE |")); assertThat(lines[4], startsWith( - "\t\t\t\t\t\t\t\t > [ FILTER ] | Schema: [TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, " + "\t\t\t\t\t\t\t\t > [ FILTER ] | Schema: TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, " + "TEST1.COL0 BIGINT, TEST1.COL1 STRING, TEST1.COL2 STRING, " + "TEST1.COL3 DOUBLE, TEST1.COL4 ARRAY, " - + "TEST1.COL5 MAP] |")); + + "TEST1.COL5 MAP |")); assertThat(lines[5], startsWith( - "\t\t\t\t\t\t\t\t\t\t > [ SOURCE ] | Schema: [TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, " + "\t\t\t\t\t\t\t\t\t\t > [ SOURCE ] | Schema: TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, " + "TEST1.COL0 BIGINT, TEST1.COL1 STRING, TEST1.COL2 STRING, " + "TEST1.COL3 DOUBLE, TEST1.COL4 ARRAY, " - + "TEST1.COL5 MAP] |")); + + "TEST1.COL5 MAP |")); } @Test @@ -243,11 +243,11 @@ public void shouldCreateExecutionPlanForInsert() { final String[] lines = planText.split("\n"); Assert.assertTrue(lines.length == 3); Assert.assertEquals(lines[0], - " > [ SINK ] | Schema: [ROWKEY STRING KEY, COL0 BIGINT, COL1 STRING, COL2 DOUBLE] | Logger: InsertQuery_1.S1"); + " > [ SINK ] | Schema: ROWKEY STRING KEY, COL0 BIGINT, COL1 STRING, COL2 DOUBLE | Logger: InsertQuery_1.S1"); Assert.assertEquals(lines[1], - "\t\t > [ PROJECT ] | Schema: [ROWKEY STRING KEY, COL0 BIGINT, COL1 STRING, COL2 DOUBLE] | Logger: InsertQuery_1.Project"); + "\t\t > [ PROJECT ] | Schema: ROWKEY STRING KEY, COL0 BIGINT, COL1 STRING, COL2 DOUBLE | Logger: InsertQuery_1.Project"); Assert.assertEquals(lines[2], - "\t\t\t\t > [ SOURCE ] | Schema: [TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, TEST1.COL0 BIGINT, TEST1.COL1 STRING, TEST1.COL2 DOUBLE] | Logger: InsertQuery_1.KsqlTopic.Source"); + "\t\t\t\t > [ SOURCE ] | Schema: TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, TEST1.COL0 BIGINT, TEST1.COL1 STRING, TEST1.COL2 DOUBLE | Logger: InsertQuery_1.KsqlTopic.Source"); assertThat(queryMetadataList.get(1), instanceOf(PersistentQueryMetadata.class)); final PersistentQueryMetadata persistentQuery = 
(PersistentQueryMetadata) queryMetadataList.get(1); @@ -273,13 +273,13 @@ public void shouldCreatePlanForInsertIntoStreamFromStream() { final String[] lines = planText.split("\n"); assertThat(lines.length, equalTo(3)); assertThat(lines[0], containsString( - "> [ SINK ] | Schema: [ROWKEY STRING KEY, COL0 INTEGER]")); + "> [ SINK ] | Schema: ROWKEY STRING KEY, COL0 INTEGER")); assertThat(lines[1], containsString( - "> [ PROJECT ] | Schema: [ROWKEY STRING KEY, COL0 INTEGER]")); + "> [ PROJECT ] | Schema: ROWKEY STRING KEY, COL0 INTEGER")); assertThat(lines[2], containsString( - "> [ SOURCE ] | Schema: [TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, TEST1.COL0 INTEGER]")); + "> [ SOURCE ] | Schema: TEST1.ROWKEY STRING KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, TEST1.COL0 INTEGER")); } @Test @@ -294,13 +294,13 @@ public void shouldRekeyIfPartitionByDoesNotMatchResultKey() { final String planText = queryMetadataList.get(1).getExecutionPlan(); final String[] lines = planText.split("\n"); assertThat(lines.length, equalTo(4)); - assertThat(lines[0], equalTo(" > [ SINK ] | Schema: [ROWKEY BIGINT KEY, COL0 BIGINT, COL1 STRING, COL2 " - + "DOUBLE] | Logger: InsertQuery_1.S1")); + assertThat(lines[0], equalTo(" > [ SINK ] | Schema: ROWKEY BIGINT KEY, COL0 BIGINT, COL1 STRING, COL2 " + + "DOUBLE | Logger: InsertQuery_1.S1")); assertThat(lines[2], - containsString("[ REKEY ] | Schema: [ROWKEY BIGINT KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, TEST1.COL0 BIGINT, TEST1.COL1 STRING, TEST1.COL2 DOUBLE] " + containsString("[ REKEY ] | Schema: ROWKEY BIGINT KEY, TEST1.ROWTIME BIGINT, TEST1.ROWKEY STRING, TEST1.COL0 BIGINT, TEST1.COL1 STRING, TEST1.COL2 DOUBLE " + "| Logger: InsertQuery_1.PartitionBy")); - assertThat(lines[1], containsString("[ PROJECT ] | Schema: [ROWKEY BIGINT KEY, COL0 BIGINT, COL1 STRING" - + ", COL2 DOUBLE] | Logger: InsertQuery_1.Project")); + assertThat(lines[1], containsString("[ PROJECT ] | Schema: ROWKEY BIGINT KEY, COL0 BIGINT, COL1 STRING" + + ", COL2 DOUBLE | Logger: InsertQuery_1.Project")); } @Test @@ -316,7 +316,7 @@ public void shouldRepartitionLeftStreamIfNotCorrectKey() { .get(0); // Then: - assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [ROWKEY BIGINT KEY, TEST2.")); + assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: ROWKEY BIGINT KEY, TEST2.")); } @Test @@ -332,7 +332,7 @@ public void shouldRepartitionRightStreamIfNotCorrectKey() { .get(0); // Then: - assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: [ROWKEY BIGINT KEY, TEST3.")); + assertThat(result.getExecutionPlan(), containsString("[ REKEY ] | Schema: ROWKEY BIGINT KEY, TEST3.")); } @Test diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/PlanSummaryTest.java b/ksql-engine/src/test/java/io/confluent/ksql/util/PlanSummaryTest.java index 5ae82fbeb8c8..e3a35465931e 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/PlanSummaryTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/PlanSummaryTest.java @@ -50,7 +50,7 @@ public class PlanSummaryTest { @Mock private StepSchemaResolver schemaResolver; - private ExecutionStep sourceStep; + private ExecutionStep sourceStep; private PlanSummary planSummaryBuilder; @Before @@ -66,7 +66,7 @@ public void shouldSummarizeSource() { // Then: assertThat(summary, is( - " > [ SOURCE ] | Schema: [ROWKEY STRING KEY, L0 INTEGER] | Logger: QID.src\n" + " > [ SOURCE ] | Schema: ROWKEY STRING KEY, L0 INTEGER | Logger: QID.src\n" )); } @@ -76,15 +76,15 @@ 
public void shouldSummarizeWithSource() { final LogicalSchema schema = LogicalSchema.builder() .valueColumn(ColumnName.of("L1"), SqlTypes.STRING) .build(); - final ExecutionStep step = givenStep(StreamSelect.class, "child", schema, sourceStep); + final ExecutionStep step = givenStep(StreamSelect.class, "child", schema, sourceStep); // When: final String summary = planSummaryBuilder.summarize(step); // Then: assertThat(summary, is( - " > [ PROJECT ] | Schema: [ROWKEY STRING KEY, L1 STRING] | Logger: QID.child" - + "\n\t\t > [ SOURCE ] | Schema: [ROWKEY STRING KEY, L0 INTEGER] | Logger: QID.src\n" + " > [ PROJECT ] | Schema: ROWKEY STRING KEY, L1 STRING | Logger: QID.child" + + "\n\t\t > [ SOURCE ] | Schema: ROWKEY STRING KEY, L0 INTEGER | Logger: QID.src\n" )); } @@ -97,8 +97,8 @@ public void shouldSummarizePlanWithMultipleSources() { final LogicalSchema schema = LogicalSchema.builder() .valueColumn(ColumnName.of("L1"), SqlTypes.STRING) .build(); - final ExecutionStep sourceStep2 = givenStep(StreamSource.class, "src2", sourceSchema2); - final ExecutionStep step = + final ExecutionStep sourceStep2 = givenStep(StreamSource.class, "src2", sourceSchema2); + final ExecutionStep step = givenStep(StreamStreamJoin.class, "child", schema, sourceStep, sourceStep2); // When: @@ -106,9 +106,9 @@ public void shouldSummarizePlanWithMultipleSources() { // Then: assertThat(summary, is( - " > [ JOIN ] | Schema: [ROWKEY STRING KEY, L1 STRING] | Logger: QID.child" - + "\n\t\t > [ SOURCE ] | Schema: [ROWKEY STRING KEY, L0 INTEGER] | Logger: QID.src" - + "\n\t\t > [ SOURCE ] | Schema: [ROWKEY STRING KEY, L0_2 STRING] | Logger: QID.src2\n" + " > [ JOIN ] | Schema: ROWKEY STRING KEY, L1 STRING | Logger: QID.child" + + "\n\t\t > [ SOURCE ] | Schema: ROWKEY STRING KEY, L0 INTEGER | Logger: QID.src" + + "\n\t\t > [ SOURCE ] | Schema: ROWKEY STRING KEY, L0_2 STRING | Logger: QID.src2\n" )); } diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/MetadataTimestampExtractionPolicyTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/MetadataTimestampExtractionPolicyTest.java index 9a13cd9f9ec8..e531d2d253d6 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/MetadataTimestampExtractionPolicyTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/timestamp/MetadataTimestampExtractionPolicyTest.java @@ -20,6 +20,7 @@ import org.junit.Test; public class MetadataTimestampExtractionPolicyTest { + @SuppressWarnings("UnstableApiUsage") @Test public void shouldTestEqualityCorrectly() { new EqualsTester() From 444fef0d853c347a768c08aff8ce911c9aee4d50 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Mon, 6 Jan 2020 16:09:35 +0000 Subject: [PATCH 069/123] chore: fix TestDataProviders to have correct key type (#4206) * chore: fix TestDataProviders to have correct key type A couple of the `TestDataProvider` implementations have a `WITH(KEY)` field that has a non-`STRING` SQL type, but the SQL type of `ROWKEY` was still `STRING`. This is not correct: the with-key field must have the same SQL type as the actual key, i.e. `ROWKEY`. This change fixes the `TestDataProvider`s and any associated tests. It also refactors the providers and the code tests use to produce and consume messages to clean this up and remove duplicate functionality.
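To see why the key type matters on the wire, here is a minimal sketch using only the standard Kafka serializers (the class name `KeySerdeSketch` is invented for illustration): a topic whose `ROWKEY` is `BIGINT` needs its keys written with a long serde, and a key written as a string, as the old string-keyed providers did, yields bytes that a `BIGINT`-keyed reader cannot decode.

```java
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class KeySerdeSketch {

  public static void main(final String[] args) {
    // A BIGINT key is always serialized as 8 big-endian bytes.
    final byte[] asLong = new LongSerializer().serialize("topic", 10L);

    // The same logical key written as a string is just its UTF-8 text.
    final byte[] asString = new StringSerializer().serialize("topic", "10");

    System.out.println(asLong.length);   // 8
    System.out.println(asString.length); // 2

    // Reading the string-encoded bytes back as a BIGINT key fails outright.
    try {
      new LongDeserializer().deserialize("topic", asString);
    } catch (final SerializationException e) {
      System.out.println("not a valid BIGINT key: " + e.getMessage());
    }
  }
}
```

This is why the diff below derives the key serde from the schema, via `KafkaSerdeFactory.getPrimitiveSerde`, rather than hard-coding `StringSerializer`/`StringDeserializer` in the test harness.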
--- .../java/io/confluent/ksql/cli/CliTest.java | 92 +++---- .../confluent/ksql/cli/SslFunctionalTest.java | 16 +- .../integration/IntegrationTestHarness.java | 252 +++++++++++------- .../ksql/integration/JsonFormatTest.java | 25 +- .../integration/SecureIntegrationTest.java | 47 ++-- .../StreamsSelectAndProjectIntTest.java | 69 +++-- .../ksql/integration/UdfIntTest.java | 26 +- .../ks/KsMaterializationFunctionalTest.java | 6 +- .../confluent/ksql/util/ItemDataProvider.java | 47 ++-- .../ksql/util/OrderDataProvider.java | 201 +++++++------- .../ksql/util/PageViewDataProvider.java | 51 ++-- .../confluent/ksql/util/TestDataProvider.java | 14 +- .../io/confluent/ksql/util/TopicConsumer.java | 136 ---------- .../io/confluent/ksql/util/TopicProducer.java | 100 ------- .../confluent/ksql/util/UserDataProvider.java | 40 ++- .../integration/ClusterTerminationTest.java | 17 +- .../KsqlResourceFunctionalTest.java | 9 +- .../integration/PullQueryFunctionalTest.java | 5 +- .../ksql/rest/integration/RestApiTest.java | 20 +- .../integration/RestIntegrationTestUtil.java | 31 +-- .../ksql/serde/kafka/KafkaSerdeFactory.java | 6 +- .../serde/avro/KsqlAvroSerializerTest.java | 6 +- .../util/EmbeddedSingleNodeKafkaCluster.java | 98 ++++++- 23 files changed, 598 insertions(+), 716 deletions(-) delete mode 100644 ksql-engine/src/test/java/io/confluent/ksql/util/TopicConsumer.java delete mode 100644 ksql-engine/src/test/java/io/confluent/ksql/util/TopicProducer.java diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java index e985a4ad7f57..6e1c5b01fcdd 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java @@ -44,6 +44,7 @@ import io.confluent.ksql.cli.console.OutputFormat; import io.confluent.ksql.cli.console.cmd.RemoteServerSpecificCommand; import io.confluent.ksql.cli.console.cmd.RequestPipeliningCommand; +import io.confluent.ksql.integration.IntegrationTestHarness; import io.confluent.ksql.integration.Retry; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.rest.Errors; @@ -61,15 +62,13 @@ import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; +import io.confluent.ksql.serde.Format; import io.confluent.ksql.serde.SerdeOption; -import io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster; import io.confluent.ksql.test.util.KsqlIdentifierTestUtil; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlConstants; import io.confluent.ksql.util.OrderDataProvider; import io.confluent.ksql.util.TestDataProvider; -import io.confluent.ksql.util.TopicConsumer; -import io.confluent.ksql.util.TopicProducer; import java.io.File; import java.net.URI; import java.nio.charset.StandardCharsets; @@ -87,7 +86,6 @@ import javax.ws.rs.ProcessingException; import kafka.zookeeper.ZooKeeperClientException; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.streams.StreamsConfig; import org.eclipse.jetty.http.HttpStatus; import org.eclipse.jetty.http.HttpStatus.Code; @@ -116,7 +114,6 @@ @Category(IntegrationTest.class) public class CliTest { - private static final EmbeddedSingleNodeKafkaCluster CLUSTER = EmbeddedSingleNodeKafkaCluster.build(); private static final String SERVER_OVERRIDE = "SERVER"; private static final String SESSION_OVERRIDE 
= "SESSION"; @@ -125,8 +122,10 @@ public class CliTest { private static final Pattern WRITE_QUERIES = Pattern .compile(".*The following queries write into this source: \\[(.+)].*", Pattern.DOTALL); + public static final IntegrationTestHarness TEST_HARNESS = IntegrationTestHarness.build(); + private static final TestKsqlRestApp REST_APP = TestKsqlRestApp - .builder(CLUSTER::bootstrapServers) + .builder(TEST_HARNESS::kafkaBootstrapServers) .withProperty(KsqlConfig.SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_MS_PROPERTY, KsqlConstants.defaultSinkWindowChangeLogAdditionalRetention + 1) .withProperty(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, true) @@ -138,7 +137,7 @@ public class CliTest { @ClassRule public static final RuleChain CHAIN = RuleChain .outerRule(Retry.of(3, ZooKeeperClientException.class, 3, TimeUnit.SECONDS)) - .around(CLUSTER) + .around(TEST_HARNESS) .around(REST_APP); private static final ServerInfo SERVER_INFO = mock(ServerInfo.class); @@ -156,8 +155,6 @@ public class CliTest { private static final List> EMPTY_RESULT = ImmutableList.of(); - private static TopicProducer topicProducer; - private static TopicConsumer topicConsumer; private static KsqlRestClient restClient; private static OrderDataProvider orderDataProvider; @@ -180,12 +177,9 @@ public static void classSetUp() { ); orderDataProvider = new OrderDataProvider(); - CLUSTER.createTopic(orderDataProvider.topicName()); - - topicProducer = new TopicProducer(CLUSTER); - topicConsumer = new TopicConsumer(CLUSTER); + TEST_HARNESS.getKafkaCluster().createTopic(orderDataProvider.topicName()); - produceInputStream(orderDataProvider); + TEST_HARNESS.produceRows(orderDataProvider.topicName(), orderDataProvider, Format.JSON); try (Cli cli = Cli.build(1L, 1000L, OutputFormat.JSON, restClient)) { createKStream(orderDataProvider, cli); @@ -236,14 +230,10 @@ private static void run(final String command, final Cli localCli) { } } - private static void produceInputStream(final TestDataProvider dataProvider) { - topicProducer.produceInputData(dataProvider); - } - - private static void createKStream(final TestDataProvider dataProvider, final Cli cli) { - run(String.format( - "CREATE STREAM %s %s WITH (value_format = 'json', kafka_topic = '%s');", - dataProvider.kstreamName(), dataProvider.ksqlSchemaString(), dataProvider.topicName()), + private static void createKStream(final TestDataProvider dataProvider, final Cli cli) { + run("CREATE STREAM " + dataProvider.kstreamName() + + " (" + dataProvider.ksqlSchemaString() + ")" + + " WITH (value_format = 'json', kafka_topic = '" + dataProvider.topicName() + "');", cli); } @@ -264,7 +254,7 @@ public static void classTearDown() { private void testCreateStreamAsSelect( final String selectQuery, final PhysicalSchema resultSchema, - final Map expectedResults + final Map expectedResults ) { final String queryString = "CREATE STREAM " + streamName + " AS " + selectQuery; @@ -277,8 +267,12 @@ private void testCreateStreamAsSelect( isRow(is("Executing statement")))); /* Assert Results */ - final Map results = topicConsumer - .readResults(streamName, resultSchema, expectedResults.size(), new StringDeserializer()); + final Map results = TEST_HARNESS.verifyAvailableUniqueRows( + streamName, + expectedResults.size(), + Format.JSON, + resultSchema + ); dropStream(streamName); @@ -470,7 +464,7 @@ public void shouldPrintCorrectSchemaForDescribeStream() { "describe " + orderDataProvider.kstreamName() + ";", containsRows( row("ROWTIME", "BIGINT (system)"), - row("ROWKEY", "VARCHAR(STRING) 
(system)"), + row("ROWKEY", "BIGINT (system)"), row("ORDERTIME", "BIGINT"), row("ORDERID", "VARCHAR(STRING)"), row("ITEMID", "VARCHAR(STRING)"), @@ -492,49 +486,49 @@ public void testPersistentSelectStar() { @Test public void testSelectProject() { - final Map expectedResults = new HashMap<>(); - expectedResults.put("1", new GenericRow( + final Map expectedResults = new HashMap<>(); + expectedResults.put(1L, new GenericRow( ImmutableList.of( "ITEM_1", 10.0, new Double[]{100.0, 110.99, 90.0}))); - expectedResults.put("2", new GenericRow( + expectedResults.put(2L, new GenericRow( ImmutableList.of( "ITEM_2", 20.0, new Double[]{10.0, 10.99, 9.0}))); - expectedResults.put("3", new GenericRow( + expectedResults.put(3L, new GenericRow( ImmutableList.of( "ITEM_3", 30.0, new Double[]{10.0, 10.99, 91.0}))); - expectedResults.put("4", new GenericRow( + expectedResults.put(4L, new GenericRow( ImmutableList.of( "ITEM_4", 40.0, new Double[]{10.0, 140.99, 94.0}))); - expectedResults.put("5", new GenericRow( + expectedResults.put(5L, new GenericRow( ImmutableList.of( "ITEM_5", 50.0, new Double[]{160.0, 160.99, 98.0}))); - expectedResults.put("6", new GenericRow( + expectedResults.put(6L, new GenericRow( ImmutableList.of( "ITEM_6", 60.0, new Double[]{1000.0, 1100.99, 900.0}))); - expectedResults.put("7", new GenericRow( + expectedResults.put(7L, new GenericRow( ImmutableList.of( "ITEM_7", 70.0, new Double[]{1100.0, 1110.99, 190.0}))); - expectedResults.put("8", new GenericRow( + expectedResults.put(8L, new GenericRow( ImmutableList.of( "ITEM_8", 80.0, @@ -542,6 +536,7 @@ public void testSelectProject() { final PhysicalSchema resultSchema = PhysicalSchema.from( LogicalSchema.builder() + .keyColumns(orderDataProvider.schema().logicalSchema().key()) .valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING) .valueColumn(ColumnName.of("ORDERUNITS"), SqlTypes.DOUBLE) .valueColumn(ColumnName.of("PRICEARRAY"), SqlTypes.array(SqlTypes.DOUBLE)) @@ -558,14 +553,14 @@ public void testSelectProject() { @Test public void testSelectFilter() { - final Map expectedResults = new HashMap<>(); + final Map expectedResults = new HashMap<>(); final Map mapField = new HashMap<>(); mapField.put("key1", 1.0); mapField.put("key2", 2.0); mapField.put("key3", 3.0); - expectedResults.put("8", new GenericRow( + expectedResults.put(8L, new GenericRow( ImmutableList.of( - 8, + 8L, "ORDER_6", "ITEM_8", 80.0, @@ -582,10 +577,10 @@ public void testSelectFilter() { @Test public void testTransientSelect() { - final Map streamData = orderDataProvider.data(); - final List row1 = streamData.get("1").getColumns(); - final List row2 = streamData.get("2").getColumns(); - final List row3 = streamData.get("3").getColumns(); + final Map streamData = orderDataProvider.data(); + final List row1 = streamData.get(1L).getColumns(); + final List row2 = streamData.get(2L).getColumns(); + final List row3 = streamData.get(3L).getColumns(); selectWithLimit( "SELECT ORDERID, ITEMID FROM " + orderDataProvider.kstreamName() + " EMIT CHANGES", @@ -645,10 +640,10 @@ public void shouldOutputPullQueryHeader() { @Test public void testTransientContinuousSelectStar() { - final Map streamData = orderDataProvider.data(); - final List row1 = streamData.get("1").getColumns(); - final List row2 = streamData.get("2").getColumns(); - final List row3 = streamData.get("3").getColumns(); + final Map streamData = orderDataProvider.data(); + final List row1 = streamData.get(1L).getColumns(); + final List row2 = streamData.get(2L).getColumns(); + final List row3 = 
streamData.get(3L).getColumns(); selectWithLimit( "SELECT * FROM " + orderDataProvider.kstreamName() + " EMIT CHANGES", @@ -691,6 +686,7 @@ public void testSelectUDFs() { final PhysicalSchema resultSchema = PhysicalSchema.from( LogicalSchema.builder() + .keyColumns(orderDataProvider.schema().logicalSchema().key()) .valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING) .valueColumn(ColumnName.of("COL1"), SqlTypes.DOUBLE) .valueColumn(ColumnName.of("COL2"), SqlTypes.DOUBLE) @@ -700,8 +696,8 @@ public void testSelectUDFs() { SerdeOption.none() ); - final Map expectedResults = new HashMap<>(); - expectedResults.put("8", new GenericRow(ImmutableList.of("ITEM_8", 800.0, 1110.0, 12.0, true))); + final Map expectedResults = new HashMap<>(); + expectedResults.put(8L, new GenericRow(ImmutableList.of("ITEM_8", 800.0, 1110.0, 12.0, true))); testCreateStreamAsSelect(queryString, resultSchema, expectedResults); } diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java index 475cf85fe561..602c8f15fe70 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/SslFunctionalTest.java @@ -15,6 +15,7 @@ package io.confluent.ksql.cli; +import static io.confluent.ksql.serde.Format.JSON; import static java.util.Collections.emptyMap; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.both; @@ -27,16 +28,15 @@ import com.google.common.net.UrlEscapers; import io.confluent.common.utils.IntegrationTest; +import io.confluent.ksql.integration.IntegrationTestHarness; import io.confluent.ksql.integration.Retry; import io.confluent.ksql.rest.client.KsqlRestClient; import io.confluent.ksql.rest.client.KsqlRestClientException; import io.confluent.ksql.rest.client.RestResponse; import io.confluent.ksql.rest.server.TestKsqlRestApp; -import io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster; import io.confluent.ksql.test.util.secure.ClientTrustStore; import io.confluent.ksql.test.util.secure.ServerKeyStore; import io.confluent.ksql.util.OrderDataProvider; -import io.confluent.ksql.util.TopicProducer; import io.confluent.rest.RestConfig; import java.io.EOFException; import java.net.URI; @@ -80,12 +80,10 @@ public class SslFunctionalTest { + " \"ksql\": \"PRINT " + TOPIC_NAME + " FROM BEGINNING;\"" + "}"); - private static final EmbeddedSingleNodeKafkaCluster CLUSTER = EmbeddedSingleNodeKafkaCluster - .newBuilder() - .build(); + public static final IntegrationTestHarness TEST_HARNESS = IntegrationTestHarness.build(); private static final TestKsqlRestApp REST_APP = TestKsqlRestApp - .builder(CLUSTER::bootstrapServers) + .builder(TEST_HARNESS::kafkaBootstrapServers) .withProperties(ServerKeyStore.keyStoreProps()) .withProperty(RestConfig.LISTENERS_CONFIG, "https://localhost:0") .build(); @@ -93,7 +91,7 @@ public class SslFunctionalTest { @ClassRule public static final RuleChain CHAIN = RuleChain .outerRule(Retry.of(3, ZooKeeperClientException.class, 3, TimeUnit.SECONDS)) - .around(CLUSTER) + .around(TEST_HARNESS) .around(REST_APP); @Rule @@ -105,8 +103,8 @@ public class SslFunctionalTest { @BeforeClass public static void classSetUp() { final OrderDataProvider dataProvider = new OrderDataProvider(); - CLUSTER.createTopic(TOPIC_NAME); - new TopicProducer(CLUSTER).produceInputData(dataProvider); + TEST_HARNESS.getKafkaCluster().createTopic(TOPIC_NAME); + TEST_HARNESS.produceRows(dataProvider.topicName(), dataProvider, 
JSON); } @Before diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/IntegrationTestHarness.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/IntegrationTestHarness.java index 02558557e8ff..cad69957dbdd 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/IntegrationTestHarness.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/IntegrationTestHarness.java @@ -18,8 +18,10 @@ import static io.confluent.ksql.test.util.ConsumerTestUtil.hasUniqueRecords; import static io.confluent.ksql.test.util.ConsumerTestUtil.toUniqueRecords; import static io.confluent.ksql.test.util.MapMatchers.mapHasSize; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import com.google.common.collect.ImmutableMap; import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.GenericRow; @@ -30,6 +32,7 @@ import io.confluent.ksql.serde.FormatInfo; import io.confluent.ksql.serde.GenericRowSerDe; import io.confluent.ksql.serde.avro.AvroSchemas; +import io.confluent.ksql.serde.kafka.KafkaSerdeFactory; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.services.TestServiceContext; @@ -44,36 +47,27 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; -import java.util.stream.Collectors; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.serialization.ByteArrayDeserializer; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.Serializer; -import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.test.TestUtils; import org.hamcrest.Matcher; import org.junit.rules.ExternalResource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @SuppressWarnings("WeakerAccess") public final class IntegrationTestHarness extends ExternalResource { - private static final Logger LOG = LoggerFactory.getLogger(IntegrationTestHarness.class); private static final int DEFAULT_PARTITION_COUNT = 1; private static final short DEFAULT_REPLICATION_FACTOR = (short) 1; - private static final long PRODUCE_TIMEOUT_MS = 30_000; + private static final Supplier DEFAULT_TS_SUPPLIER = () -> null; private final LazyServiceContext serviceContext; private final EmbeddedSingleNodeKafkaCluster kafkaCluster; @@ -160,17 +154,13 @@ public void ensureTopics(final int partitionCount, final String... topicNames) { * @param data the String value of the record. 
*/ public void produceRecord(final String topicName, final String key, final String data) { - try { - try (KafkaProducer producer = new KafkaProducer<>( - kafkaCluster.producerConfig(), - new StringSerializer(), - new StringSerializer()) - ) { - producer.send(new ProducerRecord<>(topicName, key, data)).get(); - } - } catch (final Exception e) { - throw new RuntimeException("Failed to send record to " + topicName, e); - } + kafkaCluster.produceRows( + topicName, + ImmutableMap.of(key, data), + new StringSerializer(), + new StringSerializer(), + DEFAULT_TS_SUPPLIER + ); } /** @@ -181,16 +171,17 @@ public void produceRecord(final String topicName, final String key, final String * @param valueFormat the format values should be produced as. * @return the map of produced rows */ - public Map produceRows( + public Map produceRows( final String topic, - final TestDataProvider dataProvider, + final TestDataProvider dataProvider, final Format valueFormat ) { return produceRows( topic, dataProvider, valueFormat, - () -> null); + DEFAULT_TS_SUPPLIER + ); } /** @@ -202,63 +193,69 @@ public Map produceRows( * @param timestampSupplier supplier of timestamps. * @return the map of produced rows */ - public Map produceRows( + public Map produceRows( final String topic, - final TestDataProvider dataProvider, + final TestDataProvider dataProvider, final Format valueFormat, final Supplier timestampSupplier ) { return produceRows( topic, dataProvider.data(), - getSerializer(valueFormat, dataProvider.schema()), + getKeySerializer(dataProvider.schema()), + getValueSerializer(valueFormat, dataProvider.schema()), timestampSupplier ); } + /** + * Produce data to a topic + * + * @param topic the name of the topic to produce to. + * @param rowsToPublish the rows to publish + * @param schema the schema of the rows + * @param valueFormat the format values should be produced as. + * @return the map of produced rows + */ + public Map produceRows( + final String topic, + final Map rowsToPublish, + final PhysicalSchema schema, + final Format valueFormat + ) { + return produceRows( + topic, + rowsToPublish, + getKeySerializer(schema), + getValueSerializer(valueFormat, schema), + DEFAULT_TS_SUPPLIER + ); + } + /** * Publish test data to the supplied {@code topic}. * * @param topic the name of the topic to produce to. * @param recordsToPublish the records to produce. + * @param keySerializer the serializer to use to serialize keys. * @param valueSerializer the serializer to use to serialize values. * @param timestampSupplier supplier of timestamps. * @return the map of produced rows, with an iteration order that matches produce order. */ - public Map produceRows( + public Map produceRows( final String topic, - final Map recordsToPublish, + final Map recordsToPublish, + final Serializer keySerializer, final Serializer valueSerializer, final Supplier timestampSupplier ) { - ensureTopics(topic); - - try (KafkaProducer producer = new KafkaProducer<>( - kafkaCluster.producerConfig(), - new StringSerializer(), - valueSerializer - )) { - final Map> futures = recordsToPublish.entrySet().stream() - .collect(Collectors.toMap(Entry::getKey, entry -> { - final String key = entry.getKey(); - final GenericRow value = entry.getValue(); - final Long timestamp = timestampSupplier.get(); - - LOG.debug("Producing message. 
topic:{}, key:{}, value:{}, timestamp:{}", - topic, key, value, timestamp); - - return producer.send(new ProducerRecord<>(topic, null, timestamp, key, value)); - })); - - return futures.entrySet().stream() - .collect(Collectors.toMap(Entry::getKey, entry -> { - try { - return entry.getValue().get(PRODUCE_TIMEOUT_MS, TimeUnit.MILLISECONDS); - } catch (final Exception e) { - throw new RuntimeException(e); - } - })); - } + return kafkaCluster.produceRows( + topic, + recordsToPublish, + keySerializer, + valueSerializer, + timestampSupplier + ); } /** @@ -268,15 +265,29 @@ public Map produceRows( * @param expectedCount the expected number of records. * @return the list of consumed records. */ - public List> verifyAvailableRecords( + public List> verifyAvailableRecords( final String topic, final int expectedCount + ) { + return verifyAvailableRecords(topic, is(expectedCount)); + } + + /** + * Verify there are {@code expectedCount} records available on the supplied {@code topic}. + * + * @param topic the name of the topic to check. + * @param expectedCount the expected number of records. + * @return the list of consumed records. + */ + public List> verifyAvailableRecords( + final String topic, + final Matcher expectedCount ) { return kafkaCluster.verifyAvailableRecords( topic, - expectedCount, - new StringDeserializer(), - new StringDeserializer() + hasSize(expectedCount), + new ByteArrayDeserializer(), + new ByteArrayDeserializer() ); } @@ -289,21 +300,13 @@ public List> verifyAvailableRecords( * @param schema the schema of the value. * @return the list of consumed records. */ - public List> verifyAvailableRows( + public List> verifyAvailableRows( final String topic, final int expectedCount, final Format valueFormat, final PhysicalSchema schema ) { - final Deserializer valueDeserializer = - getDeserializer(valueFormat, schema); - - return kafkaCluster.verifyAvailableRecords( - topic, - expectedCount, - new StringDeserializer(), - valueDeserializer - ); + return verifyAvailableRows(topic, hasSize(expectedCount), valueFormat, schema); } /** @@ -315,13 +318,14 @@ public List> verifyAvailableRows( * @param schema the schema of the value. * @return the list of consumed records. */ - public List> verifyAvailableRows( + public List> verifyAvailableRows( final String topic, - final Matcher>> expected, + final Matcher>> expected, final Format valueFormat, final PhysicalSchema schema ) { - return verifyAvailableRows(topic, expected, valueFormat, schema, new StringDeserializer()); + final Deserializer keyDeserializer = getKeyDeserializer(schema); + return verifyAvailableRows(topic, expected, valueFormat, schema, keyDeserializer); } /** @@ -367,18 +371,15 @@ public List> verifyAvailableRows( final Deserializer keyDeserializer, final Duration timeout ) { - final Deserializer valueDeserializer = - getDeserializer(valueFormat, schema); + final Deserializer valueDeserializer = getValueDeserializer(valueFormat, schema); - try (KafkaConsumer consumer = new KafkaConsumer<>( - kafkaCluster.consumerConfig(), + return kafkaCluster.verifyAvailableRecords( + topic, + expected, keyDeserializer, - valueDeserializer - )) { - consumer.subscribe(Collections.singleton(topic)); - - return ConsumerTestUtil.verifyAvailableRecords(consumer, expected, timeout); - } + valueDeserializer, + timeout + ); } /** @@ -390,14 +391,68 @@ public List> verifyAvailableRows( * @param schema the schema of the value. * @return the list of consumed records. 
 */
-  public Map verifyAvailableUniqueRows(
       final String topic,
       final int expectedCount,
       final Format valueFormat,
       final PhysicalSchema schema
   ) {
+    return verifyAvailableUniqueRows(topic, is(expectedCount), valueFormat, schema);
+  }
+
+  /**
+   * Verify there are {@code expectedCount} unique rows available on the supplied {@code topic}.
+   *
+   * @param topic the name of the topic to check.
+   * @param expectedCount the expected number of records.
+   * @param valueFormat the format of the value.
+   * @param schema the schema of the value.
+   * @return the map of consumed records.
+   */
+  public Map verifyAvailableUniqueRows(
+      final String topic,
+      final Matcher expectedCount,
+      final Format valueFormat,
+      final PhysicalSchema schema
+  ) {
+    final Deserializer keyDeserializer = getKeyDeserializer(schema);
+    final Deserializer valueDeserializer = getValueDeserializer(valueFormat, schema);
+
     return verifyAvailableUniqueRows(
-        topic, expectedCount, valueFormat, schema, new StringDeserializer());
+        topic,
+        expectedCount,
+        keyDeserializer,
+        valueDeserializer
+    );
+  }
+
+  /**
+   * Verify there are {@code expectedCount} unique rows available on the supplied {@code topic}.
+   *
+   * @param topic the name of the topic to check.
+   * @param expectedCount the expected number of records.
+   * @param keyDeserializer the key deserializer to use.
+   * @param valueDeserializer the value deserializer to use.
+   * @return the map of consumed records.
+   */
+  public Map verifyAvailableUniqueRows(
+      final String topic,
+      final Matcher expectedCount,
+      final Deserializer keyDeserializer,
+      final Deserializer valueDeserializer
+  ) {
+    try (KafkaConsumer consumer = new KafkaConsumer<>(
+        kafkaCluster.consumerConfig(),
+        keyDeserializer,
+        valueDeserializer
+    )) {
+      consumer.subscribe(Collections.singleton(topic));
+
+      final List> consumerRecords = ConsumerTestUtil
+          .verifyAvailableRecords(consumer, hasUniqueRecords(mapHasSize(expectedCount)));
+
+      return toUniqueRecords(consumerRecords);
+    }
   }

   /**
@@ -440,8 +495,7 @@ public Map verifyAvailableUniqueRows(
       final PhysicalSchema schema,
       final Deserializer keyDeserializer
   ) {
-    final Deserializer valueDeserializer =
-        getDeserializer(valueFormat, schema);
+    final Deserializer valueDeserializer = getValueDeserializer(valueFormat, schema);

     try (KafkaConsumer consumer = new KafkaConsumer<>(
         kafkaCluster.consumerConfig(),
@@ -523,7 +577,14 @@ protected void after() {
     kafkaCluster.stop();
   }

-  private Serializer getSerializer(
+  @SuppressWarnings({"unchecked", "rawtypes"})
+  private static Serializer getKeySerializer(final PhysicalSchema schema) {
+    return (Serializer) KafkaSerdeFactory
+        .getPrimitiveSerde(schema.keySchema().ksqlSchema())
+        .serializer();
+  }
+
+  private Serializer getValueSerializer(
       final Format format,
       final PhysicalSchema schema
   ) {
@@ -537,7 +598,16 @@ private Serializer getSerializer(
     ).serializer();
   }

-  private Deserializer getDeserializer(
+  @SuppressWarnings({"unchecked", "rawtypes"})
+  private static Deserializer getKeyDeserializer(
+      final PhysicalSchema schema
+  ) {
+    return (Deserializer) KafkaSerdeFactory
+        .getPrimitiveSerde(schema.keySchema().ksqlSchema())
+        .deserializer();
+  }
+
+  private Deserializer getValueDeserializer(
       final Format format,
       final PhysicalSchema schema
   ) {
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/JsonFormatTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/JsonFormatTest.java
index 935da945bda4..cb1a96d33a9c 100644
---
a/ksql-engine/src/test/java/io/confluent/ksql/integration/JsonFormatTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/JsonFormatTest.java @@ -15,6 +15,7 @@ package io.confluent.ksql.integration; +import static io.confluent.ksql.serde.Format.JSON; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; @@ -40,13 +41,10 @@ import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.services.ServiceContextFactory; -import io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.OrderDataProvider; import io.confluent.ksql.util.PersistentQueryMetadata; import io.confluent.ksql.util.QueryMetadata; -import io.confluent.ksql.util.TopicConsumer; -import io.confluent.ksql.util.TopicProducer; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -54,7 +52,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import kafka.zookeeper.ZooKeeperClientException; -import org.apache.kafka.common.serialization.StringDeserializer; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -72,29 +69,27 @@ public class JsonFormatTest { private static final String messageLogStream = "message_log"; private static final AtomicInteger COUNTER = new AtomicInteger(); - private static final EmbeddedSingleNodeKafkaCluster CLUSTER = EmbeddedSingleNodeKafkaCluster.build(); + public static final IntegrationTestHarness TEST_HARNESS = IntegrationTestHarness.build(); @ClassRule public static final RuleChain CLUSTER_WITH_RETRY = RuleChain .outerRule(Retry.of(3, ZooKeeperClientException.class, 3, TimeUnit.SECONDS)) - .around(CLUSTER); + .around(TEST_HARNESS); private MetaStore metaStore; private KsqlConfig ksqlConfig; private KsqlEngine ksqlEngine; private ServiceContext serviceContext; - private final TopicProducer topicProducer = new TopicProducer(CLUSTER); - private final TopicConsumer topicConsumer = new TopicConsumer(CLUSTER); private QueryId queryId; private KafkaTopicClient topicClient; private String streamName; @Before - public void before() throws Exception { + public void before() { streamName = "STREAM_" + COUNTER.getAndIncrement(); - ksqlConfig = KsqlConfigTestUtil.create(CLUSTER); + ksqlConfig = KsqlConfigTestUtil.create(TEST_HARNESS.kafkaBootstrapServers()); serviceContext = ServiceContextFactory.create(ksqlConfig, DisabledKsqlClient::instance); ksqlEngine = new KsqlEngine( @@ -118,11 +113,10 @@ private void createInitTopics() { topicClient.createTopic(messageLogTopic, 1, (short) 1); } - private void produceInitData() throws Exception { + private static void produceInitData() { final OrderDataProvider orderDataProvider = new OrderDataProvider(); - topicProducer - .produceInputData(inputTopic, orderDataProvider.data(), orderDataProvider.schema()); + TEST_HARNESS.produceRows(inputTopic, orderDataProvider, JSON); final LogicalSchema messageSchema = LogicalSchema.builder() .valueColumn(ColumnName.of("MESSAGE"), SqlTypes.STRING) @@ -140,7 +134,7 @@ private void produceInitData() throws Exception { SerdeOption.none() ); - topicProducer.produceInputData(messageLogTopic, records, schema); + TEST_HARNESS.produceRows(messageLogTopic, records, schema, JSON); } private void execInitCreateStreamQueries() { @@ -260,7 +254,8 @@ private Map readNormalResults( source.getSerdeOptions() ); - return topicConsumer.readResults(resultTopic, 
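        // (Editorial note) readResults came from the now-deleted TopicConsumer helper;
        // the TEST_HARNESS.verifyAvailableUniqueRows call below is its replacement.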
resultSchema, expectedNumMessages, new StringDeserializer()); + return TEST_HARNESS + .verifyAvailableUniqueRows(resultTopic, expectedNumMessages, JSON, resultSchema); } private void terminateQuery() { diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/SecureIntegrationTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/SecureIntegrationTest.java index a089d702c815..89504cc3888e 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/SecureIntegrationTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/SecureIntegrationTest.java @@ -15,6 +15,7 @@ package io.confluent.ksql.integration; +import static io.confluent.ksql.serde.Format.JSON; import static io.confluent.ksql.test.util.AssertEventually.assertThatEventually; import static io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster.VALID_USER1; import static io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster.VALID_USER2; @@ -57,8 +58,6 @@ import io.confluent.ksql.util.OrderDataProvider; import io.confluent.ksql.util.PersistentQueryMetadata; import io.confluent.ksql.util.QueryMetadata; -import io.confluent.ksql.util.TopicConsumer; -import io.confluent.ksql.util.TopicProducer; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -96,35 +95,37 @@ public class SecureIntegrationTest { private static final Credentials NORMAL_USER = VALID_USER2; private static final AtomicInteger COUNTER = new AtomicInteger(0); - private static final EmbeddedSingleNodeKafkaCluster SECURE_CLUSTER = - EmbeddedSingleNodeKafkaCluster.newBuilder() - .withoutPlainListeners() - .withSaslSslListeners() - .withSslListeners() - .withAclsEnabled(SUPER_USER.username) - .build(); + public static final IntegrationTestHarness TEST_HARNESS = IntegrationTestHarness + .builder() + .withKafkaCluster( + EmbeddedSingleNodeKafkaCluster.newBuilder() + .withoutPlainListeners() + .withSaslSslListeners() + .withSslListeners() + .withAclsEnabled(SUPER_USER.username) + ) + .build(); @ClassRule public static final RuleChain CLUSTER_WITH_RETRY = RuleChain .outerRule(Retry.of(3, ZooKeeperClientException.class, 3, TimeUnit.SECONDS)) - .around(SECURE_CLUSTER); + .around(TEST_HARNESS); private QueryId queryId; private KsqlConfig ksqlConfig; private KsqlEngine ksqlEngine; - private final TopicProducer topicProducer = new TopicProducer(SECURE_CLUSTER); private KafkaTopicClient topicClient; private String outputTopic; private Admin adminClient; private ServiceContext serviceContext; @Before - public void before() throws Exception { - SECURE_CLUSTER.clearAcls(); + public void before() { + TEST_HARNESS.getKafkaCluster().clearAcls(); outputTopic = "TEST_" + COUNTER.incrementAndGet(); - adminClient = AdminClient.create(new KsqlConfig(getKsqlConfig(SUPER_USER)) - .getKsqlAdminClientConfigProps()); + adminClient = AdminClient.create(new KsqlConfig(getKsqlConfig(SUPER_USER)) + .getKsqlAdminClientConfigProps()); topicClient = new KafkaTopicClientImpl(() -> adminClient); produceInitData(); @@ -169,7 +170,7 @@ public void shouldRunQueryAgainstKafkaClusterOverSsl() throws Exception { final Map configs = getBaseKsqlConfig(); configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, - SECURE_CLUSTER.bootstrapServers(SecurityProtocol.SSL)); + TEST_HARNESS.getKafkaCluster().bootstrapServers(SecurityProtocol.SSL)); // Additional Properties required for KSQL to talk to cluster over SSL: configs.put("security.protocol", "SSL"); @@ -261,7 +262,8 @@ public void shouldRunQueryAgainstSecureSchemaRegistry() throws 
Exception { private static void givenAllowAcl(final Credentials credentials, final ResourcePattern resource, final Set ops) { - SECURE_CLUSTER.addUserAcl(credentials.username, AclPermissionType.ALLOW, resource, ops); + TEST_HARNESS.getKafkaCluster() + .addUserAcl(credentials.username, AclPermissionType.ALLOW, resource, ops); } private void givenTestSetupWithConfig(final Map ksqlConfigs) { @@ -300,13 +302,15 @@ private void assertCanRunKsqlQuery( is(true) ); - final TopicConsumer consumer = new TopicConsumer(SECURE_CLUSTER); - consumer.verifyRecordsReceived(outputTopic, greaterThan(0)); + TEST_HARNESS.verifyAvailableRecords(outputTopic, greaterThan(0)); } private static Map getBaseKsqlConfig() { final Map configs = new HashMap<>(KsqlConfigTestUtil.baseTestConfig()); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, SECURE_CLUSTER.bootstrapServers()); + configs.put( + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, + TEST_HARNESS.getKafkaCluster().bootstrapServers() + ); // Additional Properties required for KSQL to talk to test secure cluster, // where SSL cert not properly signed. (Not required for proper cluster). @@ -331,8 +335,7 @@ private void produceInitData() { final OrderDataProvider orderDataProvider = new OrderDataProvider(); - topicProducer - .produceInputData(INPUT_TOPIC, orderDataProvider.data(), orderDataProvider.schema()); + TEST_HARNESS.produceRows(INPUT_TOPIC, orderDataProvider, JSON); } private void awaitAsyncInputTopicCreation() { diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/StreamsSelectAndProjectIntTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/StreamsSelectAndProjectIntTest.java index 35dac0d8a615..7363c1eacdf0 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/StreamsSelectAndProjectIntTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/StreamsSelectAndProjectIntTest.java @@ -70,8 +70,8 @@ public class StreamsSelectAndProjectIntTest { private String avroTopicName; private String intermediateStream; private String resultStream; - private Map producedAvroRecords; - private Map producedJsonRecords; + private Map producedAvroRecords; + private Map producedJsonRecords; @Before public void before() { @@ -168,7 +168,7 @@ public void shouldUseStringTimestampWithFormat() throws Exception { + " CREATE STREAM " + resultStream + " AS" + " SELECT ORDERID, TIMESTAMP from " + intermediateStream + ";"); - final List> records = + final List> records = TEST_HARNESS.verifyAvailableRecords(resultStream.toUpperCase(), 1); final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); @@ -182,7 +182,7 @@ public void shouldUseTimestampExtractedFromDDLStatement() throws Exception { + " AS SELECT ORDERID, ORDERTIME FROM " + AVRO_TIMESTAMP_STREAM_NAME + " WHERE ITEMID='ITEM_4';"); - final List> records = + final List> records = TEST_HARNESS.verifyAvailableRecords(resultStream.toUpperCase(), 1); final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); @@ -193,7 +193,7 @@ public void shouldUseTimestampExtractedFromDDLStatement() throws Exception { private void testTimestampColumnSelection( final String inputStreamName, final Format dataSourceSerDe, - final Map recordMetadataMap + final Map recordMetadataMap ) { final String query1String = String.format("CREATE STREAM %s WITH (timestamp='RTIME') AS SELECT ROWKEY AS RKEY, " @@ -206,18 +206,17 @@ private void testTimestampColumnSelection( + "FROM %s ;", intermediateStream, inputStreamName, resultStream, intermediateStream); - 
ksqlContext.sql(query1String); - final Map expectedResults = new HashMap<>(); - expectedResults.put("8", + final Map expectedResults = new HashMap<>(); + expectedResults.put(8L, new GenericRow(Arrays.asList(null, null, "8", - recordMetadataMap.get("8").timestamp() + 10000, + recordMetadataMap.get(8L).timestamp() + 10000, "8", - recordMetadataMap.get("8").timestamp() + 10000, - recordMetadataMap.get("8").timestamp() + 100, + recordMetadataMap.get(8L).timestamp() + 10000, + recordMetadataMap.get(8L).timestamp() + 100, "ORDER_6", "ITEM_8"))); @@ -230,22 +229,23 @@ private void testTimestampColumnSelection( private void testSelectProjectKeyTimestamp( final String inputStreamName, - final Format dataSourceSerDe, - final Map recordMetadataMap + final Format valueFormat, + final Map recordMetadataMap ) { ksqlContext.sql(String.format("CREATE STREAM %s AS SELECT ROWKEY AS RKEY, ROWTIME " + "AS RTIME, ITEMID FROM %s WHERE ORDERUNITS > 20 AND ITEMID = " + "'ITEM_8';", resultStream, inputStreamName)); - final List> results = TEST_HARNESS.verifyAvailableRows( + final List> results = TEST_HARNESS.verifyAvailableRows( resultStream.toUpperCase(), 1, - dataSourceSerDe, - getResultSchema()); + valueFormat, + getResultSchema() + ); - assertThat(results.get(0).key(), is("8")); + assertThat(results.get(0).key(), is(8L)); assertThat(results.get(0).value(), is(new GenericRow( - ImmutableList.of("8", recordMetadataMap.get("8").timestamp(), "ITEM_8")))); + ImmutableList.of(8L, recordMetadataMap.get(8L).timestamp(), "ITEM_8")))); } private void testSelectProject( @@ -286,17 +286,18 @@ public void testSelectProjectAvroJson() { private void testSelectStar( final String inputStreamName, - final Format dataSourceSerDe + final Format valueFormat ) { ksqlContext.sql(String.format("CREATE STREAM %s AS SELECT * FROM %s;", resultStream, inputStreamName)); - final Map results = TEST_HARNESS.verifyAvailableUniqueRows( + final Map results = TEST_HARNESS.verifyAvailableUniqueRows( resultStream.toUpperCase(), DATA_PROVIDER.data().size(), - dataSourceSerDe, - DATA_PROVIDER.schema()); + valueFormat, + DATA_PROVIDER.schema() + ); assertThat(results, is(DATA_PROVIDER.data())); } @@ -357,11 +358,12 @@ public void testInsertSelectStarJson() { ksqlContext.sql("INSERT INTO " + resultStream + " SELECT * FROM " + JSON_STREAM_NAME + ";"); - final Map results = TEST_HARNESS.verifyAvailableUniqueRows( + final Map results = TEST_HARNESS.verifyAvailableUniqueRows( resultStream.toUpperCase(), DATA_PROVIDER.data().size(), JSON, - DATA_PROVIDER.schema()); + DATA_PROVIDER.schema() + ); assertThat(results, is(DATA_PROVIDER.data())); } @@ -373,11 +375,12 @@ public void testInsertSelectStarAvro() { ksqlContext.sql("INSERT INTO " + resultStream + " SELECT * FROM " + AVRO_STREAM_NAME + ";"); - final Map results = TEST_HARNESS.verifyAvailableUniqueRows( + final Map results = TEST_HARNESS.verifyAvailableUniqueRows( resultStream.toUpperCase(), DATA_PROVIDER.data().size(), AVRO, - DATA_PROVIDER.schema()); + DATA_PROVIDER.schema() + ); assertThat(results, is(DATA_PROVIDER.data())); } @@ -415,21 +418,11 @@ private PhysicalSchema getResultSchema() { .getMetaStore() .getSource(SourceName.of(resultStream.toUpperCase())); - return PhysicalSchema.from( - source.getSchema().withoutMetaAndKeyColsInValue(), - source.getSerdeOptions() - ); + return PhysicalSchema.from(source.getSchema(), source.getSerdeOptions()); } private void createOrdersStream() { - final String columns = "" - + "ORDERTIME bigint, " - + "ORDERID varchar, " - + "ITEMID varchar, " - + "ORDERUNITS 
double, " - + "TIMESTAMP varchar, " - + "PRICEARRAY array," - + " KEYVALUEMAP map"; + final String columns = DATA_PROVIDER.ksqlSchemaString(); ksqlContext.sql("CREATE STREAM " + JSON_STREAM_NAME + " (" + columns + ") WITH " + "(kafka_topic='" + jsonTopicName + "', value_format='JSON');"); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/UdfIntTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/UdfIntTest.java index 984d42265177..329203ff1fc9 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/UdfIntTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/UdfIntTest.java @@ -63,8 +63,8 @@ public class UdfIntTest { private static final String DELIMITED_TOPIC_NAME = "delimitedTopic"; private static final String DELIMITED_STREAM_NAME = "items_delimited"; - private static Map jsonRecordMetadataMap; - private static Map avroRecordMetadataMap; + private static Map jsonRecordMetadataMap; + private static Map avroRecordMetadataMap; private static final IntegrationTestHarness TEST_HARNESS = IntegrationTestHarness.build(); @@ -151,7 +151,7 @@ public void testApplyUdfsToColumns() { // Then: final Map results = consumeOutputMessages(); - assertThat(results, is(ImmutableMap.of("8", + assertThat(results, is(ImmutableMap.of(8L, new GenericRow(Arrays.asList("ITEM_8", 800.0, 1110.0, 12.0, true))))); } @@ -177,7 +177,7 @@ public void testShouldCastSelectedColumns() { // Then: final Map results = consumeOutputMessages(); - assertThat(results, is(ImmutableMap.of("8", + assertThat(results, is(ImmutableMap.of(8L, new GenericRow(Arrays.asList(80, "true", 8.0, "80.0"))))); } @@ -200,12 +200,12 @@ public void testTimestampColumnSelection() { ksqlContext.sql(queryString); // Then: - final Map results = consumeOutputMessages(); + final Map results = consumeOutputMessages(); - final long ts = testData.recordMetadata.get("8").timestamp(); + final long ts = testData.recordMetadata.get(8L).timestamp(); - assertThat(results, equalTo(ImmutableMap.of("8", - new GenericRow(Arrays.asList("8", ts, "8", ts + 10000, ts + 100, "ORDER_6", "ITEM_8"))))); + assertThat(results, equalTo(ImmutableMap.of(8L, + new GenericRow(Arrays.asList(8L, ts, 8, ts + 10000, ts + 100, "ORDER_6", "ITEM_8"))))); } @Test @@ -231,11 +231,13 @@ public void testApplyUdfsToColumnsDelimited() { private void createSourceStream() { if (testData.format == DELIMITED) { // Delimited does not support array or map types, so use simplier schema: - ksqlContext.sql(String.format("CREATE STREAM %s (ID varchar, DESCRIPTION varchar) WITH " + ksqlContext.sql(String.format("CREATE STREAM %s " + + "(ROWKEY STRING KEY, ID varchar, DESCRIPTION varchar) WITH " + "(kafka_topic='%s', value_format='%s');", testData.sourceStreamName, testData.sourceTopicName, testData.format.name())); } else { ksqlContext.sql(String.format("CREATE STREAM %s (" + + "ROWKEY BIGINT KEY, " + "ORDERTIME bigint, " + "ORDERID varchar, " + "ITEMID varchar, " @@ -248,7 +250,7 @@ private void createSourceStream() { } } - private Map consumeOutputMessages() { + private Map consumeOutputMessages() { final DataSource source = ksqlContext .getMetaStore() @@ -271,13 +273,13 @@ private static class TestData { private final Format format; private final String sourceStreamName; private final String sourceTopicName; - private final Map recordMetadata; + private final Map recordMetadata; private TestData( final Format format, final String sourceTopicName, final String sourceStreamName, - final Map recordMetadata) { + final Map recordMetadata) { 
this.format = format; this.sourceStreamName = sourceStreamName; this.sourceTopicName = sourceTopicName; diff --git a/ksql-engine/src/test/java/io/confluent/ksql/materialization/ks/KsMaterializationFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/materialization/ks/KsMaterializationFunctionalTest.java index 459b12e71a81..cc32b451d620 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/materialization/ks/KsMaterializationFunctionalTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/materialization/ks/KsMaterializationFunctionalTest.java @@ -578,8 +578,8 @@ private static LogicalSchema schema( private static void initializeKsql(final TestKsqlContext ksqlContext) { ksqlContext.ensureStarted(); - ksqlContext.sql("CREATE TABLE " + USER_TABLE + " " - + USER_DATA_PROVIDER.ksqlSchemaString() + ksqlContext.sql("CREATE TABLE " + USER_TABLE + + " (" + USER_DATA_PROVIDER.ksqlSchemaString() + ")" + " WITH (" + " kafka_topic='" + USERS_TOPIC + "', " + " value_format='" + VALUE_FORMAT + "', " @@ -588,7 +588,7 @@ private static void initializeKsql(final TestKsqlContext ksqlContext) { ); ksqlContext.sql("CREATE STREAM " + USER_STREAM + " " - + USER_DATA_PROVIDER.ksqlSchemaString() + + " (" + USER_DATA_PROVIDER.ksqlSchemaString() + ")" + " WITH (" + " kafka_topic='" + USERS_TOPIC + "', " + " value_format='" + VALUE_FORMAT + "', " diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/ItemDataProvider.java b/ksql-engine/src/test/java/io/confluent/ksql/util/ItemDataProvider.java index 5d3b3d52febf..80434656b37c 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/ItemDataProvider.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/ItemDataProvider.java @@ -15,6 +15,7 @@ package io.confluent.ksql.util; +import com.google.common.collect.ImmutableMap; import io.confluent.ksql.GenericRow; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.schema.ksql.LogicalSchema; @@ -22,43 +23,29 @@ import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.SerdeOption; import java.util.Arrays; -import java.util.HashMap; import java.util.Map; -public class ItemDataProvider extends TestDataProvider { +public class ItemDataProvider extends TestDataProvider { - private static final String namePrefix = - "ITEM"; - - private static final String ksqlSchemaString = - "(ID varchar, DESCRIPTION varchar)"; - - private static final String key = "ID"; - - private static final LogicalSchema schema = LogicalSchema.builder() + private static final LogicalSchema LOGICAL_SCHEMA = LogicalSchema.builder() .valueColumn(ColumnName.of("ID"), SqlTypes.STRING) .valueColumn(ColumnName.of("DESCRIPTION"), SqlTypes.STRING) .build(); - - private static final Map data = buildData(); + private static final PhysicalSchema PHYSICAL_SCHEMA = PhysicalSchema + .from(LOGICAL_SCHEMA, SerdeOption.none()); + + private static final Map ROWS = ImmutableMap.builder() + .put("ITEM_1", new GenericRow(Arrays.asList("ITEM_1", "home cinema"))) + .put("ITEM_2", new GenericRow(Arrays.asList("ITEM_2", "clock radio"))) + .put("ITEM_3", new GenericRow(Arrays.asList("ITEM_3", "road bike"))) + .put("ITEM_4", new GenericRow(Arrays.asList("ITEM_4", "mountain bike"))) + .put("ITEM_5", new GenericRow(Arrays.asList("ITEM_5", "snowboard"))) + .put("ITEM_6", new GenericRow(Arrays.asList("ITEM_6", "iphone 10"))) + .put("ITEM_7", new GenericRow(Arrays.asList("ITEM_7", "gopro"))) + .put("ITEM_8", new GenericRow(Arrays.asList("ITEM_8", "cat"))) + .build(); public ItemDataProvider() { - 
super(namePrefix, ksqlSchemaString, key, PhysicalSchema.from(schema, SerdeOption.none()), data); + super("ITEM", "ID", PHYSICAL_SCHEMA, ROWS); } - - private static Map buildData() { - - final Map dataMap = new HashMap<>(); - dataMap.put("ITEM_1", new GenericRow(Arrays.asList("ITEM_1", "home cinema"))); - dataMap.put("ITEM_2", new GenericRow(Arrays.asList("ITEM_2", "clock radio"))); - dataMap.put("ITEM_3", new GenericRow(Arrays.asList("ITEM_3", "road bike"))); - dataMap.put("ITEM_4", new GenericRow(Arrays.asList("ITEM_4", "mountain bike"))); - dataMap.put("ITEM_5", new GenericRow(Arrays.asList("ITEM_5", "snowboard"))); - dataMap.put("ITEM_6", new GenericRow(Arrays.asList("ITEM_6", "iphone 10"))); - dataMap.put("ITEM_7", new GenericRow(Arrays.asList("ITEM_7", "gopro"))); - dataMap.put("ITEM_8", new GenericRow(Arrays.asList("ITEM_8", "cat"))); - - return dataMap; - } - } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/OrderDataProvider.java b/ksql-engine/src/test/java/io/confluent/ksql/util/OrderDataProvider.java index 8108985fbd0a..5764e722ed75 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/OrderDataProvider.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/OrderDataProvider.java @@ -15,27 +15,20 @@ package io.confluent.ksql.util; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import io.confluent.ksql.GenericRow; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.SerdeOption; -import java.util.Arrays; -import java.util.HashMap; import java.util.Map; -public class OrderDataProvider extends TestDataProvider { +public class OrderDataProvider extends TestDataProvider { - private static final String namePrefix = - "ORDER"; - - private static final String ksqlSchemaString = - "(ORDERTIME bigint, ORDERID varchar, ITEMID varchar, ORDERUNITS double, TIMESTAMP varchar, PRICEARRAY array, KEYVALUEMAP map)"; - - private static final String key = "ORDERTIME"; - - private static final LogicalSchema schema = LogicalSchema.builder() + private static final LogicalSchema LOGICAL_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("ORDERTIME"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ORDERID"), SqlTypes.STRING) .valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING) @@ -45,92 +38,106 @@ public class OrderDataProvider extends TestDataProvider { .valueColumn(ColumnName.of("KEYVALUEMAP"), SqlTypes.map(SqlTypes.DOUBLE)) .build(); - private static final Map data = buildData(); + private static final PhysicalSchema PHYSICAL_SCHEMA = PhysicalSchema + .from(LOGICAL_SCHEMA, SerdeOption.none()); + + private static final Map MAP_FIELD = ImmutableMap.of( + "key1", 1.0, + "key2", 2.0, + "key3", 3.0 + ); + + private static final Map ROWS = ImmutableMap.builder() + .put( + 1L, + new GenericRow(ImmutableList.of( + 1L, + "ORDER_1", + "ITEM_1", + 10.0, + "2018-01-01", + ImmutableList.of(100.0, 110.99, 90.0), + MAP_FIELD + ))) + .put( + 2L, + new GenericRow(ImmutableList.of( + 2L, + "ORDER_2", + "ITEM_2", + 20.0, + "2018-01-02", + ImmutableList.of(10.0, 10.99, 9.0), + MAP_FIELD + ))) + .put( + 3L, + new GenericRow(ImmutableList.of( + 3L, + "ORDER_3", + "ITEM_3", + 30.0, + "2018-01-03", + ImmutableList.of(10.0, 10.99, 91.0), + MAP_FIELD + ))) + .put( + 4L, + new GenericRow(ImmutableList.of( + 4L, + 
"ORDER_4", + "ITEM_4", + 40.0, + "2018-01-04", + ImmutableList.of(10.0, 140.99, 94.0), + MAP_FIELD))) + .put( + 5L, + new GenericRow(ImmutableList.of( + 5L, + "ORDER_5", + "ITEM_5", + 50.0, + "2018-01-05", + ImmutableList.of(160.0, 160.99, 98.0), + MAP_FIELD + ))) + .put( + 6L, + new GenericRow(ImmutableList.of( + 6L, + "ORDER_6", + "ITEM_6", + 60.0, + "2018-01-06", + ImmutableList.of(1000.0, 1100.99, 900.0), + MAP_FIELD + ))) + .put( + 7L, + new GenericRow(ImmutableList.of( + 7L, + "ORDER_6", + "ITEM_7", + 70.0, + "2018-01-07", + ImmutableList.of(1100.0, 1110.99, 190.0), + MAP_FIELD + ))) + .put( + 8L, + new GenericRow(ImmutableList.of( + 8L, + "ORDER_6", + "ITEM_8", + 80.0, + "2018-01-08", + ImmutableList.of(1100.0, 1110.99, 970.0), + MAP_FIELD + ))) + .build(); public OrderDataProvider() { - super(namePrefix, ksqlSchemaString, key, PhysicalSchema.from(schema, SerdeOption.none()), data); - } - - private static Map buildData() { - - final Map mapField = new HashMap<>(); - mapField.put("key1", 1.0); - mapField.put("key2", 2.0); - mapField.put("key3", 3.0); - - final Map dataMap = new HashMap<>(); - dataMap.put("1", new GenericRow(Arrays.asList( - 1L, - "ORDER_1", - "ITEM_1", - 10.0, - "2018-01-01", - Arrays.asList(100.0, 110.99, 90.0 ), - mapField))); - dataMap.put("2", new GenericRow(Arrays.asList( - 2L, - "ORDER_2", - "ITEM_2", - 20.0, - "2018-01-02", - Arrays.asList(10.0, 10.99, 9.0), - mapField))); - - dataMap.put("3", new GenericRow(Arrays.asList( - 3L, - "ORDER_3", - "ITEM_3", - 30.0, - "2018-01-03", - Arrays.asList(10.0, 10.99, 91.0), - mapField))); - - dataMap.put("4", new GenericRow(Arrays.asList( - 4L, - "ORDER_4", - "ITEM_4", - 40.0, - "2018-01-04", - Arrays.asList(10.0, 140.99, 94.0), - mapField))); - - dataMap.put("5", new GenericRow(Arrays.asList( - 5L, - "ORDER_5", - "ITEM_5", - 50.0, - "2018-01-05", - Arrays.asList(160.0, 160.99, 98.0), - mapField))); - - dataMap.put("6", new GenericRow(Arrays.asList( - 6L, - "ORDER_6", - "ITEM_6", - 60.0, - "2018-01-06", - Arrays.asList(1000.0, 1100.99, 900.0), - mapField))); - - dataMap.put("7", new GenericRow(Arrays.asList( - 7L, - "ORDER_6", - "ITEM_7", - 70.0, - "2018-01-07", - Arrays.asList(1100.0, 1110.99, 190.0), - mapField))); - - dataMap.put("8", new GenericRow(Arrays.asList( - 8L, - "ORDER_6", - "ITEM_8", - 80.0, - "2018-01-08", - Arrays.asList(1100.0, 1110.99, 970.0), - mapField))); - - return dataMap; + super("ORDER", "ORDERTIME", PHYSICAL_SCHEMA, ROWS); } - } \ No newline at end of file diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/PageViewDataProvider.java b/ksql-engine/src/test/java/io/confluent/ksql/util/PageViewDataProvider.java index 2b5c7ffdf45e..408bf1d08ed6 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/PageViewDataProvider.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/PageViewDataProvider.java @@ -14,53 +14,40 @@ */ package io.confluent.ksql.util; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import io.confluent.ksql.GenericRow; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.SerdeOption; -import java.util.Arrays; -import java.util.HashMap; import java.util.Map; -public class PageViewDataProvider extends TestDataProvider { - private static final String namePrefix = - "PAGEVIEW"; +public class PageViewDataProvider extends TestDataProvider { - 
private static final String ksqlSchemaString = "(VIEWTIME bigint, USERID varchar, PAGEID varchar)";
-
-  private static final String key = "VIEWTIME";
-
-  private static final LogicalSchema schema = LogicalSchema.builder()
+  private static final LogicalSchema LOGICAL_SCHEMA = LogicalSchema.builder()
+      .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT)
       .valueColumn(ColumnName.of("VIEWTIME"), SqlTypes.BIGINT)
       .valueColumn(ColumnName.of("USERID"), SqlTypes.STRING)
       .valueColumn(ColumnName.of("PAGEID"), SqlTypes.STRING)
       .build();

-  private static final Map data = buildData();
+  private static final PhysicalSchema PHYSICAL_SCHEMA = PhysicalSchema
+      .from(LOGICAL_SCHEMA, SerdeOption.none());
+
+  private static final Map ROWS = ImmutableMap.builder()
+      .put(1L, new GenericRow(ImmutableList.of(1L, "USER_1", "PAGE_1")))
+      .put(2L, new GenericRow(ImmutableList.of(2L, "USER_2", "PAGE_2")))
+      .put(3L, new GenericRow(ImmutableList.of(3L, "USER_4", "PAGE_3")))
+      .put(4L, new GenericRow(ImmutableList.of(4L, "USER_3", "PAGE_4")))
+      .put(5L, new GenericRow(ImmutableList.of(5L, "USER_0", "PAGE_5")))
+      // Duplicate page views from different users.
+      .put(6L, new GenericRow(ImmutableList.of(6L, "USER_2", "PAGE_5")))
+      .put(7L, new GenericRow(ImmutableList.of(7L, "USER_3", "PAGE_5")))
+      .build();

   public PageViewDataProvider() {
-    super(namePrefix, ksqlSchemaString, key, PhysicalSchema.from(schema, SerdeOption.none()), data);
+    super("PAGEVIEW", "VIEWTIME", PHYSICAL_SCHEMA, ROWS);
   }
-
-  private static Map buildData() {
-    final Map dataMap = new HashMap<>();
-
-    // Create page view records with:
-    //  key = page_id
-    //  value = (view time, user_id, page_id)
-    dataMap.put("1", new GenericRow(Arrays.asList(1L, "USER_1", "PAGE_1")));
-    dataMap.put("2", new GenericRow(Arrays.asList(2L, "USER_2", "PAGE_2")));
-    dataMap.put("3", new GenericRow(Arrays.asList(3L, "USER_4", "PAGE_3")));
-    dataMap.put("4", new GenericRow(Arrays.asList(4L, "USER_3", "PAGE_4")));
-    dataMap.put("5", new GenericRow(Arrays.asList(5L, "USER_0", "PAGE_5")));
-
-    // Duplicate page views from different users.
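    // (Editorial example) Rows from this provider are published through the test
    // harness elsewhere in this patch; a sketch, with the provider variable and
    // JSON format assumed as in the REST tests:
    //   TEST_HARNESS.produceRows(provider.topicName(), provider, Format.JSON);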
- dataMap.put("6", new GenericRow(Arrays.asList(6L, "USER_2", "PAGE_5"))); - dataMap.put("7", new GenericRow(Arrays.asList(7L, "USER_3", "PAGE_5"))); - - return dataMap; - } - } \ No newline at end of file diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/TestDataProvider.java b/ksql-engine/src/test/java/io/confluent/ksql/util/TestDataProvider.java index 82a4db3035a3..b2394ed12520 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/TestDataProvider.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/TestDataProvider.java @@ -20,24 +20,22 @@ import java.util.Map; import java.util.Objects; -public abstract class TestDataProvider { +public abstract class TestDataProvider { + private final String topicName; - private final String ksqlSchemaString; private final String key; private final PhysicalSchema schema; - private final Map data; + private final Map data; private final String kstreamName; TestDataProvider( final String namePrefix, - final String ksqlSchemaString, final String key, final PhysicalSchema schema, - final Map data + final Map data ) { this.topicName = Objects.requireNonNull(namePrefix, "namePrefix") + "_TOPIC"; this.kstreamName = namePrefix + "_KSTREAM"; - this.ksqlSchemaString = Objects.requireNonNull(ksqlSchemaString, "ksqlSchemaString"); this.key = Objects.requireNonNull(key, "key"); this.schema = Objects.requireNonNull(schema, "schema"); this.data = Objects.requireNonNull(data, "data"); @@ -48,7 +46,7 @@ public String topicName() { } public String ksqlSchemaString() { - return ksqlSchemaString; + return schema.logicalSchema().toString(); } public String key() { @@ -59,7 +57,7 @@ public PhysicalSchema schema() { return schema; } - public Map data() { + public Map data() { return data; } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/TopicConsumer.java b/ksql-engine/src/test/java/io/confluent/ksql/util/TopicConsumer.java deleted file mode 100644 index 0003b476e001..000000000000 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/TopicConsumer.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2018 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.util; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.hasSize; - -import com.google.common.collect.ImmutableMap; -import io.confluent.ksql.GenericRow; -import io.confluent.ksql.logging.processing.ProcessingLogContext; -import io.confluent.ksql.schema.ksql.PhysicalSchema; -import io.confluent.ksql.serde.Format; -import io.confluent.ksql.serde.FormatInfo; -import io.confluent.ksql.serde.GenericRowSerDe; -import io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster; -import java.time.Duration; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.Properties; -import java.util.UUID; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.common.serialization.ByteArrayDeserializer; -import org.apache.kafka.common.serialization.Deserializer; -import org.hamcrest.Matcher; - -public class TopicConsumer { - - private static final long RESULTS_POLL_MAX_TIME_MS = 30000; - private static final Duration RESULTS_EXTRA_POLL_TIME = Duration.ofMillis(250); - - private final EmbeddedSingleNodeKafkaCluster cluster; - private final ProcessingLogContext processingLogContext = ProcessingLogContext.create(); - - public TopicConsumer(final EmbeddedSingleNodeKafkaCluster cluster) { - this.cluster = cluster; - } - - public Map readResults( - final String topic, - final Matcher expectedNumMessages, - final Deserializer valueDeserializer, - final Deserializer keyDeserializer - ) { - final Map result = new HashMap<>(); - - final Properties consumerConfig = new Properties(); - consumerConfig.putAll(cluster.getClientProperties()); - consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); - consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - - try (KafkaConsumer consumer = - new KafkaConsumer<>(consumerConfig, keyDeserializer, valueDeserializer) - ) { - consumer.subscribe(Collections.singleton(topic)); - final long pollStart = System.currentTimeMillis(); - final long pollEnd = pollStart + RESULTS_POLL_MAX_TIME_MS; - while (System.currentTimeMillis() < pollEnd && !expectedNumMessages.matches(result.size())) { - for (final ConsumerRecord record : - consumer.poll(Duration.ofMillis(Math.max(1, pollEnd - System.currentTimeMillis())))) { - if (record.value() != null) { - result.put(record.key(), record.value()); - } - } - } - - for (final ConsumerRecord record : consumer.poll(RESULTS_EXTRA_POLL_TIME)) { - if (record.value() != null) { - result.put(record.key(), record.value()); - } - } - } - return result; - } - - public Map readResults( - final String topic, - final PhysicalSchema schema, - final int expectedNumMessages, - final Deserializer keyDeserializer - ) { - final Deserializer deserializer = GenericRowSerDe.from( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), - schema.valueSchema(), - new KsqlConfig(ImmutableMap.of()), - () -> null, - "consumer", - processingLogContext - ).deserializer(); - - return readResults( - topic, - greaterThanOrEqualTo(expectedNumMessages), - deserializer, - keyDeserializer - ); - } - - public void verifyRecordsReceived(final String topic, - final Matcher expectedNumMessages) { - verifyRecordsReceived( - topic, - expectedNumMessages, - new ByteArrayDeserializer(), - new 
ByteArrayDeserializer()); - } - - public Map verifyRecordsReceived(final String topic, - final Matcher expectedNumMessages, - final Deserializer valueDeserializer, - final Deserializer keyDeserializer) { - final Map records = - readResults(topic, expectedNumMessages, valueDeserializer, keyDeserializer); - - assertThat(records.keySet(), hasSize(expectedNumMessages)); - - return records; - } -} diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/TopicProducer.java b/ksql-engine/src/test/java/io/confluent/ksql/util/TopicProducer.java deleted file mode 100644 index 4a7fdba8ecca..000000000000 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/TopicProducer.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2018 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package io.confluent.ksql.util; - -import com.google.common.collect.ImmutableMap; -import io.confluent.ksql.GenericRow; -import io.confluent.ksql.logging.processing.NoopProcessingLogContext; -import io.confluent.ksql.schema.ksql.PhysicalSchema; -import io.confluent.ksql.serde.Format; -import io.confluent.ksql.serde.FormatInfo; -import io.confluent.ksql.serde.GenericRowSerDe; -import io.confluent.ksql.test.util.EmbeddedSingleNodeKafkaCluster; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.serialization.Serializer; -import org.apache.kafka.common.serialization.StringSerializer; - -public class TopicProducer { - - private static final long TEST_RECORD_FUTURE_TIMEOUT_MS = 5000; - - private final Map producerConfig; - - public TopicProducer(final EmbeddedSingleNodeKafkaCluster cluster) { - this.producerConfig = ImmutableMap.builder() - .putAll(cluster.getClientProperties()) - .put(ProducerConfig.ACKS_CONFIG, "all") - .put(ProducerConfig.RETRIES_CONFIG, 0) - .build(); - } - - /** - * Produce data to a topic - * @param topicName the name of the topic, (it will be automatically created if it doesn't exist) - * @param recordsToPublish map of key -> value to publish. - * @param schema the physical schema of the data. - * @return the map of key -> produced record metadata. 
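- * <p>(Editorial note) This helper class is deleted by the patch; its callers now
- * publish through IntegrationTestHarness#produceRows, as the test diffs above show.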
- */ - public Map produceInputData( - final String topicName, - final Map recordsToPublish, - final PhysicalSchema schema - ) { - - final Serializer serializer = GenericRowSerDe.from( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), - schema.valueSchema(), - new KsqlConfig(ImmutableMap.of()), - () -> null, - "ignored", - NoopProcessingLogContext.INSTANCE - ).serializer(); - - try (KafkaProducer producer = - new KafkaProducer<>(producerConfig, new StringSerializer(), serializer)) { - - final Map result = new HashMap<>(); - for (final Map.Entry recordEntry : recordsToPublish.entrySet()) { - final String key = recordEntry.getKey(); - final ProducerRecord producerRecord = new ProducerRecord<>(topicName, - key, recordEntry.getValue()); - final Future recordMetadataFuture = producer.send(producerRecord); - result.put(key, - recordMetadataFuture.get(TEST_RECORD_FUTURE_TIMEOUT_MS, TimeUnit.MILLISECONDS)); - } - - return result; - } catch (final Exception e) { - throw new RuntimeException("Failed to produce data", e); - } - } - - /** - * Produce input data to the topic named dataProvider.topicName() - */ - public Map produceInputData(final TestDataProvider dataProvider) { - return produceInputData(dataProvider.topicName(), dataProvider.data(), dataProvider.schema()); - } - -} diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/UserDataProvider.java b/ksql-engine/src/test/java/io/confluent/ksql/util/UserDataProvider.java index 69ec63848cc4..f9c6278a4275 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/UserDataProvider.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/UserDataProvider.java @@ -14,47 +14,37 @@ */ package io.confluent.ksql.util; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import io.confluent.ksql.GenericRow; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.SerdeOption; -import java.util.Arrays; -import java.util.HashMap; import java.util.Map; -public class UserDataProvider extends TestDataProvider { - private static final String namePrefix = "USER"; +public class UserDataProvider extends TestDataProvider { - private static final String ksqlSchemaString = "(REGISTERTIME bigint, GENDER varchar, REGIONID varchar, USERID varchar)"; - - private static final String key = "USERID"; - - private static final LogicalSchema schema = LogicalSchema.builder() + private static final LogicalSchema LOGICAL_SCHEMA = LogicalSchema.builder() .valueColumn(ColumnName.of("REGISTERTIME"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("GENDER"), SqlTypes.STRING) .valueColumn(ColumnName.of("REGIONID"), SqlTypes.STRING) .valueColumn(ColumnName.of("USERID"), SqlTypes.STRING) .build(); - private static final Map data = buildData(); + private static final PhysicalSchema PHYSICAL_SCHEMA = PhysicalSchema + .from(LOGICAL_SCHEMA, SerdeOption.none()); - public UserDataProvider() { - super(namePrefix, ksqlSchemaString, key, PhysicalSchema.from(schema, SerdeOption.none()), data); - } - - private static Map buildData() { - final Map dataMap = new HashMap<>(); - // create a records with: - // key == user_id - // value = (creation_time, gender, region, user_id) - dataMap.put("USER_0", new GenericRow(Arrays.asList(0L, "FEMALE", "REGION_0", "USER_0"))); - dataMap.put("USER_1", new GenericRow(Arrays.asList(1L, "MALE", "REGION_1", "USER_1"))); - 
dataMap.put("USER_2", new GenericRow(Arrays.asList(2L, "FEMALE", "REGION_1", "USER_2"))); - dataMap.put("USER_3", new GenericRow(Arrays.asList(3L, "MALE", "REGION_0", "USER_3"))); - dataMap.put("USER_4", new GenericRow(Arrays.asList(4L, "MALE", "REGION_4", "USER_4"))); + private static final Map ROWS = ImmutableMap.builder() + .put("USER_0", new GenericRow(ImmutableList.of(0L, "FEMALE", "REGION_0", "USER_0"))) + .put("USER_1", new GenericRow(ImmutableList.of(1L, "MALE", "REGION_1", "USER_1"))) + .put("USER_2", new GenericRow(ImmutableList.of(2L, "FEMALE", "REGION_1", "USER_2"))) + .put("USER_3", new GenericRow(ImmutableList.of(3L, "MALE", "REGION_0", "USER_3"))) + .put("USER_4", new GenericRow(ImmutableList.of(4L, "MALE", "REGION_4", "USER_4"))) + .build(); - return dataMap; + public UserDataProvider() { + super("USER", "USERID", PHYSICAL_SCHEMA, ROWS); } } \ No newline at end of file diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/ClusterTerminationTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/ClusterTerminationTest.java index b28f1fa39c8d..a57c8c2dfeb9 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/ClusterTerminationTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/ClusterTerminationTest.java @@ -16,8 +16,8 @@ package io.confluent.ksql.rest.integration; import static io.confluent.ksql.serde.Format.JSON; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertEquals; import com.google.common.collect.ImmutableList; import io.confluent.common.utils.IntegrationTest; @@ -34,7 +34,6 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import kafka.zookeeper.ZooKeeperClientException; -import org.hamcrest.MatcherAssert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -44,9 +43,9 @@ @Category({IntegrationTest.class}) public class ClusterTerminationTest { - private static final String PAGE_VIEW_TOPIC = "pageviews"; - private static final String PAGE_VIEW_STREAM = "pageviews_original"; - private static final PageViewDataProvider PAGE_VIEW_DATA_PROVIDER = new PageViewDataProvider(); + private static final PageViewDataProvider PAGE_VIEWS_PROVIDER = new PageViewDataProvider(); + private static final String PAGE_VIEW_TOPIC = PAGE_VIEWS_PROVIDER.topicName(); + private static final String PAGE_VIEW_STREAM = PAGE_VIEWS_PROVIDER.kstreamName(); private static final String SINK_TOPIC = "sink_topic"; private static final String SINK_STREAM = "sink_stream"; @@ -68,7 +67,7 @@ public class ClusterTerminationTest { public static void setUpClass() { TEST_HARNESS.ensureTopics(PAGE_VIEW_TOPIC); - RestIntegrationTestUtil.createStreams(REST_APP, PAGE_VIEW_STREAM, PAGE_VIEW_TOPIC); + RestIntegrationTestUtil.createStream(REST_APP, PAGE_VIEWS_PROVIDER); } @Test @@ -84,7 +83,7 @@ public void shouldCleanUpSinkTopicsAndSchemasDuringClusterTermination() throws E TEST_HARNESS.getKafkaCluster().waitForTopicsToBePresent(SINK_TOPIC); // Produce to stream so that schema is registered by AvroConverter - TEST_HARNESS.produceRows(PAGE_VIEW_TOPIC, PAGE_VIEW_DATA_PROVIDER, JSON, System::currentTimeMillis); + TEST_HARNESS.produceRows(PAGE_VIEW_TOPIC, PAGE_VIEWS_PROVIDER, JSON, System::currentTimeMillis); TEST_HARNESS.waitForSubjectToBePresent(SINK_TOPIC + KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX); @@ -96,7 +95,7 @@ public void shouldCleanUpSinkTopicsAndSchemasDuringClusterTermination() throws 
E TEST_HARNESS.waitForSubjectToBeAbsent(SINK_TOPIC + KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX); - MatcherAssert.assertThat( + assertThat( "Should not delete non-sink topics", TEST_HARNESS.topicExists(PAGE_VIEW_TOPIC), is(true) @@ -112,7 +111,7 @@ private static void terminateCluster(final List deleteTopicList) { .request(MediaType.APPLICATION_JSON_TYPE) .post(terminateClusterRequest(deleteTopicList))) { - assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()); + assertThat(response.getStatus(), is(Response.Status.OK.getStatusCode())); } finally { client.close(); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/KsqlResourceFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/KsqlResourceFunctionalTest.java index faac08446a5f..01c305576411 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/KsqlResourceFunctionalTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/KsqlResourceFunctionalTest.java @@ -41,6 +41,7 @@ import io.confluent.ksql.serde.avro.AvroSchemas; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlConstants; +import io.confluent.ksql.util.PageViewDataProvider; import java.util.List; import java.util.concurrent.TimeUnit; import kafka.zookeeper.ZooKeeperClientException; @@ -59,8 +60,9 @@ @Category({IntegrationTest.class}) public class KsqlResourceFunctionalTest { - private static final String PAGE_VIEW_TOPIC = "pageviews"; - private static final String PAGE_VIEW_STREAM = "pageviews_original"; + private static final PageViewDataProvider PAGE_VIEWS_PROVIDER = new PageViewDataProvider(); + private static final String PAGE_VIEW_TOPIC = PAGE_VIEWS_PROVIDER.topicName(); + private static final String PAGE_VIEW_STREAM = PAGE_VIEWS_PROVIDER.kstreamName(); private static final IntegrationTestHarness TEST_HARNESS = IntegrationTestHarness.build(); @@ -78,7 +80,8 @@ public class KsqlResourceFunctionalTest { @BeforeClass public static void setUpClass() { TEST_HARNESS.ensureTopics(PAGE_VIEW_TOPIC); - RestIntegrationTestUtil.createStreams(REST_APP, PAGE_VIEW_STREAM, PAGE_VIEW_TOPIC); + + RestIntegrationTestUtil.createStream(REST_APP, PAGE_VIEWS_PROVIDER); } @After diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/PullQueryFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/PullQueryFunctionalTest.java index 610408b129f9..d186f44ac913 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/PullQueryFunctionalTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/PullQueryFunctionalTest.java @@ -39,7 +39,6 @@ import io.confluent.ksql.test.util.KsqlIdentifierTestUtil; import io.confluent.ksql.test.util.TestBasicJaasConfig; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.TestDataProvider; import io.confluent.ksql.util.UserDataProvider; import io.confluent.rest.RestConfig; import java.io.IOException; @@ -87,7 +86,7 @@ public class PullQueryFunctionalTest { private static final String USER_WITH_ACCESS = "harry"; private static final String USER_WITH_ACCESS_PWD = "changeme"; - private static final TestDataProvider USER_PROVIDER = new UserDataProvider(); + private static final UserDataProvider USER_PROVIDER = new UserDataProvider(); private static final Format VALUE_FORMAT = Format.JSON; private static final int HEADER = 1; @@ -156,7 +155,7 @@ public static void setUpClass() { makeAdminRequest( "CREATE STREAM " + USERS_STREAM 
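          // (Editorial note) With the provider-derived schema string, this expands to
          // roughly the following, assuming UserDataProvider's columns (sketch only):
          //   CREATE STREAM users_... (REGISTERTIME BIGINT, GENDER STRING,
          //     REGIONID STRING, USERID STRING)
          //     WITH (kafka_topic='...', key='USERID', value_format='JSON');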
- + " " + USER_PROVIDER.ksqlSchemaString() + + " (" + USER_PROVIDER.ksqlSchemaString() + ")" + " WITH (" + " kafka_topic='" + USER_TOPIC + "', " + " key='" + USER_PROVIDER.key() + "', " diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestApiTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestApiTest.java index 4bbb7685afdf..b44839cca12e 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestApiTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestApiTest.java @@ -84,8 +84,11 @@ public class RestApiTest { private static final int HEADER = 1; // <-- some responses include a header as the first message. private static final int FOOTER = 1; // <-- some responses include a footer as the last message. private static final int LIMIT = 2; - private static final String PAGE_VIEW_TOPIC = "pageviews"; - private static final String PAGE_VIEW_STREAM = "pageviews_original"; + + private static final PageViewDataProvider PAGE_VIEWS_PROVIDER = new PageViewDataProvider(); + private static final String PAGE_VIEW_TOPIC = PAGE_VIEWS_PROVIDER.topicName(); + private static final String PAGE_VIEW_STREAM = PAGE_VIEWS_PROVIDER.kstreamName(); + private static final String AGG_TABLE = "AGG_TABLE"; private static final Credentials SUPER_USER = VALID_USER1; private static final Credentials NORMAL_USER = VALID_USER2; @@ -171,12 +174,13 @@ public class RestApiTest { public static void setUpClass() { TEST_HARNESS.ensureTopics(PAGE_VIEW_TOPIC); - TEST_HARNESS.produceRows(PAGE_VIEW_TOPIC, new PageViewDataProvider(), Format.JSON); + TEST_HARNESS.produceRows(PAGE_VIEW_TOPIC, PAGE_VIEWS_PROVIDER, Format.JSON); - RestIntegrationTestUtil.createStreams(REST_APP, PAGE_VIEW_STREAM, PAGE_VIEW_TOPIC); + RestIntegrationTestUtil.createStream(REST_APP, PAGE_VIEWS_PROVIDER); makeKsqlRequest("CREATE TABLE " + AGG_TABLE + " AS " - + "SELECT COUNT(1) AS COUNT FROM " + PAGE_VIEW_STREAM + " GROUP BY USERID;"); + + "SELECT COUNT(1) AS COUNT FROM " + PAGE_VIEW_STREAM + " GROUP BY USERID;" + ); } @After @@ -226,9 +230,9 @@ public void shouldExecutePushQueryOverRest() { assertThat(parseRawRestQueryResponse(response), hasSize(HEADER + LIMIT + FOOTER)); final String[] messages = response.split(System.lineSeparator()); assertThat(messages[0], - is("[{\"header\":{\"queryId\":\"none\",\"schema\":\"`USERID` STRING, `PAGEID` STRING, `VIEWTIME` BIGINT, `ROWKEY` STRING\"}},")); - assertThat(messages[1], is("{\"row\":{\"columns\":[\"USER_1\",\"PAGE_1\",1,\"1\"]}},")); - assertThat(messages[2], is("{\"row\":{\"columns\":[\"USER_2\",\"PAGE_2\",2,\"2\"]}},")); + is("[{\"header\":{\"queryId\":\"none\",\"schema\":\"`USERID` STRING, `PAGEID` STRING, `VIEWTIME` BIGINT, `ROWKEY` BIGINT\"}},")); + assertThat(messages[1], is("{\"row\":{\"columns\":[\"USER_1\",\"PAGE_1\",1,1]}},")); + assertThat(messages[2], is("{\"row\":{\"columns\":[\"USER_2\",\"PAGE_2\",2,2]}},")); assertThat(messages[3], is("{\"finalMessage\":\"Limit Reached\"}]")); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestIntegrationTestUtil.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestIntegrationTestUtil.java index 6a34847250ee..3612c9cd0248 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestIntegrationTestUtil.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestIntegrationTestUtil.java @@ -15,8 +15,6 @@ package io.confluent.ksql.rest.integration; -import static 
org.junit.Assert.assertEquals; - import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList.Builder; import com.google.common.collect.ImmutableMap; @@ -35,9 +33,9 @@ import io.confluent.ksql.rest.entity.StreamedRow; import io.confluent.ksql.rest.server.TestKsqlRestApp; import io.confluent.ksql.test.util.secure.Credentials; +import io.confluent.ksql.util.TestDataProvider; import io.confluent.rest.validation.JacksonMessageBodyProvider; import java.net.URI; -import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; @@ -134,26 +132,13 @@ static String rawRestQueryRequest( } } - static void createStreams(final TestKsqlRestApp restApp, final String streamName, final String topicName) { - final Client client = TestKsqlRestApp.buildClient(); - - try (final Response response = client - .target(restApp.getHttpListener()) - .path("ksql") - .request(MediaType.APPLICATION_JSON_TYPE) - .post(ksqlRequest( - "CREATE STREAM " + streamName + " " - + "(viewtime bigint, pageid varchar, userid varchar) " - + "WITH (kafka_topic='" + topicName + "', value_format='json');"))) { - - assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()); - } finally { - client.close(); - } - } - - private static Entity ksqlRequest(final String sql) { - return Entity.json(new KsqlRequest(sql, Collections.emptyMap(), null)); + static void createStream(final TestKsqlRestApp restApp, final TestDataProvider dataProvider) { + makeKsqlRequest( + restApp, + "CREATE STREAM " + dataProvider.kstreamName() + + " (" + dataProvider.ksqlSchemaString() + ") " + + "WITH (kafka_topic='" + dataProvider.topicName() + "', value_format='json');" + ); } private static List awaitResults( diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/kafka/KafkaSerdeFactory.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/kafka/KafkaSerdeFactory.java index 303939647acb..feae69a9e17c 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/kafka/KafkaSerdeFactory.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/kafka/KafkaSerdeFactory.java @@ -15,6 +15,7 @@ package io.confluent.ksql.serde.kafka; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.errorprone.annotations.Immutable; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; @@ -75,8 +76,9 @@ public Serde createSerde( return Serdes.serdeFrom(serializer, deserializer); } - @SuppressWarnings("unchecked") - private static Serde getPrimitiveSerde(final ConnectSchema schema) { + @VisibleForTesting + @SuppressWarnings({"unchecked", "rawtypes"}) + public static Serde getPrimitiveSerde(final ConnectSchema schema) { if (schema.type() != Type.STRUCT) { throw new IllegalArgumentException("KAFKA format does not support unwrapping"); } diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/KsqlAvroSerializerTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/KsqlAvroSerializerTest.java index b54ddfd109b8..7e224d6e89ad 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/KsqlAvroSerializerTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/avro/KsqlAvroSerializerTest.java @@ -61,6 +61,7 @@ import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; import org.hamcrest.CoreMatchers; +import org.hamcrest.Matcher; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -492,7 +493,10 @@ public 
void shouldSerializeArrayOfMap() {
 org.apache.avro.Schema.create(Type.LONG)
 );
 
- assertThat(deserialize(bytes), is(ImmutableList.of(expectedElements)));
+ final Matcher<? super List<Map<String, Long>>> matcher = is(
+ ImmutableList.of(expectedElements));
+ final List<Map<String, Long>> deserialize = deserialize(bytes);
+ assertThat(deserialize, matcher);
 }
 
 @Test
diff --git a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/EmbeddedSingleNodeKafkaCluster.java b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/EmbeddedSingleNodeKafkaCluster.java
index a4aded1479d9..bd71f10e7c0c 100644
--- a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/EmbeddedSingleNodeKafkaCluster.java
+++ b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/EmbeddedSingleNodeKafkaCluster.java
@@ -16,6 +16,7 @@ package io.confluent.ksql.test.util;
 import static java.util.Objects.requireNonNull;
+import static org.hamcrest.Matchers.hasSize;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
@@ -36,11 +37,15 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.Properties;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import javax.security.auth.login.Configuration;
@@ -51,7 +56,10 @@
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.acl.AccessControlEntry;
 import org.apache.kafka.common.acl.AclBinding;
 import org.apache.kafka.common.acl.AclBindingFilter;
@@ -67,7 +75,9 @@
 import org.apache.kafka.common.security.plain.PlainLoginModule;
 import org.apache.kafka.common.serialization.ByteArrayDeserializer;
 import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serializer;
 import org.apache.kafka.test.TestUtils;
+import org.hamcrest.Matcher;
 import org.junit.rules.ExternalResource;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
@@ -81,6 +91,7 @@ public final class EmbeddedSingleNodeKafkaCluster extends ExternalResource {
 // CHECKSTYLE_RULES.ON: ClassDataAbstractionCoupling
 private static final Logger log = LoggerFactory.getLogger(EmbeddedSingleNodeKafkaCluster.class);
+ private static final Duration PRODUCE_TIMEOUT = Duration.ofSeconds(30);
 
 public static final String JAAS_KAFKA_PROPS_NAME = "KafkaServer";
@@ -327,6 +338,47 @@ public void waitForTopicsToBeAbsent(final String... topicNames) {
 broker.waitForTopicsToBeAbsent(topicNames);
 }
 
+ /**
+ * Publish test data to the supplied {@code topic}.
+ *
+ * @param topic the name of the topic to produce to.
+ * @param recordsToPublish the records to produce.
+ * @param keySerializer the serializer to use to serialize keys.
+ * @param valueSerializer the serializer to use to serialize values.
+ * @param timestampSupplier supplier of timestamps.
+ * @return the metadata of the produced records, keyed by record key.
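 * <p>[Editor's note — illustrative usage sketch, not part of the patch; the cluster
 * instance and topic name are hypothetical:
 * <pre>
 * final Map<String, RecordMetadata> produced = CLUSTER.produceRows(
 *     "users",
 *     ImmutableMap.of("k1", "v1", "k2", "v2"),
 *     new StringSerializer(),
 *     new StringSerializer(),
 *     System::currentTimeMillis);
 * </pre>
 * Each send() future is resolved against the 30-second PRODUCE_TIMEOUT, so a failed
 * publish surfaces as a RuntimeException rather than hanging the test.]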
+ */
+ public <K, V> Map<K, RecordMetadata> produceRows(
+ final String topic,
+ final Map<K, V> recordsToPublish,
+ final Serializer<K> keySerializer,
+ final Serializer<V> valueSerializer,
+ final Supplier<Long> timestampSupplier
+ ) {
+ try (KafkaProducer<K, V> producer = new KafkaProducer<>(
+ producerConfig(),
+ keySerializer,
+ valueSerializer
+ )) {
+ final Map<K, Future<RecordMetadata>> futures = recordsToPublish.entrySet().stream()
+ .collect(Collectors.toMap(Entry::getKey, entry -> {
+ final K key = entry.getKey();
+ final V value = entry.getValue();
+ final Long timestamp = timestampSupplier.get();
+ return producer.send(new ProducerRecord<>(topic, null, timestamp, key, value));
+ }));
+
+ return futures.entrySet().stream()
+ .collect(Collectors.toMap(Entry::getKey, entry -> {
+ try {
+ return entry.getValue().get(PRODUCE_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
+ } catch (final Exception e) {
+ throw new RuntimeException("Failed to send record to " + topic, e);
+ }
+ }));
+ }
+ }
+
 /**
 * Verify there are {@code expectedCount} records available on the supplied {@code topic}.
 *
@@ -358,6 +410,50 @@ public <K, V> List<ConsumerRecord<K, V>> verifyAvailableRecords(
 final int expectedCount,
 final Deserializer<K> keyDeserializer,
 final Deserializer<V> valueDeserializer
+ ) {
+ return verifyAvailableRecords(
+ topic,
+ hasSize(expectedCount),
+ keyDeserializer,
+ valueDeserializer
+ );
+ }
+
+ /**
+ * Verify the records available on the supplied {@code topic} satisfy the supplied
+ * {@code expected} matcher.
+ *
+ * @param topic the name of the topic to check.
+ * @param expected the expected records.
+ * @return the list of consumed records.
+ */
+ public <K, V> List<ConsumerRecord<K, V>> verifyAvailableRecords(
+ final String topic,
+ final Matcher<? super List<ConsumerRecord<K, V>>> expected,
+ final Deserializer<K> keyDeserializer,
+ final Deserializer<V> valueDeserializer
+ ) {
+ return verifyAvailableRecords(
+ topic,
+ expected,
+ keyDeserializer,
+ valueDeserializer,
+ ConsumerTestUtil.DEFAULT_VERIFY_TIMEOUT
+ );
+ }
+
+ /**
+ * Verify the records available on the supplied {@code topic} satisfy the supplied
+ * {@code expected} matcher within the supplied {@code timeout}.
+ *
+ * @param topic the name of the topic to check.
+ * @param expected the expected records.
+ * @return the list of consumed records.
+ */
+ public <K, V> List<ConsumerRecord<K, V>> verifyAvailableRecords(
+ final String topic,
+ final Matcher<? super List<ConsumerRecord<K, V>>> expected,
+ final Deserializer<K> keyDeserializer,
+ final Deserializer<V> valueDeserializer,
+ final Duration timeout
 ) {
 try (KafkaConsumer<K, V> consumer = new KafkaConsumer<>(
 consumerConfig(),
@@ -366,7 +462,7 @@ public <K, V> List<ConsumerRecord<K, V>> verifyAvailableRecords(
 ) {
 consumer.subscribe(Collections.singleton(topic));
 
- return ConsumerTestUtil.verifyAvailableRecords(consumer, expectedCount);
+ return ConsumerTestUtil.verifyAvailableRecords(consumer, expected, timeout);
 }
 }
 

From eb0fe40b2d99ded47f73dc0ef67fcb206a18ad05 Mon Sep 17 00:00:00 2001
From: Jim Galasyn 
Date: Mon, 6 Jan 2020 10:43:24 -0800
Subject: [PATCH 070/123] docs: remove basic auth section from server-config
 (DOCS-3140) (#4209)

---
 .../integrate-ksql-with-confluent-control-center.rst  | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/docs/installation/server-config/integrate-ksql-with-confluent-control-center.rst b/docs/installation/server-config/integrate-ksql-with-confluent-control-center.rst
index 98144da3e1d2..918ae80072d5 100644
--- a/docs/installation/server-config/integrate-ksql-with-confluent-control-center.rst
+++ b/docs/installation/server-config/integrate-ksql-with-confluent-control-center.rst
@@ -25,17 +25,6 @@ properties in the KSQL Server and |c3-short| configuration files.
* By default, the |c3-short| configuration file is installed at ``/etc/confluent-control-center/control-center.properties``. -Secure Communication with KSQL Server -===================================== - -When you use Basic authentication in KSQL, |c3-short| allows passing credentials -as part of the KSQL Server URL in |c3-short| configuration. - -:: - - # KSQL cluster URL - confluent.controlcenter.ksql..url=http://:@localhost:8088 - You can set up KSQL Server to communicate securely with other components in |cp|. For more information, see :ref:`ksql-security`. From c62cc9687b9128c46a91537053cefe7feadde64b Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Tue, 7 Jan 2020 03:50:17 +0900 Subject: [PATCH 071/123] refactor: Use FunctionName not String in FunctionRegistry API (#4225) --- .../ksql/function/FunctionRegistry.java | 17 ++--- .../ksql/analyzer/AggregateAnalyzer.java | 13 ++-- .../analyzer/AggregateExpressionRewriter.java | 3 +- .../io/confluent/ksql/analyzer/Analyzer.java | 5 +- .../function/InternalFunctionRegistry.java | 33 ++++----- .../ksql/planner/plan/FlatMapNode.java | 3 +- .../InternalFunctionRegistryTest.java | 16 ++--- .../function/KudafUndoAggregatorTest.java | 3 +- .../ksql/function/UdfLoaderTest.java | 67 ++++++++++--------- .../ksql/function/UdtfLoaderTest.java | 34 +++++++--- .../ksql/execution/codegen/CodeGenRunner.java | 2 +- .../execution/codegen/SqlToJavaVisitor.java | 2 +- .../ksql/execution/function/UdafUtil.java | 2 +- .../ksql/execution/function/UdtfUtil.java | 2 +- .../execution/util/ExpressionTypeManager.java | 10 +-- .../codegen/SqlToJavaVisitorTest.java | 4 +- .../ksql/execution/function/UdafUtilTest.java | 2 +- .../sqlpredicate/SqlPredicateTest.java | 2 +- .../util/ExpressionTypeManagerTest.java | 8 +-- .../ksql/metastore/MetaStoreImpl.java | 15 +++-- .../execution/DescribeFunctionExecutor.java | 18 +++-- .../streams/AggregateParamsFactoryTest.java | 10 +-- .../streams/StepSchemaResolverTest.java | 7 +- 23 files changed, 155 insertions(+), 123 deletions(-) diff --git a/ksql-common/src/main/java/io/confluent/ksql/function/FunctionRegistry.java b/ksql-common/src/main/java/io/confluent/ksql/function/FunctionRegistry.java index 2ca170256ebc..c281858523c4 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/function/FunctionRegistry.java +++ b/ksql-common/src/main/java/io/confluent/ksql/function/FunctionRegistry.java @@ -15,6 +15,7 @@ package io.confluent.ksql.function; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.schema.ksql.types.SqlType; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.testing.EffectivelyImmutable; @@ -34,7 +35,7 @@ public interface FunctionRegistry { * @param functionName the name of the function to test * @return {@code true} if it is an aggregate function, {@code false} otherwise. */ - boolean isAggregate(String functionName); + boolean isAggregate(FunctionName functionName); /** * Test if the supplied {@code functionName} is a table function. @@ -44,7 +45,7 @@ public interface FunctionRegistry { * @param functionName the name of the function to test * @return {@code true} if it is a table function, {@code false} otherwise. */ - boolean isTableFunction(String functionName); + boolean isTableFunction(FunctionName functionName); /** * Get the factory for a UDF. @@ -53,7 +54,7 @@ public interface FunctionRegistry { * @return the factory. * @throws KsqlException on unknown UDF. 
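 * <p>[Editor's note — illustrative call under the new API, not part of the patch; the
 * registry instance is hypothetical:
 * <pre>
 * final UdfFactory factory = registry.getUdfFactory(FunctionName.of("SUBSTRING"));
 * </pre>
 * Callers now pass a typed FunctionName rather than a raw String.]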
*/
- UdfFactory getUdfFactory(String functionName);
+ UdfFactory getUdfFactory(FunctionName functionName);
 
 /**
 * Get the factory for a table function.
 *
 * @param functionName the name of the function.
 * @return the factory.
 * @throws KsqlException on unknown table function.
 */
- TableFunctionFactory getTableFunctionFactory(String functionName);
+ TableFunctionFactory getTableFunctionFactory(FunctionName functionName);
 
 /**
 * Get the factory for a UDAF.
 *
 * @param functionName the name of the function.
 * @return the factory.
 * @throws KsqlException on unknown UDAF.
 */
- AggregateFunctionFactory getAggregateFactory(String functionName);
+ AggregateFunctionFactory getAggregateFactory(FunctionName functionName);
 
 /**
 * Get an instance of an aggregate function.
@@ -92,7 +93,7 @@ public interface FunctionRegistry {
 * @throws KsqlException on unknown UDAF, or on unsupported {@code argumentType}.
 */
 KsqlAggregateFunction getAggregateFunction(
- String functionName,
+ FunctionName functionName,
 SqlType argumentType,
 AggregateFunctionInitArguments initArgs
 );
@@ -100,12 +101,12 @@ public interface FunctionRegistry {
 /**
 * Get a table function.
 *
- * @param functionName the name of the function.
+ * @param functionName  the name of the function.
 * @param argumentTypes the schemas of the arguments.
 * @return the function instance.
 * @throws KsqlException on unknown table function, or on unsupported {@code argumentType}.
 */
- KsqlTableFunction getTableFunction(String functionName, List<SqlType> argumentTypes);
+ KsqlTableFunction getTableFunction(FunctionName functionName, List<SqlType> argumentTypes);
 
 /**
 * @return all UDF factories.
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/AggregateAnalyzer.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/AggregateAnalyzer.java
index 802c31879340..aeadb42a52f5 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/AggregateAnalyzer.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/AggregateAnalyzer.java
@@ -21,6 +21,7 @@ import io.confluent.ksql.execution.expression.tree.FunctionCall;
 import io.confluent.ksql.execution.expression.tree.TraversalExpressionVisitor;
 import io.confluent.ksql.function.FunctionRegistry;
+import io.confluent.ksql.name.FunctionName;
 import io.confluent.ksql.util.KsqlException;
 import java.util.HashSet;
 import java.util.Objects;
@@ -65,7 +66,7 @@ void processGroupBy(final Expression expression) {
 final AggregateVisitor visitor = new AggregateVisitor((aggFuncName, node) -> {
 if (aggFuncName.isPresent()) {
 throw new KsqlException("GROUP BY does not support aggregate functions: "
- + aggFuncName.get() + " is an aggregate function.");
+ + aggFuncName.get().name() + " is an aggregate function.");
 }
 });
 
@@ -83,12 +84,12 @@ void processHaving(final Expression expression) {
 
 private final class AggregateVisitor extends TraversalExpressionVisitor<Void> {
 
- private final BiConsumer<Optional<String>, ColumnReferenceExp> dereferenceCollector;
- private Optional<String> aggFunctionName = Optional.empty();
+ private final BiConsumer<Optional<FunctionName>, ColumnReferenceExp> dereferenceCollector;
+ private Optional<FunctionName> aggFunctionName = Optional.empty();
 private boolean visitedAggFunction = false;
 
 private AggregateVisitor(
- final BiConsumer<Optional<String>, ColumnReferenceExp> dereferenceCollector
+ final BiConsumer<Optional<FunctionName>, ColumnReferenceExp> dereferenceCollector
 ) {
 this.dereferenceCollector =
 Objects.requireNonNull(dereferenceCollector, "dereferenceCollector");
 }
 
 @Override
 public Void visitFunctionCall(final FunctionCall node, final Void
context) { - final String functionName = node.getName().name(); + final FunctionName functionName = node.getName(); final boolean aggregateFunc = functionRegistry.isAggregate(functionName); final FunctionCall functionCall = aggregateFunc && node.getArguments().isEmpty() @@ -106,7 +107,7 @@ public Void visitFunctionCall(final FunctionCall node, final Void context) { if (aggregateFunc) { if (aggFunctionName.isPresent()) { throw new KsqlException("Aggregate functions can not be nested: " - + aggFunctionName.get() + "(" + functionName + "())"); + + aggFunctionName.get().name() + "(" + functionName.name() + "())"); } visitedAggFunction = true; diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/AggregateExpressionRewriter.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/AggregateExpressionRewriter.java index 0a05b26a66ce..2ba3cf3b4d5e 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/AggregateExpressionRewriter.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/AggregateExpressionRewriter.java @@ -23,6 +23,7 @@ import io.confluent.ksql.execution.expression.tree.VisitParentExpressionVisitor; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.schema.ksql.ColumnRef; import java.util.ArrayList; import java.util.List; @@ -43,7 +44,7 @@ public AggregateExpressionRewriter(final FunctionRegistry functionRegistry) { public Optional visitFunctionCall( final FunctionCall node, final ExpressionTreeRewriter.Context context) { - final String functionName = node.getName().name(); + final FunctionName functionName = node.getName(); if (functionRegistry.isAggregate(functionName)) { final ColumnName aggVarName = ColumnName.aggregateColumn(aggVariableIndex); aggVariableIndex++; diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java index 8d152e8d2028..cb23dec9a5e2 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java @@ -33,6 +33,7 @@ import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.metastore.model.DataSource; import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.DefaultTraversalVisitor; import io.confluent.ksql.parser.NodeLocation; @@ -620,11 +621,11 @@ private void visitTableFunctions(final Expression expression) { private final class TableFunctionVisitor extends TraversalExpressionVisitor { - private Optional tableFunctionName = Optional.empty(); + private Optional tableFunctionName = Optional.empty(); @Override public Void visitFunctionCall(final FunctionCall functionCall, final Void context) { - final String functionName = functionCall.getName().name(); + final FunctionName functionName = functionCall.getName(); final boolean isTableFunction = metaStore.isTableFunction(functionName); if (isTableFunction) { diff --git a/ksql-engine/src/main/java/io/confluent/ksql/function/InternalFunctionRegistry.java b/ksql-engine/src/main/java/io/confluent/ksql/function/InternalFunctionRegistry.java index c10cf9fa99e6..635e1468852f 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/function/InternalFunctionRegistry.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/function/InternalFunctionRegistry.java @@ -58,10 +58,11 @@ public 
InternalFunctionRegistry() { } @Override - public synchronized UdfFactory getUdfFactory(final String functionName) { - final UdfFactory udfFactory = udfs.get(functionName.toUpperCase()); + public synchronized UdfFactory getUdfFactory(final FunctionName functionName) { + final UdfFactory udfFactory = udfs.get(functionName.name().toUpperCase()); if (udfFactory == null) { - throw new KsqlException("Can't find any functions with the name '" + functionName + "'"); + throw new KsqlException( + "Can't find any functions with the name '" + functionName.name() + "'"); } return udfFactory; } @@ -99,22 +100,22 @@ public synchronized UdfFactory ensureFunctionFactory(final UdfFactory factory) { } @Override - public synchronized boolean isAggregate(final String functionName) { - return udafs.containsKey(functionName.toUpperCase()); + public synchronized boolean isAggregate(final FunctionName functionName) { + return udafs.containsKey(functionName.name().toUpperCase()); } @Override - public synchronized boolean isTableFunction(final String functionName) { - return udtfs.containsKey(functionName.toUpperCase()); + public synchronized boolean isTableFunction(final FunctionName functionName) { + return udtfs.containsKey(functionName.name().toUpperCase()); } @Override public synchronized KsqlAggregateFunction getAggregateFunction( - final String functionName, + final FunctionName functionName, final SqlType argumentType, final AggregateFunctionInitArguments initArgs ) { - final AggregateFunctionFactory udafFactory = udafs.get(functionName.toUpperCase()); + final AggregateFunctionFactory udafFactory = udafs.get(functionName.name().toUpperCase()); if (udafFactory == null) { throw new KsqlException("No aggregate function with name " + functionName + " exists!"); } @@ -126,10 +127,10 @@ public synchronized KsqlAggregateFunction getAggregateFunction( @Override public synchronized KsqlTableFunction getTableFunction( - final String functionName, + final FunctionName functionName, final List argumentTypes ) { - final TableFunctionFactory udtfFactory = udtfs.get(functionName.toUpperCase()); + final TableFunctionFactory udtfFactory = udtfs.get(functionName.name().toUpperCase()); if (udtfFactory == null) { throw new KsqlException("No table function with name " + functionName + " exists!"); } @@ -187,8 +188,9 @@ public synchronized List listFunctions() { } @Override - public synchronized AggregateFunctionFactory getAggregateFactory(final String functionName) { - final AggregateFunctionFactory udafFactory = udafs.get(functionName.toUpperCase()); + public synchronized AggregateFunctionFactory getAggregateFactory( + final FunctionName functionName) { + final AggregateFunctionFactory udafFactory = udafs.get(functionName.name().toUpperCase()); if (udafFactory == null) { throw new KsqlException( "Can not find any aggregate functions with the name '" + functionName + "'"); @@ -198,8 +200,9 @@ public synchronized AggregateFunctionFactory getAggregateFactory(final String fu } @Override - public synchronized TableFunctionFactory getTableFunctionFactory(final String functionName) { - final TableFunctionFactory tableFunctionFactory = udtfs.get(functionName.toUpperCase()); + public synchronized TableFunctionFactory getTableFunctionFactory( + final FunctionName functionName) { + final TableFunctionFactory tableFunctionFactory = udtfs.get(functionName.name().toUpperCase()); if (tableFunctionFactory == null) { throw new KsqlException( "Can not find any table functions with the name '" + functionName + "'"); diff --git 
a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/FlatMapNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/FlatMapNode.java index 79743acb6179..f304fc0d1854 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/FlatMapNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/FlatMapNode.java @@ -31,6 +31,7 @@ import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.metastore.model.KeyField; import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.services.KafkaTopicClient; @@ -147,7 +148,7 @@ public Optional visitFunctionCall( final FunctionCall node, final Context context ) { - final String functionName = node.getName().name(); + final FunctionName functionName = node.getName(); if (functionRegistry.isTableFunction(functionName)) { final ColumnName varName = ColumnName.synthesisedSchemaColumn(variableIndex); variableIndex++; diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java index b394b2cdde28..d5495e42a6f8 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/InternalFunctionRegistryTest.java @@ -114,7 +114,7 @@ public void shouldAddFunction() { functionRegistry.addFunction(func); // Then: - final UdfFactory factory = functionRegistry.getUdfFactory("func"); + final UdfFactory factory = functionRegistry.getUdfFactory(FunctionName.of("func")); assertThat(factory.getFunction(Collections.emptyList()), is(this.func)); } @@ -235,14 +235,14 @@ public void shouldThrowOnInvalidUdafFunctionName() { @Test public void shouldKnowIfFunctionIsAggregate() { - assertFalse(functionRegistry.isAggregate("lcase")); - assertTrue(functionRegistry.isAggregate("topk")); + assertFalse(functionRegistry.isAggregate(FunctionName.of("lcase"))); + assertTrue(functionRegistry.isAggregate(FunctionName.of("topk"))); } @Test public void shouldAddAggregateFunction() { functionRegistry.addAggregateFunctionFactory(createAggregateFunctionFactory()); - assertThat(functionRegistry.getAggregateFunction("my_aggregate", + assertThat(functionRegistry.getAggregateFunction(FunctionName.of("my_aggregate"), SqlTypes.INTEGER, AggregateFunctionInitArguments.EMPTY_ARGS), not(nullValue())); } @@ -250,7 +250,7 @@ public void shouldAddAggregateFunction() { @Test public void shouldAddTableFunction() { functionRegistry.addTableFunctionFactory(createTableFunctionFactory()); - assertThat(functionRegistry.getTableFunction("my_tablefunction", + assertThat(functionRegistry.getTableFunction(FunctionName.of("my_tablefunction"), ImmutableList.of(SqlTypes.INTEGER) ), not(nullValue())); } @@ -288,9 +288,9 @@ public void shouldAddFunctionWithSameNameClassButDifferentArguments() { functionRegistry.addFunction(func2); // Then: - assertThat(functionRegistry.getUdfFactory("func") + assertThat(functionRegistry.getUdfFactory(FunctionName.of("func")) .getFunction(Collections.singletonList(SqlTypes.BIGINT)), equalTo(func2)); - assertThat(functionRegistry.getUdfFactory("func") + assertThat(functionRegistry.getUdfFactory(FunctionName.of("func")) .getFunction(Collections.emptyList()), equalTo(func)); } @@ -298,7 +298,7 @@ public void shouldAddFunctionWithSameNameClassButDifferentArguments() { 
public void shouldThrowExceptionIfNoFunctionsWithNameExist() { expectedException.expect(KsqlException.class); expectedException.expectMessage("'foo_bar'"); - functionRegistry.getUdfFactory("foo_bar"); + functionRegistry.getUdfFactory(FunctionName.of("foo_bar")); } @Test diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/KudafUndoAggregatorTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/KudafUndoAggregatorTest.java index a603f4f159cf..98fd41c957b7 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/function/KudafUndoAggregatorTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/KudafUndoAggregatorTest.java @@ -22,6 +22,7 @@ import io.confluent.ksql.GenericRow; import io.confluent.ksql.execution.function.TableAggregationFunction; import io.confluent.ksql.execution.function.udaf.KudafUndoAggregator; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.schema.ksql.types.SqlTypes; import java.util.Arrays; import java.util.List; @@ -31,7 +32,7 @@ public class KudafUndoAggregatorTest { private static final InternalFunctionRegistry FUNCTION_REGISTRY = new InternalFunctionRegistry(); private static final KsqlAggregateFunction SUM_INFO = FUNCTION_REGISTRY.getAggregateFunction( - "SUM", + FunctionName.of("SUM"), SqlTypes.INTEGER, new AggregateFunctionInitArguments(2) ); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/UdfLoaderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/UdfLoaderTest.java index 1ac708308cf3..14b150f95b3f 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/function/UdfLoaderTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/UdfLoaderTest.java @@ -110,7 +110,7 @@ public void before() { @Test public void shouldLoadFunctionsInKsqlEngine() { - final UdfFactory function = FUNC_REG.getUdfFactory("substring"); + final UdfFactory function = FUNC_REG.getUdfFactory(FunctionName.of("substring")); assertThat(function, not(nullValue())); final Kudf substring1 = function.getFunction( @@ -126,7 +126,7 @@ public void shouldLoadFunctionsInKsqlEngine() { @Test public void shouldLoadUdafs() { final KsqlAggregateFunction instance = FUNC_REG - .getAggregateFunction("test_udaf", SqlTypes.BIGINT, + .getAggregateFunction(FunctionName.of("test_udaf"), SqlTypes.BIGINT, AggregateFunctionInitArguments.EMPTY_ARGS); assertThat(instance.getInitialValueSupplier().get(), equalTo(0L)); assertThat(instance.aggregate(1L, 1L), equalTo(2L)); @@ -147,7 +147,8 @@ public void shouldLoadStructUdafs() { .build(); final KsqlAggregateFunction instance = FUNC_REG - .getAggregateFunction("test_udaf", sqlSchema, AggregateFunctionInitArguments.EMPTY_ARGS); + .getAggregateFunction(FunctionName.of("test_udaf"), sqlSchema, + AggregateFunctionInitArguments.EMPTY_ARGS); assertThat(instance.getInitialValueSupplier().get(), equalTo(new Struct(schema).put("A", 0).put("B", 0))); @@ -169,7 +170,7 @@ public void shouldLoadDecimalUdfs() { final SqlDecimal schema = SqlTypes.decimal(2, 1); // When: - final KsqlScalarFunction fun = FUNC_REG.getUdfFactory("floor") + final KsqlScalarFunction fun = FUNC_REG.getUdfFactory(FunctionName.of("floor")) .getFunction(ImmutableList.of(schema)); // Then: @@ -178,8 +179,8 @@ public void shouldLoadDecimalUdfs() { @Test public void shouldLoadFunctionsFromJarsInPluginDir() { - final UdfFactory toString = FUNC_REG.getUdfFactory("tostring"); - final UdfFactory multi = FUNC_REG.getUdfFactory("multiply"); + final UdfFactory toString = FUNC_REG.getUdfFactory(FunctionName.of("tostring")); + 
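    /*
     * Editor's note (illustrative, not part of the patch): lookups stay case-insensitive
     * under the FunctionName API, because InternalFunctionRegistry upper-cases the name
     * before consulting its map, so e.g. FunctionName.of("tostring") and
     * FunctionName.of("TOSTRING") resolve to the same factory.
     */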
final UdfFactory multi = FUNC_REG.getUdfFactory(FunctionName.of("multiply")); assertThat(toString, not(nullValue())); assertThat(multi, not(nullValue())); } @@ -187,7 +188,7 @@ public void shouldLoadFunctionsFromJarsInPluginDir() { @Test public void shouldLoadFunctionWithListReturnType() { // Given: - final UdfFactory toList = FUNC_REG.getUdfFactory("tolist"); + final UdfFactory toList = FUNC_REG.getUdfFactory(FunctionName.of("tolist")); // When: final List args = Collections.singletonList(SqlTypes.STRING); @@ -202,7 +203,7 @@ public void shouldLoadFunctionWithListReturnType() { @Test public void shouldLoadFunctionWithMapReturnType() { // Given: - final UdfFactory toMap = FUNC_REG.getUdfFactory("tomap"); + final UdfFactory toMap = FUNC_REG.getUdfFactory(FunctionName.of("tomap")); // When: final List args = Collections.singletonList(SqlTypes.STRING); @@ -219,7 +220,7 @@ public void shouldLoadFunctionWithMapReturnType() { @Test public void shouldLoadFunctionWithStructReturnType() { // Given: - final UdfFactory toStruct = FUNC_REG.getUdfFactory("tostruct"); + final UdfFactory toStruct = FUNC_REG.getUdfFactory(FunctionName.of("tostruct")); // When: final List args = Collections.singletonList(SqlTypes.STRING); @@ -236,7 +237,7 @@ public void shouldLoadFunctionWithStructReturnType() { @Test public void shouldLoadFunctionWithSchemaProvider() { // Given: - final UdfFactory returnDecimal = FUNC_REG.getUdfFactory("returndecimal"); + final UdfFactory returnDecimal = FUNC_REG.getUdfFactory(FunctionName.of("returndecimal")); // When: final SqlDecimal decimal = SqlTypes.decimal(2, 1); @@ -250,15 +251,17 @@ public void shouldLoadFunctionWithSchemaProvider() { @Test public void shouldThrowOnReturnTypeMismatch() { // Given: - final UdfFactory returnIncompatible = FUNC_REG.getUdfFactory("returnincompatible"); + final UdfFactory returnIncompatible = FUNC_REG + .getUdfFactory(FunctionName.of("returnincompatible")); final SqlDecimal decimal = SqlTypes.decimal(2, 1); final List args = Collections.singletonList(decimal); final KsqlScalarFunction function = returnIncompatible.getFunction(args); // Expect: expectedException.expect(KsqlException.class); - expectedException.expectMessage(is("Return type DECIMAL(2, 1) of UDF RETURNINCOMPATIBLE does not " - + "match the declared return type STRING.")); + expectedException + .expectMessage(is("Return type DECIMAL(2, 1) of UDF RETURNINCOMPATIBLE does not " + + "match the declared return type STRING.")); // When: function.getReturnType(args); @@ -345,8 +348,8 @@ public void shouldThrowOnReturnDecimalWithoutSchemaProvider() throws ClassNotFou @Test public void shouldPutJarUdfsInClassLoaderForJar() throws Exception { - final UdfFactory toString = FUNC_REG.getUdfFactory("tostring"); - final UdfFactory multiply = FUNC_REG.getUdfFactory("multiply"); + final UdfFactory toString = FUNC_REG.getUdfFactory(FunctionName.of("tostring")); + final UdfFactory multiply = FUNC_REG.getUdfFactory(FunctionName.of("multiply")); final Kudf toStringUdf = toString.getFunction(ImmutableList.of(SqlTypes.STRING)) .newInstance(ksqlConfig); @@ -376,18 +379,18 @@ public void shouldAllowClassesWithSameFQCNInDifferentUDFJars() throws Exception PARENT_CLASS_LOADER, value -> false, Optional.empty(), - true) - ; + true); udfLoader.load(); - final UdfFactory multiply = functionRegistry.getUdfFactory("multiply"); - final UdfFactory multiply2 = functionRegistry.getUdfFactory("multiply2"); + final UdfFactory multiply = functionRegistry.getUdfFactory(FunctionName.of("multiply")); + final UdfFactory multiply2 = 
functionRegistry.getUdfFactory(FunctionName.of("multiply2")); final Kudf multiplyUdf = multiply.getFunction(Arrays.asList(SqlTypes.INTEGER, SqlTypes.INTEGER)) .newInstance(ksqlConfig); - final Kudf multiply2Udf = multiply2.getFunction(Arrays.asList(SqlTypes.INTEGER, SqlTypes.INTEGER)) + final Kudf multiply2Udf = multiply2 + .getFunction(Arrays.asList(SqlTypes.INTEGER, SqlTypes.INTEGER)) .newInstance(ksqlConfig); assertThat(multiplyUdf.evaluate(2, 2), equalTo(4L)); @@ -396,19 +399,19 @@ public void shouldAllowClassesWithSameFQCNInDifferentUDFJars() throws Exception @Test public void shouldCreateUdfFactoryWithJarPathWhenExternal() { - final UdfFactory tostring = FUNC_REG.getUdfFactory("tostring"); + final UdfFactory tostring = FUNC_REG.getUdfFactory(FunctionName.of("tostring")); assertThat(tostring.getMetadata().getPath(), equalTo("src/test/resources/udf-example.jar")); } @Test public void shouldCreateUdfFactoryWithInternalPathWhenInternal() { - final UdfFactory substring = FUNC_REG.getUdfFactory("substring"); + final UdfFactory substring = FUNC_REG.getUdfFactory(FunctionName.of("substring")); assertThat(substring.getMetadata().getPath(), equalTo(KsqlScalarFunction.INTERNAL_PATH)); } @Test public void shouldSupportUdfParameterAnnotation() { - final UdfFactory substring = FUNC_REG.getUdfFactory("somefunction"); + final UdfFactory substring = FUNC_REG.getUdfFactory(FunctionName.of("somefunction")); final KsqlScalarFunction function = substring.getFunction( ImmutableList.of( SqlTypes.STRING, @@ -429,7 +432,7 @@ public void shouldSupportUdfParameterAnnotation() { @Test public void shouldPutKsqlFunctionsInParentClassLoader() throws Exception { - final UdfFactory substring = FUNC_REG.getUdfFactory("substring"); + final UdfFactory substring = FUNC_REG.getUdfFactory(FunctionName.of("substring")); final Kudf kudf = substring.getFunction( Arrays.asList(SqlTypes.STRING, SqlTypes.INTEGER)) .newInstance(ksqlConfig); @@ -439,14 +442,14 @@ public void shouldPutKsqlFunctionsInParentClassLoader() throws Exception { @Test public void shouldLoadUdfsInKSQLIfLoadCustomerUdfsFalse() { // udf in ksql-engine will throw if not found - FUNC_REG_WITHOUT_CUSTOM.getUdfFactory("substring"); + FUNC_REG_WITHOUT_CUSTOM.getUdfFactory(FunctionName.of("substring")); } @Test public void shouldNotLoadCustomUDfsIfLoadCustomUdfsFalse() { // udf in udf-example.jar try { - FUNC_REG_WITHOUT_CUSTOM.getUdfFactory("tostring"); + FUNC_REG_WITHOUT_CUSTOM.getUdfFactory(FunctionName.of("tostring")); fail("Should have thrown as function doesn't exist"); } catch (final KsqlException e) { // pass @@ -470,7 +473,7 @@ public void shouldNotLoadInternalUdfs() { expectedException.expectMessage(is("Can't find any functions with the name 'substring'")); // When: - functionRegistry.getUdfFactory("substring"); + functionRegistry.getUdfFactory(FunctionName.of("substring")); } @Test @@ -490,7 +493,7 @@ public void shouldLoadSomeFunction() { // When: udfLoader.loadUdfFromClass(UdfLoaderTest.SomeFunctionUdf.class); - final UdfFactory udfFactory = functionRegistry.getUdfFactory("somefunction"); + final UdfFactory udfFactory = functionRegistry.getUdfFactory(FunctionName.of("somefunction")); // Then: assertThat(udfFactory, not(nullValue())); @@ -502,7 +505,7 @@ public void shouldLoadSomeFunction() { @Test public void shouldCollectMetricsWhenMetricCollectionEnabled() { // Given: - final UdfFactory substring = FUNC_REG_WITH_METRICS.getUdfFactory("substring"); + final UdfFactory substring = FUNC_REG_WITH_METRICS.getUdfFactory(FunctionName.of("substring")); final 
KsqlScalarFunction function = substring .getFunction(Arrays.asList(SqlTypes.STRING, SqlTypes.INTEGER)); @@ -535,7 +538,7 @@ public void shouldUseConfigForExtDir() { = new KsqlConfig(configMap); UserFunctionLoader.newInstance(config, functionRegistry, "").load(); // will throw if it doesn't exist - functionRegistry.getUdfFactory("tostring"); + functionRegistry.getUdfFactory(FunctionName.of("tostring")); } @Test @@ -558,7 +561,7 @@ public void shouldConfigureConfigurableUdfsOnInstantiation() { KSQL_FUNCTIONS_PROPERTY_PREFIX + "_global_.expected-param", "expected-value" )); - final KsqlScalarFunction udf = FUNC_REG.getUdfFactory("ConfigurableUdf") + final KsqlScalarFunction udf = FUNC_REG.getUdfFactory(FunctionName.of("ConfigurableUdf")) .getFunction(ImmutableList.of(SqlTypes.INTEGER)); // When: @@ -577,7 +580,7 @@ public void shouldConfigureConfigurableUdfsOnInstantiation() { public void shouldEnsureFunctionReturnTypeIsDeepOptional() { final List args = Collections.singletonList(SqlTypes.STRING); final KsqlScalarFunction complexFunction = FUNC_REG - .getUdfFactory("ComplexFunction") + .getUdfFactory(FunctionName.of("ComplexFunction")) .getFunction(args); assertThat(complexFunction.getReturnType(args), is( diff --git a/ksql-engine/src/test/java/io/confluent/ksql/function/UdtfLoaderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/function/UdtfLoaderTest.java index c5c3dd063dd9..9eab660b13f4 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/function/UdtfLoaderTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/function/UdtfLoaderTest.java @@ -23,6 +23,7 @@ import io.confluent.ksql.function.udtf.Udtf; import io.confluent.ksql.function.udtf.UdtfDescription; import io.confluent.ksql.metastore.TypeRegistry; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.schema.ksql.SqlTypeParser; import io.confluent.ksql.schema.ksql.types.SqlType; import io.confluent.ksql.schema.ksql.types.SqlTypes; @@ -63,7 +64,8 @@ public void shouldLoadSimpleParams() { ); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.STRING)); @@ -84,7 +86,8 @@ public void shouldLoadParameterizedListParams() { ); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.STRING)); @@ -105,7 +108,8 @@ public void shouldLoadParameterizedMapParams() { ); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.STRING)); @@ -118,7 +122,8 @@ public void shouldLoadListIntReturn() { final List args = ImmutableList.of(SqlTypes.INTEGER); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.INTEGER)); @@ -131,7 +136,8 @@ public void shouldLoadListLongReturn() { final List args = ImmutableList.of(SqlTypes.BIGINT); // When: - final KsqlTableFunction function = 
FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.BIGINT)); @@ -144,7 +150,8 @@ public void shouldLoadListDoubleReturn() { final List args = ImmutableList.of(SqlTypes.DOUBLE); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.DOUBLE)); @@ -157,7 +164,8 @@ public void shouldLoadListBooleanReturn() { final List args = ImmutableList.of(SqlTypes.BOOLEAN); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.BOOLEAN)); @@ -170,7 +178,8 @@ public void shouldLoadListStringReturn() { final List args = ImmutableList.of(SqlTypes.STRING); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.STRING)); @@ -183,7 +192,8 @@ public void shouldLoadListBigDecimalReturnWithSchemaProvider() { final List args = ImmutableList.of(DECIMAL_SCHEMA); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(SqlTypes.decimal(30, 10))); @@ -196,7 +206,8 @@ public void shouldLoadListStructReturnWithSchemaAnnotation() { final List args = ImmutableList.of(STRUCT_SCHEMA); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(STRUCT_SCHEMA)); @@ -209,7 +220,8 @@ public void shouldLoadVarArgsMethod() { final List args = ImmutableList.of(STRUCT_SCHEMA); // When: - final KsqlTableFunction function = FUNC_REG.getTableFunction("test_udtf", args); + final KsqlTableFunction function = FUNC_REG + .getTableFunction(FunctionName.of("test_udtf"), args); // Then: assertThat(function.getReturnType(args), equalTo(STRUCT_SCHEMA)); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java index 3e79f8a48996..d90abb9e2ca5 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/CodeGenRunner.java @@ -163,7 +163,7 @@ public Void visitFunctionCall(final FunctionCall node, final Void context) { argumentTypes.add(expressionTypeManager.getExpressionSqlType(argExpr)); } - final UdfFactory holder = functionRegistry.getUdfFactory(functionName.name()); + final UdfFactory holder = functionRegistry.getUdfFactory(functionName); final KsqlScalarFunction function = holder.getFunction(argumentTypes); spec.addFunction( function.name(), diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java index b8e8515994c0..c445ac22cb79 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitor.java @@ -346,7 +346,7 @@ public Pair visitFunctionCall(final FunctionCall node, final Vo } private SqlType getFunctionReturnSchema(final FunctionCall node) { - final UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName().name()); + final UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName()); final List argumentSchemas = node.getArguments().stream() .map(expressionTypeManager::getExpressionSqlType) .collect(Collectors.toList()); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdafUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdafUtil.java index 412f691baea2..acbeb3d77a0e 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdafUtil.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdafUtil.java @@ -65,7 +65,7 @@ private UdafUtil() { createAggregateFunctionInitArgs(valueColumn.index(), functionCall); return functionRegistry.getAggregateFunction( - functionCall.getName().name(), + functionCall.getName(), argumentType, aggregateFunctionInitArguments ); diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdtfUtil.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdtfUtil.java index 95120fc6dcef..68e8cfb5280a 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdtfUtil.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/function/UdtfUtil.java @@ -47,7 +47,7 @@ public static KsqlTableFunction resolveTableFunction( .collect(Collectors.toList()); return functionRegistry.getTableFunction( - functionCall.getName().name(), + functionCall.getName(), argTypes ); } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java index 029f25773842..d518cad2d81d 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/util/ExpressionTypeManager.java @@ -352,7 +352,7 @@ public Void visitFunctionCall( final FunctionCall node, final ExpressionTypeContext expressionTypeContext ) { - if (functionRegistry.isAggregate(node.getName().name())) { + if (functionRegistry.isAggregate(node.getName())) { final SqlType schema = node.getArguments().isEmpty() ? FunctionRegistry.DEFAULT_FUNCTION_ARG_SCHEMA : getExpressionSqlType(node.getArguments().get(0)); @@ -361,26 +361,26 @@ public Void visitFunctionCall( UdafUtil.createAggregateFunctionInitArgs(0, node); final KsqlAggregateFunction aggFunc = functionRegistry - .getAggregateFunction(node.getName().name(), schema, args); + .getAggregateFunction(node.getName(), schema, args); expressionTypeContext.setSqlType(aggFunc.returnType()); return null; } - if (functionRegistry.isTableFunction(node.getName().name())) { + if (functionRegistry.isTableFunction(node.getName())) { final List argumentTypes = node.getArguments().isEmpty() ? 
ImmutableList.of(FunctionRegistry.DEFAULT_FUNCTION_ARG_SCHEMA) : node.getArguments().stream().map(ExpressionTypeManager.this::getExpressionSqlType) .collect(Collectors.toList()); final KsqlTableFunction tableFunction = functionRegistry - .getTableFunction(node.getName().name(), argumentTypes); + .getTableFunction(node.getName(), argumentTypes); expressionTypeContext.setSqlType(tableFunction.getReturnType(argumentTypes)); return null; } - final UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName().name()); + final UdfFactory udfFactory = functionRegistry.getUdfFactory(node.getName()); final List argTypes = new ArrayList<>(); for (final Expression expression : node.getArguments()) { diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java index c7c709fd5a74..cf219302e395 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/codegen/SqlToJavaVisitorTest.java @@ -808,8 +808,8 @@ public void shouldThrowOnTimestampLiteral() { private void givenUdf( final String name, final UdfFactory factory, final KsqlScalarFunction function ) { - when(functionRegistry.isAggregate(name)).thenReturn(false); - when(functionRegistry.getUdfFactory(name)).thenReturn(factory); + when(functionRegistry.isAggregate(FunctionName.of(name))).thenReturn(false); + when(functionRegistry.getUdfFactory(FunctionName.of(name))).thenReturn(factory); when(factory.getFunction(anyList())).thenReturn(function); when(function.getReturnType(anyList())).thenReturn(SqlTypes.STRING); final UdfMetadata metadata = mock(UdfMetadata.class); diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdafUtilTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdafUtilTest.java index e3b8622b9e42..d17ecbf91ae3 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdafUtilTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/function/UdafUtilTest.java @@ -81,7 +81,7 @@ public void shouldGetAggregateWithCorrectName() { UdafUtil.resolveAggregateFunction(functionRegistry, FUNCTION_CALL, SCHEMA); // Then: - verify(functionRegistry).getAggregateFunction(eq("AGG"), any(), any()); + verify(functionRegistry).getAggregateFunction(eq(FunctionName.of("AGG")), any(), any()); } @Test diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicateTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicateTest.java index 3989add8d7b6..8a96733d0f2f 100644 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicateTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/transform/sqlpredicate/SqlPredicateTest.java @@ -108,7 +108,7 @@ public class SqlPredicateTest { @Before public void init() { - when(functionRegistry.getUdfFactory("LEN")).thenReturn(lenFactory); + when(functionRegistry.getUdfFactory(FunctionName.of("LEN"))).thenReturn(lenFactory); when(lenFactory.getFunction(any())).thenReturn(LEN_FUNCTION); } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java index 37b41aab9cc3..9c07a3688c77 100644 --- 
a/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/util/ExpressionTypeManagerTest.java @@ -26,8 +26,8 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -108,7 +108,7 @@ public void init() { final UdfMetadata metadata = mock(UdfMetadata.class); when(internalFactory.getMetadata()).thenReturn(metadata); - when(functionRegistry.getUdfFactory(anyString())) + when(functionRegistry.getUdfFactory(any())) .thenReturn(internalFactory); } @@ -579,8 +579,8 @@ private void givenUdfWithNameAndReturnType(final String name, final SqlType retu private void givenUdfWithNameAndReturnType( final String name, final SqlType returnType, final UdfFactory factory, final KsqlScalarFunction function ) { - when(functionRegistry.isAggregate(name)).thenReturn(false); - when(functionRegistry.getUdfFactory(name)).thenReturn(factory); + when(functionRegistry.isAggregate(FunctionName.of(name))).thenReturn(false); + when(functionRegistry.getUdfFactory(FunctionName.of(name))).thenReturn(factory); when(factory.getFunction(anyList())).thenReturn(function); when(function.getReturnType(anyList())).thenReturn(returnType); final UdfMetadata metadata = mock(UdfMetadata.class); diff --git a/ksql-metastore/src/main/java/io/confluent/ksql/metastore/MetaStoreImpl.java b/ksql-metastore/src/main/java/io/confluent/ksql/metastore/MetaStoreImpl.java index c788eee9fb8f..585c0ca9fedd 100644 --- a/ksql-metastore/src/main/java/io/confluent/ksql/metastore/MetaStoreImpl.java +++ b/ksql-metastore/src/main/java/io/confluent/ksql/metastore/MetaStoreImpl.java @@ -23,6 +23,7 @@ import io.confluent.ksql.function.TableFunctionFactory; import io.confluent.ksql.function.UdfFactory; import io.confluent.ksql.metastore.model.DataSource; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.types.SqlType; @@ -203,20 +204,20 @@ public MutableMetaStore copy() { } @Override - public UdfFactory getUdfFactory(final String functionName) { + public UdfFactory getUdfFactory(final FunctionName functionName) { return functionRegistry.getUdfFactory(functionName); } - public boolean isAggregate(final String functionName) { + public boolean isAggregate(final FunctionName functionName) { return functionRegistry.isAggregate(functionName); } - public boolean isTableFunction(final String functionName) { + public boolean isTableFunction(final FunctionName functionName) { return functionRegistry.isTableFunction(functionName); } public KsqlAggregateFunction getAggregateFunction( - final String functionName, + final FunctionName functionName, final SqlType argumentType, final AggregateFunctionInitArguments initArgs ) { @@ -224,7 +225,7 @@ public boolean isTableFunction(final String functionName) { } public KsqlTableFunction getTableFunction( - final String functionName, + final FunctionName functionName, final List argumentTypes ) { return functionRegistry.getTableFunction(functionName, argumentTypes); @@ -236,12 +237,12 @@ public List listFunctions() { } @Override - public 
AggregateFunctionFactory getAggregateFactory(final String functionName) { + public AggregateFunctionFactory getAggregateFactory(final FunctionName functionName) { return functionRegistry.getAggregateFactory(functionName); } @Override - public TableFunctionFactory getTableFunctionFactory(final String functionName) { + public TableFunctionFactory getTableFunctionFactory(final FunctionName functionName) { return functionRegistry.getTableFunctionFactory(functionName); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeFunctionExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeFunctionExecutor.java index 906684e5080b..b2ea5cdbf0ee 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeFunctionExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeFunctionExecutor.java @@ -24,6 +24,7 @@ import io.confluent.ksql.function.types.ArrayType; import io.confluent.ksql.function.types.ParamType; import io.confluent.ksql.function.udf.UdfMetadata; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.parser.tree.DescribeFunction; import io.confluent.ksql.rest.entity.ArgumentInfo; import io.confluent.ksql.rest.entity.FunctionDescriptionList; @@ -53,25 +54,28 @@ public static Optional execute( final ServiceContext serviceContext ) { final DescribeFunction describeFunction = statement.getStatement(); - final String functionName = describeFunction.getFunctionName(); + final FunctionName functionName = FunctionName.of(describeFunction.getFunctionName()); if (executionContext.getMetaStore().isAggregate(functionName)) { return Optional.of( - describeAggregateFunction(executionContext, functionName, statement.getStatementText())); + describeAggregateFunction(executionContext, functionName, + statement.getStatementText())); } if (executionContext.getMetaStore().isTableFunction(functionName)) { return Optional.of( - describeTableFunction(executionContext, functionName, statement.getStatementText())); + describeTableFunction(executionContext, functionName, + statement.getStatementText())); } return Optional.of( - describeNonAggregateFunction(executionContext, functionName, statement.getStatementText())); + describeNonAggregateFunction(executionContext, functionName, + statement.getStatementText())); } private static FunctionDescriptionList describeAggregateFunction( final KsqlExecutionContext ksqlEngine, - final String functionName, + final FunctionName functionName, final String statementText ) { final AggregateFunctionFactory aggregateFactory @@ -89,7 +93,7 @@ private static FunctionDescriptionList describeAggregateFunction( private static FunctionDescriptionList describeTableFunction( final KsqlExecutionContext executionContext, - final String functionName, + final FunctionName functionName, final String statementText ) { final TableFunctionFactory tableFunctionFactory = executionContext.getMetaStore() @@ -115,7 +119,7 @@ private static FunctionDescriptionList describeTableFunction( private static FunctionDescriptionList describeNonAggregateFunction( final KsqlExecutionContext executionContext, - final String functionName, + final FunctionName functionName, final String statementText ) { final UdfFactory udfFactory = executionContext.getMetaStore().getUdfFactory(functionName); diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java 
b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java index 5a02a3782f57..9d16abaf60aa 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/AggregateParamsFactoryTest.java @@ -94,23 +94,25 @@ public class AggregateParamsFactoryTest { @Before @SuppressWarnings("unchecked") public void init() { - when(functionRegistry.getAggregateFunction(same(AGG0.getName().name()), any(), any())).thenReturn(agg0); + when(functionRegistry.getAggregateFunction(same(AGG0.getName()), any(), any())) + .thenReturn(agg0); when(agg0.getInitialValueSupplier()).thenReturn(() -> INITIAL_VALUE0); when(agg0.name()).thenReturn(AGG0.getName()); when(agg0.returnType()).thenReturn(SqlTypes.INTEGER); when(agg0.getAggregateType()).thenReturn(SqlTypes.BIGINT); - when(functionRegistry.getAggregateFunction(same(AGG1.getName().name()), any(), any())).thenReturn(agg1); + when(functionRegistry.getAggregateFunction(same(AGG1.getName()), any(), any())) + .thenReturn(agg1); when(agg1.getInitialValueSupplier()).thenReturn(() -> INITIAL_VALUE1); when(agg1.name()).thenReturn(AGG1.getName()); when(agg1.returnType()).thenReturn(SqlTypes.STRING); when(agg1.getAggregateType()).thenReturn(SqlTypes.DOUBLE); - when(functionRegistry.getAggregateFunction(same(TABLE_AGG.getName().name()), any(), any())) + when(functionRegistry.getAggregateFunction(same(TABLE_AGG.getName()), any(), any())) .thenReturn(tableAgg); when(tableAgg.getInitialValueSupplier()).thenReturn(() -> INITIAL_VALUE0); when(tableAgg.returnType()).thenReturn(SqlTypes.INTEGER); when(tableAgg.getAggregateType()).thenReturn(SqlTypes.BIGINT); when(tableAgg.name()).thenReturn(TABLE_AGG.getName()); - when(functionRegistry.getAggregateFunction(same(WINDOW_START.getName().name()), any(), any())) + when(functionRegistry.getAggregateFunction(same(WINDOW_START.getName()), any(), any())) .thenReturn(windowStart); when(windowStart.getInitialValueSupplier()).thenReturn(() -> INITIAL_VALUE0); when(windowStart.name()).thenReturn(WINDOW_START.getName()); diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java index 7c3c4c64980d..954f1754e007 100644 --- a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java @@ -450,15 +450,16 @@ public void shouldResolveSchemaForWindowedTableSource() { private void givenTableFunction(final String name, final SqlType returnType) { final KsqlTableFunction tableFunction = mock(KsqlTableFunction.class); - when(functionRegistry.isTableFunction(name)).thenReturn(true); - when(functionRegistry.getTableFunction(eq(name), any())).thenReturn(tableFunction); + when(functionRegistry.isTableFunction(FunctionName.of(name))).thenReturn(true); + when(functionRegistry.getTableFunction(eq(FunctionName.of(name)), any())) + .thenReturn(tableFunction); when(tableFunction.getReturnType(any())).thenReturn(returnType); } @SuppressWarnings("unchecked") private void givenAggregateFunction(final String name, final SqlType returnType) { final KsqlAggregateFunction aggregateFunction = mock(KsqlAggregateFunction.class); - when(functionRegistry.getAggregateFunction(eq(name), any(), any())) + when(functionRegistry.getAggregateFunction(eq(FunctionName.of(name)), 
any(), any())) .thenReturn(aggregateFunction); when(aggregateFunction.name()).thenReturn(FunctionName.of(name)); when(aggregateFunction.getAggregateType()).thenReturn(SqlTypes.INTEGER); From 0965afab26ae0f55ffa1b12ac33e52d315de6e03 Mon Sep 17 00:00:00 2001 From: Rohan Date: Mon, 6 Jan 2020 14:37:08 -0800 Subject: [PATCH 072/123] chore: add back expected topology tests (#4207) * chore: add back expected topology tests Temporarily add back expected topology tests, until we make the full switchover to the new query plan based tests --- .../ksql/util/PersistentQueryMetadata.java | 4 + .../ksql/test/tools/TestExecutorUtil.java | 18 +- .../ksql/test/tools/TopologyAndConfigs.java | 7 +- .../ksql/test/QueryTranslationTest.java | 11 +- .../ksql/test/TopologyFileGenerator.java | 253 +++++++++++ .../ksql/test/TopologyFileGeneratorTest.java | 38 ++ .../ksql/test/TopologyFileRewriter.java | 424 ++++++++++++++++++ .../loader/ExpectedTopologiesTestLoader.java | 337 ++++++++++++++ .../ksql/test/planned/PlannedTestLoader.java | 2 +- 9 files changed, 1086 insertions(+), 8 deletions(-) create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGenerator.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGeneratorTest.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java index fb38d2a3a3f0..2dea992ffafd 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java @@ -127,6 +127,10 @@ public Map getSchemasDescription() { return schemas.getSchemasDescription(); } + public String getSchemasString() { + return schemas.toString(); + } + public PhysicalSchema getPhysicalSchema() { return resultSchema; } diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java index 935ce292ef84..cf1fd1d7846a 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutorUtil.java @@ -135,8 +135,9 @@ public static Iterable planTestCase( final StubKafkaService stubKafkaService ) { initializeTopics(testCase, engine.getServiceContext(), stubKafkaService); - if (testCase.getExpectedTopology().isPresent()) { - return testCase.getExpectedTopology().get().getPlan() + if (testCase.getExpectedTopology().isPresent() + && testCase.getExpectedTopology().get().getPlan().isPresent()) { + return testCase.getExpectedTopology().get().getPlan().get() .stream() .map(p -> ConfiguredKsqlPlan.of(p, testCase.properties(), ksqlConfig)) .collect(Collectors.toList()); @@ -184,6 +185,19 @@ private static Optional getAvroSchema( return Optional.empty(); } + public static List buildQueries( + final TestCase testCase, + final ServiceContext serviceContext, + final KsqlEngine ksqlEngine, + final KsqlConfig ksqlConfig, + final StubKafkaService stubKafkaService + ) { + return doBuildQueries(testCase, serviceContext, ksqlEngine, ksqlConfig, stubKafkaService) + .stream() + 
.map(PersistentQueryAndSources::getPersistentQueryMetadata) + .collect(Collectors.toList()); + } + private static List doBuildQueries( final TestCase testCase, final ServiceContext serviceContext, diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopologyAndConfigs.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopologyAndConfigs.java index 08cd2c4b7723..1e21bb1d4339 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopologyAndConfigs.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TopologyAndConfigs.java @@ -19,16 +19,17 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; public class TopologyAndConfigs { - private final List plan; + private final Optional> plan; private final String topology; private final Map schemas; private final Map configs; public TopologyAndConfigs( - final List plan, + final Optional> plan, final String topology, final Map schemas, final Map configs @@ -51,7 +52,7 @@ public Map getConfigs() { return configs; } - public List getPlan() { + public Optional> getPlan() { return plan; } } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/QueryTranslationTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/QueryTranslationTest.java index b7c75706a061..82bb592745a0 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/QueryTranslationTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/QueryTranslationTest.java @@ -20,6 +20,8 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableList; +import com.google.common.collect.Streams; +import io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader; import io.confluent.ksql.test.loader.JsonTestLoader; import io.confluent.ksql.test.loader.TestFile; import io.confluent.ksql.test.model.TestCaseNode; @@ -50,8 +52,13 @@ public class QueryTranslationTest { @Parameterized.Parameters(name = "{0}") public static Collection data() { - return PlannedTestLoader.of(testFileLoader()) - .load() + return + Streams.concat( + PlannedTestLoader.of(testFileLoader()) + .load(), + ExpectedTopologiesTestLoader.of(testFileLoader(), "expected_topology/") + .load() + ) .map(testCase -> new Object[]{testCase.getName(), testCase}) .collect(Collectors.toCollection(ArrayList::new)); } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGenerator.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGenerator.java new file mode 100644 index 000000000000..b37ea23e412d --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGenerator.java @@ -0,0 +1,253 @@ +/* + * Copyright 2018 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.test; + +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; +import io.confluent.ksql.engine.KsqlEngine; +import io.confluent.ksql.engine.KsqlEngineTestUtil; +import io.confluent.ksql.function.TestFunctionRegistry; +import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.metastore.MetaStoreImpl; +import io.confluent.ksql.metastore.MutableMetaStore; +import io.confluent.ksql.services.ServiceContext; +import io.confluent.ksql.services.TestServiceContext; +import io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader; +import io.confluent.ksql.test.serde.SerdeSupplier; +import io.confluent.ksql.test.tools.TestCase; +import io.confluent.ksql.test.tools.TestExecutor; +import io.confluent.ksql.test.tools.TestExecutorUtil; +import io.confluent.ksql.test.tools.Topic; +import io.confluent.ksql.test.tools.stubs.StubKafkaService; +import io.confluent.ksql.test.utils.SerdeUtil; +import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.PersistentQueryMetadata; +import io.confluent.ksql.util.QueryMetadata; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import org.junit.Ignore; +import org.junit.Test; +import org.w3c.dom.Document; +import org.w3c.dom.NodeList; + +/** + * This class is used to generate the topology files to ensure safe + * upgrades of KSQL across releases. + * + * There are some manual steps in using this class but this should be ok as + * we only need to create new topology files at the end of a release cycle. + * + * The steps to generate topology files: + * + * 1. Run this class by running the test {@link #manuallyGenerateTopologies} BEFORE you update the + * pom with a new version. + * + * 2. This class will generate expected topology files + * for the version specified in the pom file. The program writes the files to the + * ksql-functional-tests/src/test/resources/expected_topology/VERSION_NUM directory, where + * VERSION_NUM is the version defined in the ksql-functional-tests/pom.xml <parent><version> element. + * + */ +@Ignore +public final class TopologyFileGenerator { + + /** + * This test exists only to be able to generate topologies as part of the release process. + * It can be run manually from the IDE. + * It is deliberately excluded from the test suite. + */ + @Test + public void manuallyGenerateTopologies() throws Exception { + generateTopologies(); + } + + private static final StubKafkaService stubKafkaService = StubKafkaService.create(); + private static final String BASE_DIRECTORY = "src/test/resources/expected_topology/"; + + static Path findBaseDir() { + Path path = Paths.get("./ksql-functional-tests"); + if (Files.exists(path)) { + return path.resolve(BASE_DIRECTORY); + } + path = Paths.get("../ksql-functional-tests"); + if (Files.exists(path)) { + return path.resolve(BASE_DIRECTORY); + } + throw new RuntimeException("Failed to determine location of expected topologies directory. 
" + + "App should be run with current directory set to either the root of the repo or the " + + "root of the ksql-functional-tests module"); + } + + private static void generateTopologies() throws Exception { + generateTopologies(findBaseDir()); + } + + static void generateTopologies(final Path base) throws Exception { + final String formattedVersion = "0_6_0-pre"; + final Path generatedTopologyPath = base.resolve(formattedVersion); + + System.out.println(String.format("Starting to write topology files to %s", generatedTopologyPath)); + + if (!generatedTopologyPath.toFile().exists()) { + Files.createDirectory(generatedTopologyPath); + } else { + System.out.println("Warning: Directory already exists, " + + "this will re-generate topology files. dir: " + generatedTopologyPath); + } + + writeExpectedTopologyFiles(generatedTopologyPath, getTestCases()); + + System.out + .println(String.format("Done writing topology files to %s", generatedTopologyPath)); + } + + static List getTestCases() { + return QueryTranslationTest.findTestCases() + .filter(q -> !q.expectedException().isPresent()) + .collect(Collectors.toList()); + } + + private static String getFormattedVersionFromPomFile() throws Exception { + final File pomFile = new File("pom.xml"); + final DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newInstance(); + final DocumentBuilder documentBuilder = documentBuilderFactory.newDocumentBuilder(); + final Document pomDoc = documentBuilder.parse(pomFile); + + final NodeList versionNodeList = pomDoc.getElementsByTagName("version"); + final String versionName = versionNodeList.item(0).getTextContent(); + + return versionName.replaceAll("-SNAPSHOT?", "").replaceAll("\\.", "_"); + } + + private static void writeExpectedTopologyFiles( + final Path topologyDir, + final List testCases + ) { + testCases.forEach(testCase -> writeExpectedToplogyFile(topologyDir, testCase)); + } + + private static void writeExpectedToplogyFile(final Path topologyDir, final TestCase testCase) { + try { + final Path topologyFile = buildExpectedTopologyPath(topologyDir, testCase); + + final String topologyContent = buildExpectedTopologyContent(testCase, Optional.empty()); + + Files.write(topologyFile, + topologyContent.getBytes(StandardCharsets.UTF_8), + StandardOpenOption.CREATE, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING + ); + } catch (final IOException e) { + throw new RuntimeException(e); + } + } + + static Path buildExpectedTopologyPath(final Path topologyDir, final TestCase testCase) { + return ExpectedTopologiesTestLoader.buildExpectedTopologyPath( + testCase.getName(), + topologyDir + ); + } + + static String buildExpectedTopologyContent( + final TestCase testCase, + final Optional> persistedConfigs + ) { + final KsqlConfig baseConfigs = new KsqlConfig(TestExecutor.baseConfig()) + .cloneWithPropertyOverwrite(testCase.properties()); + + final KsqlConfig ksqlConfig = persistedConfigs + .map(baseConfigs::overrideBreakingConfigsWithOriginalValues) + .orElse(baseConfigs); + + try (final ServiceContext serviceContext = getServiceContext(); + final KsqlEngine ksqlEngine = getKsqlEngine(serviceContext) + ) { + final PersistentQueryMetadata queryMetadata = + buildQuery(testCase, serviceContext, ksqlEngine, ksqlConfig); + + final Map configsToPersist + = new HashMap<>(ksqlConfig.getAllConfigPropsWithSecretsObfuscated()); + + // Ignore the KStreams state directory as its different every time: + configsToPersist.remove("ksql.streams.state.dir"); + + return 
ExpectedTopologiesTestLoader.buildExpectedTopologyContent( + queryMetadata, + configsToPersist + ); + } catch (final Exception e) { + throw new RuntimeException(e); + } + } + + private static ServiceContext getServiceContext() { + final SchemaRegistryClient schemaRegistryClient = new MockSchemaRegistryClient(); + return TestServiceContext.create(() -> schemaRegistryClient); + } + + private static KsqlEngine getKsqlEngine(final ServiceContext serviceContext) { + final MutableMetaStore metaStore = new MetaStoreImpl(TestFunctionRegistry.INSTANCE.get()); + return KsqlEngineTestUtil.createKsqlEngine(serviceContext, metaStore); + } + + private static PersistentQueryMetadata buildQuery( + final TestCase testCase, + final ServiceContext serviceContext, + final KsqlEngine ksqlEngine, + final KsqlConfig ksqlConfig + ) { + final List queries = TestExecutorUtil + .buildQueries(testCase, serviceContext, ksqlEngine, ksqlConfig, stubKafkaService); + + final MetaStore metaStore = ksqlEngine.getMetaStore(); + for (QueryMetadata queryMetadata: queries) { + final PersistentQueryMetadata persistentQueryMetadata + = (PersistentQueryMetadata) queryMetadata; + final String sinkKafkaTopicName = metaStore + .getSource(persistentQueryMetadata.getSinkName()) + .getKafkaTopicName(); + + final Topic sinkTopic = new Topic( + sinkKafkaTopicName, + 1, + 1, + Optional.empty() + ); + + stubKafkaService.createTopic(sinkTopic); + } + + assertThat("test did not generate any queries.", queries.isEmpty(), is(false)); + return queries.get(queries.size() - 1); + } +} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGeneratorTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGeneratorTest.java new file mode 100644 index 000000000000..059323ad74c2 --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileGeneratorTest.java @@ -0,0 +1,38 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.test; + +import org.apache.kafka.test.IntegrationTest; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TemporaryFolder; + +/** + * Do not combine this with `TopologyFileGenerator` as mvn will ignore the tests as the class + * does not end in `Test`. 
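+ * + * As a hedged illustration only (the module name comes from the file paths in this patch, and + * local surefire profiles may include or exclude tests in the IntegrationTest category), one way + * to run just this check from the repo root would be: + * + *   mvn test -pl ksql-functional-tests -Dtest=TopologyFileGeneratorTest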
+ */ +@Category(IntegrationTest.class) +public final class TopologyFileGeneratorTest { + + @ClassRule + public static final TemporaryFolder TMP = new TemporaryFolder(); + + @Test + public void shouldGenerateTopologies() throws Exception { + TopologyFileGenerator.generateTopologies(TMP.newFolder().toPath()); + } +} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java new file mode 100644 index 000000000000..dadb063c7cc7 --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/TopologyFileRewriter.java @@ -0,0 +1,424 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.test; + +import static io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader.CONFIG_END_MARKER; +import static io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader.SCHEMAS_END_MARKER; +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import com.google.common.collect.ImmutableSet; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.confluent.ksql.test.loader.ExpectedTopologiesTestLoader; +import io.confluent.ksql.test.tools.TestCase; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.Ignore; +import org.junit.Test; + +/** + * Utility to help re-write the expected topology files used by {@link QueryTranslationTest}. + * + * Occasionally, things change in the way KStreams generates topologies and we need to update the + * previously saved topologies to bring them back in line. Obviously, care should be taken when + * doing so to ensure no backwards incompatible changes are being hidden by any changes made. + */ +@Ignore +public final class TopologyFileRewriter { + + /** + * Set {@code REWRITER} to an appropriate rewriter impl. + */ + private static final Rewriter REWRITER = new RewriteTopologyOnly(); + + /** + * Exclude some versions. 
Any version starting with one of these strings is excluded: + */ + private static final Set<String> EXCLUDE_VERSIONS = ImmutableSet.<String>builder() + //.add("5_0") + //.add("5_1") + .build(); + + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); + + public TopologyFileRewriter() { + } + + @Test + public void runMeToRewrite() throws Exception { + final Path baseDir = TopologyFileGenerator.findBaseDir(); + final List<TestCase> testCases = TopologyFileGenerator.getTestCases(); + + Files.list(baseDir) + .filter(Files::isDirectory) + .filter(TopologyFileRewriter::includedVersion) + .forEach(dir -> rewriteTopologyDirectory(dir, testCases)); + } + + private static boolean includedVersion(final Path path) { + + final String version = getVersion(path); + + return EXCLUDE_VERSIONS.stream() + .noneMatch(version::startsWith); + } + + @SuppressFBWarnings("NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE") + private static String getVersion(final Path versionDir) { + try { + final Path versionFile = versionDir + .resolve(ExpectedTopologiesTestLoader.TOPOLOGY_VERSION_FILE); + if (Files.exists(versionFile)) { + return new String(Files.readAllBytes(versionFile), UTF_8); + } + + return versionDir.getFileName().toString(); + } catch (final Exception e) { + throw new RuntimeException("Failed to determine version in " + versionDir, e); + } + } + + private static void rewriteTopologyDirectory( + final Path versionDir, + final List<TestCase> testCases + ) { + try { + System.out.println("Starting to rewrite topology files in " + versionDir); + + for (TestCase testCase : testCases) { + rewriteTopologyFile(versionDir, testCase); + } + + deleteOrphanedFiles(versionDir, testCases); + + System.out.println("Done rewriting topology files in " + versionDir); + } catch (final Exception e) { + throw new RuntimeException("Failed processing version dir: " + versionDir, e); + } + } + + private static void rewriteTopologyFile( + final Path topologyDir, + final TestCase testCase + ) { + final Path path = TopologyFileGenerator.buildExpectedTopologyPath(topologyDir, testCase); + if (!Files.exists(path)) { + System.err.println("WARNING: Missing topology file: " + path); + return; + } + + try { + final String rewritten = REWRITER.rewrite(testCase, path); + + Files.write(path, rewritten.getBytes(UTF_8)); + + System.out.println("Rewritten topology file: " + path); + } catch (final Exception e) { + throw new RuntimeException("Failed processing topology file: " + path, e); + } + } + + private static void deleteOrphanedFiles( + final Path versionDir, + final List<TestCase> testCases + ) throws IOException { + final Set<Path> paths = testCases.stream() + .map(testCase -> TopologyFileGenerator.buildExpectedTopologyPath(versionDir, testCase)) + .collect(Collectors.toSet()); + + Files.list(versionDir) + .filter(Files::isRegularFile) + .filter(path -> !path.endsWith(ExpectedTopologiesTestLoader.TOPOLOGY_VERSION_FILE)) + .filter(path -> !paths.contains(path)) + .forEach(TopologyFileRewriter::deleteOrphanedFile); + } + + private static void deleteOrphanedFile(final Path orphan) { + try { + System.out.println("WARNING: Deleting orphaned topology file: " + orphan); + Files.delete(orphan); + } catch (final Exception e) { + throw new RuntimeException("Failed to delete orphaned expected topology file", e); + } + } + + private static String grabContent( + final String contents, + final Optional<String> startMarker, + final Optional<String> endMarker + ) { + final int start = startMarker + .map(marker -> { + final int idx = contents.indexOf(marker + System.lineSeparator()); + return idx < 0 ? 
idx : idx + marker.length() + 1; + }) + .orElse(0); + + if (start < 0) { + throw new RuntimeException("Failed to find marker for start of section: " + startMarker); + } + + final int end = endMarker + .map(contents::indexOf) + .orElse(contents.length()); + + if (end < 0) { + throw new RuntimeException("Failed to find marker for end of section: " + endMarker); + } + + return contents.substring(start, end); + } + + private static Map<String, String> parseConfigs(final String configs) { + try { + final ObjectReader objectReader = OBJECT_MAPPER.readerFor(Map.class); + final Map<String, String> parsed = objectReader.readValue(configs); + + final Set<String> toRemove = parsed.entrySet().stream() + .filter(e -> e.getValue() == null) + .map(Entry::getKey) + .collect(Collectors.toSet()); + + parsed.remove("ksql.streams.state.dir"); + parsed.keySet().removeAll(toRemove); + return parsed; + } catch (final Exception e) { + throw new RuntimeException("Failed to parse configs: " + configs, e); + } + } + + private interface Rewriter { + + String rewrite(final TestCase testCase, final Path path) throws Exception; + } + + private interface StructuredRewriter extends Rewriter { + + default String rewrite(final TestCase testCase, final Path path) throws Exception { + final String contents = new String(Files.readAllBytes(path), UTF_8); + + final String newConfig = rewriteConfig( + testCase, + path, + grabContent(contents, Optional.empty(), Optional.of(CONFIG_END_MARKER)) + ) + + CONFIG_END_MARKER + + System.lineSeparator(); + + final boolean hasSchemas = contents.contains(SCHEMAS_END_MARKER); + + final String newSchemas = hasSchemas + ? rewriteSchemas( + testCase, + path, + grabContent(contents, Optional.of(CONFIG_END_MARKER), Optional.of(SCHEMAS_END_MARKER)) + ) + + SCHEMAS_END_MARKER + + System.lineSeparator() + : ""; + + final Optional<String> topologyStart = hasSchemas + ? 
Optional.of(SCHEMAS_END_MARKER) + : Optional.of(CONFIG_END_MARKER); + + final String newTopologies = rewriteTopologies( + testCase, + path, + grabContent(contents, topologyStart, Optional.empty()) + ); + + return newConfig + newSchemas + newTopologies; + } + + // Override the methods below as needed: + default String rewriteConfig( + final TestCase testCase, + final Path path, + final String configs + ) { + return configs; + } + + default String rewriteSchemas( + final TestCase testCase, + final Path path, + final String schemas + ) { + return schemas; + } + + default String rewriteTopologies( + final TestCase testCase, + final Path path, + final String topologies + ) { + return topologies; + } + } + + private static final class RegexRewriter implements StructuredRewriter { + + @Override + public String rewriteSchemas(final TestCase testCase, final Path path, final String schemas) { + + int start; + String result = schemas; + + while ((start = result.indexOf("optional<")) != -1) { + final int end = findCloseTagFor(result, start + "optional".length()); + + final String contents = result.substring(start + "optional<".length(), end); + + result = result.substring(0, start) + + contents + + result.substring(end + 1); + } + + return result + .replaceAll(",(\\S)", ", $1") + .replaceAll("\\n", " NOT NULL" + System.lineSeparator()) + .replaceAll("struct<", "STRUCT<") + .replaceAll("map<", "MAP<") + .replaceAll("array<", "ARRAY<") + .replaceAll("boolean", "BOOLEAN") + .replaceAll("int32", "INT") + .replaceAll("int64", "BIGINT") + .replaceAll("float64", "DOUBLE") + .replaceAll("string", "VARCHAR"); + } + + private static int findCloseTagFor(final String contents, final int startIdx) { + assert (contents.charAt(startIdx) == '<'); + + int depth = 1; + int idx = startIdx + 1; + + while (depth > 0 && idx < contents.length()) { + final char c = contents.charAt(idx++); + switch (c) { + case '<': + depth++; + break; + + case '>': + depth--; + break; + + default: + break; + } + } + + if (depth > 0) { + throw new RuntimeException("Reached end of file before finding close tag"); + } + + return idx - 1; + } + } + + private static final class CustomRewriter implements StructuredRewriter { + + @Override + public String rewriteSchemas(final TestCase testCase, final Path path, final String schemas) { + return Arrays.stream(schemas.split(System.lineSeparator())) + // Add any steps you need to rewrite the schemas here. + // There is generally no need to check such changes in. + .collect(Collectors.joining(System.lineSeparator(), "", System.lineSeparator())); + } + } + + /** + * Uses the standard topology generation code to rewrite expected topology, i.e. it updates + * the topology to match what the current code would output, taking into account any config. + */ + private static final class RewriteTopologyOnly implements StructuredRewriter { + + private Map<String, String> configs; + + @Override + public String rewriteConfig( + final TestCase testCase, + final Path path, + final String configs + ) { + this.configs = parseConfigs(configs); + return configs; + } + + @Override + public String rewriteTopologies( + final TestCase testCase, + final Path path, + final String existing + ) { + final String newContent = TopologyFileGenerator + .buildExpectedTopologyContent(testCase, Optional.of(configs)); + + final boolean hasSchemas = newContent.contains(SCHEMAS_END_MARKER); + + final Optional<String> topologyStart = hasSchemas + ? 
Optional.of(SCHEMAS_END_MARKER) + : Optional.of(CONFIG_END_MARKER); + + return grabContent(newContent, topologyStart, Optional.empty()); + } + } + + /** + * Uses the standard topology generation code to rewrite expected schemas, i.e. it updates + * the schemas to match what the current code would output, taking into account any config. + */ + private static final class RewriteSchemasOnly implements StructuredRewriter { + + private Map<String, String> configs; + + @Override + public String rewriteConfig( + final TestCase testCase, + final Path path, + final String configs + ) { + this.configs = parseConfigs(configs); + return configs; + } + + @Override + public String rewriteSchemas( + final TestCase testCase, + final Path path, + final String schemas + ) { + final String newContent = TopologyFileGenerator + .buildExpectedTopologyContent(testCase, Optional.of(configs)); + + return grabContent( + newContent, + Optional.of(CONFIG_END_MARKER), + Optional.of(SCHEMAS_END_MARKER) + ); + } + } +} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java new file mode 100644 index 000000000000..14b8e02ebba7 --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java @@ -0,0 +1,337 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.test.loader; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.google.common.collect.ImmutableMap; +import io.confluent.ksql.model.SemanticVersion; +import io.confluent.ksql.test.model.KsqlVersion; +import io.confluent.ksql.test.tools.TopologyAndConfigs; +import io.confluent.ksql.test.tools.VersionedTest; +import io.confluent.ksql.util.PersistentQueryMetadata; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.apache.commons.lang3.StringUtils; + +/** + * Loads the expected topology files for each test and creates a new test for each expected version + * and sets the expected topology of the test. 
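+ * + * For reference, each file this loader reads has three sections, delimited by the end markers + * below (see {@code readTopologyFile}): the persisted configs as JSON, then CONFIGS_END, then one + * NAME = SCHEMA line per schema, then SCHEMAS_END, then the topology description. A minimal + * sketch, with purely illustrative config, schema and node names: + * + *   {"ksql.persistent.prefix": "query_"} + *   CONFIGS_END + *   CSAS_OUTPUT_0.KsqlTopic.Source = STRUCT<ID BIGINT> NOT NULL + *   SCHEMAS_END + *   Topologies: + *     Sub-topology: 0 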
+ */ +public class ExpectedTopologiesTestLoader implements TestLoader { + + public static final String TOPOLOGY_VERSION_FILE = "__version"; + public static final String CONFIG_END_MARKER = "CONFIGS_END"; + public static final String SCHEMAS_END_MARKER = "SCHEMAS_END"; + + private static final Pattern TOPOLOGY_VERSION_PATTERN = Pattern.compile("(\\d+)_(\\d+)(_\\d+)?"); + private static final String TOPOLOGY_VERSIONS_DELIMITER = ","; + private static final String TOPOLOGY_VERSIONS_PROP = "topology.versions"; + private static final String TOPOLOGY_VERSION_LATEST = "latest-only"; + + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); + private static final KsqlVersion CURRENT_VERSION = KsqlVersion.current(); + private static final String INVALID_FILENAME_CHARS_PATTERN = "\\s|/|\\\\|:|\\*|\\?|\"|<|>|\\|"; + + private final String topologyChecksDir; + private final TestLoader innerLoader; + + public static ExpectedTopologiesTestLoader of( + final TestLoader innerLoader, + final String topologyChecksDir + ) { + return new ExpectedTopologiesTestLoader<>(innerLoader, topologyChecksDir); + } + + private ExpectedTopologiesTestLoader( + final TestLoader innerLoader, + final String topologyChecksDir + ) { + this.topologyChecksDir = Objects.requireNonNull(topologyChecksDir, "topologyChecksDir"); + this.innerLoader = Objects.requireNonNull(innerLoader, "innerLoader"); + } + + public Stream load() { + final List expectedTopologies = loadTopologiesAndVersions(); + + return innerLoader.load() + .flatMap(q -> buildVersionedTestCases(q, expectedTopologies)); + } + + public static Path buildExpectedTopologyPath(final String queryName, final Path topologyDir) { + final String updatedQueryName = formatQueryName(queryName); + return topologyDir.resolve(updatedQueryName); + } + + public static String buildExpectedTopologyContent( + final PersistentQueryMetadata query, + final Map configs + ) { + try { + final ObjectWriter objectWriter = OBJECT_MAPPER.writerWithDefaultPrettyPrinter(); + + final String configString = objectWriter.writeValueAsString(configs); + final String topologyString = query.getTopology().describe().toString(); + final String schemasString = query.getSchemasString(); + + return configString + "\n" + + CONFIG_END_MARKER + "\n" + + schemasString + "\n" + + SCHEMAS_END_MARKER + "\n" + + topologyString; + } catch (final Exception e) { + throw new RuntimeException(e); + } + } + + private List loadTopologiesAndVersions() { + return getTopologyVersions().stream() + .map(version -> new TopologiesAndVersion( + version, + loadExpectedTopologies(topologyChecksDir + version.getName()) + )) + .collect(Collectors.toList()); + } + + private List getTopologyVersions() { + final String versionProp = System.getProperty(TOPOLOGY_VERSIONS_PROP, ""); + + final Stream versionStrings = versionProp.isEmpty() + ? findExpectedTopologyDirectories().stream() + : versionProp.equalsIgnoreCase(TOPOLOGY_VERSION_LATEST) + ? 
Stream.of() + : Arrays.stream(versionProp.split(TOPOLOGY_VERSIONS_DELIMITER)); + + return versionStrings + .map(this::getVersion) + .collect(Collectors.toList()); + } + + private List<String> findExpectedTopologyDirectories() { + try { + return findContentsOfDirectory(topologyChecksDir).stream() + .filter(file -> !file.endsWith(".md")) + .collect(Collectors.toList()); + } catch (final Exception e) { + throw new RuntimeException("Could not find expected topology directories.", e); + } + } + + private KsqlVersion getVersion(final String dir) { + final Path versionFile = Paths.get(topologyChecksDir, dir, TOPOLOGY_VERSION_FILE); + + try { + final String versionString = loadContents(versionFile.toString()) + .map(content -> String.join("", content)) + .orElse(dir); + + final Matcher matcher = TOPOLOGY_VERSION_PATTERN.matcher(versionString); + if (!matcher.matches()) { + throw new RuntimeException("Version does not match required pattern. " + + TOPOLOGY_VERSION_PATTERN + + ". Correct the directory name, or add a " + TOPOLOGY_VERSION_FILE + "."); + } + + final int major = Integer.parseInt(matcher.group(1)); + final int minor = Integer.parseInt(matcher.group(2)); + final int patch = matcher.group(3) == null + ? 0 + : Integer.parseInt(matcher.group(3).substring(1)); + + return KsqlVersion.of(dir, SemanticVersion.of(major, minor, patch)); + } catch (Exception e) { + throw new RuntimeException("Failed to load version file: " + versionFile, e); + } + } + + @SuppressWarnings("unchecked") + private static <T extends VersionedTest> Stream<T> buildVersionedTestCases( + final T test, + final List<TopologiesAndVersion> expectedTopologies + ) { + Stream.Builder<T> builder = Stream.builder(); + if (test.getVersionBounds().contains(CURRENT_VERSION)) { + builder.add(test); + } + + for (final TopologiesAndVersion topologies : expectedTopologies) { + if (!test.getVersionBounds().contains(topologies.getVersion())) { + continue; + } + + final TopologyAndConfigs topologyAndConfigs = + topologies.getTopology(formatQueryName(test.getName())); + // could be null if the testCase has expected errors, no topology or configs saved + if (topologyAndConfigs != null) { + final T versionedTest = (T) test.withExpectedTopology( + topologies.getVersion(), + topologyAndConfigs + ); + + builder = builder.add(versionedTest); + } + } + return builder.build(); + } + + private static Map<String, TopologyAndConfigs> loadExpectedTopologies(final String dir) { + final HashMap<String, TopologyAndConfigs> expectedTopologyAndConfigs = new HashMap<>(); + final ObjectReader objectReader = new ObjectMapper().readerFor(Map.class); + final List<String> topologyFiles = findExpectedTopologyFiles(dir); + topologyFiles.forEach(fileName -> { + final TopologyAndConfigs topologyAndConfigs = readTopologyFile(dir + "/" + fileName, + objectReader); + expectedTopologyAndConfigs.put(fileName, topologyAndConfigs); + }); + return expectedTopologyAndConfigs; + } + + private static List<String> findExpectedTopologyFiles(final String dir) { + try { + return findContentsOfDirectory(dir); + } catch (final Exception e) { + throw new RuntimeException("Could not find expected topology files. 
dir: " + dir, e); + } + } + + private static Map parseSchemas(final String asString) { + if (asString == null) { + return Collections.emptyMap(); + } + final ImmutableMap.Builder builder = ImmutableMap.builder(); + final List lines = Arrays.asList(asString.split("\n")); + for (final String line : lines) { + final String[] split = line.split(" *= *"); + if (split.length != 2) { + throw new RuntimeException("Unexpected format for schema string"); + } + builder.put(split[0], split[1]); + } + return builder.build(); + } + + private static TopologyAndConfigs readTopologyFile( + final String file, + final ObjectReader objectReader + ) { + final InputStream s = ExpectedTopologiesTestLoader.class.getClassLoader() + .getResourceAsStream(file); + if (s == null) { + throw new AssertionError("Resource not found: " + file); + } + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(s, UTF_8)) + ) { + final StringBuilder topologyFileBuilder = new StringBuilder(); + + String schemas = null; + String topologyAndConfigLine; + Map persistedConfigs = Collections.emptyMap(); + + while ((topologyAndConfigLine = reader.readLine()) != null) { + if (topologyAndConfigLine.contains(CONFIG_END_MARKER)) { + persistedConfigs = objectReader.readValue(topologyFileBuilder.toString()); + topologyFileBuilder.setLength(0); + } else if (topologyAndConfigLine.contains(SCHEMAS_END_MARKER)) { + schemas = StringUtils.stripEnd(topologyFileBuilder.toString(), "\n"); + topologyFileBuilder.setLength(0); + } else { + topologyFileBuilder.append(topologyAndConfigLine).append("\n"); + } + } + + return new TopologyAndConfigs( + Optional.empty(), + topologyFileBuilder.toString(), + parseSchemas(schemas), + persistedConfigs + ); + + } catch (final IOException e) { + throw new RuntimeException(String.format("Couldn't read topology file %s %s", file, e)); + } + } + + private static Optional> loadContents(final String path) { + final InputStream s = ExpectedTopologiesTestLoader.class.getClassLoader() + .getResourceAsStream(path); + + if (s == null) { + return Optional.empty(); + } + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(s, UTF_8))) { + final List contents = new ArrayList<>(); + String file; + while ((file = reader.readLine()) != null) { + contents.add(file); + } + return Optional.of(contents); + } catch (final IOException e) { + throw new AssertionError("Failed to read path: " + path, e); + } + } + + private static List findContentsOfDirectory(final String path) { + return loadContents(path) + .orElseThrow(() -> new AssertionError("Dir not found: " + path)); + } + + private static String formatQueryName(final String originalQueryName) { + return originalQueryName + .replaceAll(" - (AVRO|JSON|DELIMITED|KAFKA)$", "") + .replaceAll(INVALID_FILENAME_CHARS_PATTERN, "_"); + } + + private static class TopologiesAndVersion { + + private final KsqlVersion version; + private final Map topologies; + + TopologiesAndVersion(final KsqlVersion version, + final Map topologies) { + this.version = Objects.requireNonNull(version, "version"); + this.topologies = Objects.requireNonNull(topologies, "topologies"); + } + + KsqlVersion getVersion() { + return version; + } + + TopologyAndConfigs getTopology(final String name) { + return topologies.get(name); + } + } +} \ No newline at end of file diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java index 
1d37aea44b2f..a0c31cf194e9 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java @@ -75,7 +75,7 @@ private VersionedTest buildHistoricalTestCase( return testCase.withExpectedTopology( version, new TopologyAndConfigs( - planAtVersionNode.getPlan(), + Optional.of(planAtVersionNode.getPlan()), planAtVersionNode.getTopology(), planAtVersionNode.getSchemas(), planAtVersionNode.getConfigs() From e0a7de4314716fc5eed701d7d5867a8413552c87 Mon Sep 17 00:00:00 2001 From: Rohan Date: Mon, 6 Jan 2020 14:37:37 -0800 Subject: [PATCH 073/123] test: add equals testers for plan classes (#4189) test: add equals testers for plan classes --- .../confluent/ksql/engine/KsqlPlanV1Test.java | 48 ++++++ .../confluent/ksql/engine/QueryPlanTest.java | 67 ++++++++ .../execution/plan/AbstractStreamSource.java | 39 +---- .../ksql/execution/plan/StreamFlatMap.java | 18 +++ .../ksql/execution/plan/StreamSource.java | 30 ++++ .../ksql/execution/plan/TableSource.java | 30 ++++ .../execution/plan/WindowedStreamSource.java | 29 ++++ .../execution/plan/WindowedTableSource.java | 29 ++++ .../execution/plan/StreamAggregateTest.java | 75 +++++++++ .../ksql/execution/plan/StreamFilterTest.java | 50 ++++++ .../execution/plan/StreamFlatMapTest.java | 60 +++++++ .../execution/plan/StreamGroupByKeyTest.java | 49 ++++++ .../execution/plan/StreamGroupByTest.java | 65 ++++++++ .../execution/plan/StreamSelectKeyTest.java | 50 ++++++ .../ksql/execution/plan/StreamSelectTest.java | 59 +++++++ .../ksql/execution/plan/StreamSinkTest.java | 50 ++++++ .../ksql/execution/plan/StreamSourceTest.java | 77 +++++++++ .../execution/plan/StreamStreamJoinTest.java | 151 ++++++++++++++++++ .../execution/plan/StreamTableJoinTest.java | 95 +++++++++++ .../plan/StreamWindowedAggregateTest.java | 90 +++++++++++ .../execution/plan/TableAggregateTest.java | 75 +++++++++ .../ksql/execution/plan/TableFilterTest.java | 50 ++++++ .../ksql/execution/plan/TableGroupByTest.java | 65 ++++++++ .../ksql/execution/plan/TableSelectTest.java | 59 +++++++ .../ksql/execution/plan/TableSinkTest.java | 50 ++++++ .../ksql/execution/plan/TableSourceTest.java | 77 +++++++++ .../execution/plan/TableTableJoinTest.java | 78 +++++++++ .../plan/WindowedStreamSourceTest.java | 85 ++++++++++ .../plan/WindowedTableSourceTest.java | 85 ++++++++++ 29 files changed, 1752 insertions(+), 33 deletions(-) create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlPlanV1Test.java create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/engine/QueryPlanTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamAggregateTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamFilterTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamFlatMapTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamGroupByKeyTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamGroupByTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSelectKeyTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSelectTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSinkTest.java create mode 100644 
ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSourceTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamStreamJoinTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamTableJoinTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamWindowedAggregateTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableAggregateTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableFilterTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableGroupByTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSelectTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSinkTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSourceTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableTableJoinTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/WindowedStreamSourceTest.java create mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/WindowedTableSourceTest.java diff --git a/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlPlanV1Test.java b/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlPlanV1Test.java new file mode 100644 index 000000000000..d7afcffeab5e --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/engine/KsqlPlanV1Test.java @@ -0,0 +1,48 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.engine; + +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.ddl.commands.DdlCommand; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class KsqlPlanV1Test { + @Mock + private DdlCommand ddlCommand1; + @Mock + private DdlCommand ddlCommand2; + @Mock + private QueryPlan queryPlan1; + @Mock + private QueryPlan queryPlan2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new KsqlPlanV1("foo", Optional.of(ddlCommand1), Optional.of(queryPlan1)), + new KsqlPlanV1("foo", Optional.of(ddlCommand1), Optional.of(queryPlan1))) + .addEqualityGroup(new KsqlPlanV1("bar", Optional.of(ddlCommand1), Optional.of(queryPlan1))) + .addEqualityGroup(new KsqlPlanV1("foo", Optional.of(ddlCommand2), Optional.of(queryPlan1))) + .addEqualityGroup(new KsqlPlanV1("foo", Optional.of(ddlCommand1), Optional.of(queryPlan2))); + + } +} \ No newline at end of file diff --git a/ksql-engine/src/test/java/io/confluent/ksql/engine/QueryPlanTest.java b/ksql-engine/src/test/java/io/confluent/ksql/engine/QueryPlanTest.java new file mode 100644 index 000000000000..163784a3f557 --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/engine/QueryPlanTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.engine; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableSet; +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.plan.ExecutionStep; +import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.query.QueryId; +import java.util.Set; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class QueryPlanTest { + @Mock + private SourceName sink1; + @Mock + private SourceName sink2; + @Mock + private ExecutionStep plan1; + @Mock + private ExecutionStep plan2; + @Mock + private QueryId id1; + @Mock + private QueryId id2; + + private Set sources1; + private Set sources2; + + @Before + public void setup() { + sources1 = ImmutableSet.of(mock(SourceName.class)); + sources2 = ImmutableSet.of(mock(SourceName.class)); + } + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new QueryPlan(sources1, sink1, plan1, id1), + new QueryPlan(sources1, sink1, plan1, id1)) + .addEqualityGroup(new QueryPlan(sources2, sink1, plan1, id1)) + .addEqualityGroup(new QueryPlan(sources1, sink2, plan1, id1)) + .addEqualityGroup(new QueryPlan(sources1, sink1, plan2, id1)) + .addEqualityGroup(new QueryPlan(sources1, sink1, plan1, id2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java index 042e071314e5..72d4fb6ca1b6 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java @@ -27,12 +27,12 @@ @Immutable public abstract class AbstractStreamSource implements ExecutionStep { - private final ExecutionStepPropertiesV1 properties; - private final String topicName; - private final Formats formats; - private final Optional timestampColumn; - private final LogicalSchema sourceSchema; - private final SourceName alias; + final ExecutionStepPropertiesV1 properties; + final String topicName; + final Formats formats; + final Optional timestampColumn; + final LogicalSchema sourceSchema; + final SourceName alias; public static LogicalSchemaWithMetaAndKeyFields getSchemaWithMetaAndKeyFields( final SourceName alias, @@ -85,31 +85,4 @@ public String getTopicName() { public SourceName getAlias() { return alias; } - - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - final AbstractStreamSource that = (AbstractStreamSource) o; - return Objects.equals(properties, that.properties) - && Objects.equals(topicName, that.topicName) - && Objects.equals(formats, that.formats) - && Objects.equals(timestampColumn, that.timestampColumn) - && Objects.equals(sourceSchema, that.sourceSchema); - } - - @Override - public int hashCode() { - return Objects.hash( - properties, - topicName, - formats, - timestampColumn, - sourceSchema - ); - } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFlatMap.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFlatMap.java index 5442791f69c6..a60131d5a122 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFlatMap.java +++ 
b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamFlatMap.java @@ -65,4 +65,22 @@ public ExecutionStep> getSource() { return source; } + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final StreamFlatMap that = (StreamFlatMap) o; + return Objects.equals(properties, that.properties) + && Objects.equals(source, that.source) + && Objects.equals(tableFunctions, that.tableFunctions); + } + + @Override + public int hashCode() { + return Objects.hash(properties, source, tableFunctions); + } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSource.java index 3a4e8eb9be47..df87e0f98d0b 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSource.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/StreamSource.java @@ -19,6 +19,7 @@ import io.confluent.ksql.execution.timestamp.TimestampColumn; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.LogicalSchema; +import java.util.Objects; import java.util.Optional; import org.apache.kafka.connect.data.Struct; @@ -45,4 +46,33 @@ public StreamSource( public KStreamHolder build(final PlanBuilder builder) { return builder.visitStreamSource(this); } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final StreamSource that = (StreamSource) o; + return Objects.equals(properties, that.properties) + && Objects.equals(topicName, that.topicName) + && Objects.equals(formats, that.formats) + && Objects.equals(timestampColumn, that.timestampColumn) + && Objects.equals(sourceSchema, that.sourceSchema) + && Objects.equals(alias, that.alias); + } + + @Override + public int hashCode() { + return Objects.hash( + properties, + topicName, + formats, + timestampColumn, + sourceSchema, + alias + ); + } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSource.java index 213cbbcfface..e93544666b62 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSource.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/TableSource.java @@ -20,6 +20,7 @@ import io.confluent.ksql.execution.timestamp.TimestampColumn; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.LogicalSchema; +import java.util.Objects; import java.util.Optional; import org.apache.kafka.connect.data.Struct; @@ -49,4 +50,33 @@ public TableSource( public KTableHolder build(final PlanBuilder builder) { return builder.visitTableSource(this); } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TableSource that = (TableSource) o; + return Objects.equals(properties, that.properties) + && Objects.equals(topicName, that.topicName) + && Objects.equals(formats, that.formats) + && Objects.equals(timestampColumn, that.timestampColumn) + && Objects.equals(sourceSchema, that.sourceSchema) + && Objects.equals(alias, that.alias); + } + + @Override + public int hashCode() { + return Objects.hash( + properties, + topicName, + formats, + timestampColumn, + sourceSchema, + alias + ); + } } diff --git 
a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedStreamSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedStreamSource.java index 33040c5b23d8..63894118c5c2 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedStreamSource.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedStreamSource.java @@ -59,4 +59,33 @@ public WindowInfo getWindowInfo() { public KStreamHolder> build(final PlanBuilder builder) { return builder.visitWindowedStreamSource(this); } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final WindowedStreamSource that = (WindowedStreamSource) o; + return Objects.equals(properties, that.properties) + && Objects.equals(topicName, that.topicName) + && Objects.equals(formats, that.formats) + && Objects.equals(timestampColumn, that.timestampColumn) + && Objects.equals(sourceSchema, that.sourceSchema) + && Objects.equals(alias, that.alias); + } + + @Override + public int hashCode() { + return Objects.hash( + properties, + topicName, + formats, + timestampColumn, + sourceSchema, + alias + ); + } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedTableSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedTableSource.java index b2d123036ebf..12d9975c722b 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedTableSource.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/WindowedTableSource.java @@ -58,4 +58,33 @@ public WindowInfo getWindowInfo() { public KTableHolder> build(final PlanBuilder builder) { return builder.visitWindowedTableSource(this); } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final WindowedTableSource that = (WindowedTableSource) o; + return Objects.equals(properties, that.properties) + && Objects.equals(topicName, that.topicName) + && Objects.equals(formats, that.formats) + && Objects.equals(timestampColumn, that.timestampColumn) + && Objects.equals(sourceSchema, that.sourceSchema) + && Objects.equals(alias, that.alias); + } + + @Override + public int hashCode() { + return Objects.hash( + properties, + topicName, + formats, + timestampColumn, + sourceSchema, + alias + ); + } } diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamAggregateTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamAggregateTest.java new file mode 100644 index 000000000000..7c1b24937d55 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamAggregateTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
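Editorial note on the main-code diffs above: `AbstractStreamSource` previously owned `equals`/`hashCode`, and that shared implementation compared `properties`, `topicName`, `formats`, `timestampColumn` and `sourceSchema` but silently ignored `alias`. The patch removes the base-class implementation (widening the fields to package-private so subclasses can reach them) and gives each concrete source step — `StreamSource`, `TableSource`, `WindowedStreamSource`, `WindowedTableSource` — its own `equals`/`hashCode` that includes `alias`. A minimal sketch of the behavioural difference, reusing the mock-style fixtures the new tests below use (the Hamcrest/JUnit assertion helpers are assumed to be statically imported; this is an illustration, not part of the patch):

```java
@Test
public void shouldDistinguishSourcesByAlias() {
  // Two sources identical in every field except the alias:
  final StreamSource a = new StreamSource(
      properties1, "topic1", formats1, Optional.empty(), schema1, alias1);
  final StreamSource b = new StreamSource(
      properties1, "topic1", formats1, Optional.empty(), schema1, alias2);

  // Before this patch the base-class equals ignored the alias, so a.equals(b)
  // returned true; with the per-subclass implementations it returns false.
  assertThat(a, is(not(b)));
}
```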
+ */ + +package io.confluent.ksql.execution.plan; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.expression.tree.FunctionCall; +import io.confluent.ksql.schema.ksql.ColumnRef; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamAggregateTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep source1; + @Mock + private ExecutionStep source2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + + private ImmutableList columnRefs1; + private ImmutableList columnRefs2; + private ImmutableList functionCalls1; + private ImmutableList functionCalls2; + + @Before + public void setup() { + columnRefs1 = ImmutableList.of(mock(ColumnRef.class), mock(ColumnRef.class)); + columnRefs2 = ImmutableList.of(mock(ColumnRef.class), mock(ColumnRef.class)); + functionCalls1 = ImmutableList.of(mock(FunctionCall.class), mock(FunctionCall.class)); + functionCalls2 = ImmutableList.of(mock(FunctionCall.class), mock(FunctionCall.class)); + } + + @Test + public void shouldImplementEqualsCorrectly() { + new EqualsTester() + .addEqualityGroup( + new StreamAggregate(properties1, source1, formats1, columnRefs1, functionCalls1), + new StreamAggregate(properties1, source1, formats1, columnRefs1, functionCalls1) + ).addEqualityGroup( + new StreamAggregate(properties2, source1, formats1, columnRefs1, functionCalls1) + ).addEqualityGroup( + new StreamAggregate(properties1, source2, formats1, columnRefs1, functionCalls1) + ).addEqualityGroup( + new StreamAggregate(properties1, source1, formats2, columnRefs1, functionCalls1) + ).addEqualityGroup( + new StreamAggregate(properties1, source1, formats1, columnRefs2, functionCalls1) + ).addEqualityGroup( + new StreamAggregate(properties1, source1, formats1, columnRefs1, functionCalls2) + ); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamFilterTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamFilterTest.java new file mode 100644 index 000000000000..461302775180 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamFilterTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
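A note on the test pattern used by this and every equality test that follows: Guava's `EqualsTester` treats each `addEqualityGroup(...)` call as a set of objects that must be equal to each other (with matching hash codes) and unequal to the objects in every other group, which is why each group varies exactly one constructor argument. As rendered here, however, none of the chains ends with the terminal call that actually runs the checks; without it, `EqualsTester` verifies nothing. If the omission is not simply an artifact of how this patch was captured, each test needs the closing call (`stepA1`/`stepA2`/`stepB` below are placeholders for any of the execution steps under test):

```java
new EqualsTester()
    .addEqualityGroup(stepA1, stepA2)  // must be equal to each other
    .addEqualityGroup(stepB)           // must differ from every other group
    .testEquals();                     // performs the equals/hashCode checks
```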
+ */
+
+package io.confluent.ksql.execution.plan;
+
+import com.google.common.testing.EqualsTester;
+import io.confluent.ksql.execution.expression.tree.Expression;
+import org.apache.kafka.connect.data.Struct;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class StreamFilterTest {
+  @Mock
+  private ExecutionStepPropertiesV1 properties1;
+  @Mock
+  private ExecutionStepPropertiesV1 properties2;
+  @Mock
+  private ExecutionStep<KStreamHolder<Struct>> source1;
+  @Mock
+  private ExecutionStep<KStreamHolder<Struct>> source2;
+  @Mock
+  private Expression filterExpression1;
+  @Mock
+  private Expression filterExpression2;
+
+  @Test
+  public void shouldImplementEquals() {
+    new EqualsTester()
+        .addEqualityGroup(
+            new StreamFilter<>(properties1, source1, filterExpression1),
+            new StreamFilter<>(properties1, source1, filterExpression1))
+        .addEqualityGroup(new StreamFilter<>(properties2, source1, filterExpression1))
+        .addEqualityGroup(new StreamFilter<>(properties1, source2, filterExpression1))
+        .addEqualityGroup(new StreamFilter<>(properties1, source1, filterExpression2))
+        .testEquals();
+  }
+}
\ No newline at end of file
diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamFlatMapTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamFlatMapTest.java
new file mode 100644
index 000000000000..62685345b824
--- /dev/null
+++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamFlatMapTest.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License; you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */ + +package io.confluent.ksql.execution.plan; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.expression.tree.FunctionCall; +import java.util.List; +import org.apache.kafka.connect.data.Struct; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamFlatMapTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + + private List functions1; + private List functions2; + + @Before + public void setup() { + functions1 = ImmutableList.of(mock(FunctionCall.class), mock(FunctionCall.class)); + functions2 = ImmutableList.of(mock(FunctionCall.class), mock(FunctionCall.class)); + } + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new StreamFlatMap<>(properties1, source1, functions1), + new StreamFlatMap<>(properties1, source1, functions1)) + .addEqualityGroup(new StreamFlatMap<>(properties2, source1, functions1)) + .addEqualityGroup(new StreamFlatMap<>(properties1, source2, functions1)) + .addEqualityGroup(new StreamFlatMap<>(properties1, source1, functions2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamGroupByKeyTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamGroupByKeyTest.java new file mode 100644 index 000000000000..cf6f9cea244e --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamGroupByKeyTest.java @@ -0,0 +1,49 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
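One detail that makes the list-valued fixtures in `StreamFlatMapTest` above (and in the aggregate and group-by tests) work: Mockito mocks compare by reference unless stubbed otherwise, so two lists built by identical `ImmutableList.of(mock(...))` calls are still unequal — which is exactly what lets `functions1` and `functions2` anchor different equality groups. A minimal illustration (assertion helpers assumed statically imported):

```java
// ImmutableList.equals is element-wise, and each mock is a fresh instance
// with identity-based equals, so the two lists never compare equal:
final List<FunctionCall> functions1 = ImmutableList.of(mock(FunctionCall.class));
final List<FunctionCall> functions2 = ImmutableList.of(mock(FunctionCall.class));
assertThat(functions1, is(not(functions2)));
```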
+ */
+
+package io.confluent.ksql.execution.plan;
+
+import com.google.common.testing.EqualsTester;
+import org.apache.kafka.connect.data.Struct;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class StreamGroupByKeyTest {
+  @Mock
+  private ExecutionStepPropertiesV1 properties1;
+  @Mock
+  private ExecutionStepPropertiesV1 properties2;
+  @Mock
+  private ExecutionStep<KStreamHolder<Struct>> source1;
+  @Mock
+  private ExecutionStep<KStreamHolder<Struct>> source2;
+  @Mock
+  private Formats formats1;
+  @Mock
+  private Formats formats2;
+
+  @Test
+  public void shouldImplementEquals() {
+    new EqualsTester()
+        .addEqualityGroup(
+            new StreamGroupByKey(properties1, source1, formats1),
+            new StreamGroupByKey(properties1, source1, formats1))
+        .addEqualityGroup(new StreamGroupByKey(properties2, source1, formats1))
+        .addEqualityGroup(new StreamGroupByKey(properties1, source2, formats1))
+        .addEqualityGroup(new StreamGroupByKey(properties1, source1, formats2))
+        .testEquals();
+  }
+}
\ No newline at end of file
diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamGroupByTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamGroupByTest.java
new file mode 100644
index 000000000000..7e310d0bf421
--- /dev/null
+++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamGroupByTest.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License; you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */ + +package io.confluent.ksql.execution.plan; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.expression.tree.Expression; +import java.util.List; +import org.apache.kafka.connect.data.Struct; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamGroupByTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + + private List expression1; + private List expression2; + + @Before + public void setup() { + expression1 = ImmutableList.of(mock(Expression.class)); + expression2 = ImmutableList.of(mock(Expression.class)); + } + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new StreamGroupBy<>(properties1, source1, formats1, expression1), + new StreamGroupBy<>(properties1, source1, formats1, expression1)) + .addEqualityGroup(new StreamGroupBy<>(properties2, source1, formats1, expression1)) + .addEqualityGroup(new StreamGroupBy<>(properties1, source2, formats1, expression1)) + .addEqualityGroup(new StreamGroupBy<>(properties1, source1, formats2, expression1)) + .addEqualityGroup(new StreamGroupBy<>(properties1, source1, formats1, expression2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSelectKeyTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSelectKeyTest.java new file mode 100644 index 000000000000..d69f712cf060 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSelectKeyTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.expression.tree.Expression; +import org.apache.kafka.connect.data.Struct; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamSelectKeyTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + @Mock + private Expression expression1; + @Mock + private Expression expression2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new StreamSelectKey(properties1, source1, expression1), + new StreamSelectKey(properties1, source1, expression1)) + .addEqualityGroup(new StreamSelectKey(properties2, source1, expression1)) + .addEqualityGroup(new StreamSelectKey(properties1, source2, expression1)) + .addEqualityGroup(new StreamSelectKey(properties1, source1, expression2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSelectTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSelectTest.java new file mode 100644 index 000000000000..230af95c10c4 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSelectTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.EqualsTester; +import java.util.List; +import org.apache.kafka.connect.data.Struct; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamSelectTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + + private List selects1; + private List selects2; + + @Before + public void setup() { + selects1 = ImmutableList.of(mock(SelectExpression.class)); + selects2 = ImmutableList.of(mock(SelectExpression.class)); + } + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new StreamSelect<>(properties1, source1, selects1), + new StreamSelect<>(properties1, source1, selects1)) + .addEqualityGroup(new StreamSelect<>(properties2, source1, selects1)) + .addEqualityGroup(new StreamSelect<>(properties1, source2, selects1)) + .addEqualityGroup(new StreamSelect<>(properties1, source1, selects2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSinkTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSinkTest.java new file mode 100644 index 000000000000..19ce8b3c73cb --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSinkTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import org.apache.kafka.connect.data.Struct; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamSinkTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new StreamSink<>(properties1, source1, formats1, "topic1"), + new StreamSink<>(properties1, source1, formats1, "topic1")) + .addEqualityGroup(new StreamSink<>(properties2, source1, formats1, "topic1")) + .addEqualityGroup(new StreamSink<>(properties1, source2, formats1, "topic1")) + .addEqualityGroup(new StreamSink<>(properties1, source1, formats2, "topic1")) + .addEqualityGroup(new StreamSink<>(properties1, source1, formats1, "topic2")); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSourceTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSourceTest.java new file mode 100644 index 000000000000..70207724a8e0 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamSourceTest.java @@ -0,0 +1,77 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.timestamp.TimestampColumn; +import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamSourceTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + @Mock + private TimestampColumn timestamp1; + @Mock + private TimestampColumn timestamp2; + @Mock + private LogicalSchema schema1; + @Mock + private LogicalSchema schema2; + @Mock + private SourceName alias1; + @Mock + private SourceName alias2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new StreamSource( + properties1, "topic1", formats1, Optional.of(timestamp1), schema1, alias1), + new StreamSource( + properties1, "topic1", formats1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new StreamSource( + properties2, "topic1", formats1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new StreamSource( + properties1, "topic2", formats1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new StreamSource( + properties1, "topic1", formats2, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new StreamSource( + properties1, "topic1", formats1, Optional.of(timestamp2), schema1, alias1)) + .addEqualityGroup( + new StreamSource( + properties1, "topic1", formats1, Optional.of(timestamp1), schema2, alias1)) + .addEqualityGroup( + new StreamSource( + properties1, "topic1", formats1, Optional.of(timestamp1), schema1, alias2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamStreamJoinTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamStreamJoinTest.java new file mode 100644 index 000000000000..465683407360 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamStreamJoinTest.java @@ -0,0 +1,151 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
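`StreamSourceTest` above walks each of the six constructor arguments through its own equality group, but every group wraps the timestamp in `Optional.of(...)`. If broader coverage is wanted, one further group using an absent timestamp would also exercise the present-versus-empty comparison — a suggested addition, not part of the patch:

```java
// Suggested extra group: differs from the base group only in having no timestamp.
.addEqualityGroup(
    new StreamSource(
        properties1, "topic1", formats1, Optional.empty(), schema1, alias1))
```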
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import java.time.Duration; +import org.apache.kafka.connect.data.Struct; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamStreamJoinTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> left1; + @Mock + private ExecutionStep> right1; + @Mock + private ExecutionStep> left2; + @Mock + private ExecutionStep> right2; + @Mock + private Formats leftFormats1; + @Mock + private Formats leftFormats2; + @Mock + private Formats rightFormats1; + @Mock + private Formats rightFormats2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new StreamStreamJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + rightFormats1, + left1, + right1, + Duration.ofSeconds(10), + Duration.ofSeconds(20)), + new StreamStreamJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + rightFormats1, + left1, + right1, + Duration.ofSeconds(10), + Duration.ofSeconds(20))) + .addEqualityGroup( + new StreamStreamJoin<>( + properties2, + JoinType.INNER, + leftFormats1, + rightFormats1, + left1, + right1, + Duration.ofSeconds(10), + Duration.ofSeconds(20))) + .addEqualityGroup( + new StreamStreamJoin<>( + properties1, + JoinType.LEFT, + leftFormats1, + rightFormats1, + left1, + right1, + Duration.ofSeconds(10), + Duration.ofSeconds(20))) + .addEqualityGroup( + new StreamStreamJoin<>( + properties1, + JoinType.INNER, + leftFormats2, + rightFormats1, + left1, + right1, + Duration.ofSeconds(10), + Duration.ofSeconds(20))) + .addEqualityGroup( + new StreamStreamJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + rightFormats2, + left1, + right1, + Duration.ofSeconds(10), + Duration.ofSeconds(20))) + .addEqualityGroup( + new StreamStreamJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + rightFormats1, + left2, + right1, + Duration.ofSeconds(10), + Duration.ofSeconds(20))) + .addEqualityGroup( + new StreamStreamJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + rightFormats1, + left1, + right2, + Duration.ofSeconds(10), + Duration.ofSeconds(20))) + .addEqualityGroup( + new StreamStreamJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + rightFormats1, + left1, + right1, + Duration.ofSeconds(11), + Duration.ofSeconds(20))) + .addEqualityGroup( + new StreamStreamJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + rightFormats1, + left1, + right1, + Duration.ofSeconds(10), + Duration.ofSeconds(21))); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamTableJoinTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamTableJoinTest.java new file mode 100644 index 000000000000..092a039cf2da --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamTableJoinTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import org.apache.kafka.connect.data.Struct; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamTableJoinTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> left1; + @Mock + private ExecutionStep> right1; + @Mock + private ExecutionStep> left2; + @Mock + private ExecutionStep> right2; + @Mock + private Formats leftFormats1; + @Mock + private Formats leftFormats2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new StreamTableJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + left1, + right1), + new StreamTableJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + left1, + right1)) + .addEqualityGroup( + new StreamTableJoin<>( + properties2, + JoinType.INNER, + leftFormats1, + left1, + right1)) + .addEqualityGroup( + new StreamTableJoin<>( + properties1, + JoinType.LEFT, + leftFormats1, + left1, + right1)) + .addEqualityGroup( + new StreamTableJoin<>( + properties1, + JoinType.INNER, + leftFormats2, + left1, + right1)) + .addEqualityGroup( + new StreamTableJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + left2, + right1)) + .addEqualityGroup( + new StreamTableJoin<>( + properties1, + JoinType.INNER, + leftFormats1, + left1, + right2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamWindowedAggregateTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamWindowedAggregateTest.java new file mode 100644 index 000000000000..4befe4e0ff9f --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/StreamWindowedAggregateTest.java @@ -0,0 +1,90 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.expression.tree.FunctionCall; +import io.confluent.ksql.execution.windows.KsqlWindowExpression; +import io.confluent.ksql.schema.ksql.ColumnRef; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class StreamWindowedAggregateTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep source1; + @Mock + private ExecutionStep source2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + @Mock + private KsqlWindowExpression window1; + @Mock + private KsqlWindowExpression window2; + + private ImmutableList columnRefs1; + private ImmutableList columnRefs2; + private ImmutableList functionCalls1; + private ImmutableList functionCalls2; + + @Before + public void setup() { + columnRefs1 = ImmutableList.of(mock(ColumnRef.class), mock(ColumnRef.class)); + columnRefs2 = ImmutableList.of(mock(ColumnRef.class), mock(ColumnRef.class)); + functionCalls1 = ImmutableList.of(mock(FunctionCall.class), mock(FunctionCall.class)); + functionCalls2 = ImmutableList.of(mock(FunctionCall.class), mock(FunctionCall.class)); + } + + @Test + public void shouldImplementEqualsCorrectly() { + new EqualsTester() + .addEqualityGroup( + new StreamWindowedAggregate( + properties1, source1, formats1, columnRefs1, functionCalls1, window1), + new StreamWindowedAggregate( + properties1, source1, formats1, columnRefs1, functionCalls1, window1) + ).addEqualityGroup( + new StreamWindowedAggregate( + properties2, source1, formats1, columnRefs1, functionCalls1, window1) + ).addEqualityGroup( + new StreamWindowedAggregate( + properties1, source2, formats1, columnRefs1, functionCalls1, window1) + ).addEqualityGroup( + new StreamWindowedAggregate( + properties1, source1, formats2, columnRefs1, functionCalls1, window1) + ).addEqualityGroup( + new StreamWindowedAggregate( + properties1, source1, formats1, columnRefs2, functionCalls1, window1) + ).addEqualityGroup( + new StreamWindowedAggregate( + properties1, source1, formats1, columnRefs1, functionCalls2, window1) + ).addEqualityGroup( + new StreamWindowedAggregate( + properties1, source1, formats1, columnRefs1, functionCalls2, window2) + ); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableAggregateTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableAggregateTest.java new file mode 100644 index 000000000000..1f99e24d85a0 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableAggregateTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.expression.tree.FunctionCall; +import io.confluent.ksql.schema.ksql.ColumnRef; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TableAggregateTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep source1; + @Mock + private ExecutionStep source2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + + private ImmutableList columnRefs1; + private ImmutableList columnRefs2; + private ImmutableList functionCalls1; + private ImmutableList functionCalls2; + + @Before + public void setup() { + columnRefs1 = ImmutableList.of(mock(ColumnRef.class), mock(ColumnRef.class)); + columnRefs2 = ImmutableList.of(mock(ColumnRef.class), mock(ColumnRef.class)); + functionCalls1 = ImmutableList.of(mock(FunctionCall.class), mock(FunctionCall.class)); + functionCalls2 = ImmutableList.of(mock(FunctionCall.class), mock(FunctionCall.class)); + } + + @Test + public void shouldImplementEqualsCorrectly() { + new EqualsTester() + .addEqualityGroup( + new TableAggregate(properties1, source1, formats1, columnRefs1, functionCalls1), + new TableAggregate(properties1, source1, formats1, columnRefs1, functionCalls1) + ).addEqualityGroup( + new TableAggregate(properties2, source1, formats1, columnRefs1, functionCalls1) + ).addEqualityGroup( + new TableAggregate(properties1, source2, formats1, columnRefs1, functionCalls1) + ).addEqualityGroup( + new TableAggregate(properties1, source1, formats2, columnRefs1, functionCalls1) + ).addEqualityGroup( + new TableAggregate(properties1, source1, formats1, columnRefs2, functionCalls1) + ).addEqualityGroup( + new TableAggregate(properties1, source1, formats1, columnRefs1, functionCalls2) + ); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableFilterTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableFilterTest.java new file mode 100644 index 000000000000..a8468646a721 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableFilterTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.expression.tree.Expression; +import org.apache.kafka.connect.data.Struct; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TableFilterTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + @Mock + private Expression filterExpression1; + @Mock + private Expression filterExpression2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new TableFilter<>(properties1, source1, filterExpression1), + new TableFilter<>(properties1, source1, filterExpression1)) + .addEqualityGroup(new TableFilter<>(properties2, source1, filterExpression1)) + .addEqualityGroup(new TableFilter<>(properties1, source2, filterExpression1)) + .addEqualityGroup(new TableFilter<>(properties1, source1, filterExpression2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableGroupByTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableGroupByTest.java new file mode 100644 index 000000000000..b9cdde556711 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableGroupByTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.expression.tree.Expression; +import java.util.List; +import org.apache.kafka.connect.data.Struct; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TableGroupByTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + + private List expression1; + private List expression2; + + @Before + public void setup() { + expression1 = ImmutableList.of(mock(Expression.class)); + expression2 = ImmutableList.of(mock(Expression.class)); + } + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new TableGroupBy<>(properties1, source1, formats1, expression1), + new TableGroupBy<>(properties1, source1, formats1, expression1)) + .addEqualityGroup(new TableGroupBy<>(properties2, source1, formats1, expression1)) + .addEqualityGroup(new TableGroupBy<>(properties1, source2, formats1, expression1)) + .addEqualityGroup(new TableGroupBy<>(properties1, source1, formats2, expression1)) + .addEqualityGroup(new TableGroupBy<>(properties1, source1, formats1, expression2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSelectTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSelectTest.java new file mode 100644 index 000000000000..3a536662b1fa --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSelectTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import static org.mockito.Mockito.mock; + +import com.google.common.collect.ImmutableList; +import com.google.common.testing.EqualsTester; +import java.util.List; +import org.apache.kafka.connect.data.Struct; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TableSelectTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + + private List selects1; + private List selects2; + + @Before + public void setup() { + selects1 = ImmutableList.of(mock(SelectExpression.class)); + selects2 = ImmutableList.of(mock(SelectExpression.class)); + } + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new TableSelect<>(properties1, source1, selects1), + new TableSelect<>(properties1, source1, selects1)) + .addEqualityGroup(new TableSelect<>(properties2, source1, selects1)) + .addEqualityGroup(new TableSelect<>(properties1, source2, selects1)) + .addEqualityGroup(new TableSelect<>(properties1, source1, selects2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSinkTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSinkTest.java new file mode 100644 index 000000000000..091b0c7e7116 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSinkTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import org.apache.kafka.connect.data.Struct; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TableSinkTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> source1; + @Mock + private ExecutionStep> source2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new TableSink<>(properties1, source1, formats1, "topic1"), + new TableSink<>(properties1, source1, formats1, "topic1")) + .addEqualityGroup(new TableSink<>(properties2, source1, formats1, "topic1")) + .addEqualityGroup(new TableSink<>(properties1, source2, formats1, "topic1")) + .addEqualityGroup(new TableSink<>(properties1, source1, formats2, "topic1")) + .addEqualityGroup(new TableSink<>(properties1, source1, formats1, "topic2")); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSourceTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSourceTest.java new file mode 100644 index 000000000000..f58a6ec986a4 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableSourceTest.java @@ -0,0 +1,77 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.timestamp.TimestampColumn; +import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TableSourceTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + @Mock + private TimestampColumn timestamp1; + @Mock + private TimestampColumn timestamp2; + @Mock + private LogicalSchema schema1; + @Mock + private LogicalSchema schema2; + @Mock + private SourceName alias1; + @Mock + private SourceName alias2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new TableSource( + properties1, "topic1", formats1, Optional.of(timestamp1), schema1, alias1), + new TableSource( + properties1, "topic1", formats1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new TableSource( + properties2, "topic1", formats1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new TableSource( + properties1, "topic2", formats1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new TableSource( + properties1, "topic1", formats2, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new TableSource( + properties1, "topic1", formats1, Optional.of(timestamp2), schema1, alias1)) + .addEqualityGroup( + new TableSource( + properties1, "topic1", formats1, Optional.of(timestamp1), schema2, alias1)) + .addEqualityGroup( + new TableSource( + properties1, "topic1", formats1, Optional.of(timestamp1), schema1, alias2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableTableJoinTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableTableJoinTest.java new file mode 100644 index 000000000000..953e66a775af --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/TableTableJoinTest.java @@ -0,0 +1,78 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import org.apache.kafka.connect.data.Struct; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TableTableJoinTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private ExecutionStep> left1; + @Mock + private ExecutionStep> right1; + @Mock + private ExecutionStep> left2; + @Mock + private ExecutionStep> right2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new TableTableJoin<>( + properties1, + JoinType.INNER, + left1, + right1), + new TableTableJoin<>( + properties1, + JoinType.INNER, + left1, + right1)) + .addEqualityGroup( + new TableTableJoin<>( + properties2, + JoinType.INNER, + left1, + right1)) + .addEqualityGroup( + new TableTableJoin<>( + properties1, + JoinType.LEFT, + left1, + right1)) + .addEqualityGroup( + new TableTableJoin<>( + properties1, + JoinType.INNER, + left2, + right1)) + .addEqualityGroup( + new TableTableJoin<>( + properties1, + JoinType.INNER, + left1, + right2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/WindowedStreamSourceTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/WindowedStreamSourceTest.java new file mode 100644 index 000000000000..b1d1bf8b2141 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/WindowedStreamSourceTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.timestamp.TimestampColumn; +import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.serde.WindowInfo; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class WindowedStreamSourceTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + @Mock + private TimestampColumn timestamp1; + @Mock + private TimestampColumn timestamp2; + @Mock + private LogicalSchema schema1; + @Mock + private LogicalSchema schema2; + @Mock + private SourceName alias1; + @Mock + private SourceName alias2; + @Mock + private WindowInfo window1; + @Mock + private WindowInfo window2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new WindowedStreamSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp1), schema1, alias1), + new WindowedStreamSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new WindowedStreamSource( + properties2, "topic1", formats1, window1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new WindowedStreamSource( + properties1, "topic2", formats1, window1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new WindowedStreamSource( + properties1, "topic1", formats2, window1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new WindowedStreamSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp2), schema1, alias1)) + .addEqualityGroup( + new WindowedStreamSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp1), schema2, alias1)) + .addEqualityGroup( + new WindowedStreamSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp1), schema1, alias2)) + .addEqualityGroup( + new WindowedStreamSource( + properties1, "topic1", formats1, window2, Optional.of(timestamp1), schema1, alias2)); + } +} \ No newline at end of file diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/WindowedTableSourceTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/WindowedTableSourceTest.java new file mode 100644 index 000000000000..dd5099275016 --- /dev/null +++ b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/WindowedTableSourceTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License; you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.execution.plan; + +import com.google.common.testing.EqualsTester; +import io.confluent.ksql.execution.timestamp.TimestampColumn; +import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.serde.WindowInfo; +import java.util.Optional; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class WindowedTableSourceTest { + @Mock + private ExecutionStepPropertiesV1 properties1; + @Mock + private ExecutionStepPropertiesV1 properties2; + @Mock + private Formats formats1; + @Mock + private Formats formats2; + @Mock + private TimestampColumn timestamp1; + @Mock + private TimestampColumn timestamp2; + @Mock + private LogicalSchema schema1; + @Mock + private LogicalSchema schema2; + @Mock + private SourceName alias1; + @Mock + private SourceName alias2; + @Mock + private WindowInfo window1; + @Mock + private WindowInfo window2; + + @Test + public void shouldImplementEquals() { + new EqualsTester() + .addEqualityGroup( + new WindowedTableSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp1), schema1, alias1), + new WindowedTableSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new WindowedTableSource( + properties2, "topic1", formats1, window1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new WindowedTableSource( + properties1, "topic2", formats1, window1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new WindowedTableSource( + properties1, "topic1", formats2, window1, Optional.of(timestamp1), schema1, alias1)) + .addEqualityGroup( + new WindowedTableSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp2), schema1, alias1)) + .addEqualityGroup( + new WindowedTableSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp1), schema2, alias1)) + .addEqualityGroup( + new WindowedTableSource( + properties1, "topic1", formats1, window1, Optional.of(timestamp1), schema1, alias2)) + .addEqualityGroup( + new WindowedTableSource( + properties1, "topic1", formats1, window2, Optional.of(timestamp1), schema1, alias2)); + } +} \ No newline at end of file From 34a87951a1146382810f5227fbf34612f8f0e788 Mon Sep 17 00:00:00 2001 From: elismaga Date: Mon, 6 Jan 2020 15:48:41 -0800 Subject: [PATCH 074/123] Disable building docker images (#4230) --- Jenkinsfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 098ea3407b11..c5da3d141b71 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4,6 +4,5 @@ dockerfile { slackChannel = '#ksql-alerts' upstreamProjects = 'confluentinc/schema-registry' dockerRepos = ['confluentinc/ksql-cli'] - extraBuildArgs = '-Ddocker.skip=false' extraDeployArgs = '-Ddocker.skip=true' } From e17d1e1b211f7ef65478fbc31a5f37a80ffcce60 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Tue, 7 Jan 2020 10:12:53 +0000 Subject: [PATCH 075/123] chore: fix result schema on table GROUP BY for primitive keys (MINOR) (#4205) * chore: fix result schema on table GROUP BY for primitive keys The intermediate schema was incorrect as it wasn't updating the SQL type of ROWKEY to match the grouping column. Also, added test to `KeyField` to enforce the key field matches the type of the key column. 
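To make the fix described above concrete: grouping a table by a primitive expression changes the key type of the result, so the intermediate schema must re-type `ROWKEY` from the source's key type to the grouping expression's SQL type. A rough sketch of the required adjustment, in the spirit of the `StepSchemaResolver` change listed in the diffstat below — the method and builder names here are assumptions for illustration only, since the relevant hunks are truncated in this excerpt:

```java
// Hypothetical sketch: resolve the schema of a GROUP BY over a single
// primitive expression by giving ROWKEY the expression's SQL type.
final SqlType newKeyType = expressionTypeManager
    .getExpressionSqlType(groupByExpressions.get(0));  // e.g. SqlTypes.INTEGER

final LogicalSchema resolved = LogicalSchema.builder()
    .keyColumn(SchemaUtil.ROWKEY_NAME, newKeyType)  // previously left unchanged - the bug
    .valueColumns(sourceSchema.value())
    .build();
```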
--- .../ksql/analyzer/AnalyzerFunctionalTest.java | 4 +-- .../analyzer/QueryAnalyzerFunctionalTest.java | 2 +- .../ksql/codegen/CodeGenRunnerTest.java | 2 ++ .../rewrite/ExpressionTreeRewriterTest.java | 2 +- .../StatementRewriteForRowtimeTest.java | 2 +- .../DefaultSchemaInjectorFunctionalTest.java | 2 +- .../ksql/structured/SchemaKTableTest.java | 10 ++++--- .../ksql/testutils/AnalysisTestUtil.java | 2 +- .../{parser => util}/KsqlParserTestUtil.java | 6 +++-- .../ksql/metastore/model/KeyField.java | 27 ++++++++++++++++++- .../ksql/metastore/model/KeyFieldTest.java | 12 +++++++++ .../confluent/ksql/parser/KsqlParserTest.java | 6 +++-- .../ksql/parser/SqlFormatterTest.java | 7 +++-- .../entity/SourceDescriptionFactoryTest.java | 2 +- .../server/execution/RequestHandlerTest.java | 14 +++++----- .../execution/streams/StepSchemaResolver.java | 23 +++++++++++++--- .../streams/StepSchemaResolverTest.java | 7 +++-- 17 files changed, 98 insertions(+), 32 deletions(-) rename ksql-engine/src/test/java/io/confluent/ksql/{parser => util}/KsqlParserTestUtil.java (91%) diff --git a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java index e3b769165ecb..e9c5ad74986b 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java @@ -49,7 +49,6 @@ import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.KsqlParserTestUtil; import io.confluent.ksql.parser.properties.with.CreateSourceAsProperties; import io.confluent.ksql.parser.tree.CreateStreamAsSelect; import io.confluent.ksql.parser.tree.Query; @@ -65,6 +64,7 @@ import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.KsqlParserTestUtil; import io.confluent.ksql.util.MetaStoreFixture; import java.util.HashMap; import java.util.List; @@ -316,7 +316,7 @@ public void shouldNotInheritNamespaceExplicitlySetUpstreamForAvro() { SourceName.of("S0"), schema, SerdeOption.none(), - KeyField.of(ColumnRef.withoutSource(ColumnName.of("FIELD1"))), + KeyField.none(), Optional.empty(), false, ksqlTopic diff --git a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/QueryAnalyzerFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/QueryAnalyzerFunctionalTest.java index 5499c8be8c11..2ebf408907c6 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/QueryAnalyzerFunctionalTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/QueryAnalyzerFunctionalTest.java @@ -42,7 +42,6 @@ import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.KsqlParserTestUtil; import io.confluent.ksql.parser.tree.CreateStreamAsSelect; import io.confluent.ksql.parser.tree.CreateTableAsSelect; import io.confluent.ksql.parser.tree.InsertInto; @@ -52,6 +51,7 @@ import io.confluent.ksql.serde.Format; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.KsqlParserTestUtil; import io.confluent.ksql.util.MetaStoreFixture; import java.io.File; import java.util.Arrays; diff --git 
a/ksql-engine/src/test/java/io/confluent/ksql/codegen/CodeGenRunnerTest.java b/ksql-engine/src/test/java/io/confluent/ksql/codegen/CodeGenRunnerTest.java index 3ce7f8796508..4fbaf11dd948 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/codegen/CodeGenRunnerTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/codegen/CodeGenRunnerTest.java @@ -60,6 +60,7 @@ import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.MetaStoreFixture; +import io.confluent.ksql.util.SchemaUtil; import java.math.BigDecimal; import java.util.ArrayList; import java.util.Arrays; @@ -82,6 +83,7 @@ public class CodeGenRunnerTest { private static final String COL_INVALID_JAVA = "col!Invalid:("; private static final LogicalSchema META_STORE_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("COL0"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("COL1"), SqlTypes.STRING) .valueColumn(ColumnName.of("COL2"), SqlTypes.STRING) diff --git a/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriterTest.java b/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriterTest.java index 47e53d7a0ce0..931eca229581 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriterTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/ExpressionTreeRewriterTest.java @@ -63,12 +63,12 @@ import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.KsqlParserTestUtil; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.SelectItem; import io.confluent.ksql.parser.tree.SingleColumn; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.types.SqlPrimitiveType; +import io.confluent.ksql.util.KsqlParserTestUtil; import io.confluent.ksql.util.MetaStoreFixture; import java.util.List; import java.util.Optional; diff --git a/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/StatementRewriteForRowtimeTest.java b/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/StatementRewriteForRowtimeTest.java index 60b8a249672c..db9a0c1bf15d 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/StatementRewriteForRowtimeTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/engine/rewrite/StatementRewriteForRowtimeTest.java @@ -27,9 +27,9 @@ import io.confluent.ksql.execution.expression.tree.Expression; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.metastore.MetaStore; -import io.confluent.ksql.parser.KsqlParserTestUtil; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.util.KsqlParserTestUtil; import io.confluent.ksql.util.MetaStoreFixture; import io.confluent.ksql.util.timestamp.PartialStringToTimestampParser; import io.confluent.ksql.util.timestamp.StringToTimestampParser; diff --git a/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/DefaultSchemaInjectorFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/DefaultSchemaInjectorFunctionalTest.java index 96eafc9933ae..3514cd5b061e 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/DefaultSchemaInjectorFunctionalTest.java +++ 
b/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/DefaultSchemaInjectorFunctionalTest.java @@ -25,7 +25,6 @@ import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.KsqlParserTestUtil; import io.confluent.ksql.parser.tree.CreateSource; import io.confluent.ksql.parser.tree.Statement; import io.confluent.ksql.parser.tree.TableElement; @@ -35,6 +34,7 @@ import io.confluent.ksql.util.DecimalUtil; import io.confluent.ksql.util.IdentifierUtil; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlParserTestUtil; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaBuilder; import org.junit.Assert; diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java index b514f3ba8d33..b8465f8d0033 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKTableTest.java @@ -125,7 +125,7 @@ public class SchemaKTableTest { private final KsqlConfig ksqlConfig = new KsqlConfig(Collections.emptyMap()); private final MetaStore metaStore = MetaStoreFixture.getNewMetaStore(new InternalFunctionRegistry()); private final GroupedFactory groupedFactory = mock(GroupedFactory.class); - private final Grouped grouped = Grouped.with( + private final Grouped grouped = Grouped.with( "group", Serdes.String(), Serdes.String()); private SchemaKTable initialSchemaKTable; @@ -180,7 +180,7 @@ public void init() { mockKTable = EasyMock.niceMock(KTable.class); validKeyField = KeyField - .of(Optional.of(ColumnRef.of(ksqlTable.getName(), ColumnName.of("COL1")))); + .of(Optional.of(ColumnRef.of(ksqlTable.getName(), ColumnName.of("COL0")))); firstSchemaKTable = buildSchemaKTableForJoin(ksqlTable, mockKTable); secondSchemaKTable = buildSchemaKTableForJoin(secondKsqlTable, secondKTable); @@ -822,7 +822,9 @@ public void shouldSetKeyOnGroupBySingleExpressionThatIsInProjection() { final SchemaKTable selected = initialSchemaKTable .select(selectExpressions, childContextStacker, queryBuilder); - final List groupByExprs = ImmutableList.of(TEST_2_COL_1); + final List groupByExprs = ImmutableList.of( + new ColumnReferenceExp(ColumnRef.withoutSource(ColumnName.of("COL0"))) + ); // When: final SchemaKGroupedTable result = selected @@ -830,7 +832,7 @@ public void shouldSetKeyOnGroupBySingleExpressionThatIsInProjection() { // Then: assertThat(result.getKeyField(), - is(KeyField.of(ColumnRef.withoutSource(ColumnName.of("COL1"))))); + is(KeyField.of(ColumnRef.withoutSource(ColumnName.of("COL0"))))); } private List givenInitialKTableOf(final String selectQuery) { diff --git a/ksql-engine/src/test/java/io/confluent/ksql/testutils/AnalysisTestUtil.java b/ksql-engine/src/test/java/io/confluent/ksql/testutils/AnalysisTestUtil.java index 42869c37f8b5..7094b93d1053 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/testutils/AnalysisTestUtil.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/testutils/AnalysisTestUtil.java @@ -23,7 +23,6 @@ import io.confluent.ksql.analyzer.QueryAnalyzer; import io.confluent.ksql.metastore.MetaStore; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; -import io.confluent.ksql.parser.KsqlParserTestUtil; import io.confluent.ksql.parser.tree.Query; import 
io.confluent.ksql.parser.tree.QueryContainer; import io.confluent.ksql.parser.tree.Sink; @@ -32,6 +31,7 @@ import io.confluent.ksql.planner.plan.OutputNode; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlParserTestUtil; import java.util.List; import java.util.Optional; diff --git a/ksql-engine/src/test/java/io/confluent/ksql/parser/KsqlParserTestUtil.java b/ksql-engine/src/test/java/io/confluent/ksql/util/KsqlParserTestUtil.java similarity index 91% rename from ksql-engine/src/test/java/io/confluent/ksql/parser/KsqlParserTestUtil.java rename to ksql-engine/src/test/java/io/confluent/ksql/util/KsqlParserTestUtil.java index bcf88da42d00..a1d58e4ea11a 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/parser/KsqlParserTestUtil.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/KsqlParserTestUtil.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Confluent Inc. + * Copyright 2020 Confluent Inc. * * Licensed under the Confluent Community License (the "License"); you may not use * this file except in compliance with the License. You may obtain a copy of the @@ -13,13 +13,15 @@ * specific language governing permissions and limitations under the License. */ -package io.confluent.ksql.parser; +package io.confluent.ksql.util; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.hasSize; import io.confluent.ksql.engine.rewrite.AstSanitizer; import io.confluent.ksql.metastore.MetaStore; +import io.confluent.ksql.parser.DefaultKsqlParser; +import io.confluent.ksql.parser.KsqlParser; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.parser.tree.Statement; import java.util.List; diff --git a/ksql-metastore/src/main/java/io/confluent/ksql/metastore/model/KeyField.java b/ksql-metastore/src/main/java/io/confluent/ksql/metastore/model/KeyField.java index e8c7fd96802a..80940607a168 100644 --- a/ksql-metastore/src/main/java/io/confluent/ksql/metastore/model/KeyField.java +++ b/ksql-metastore/src/main/java/io/confluent/ksql/metastore/model/KeyField.java @@ -21,6 +21,7 @@ import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.schema.ksql.types.SqlType; import java.util.Objects; import java.util.Optional; @@ -84,11 +85,15 @@ public Optional ref() { * @throws IllegalArgumentException if new key field is required but not available in the schema. 
*/ public Optional resolve(final LogicalSchema schema) { - return keyField + final Optional resolved = keyField .map(colRef -> schema.findValueColumn(colRef) .orElseThrow(() -> new IllegalArgumentException( "Invalid key field, not found in schema: " + colRef.toString(FormatOptions.noEscape())))); + + resolved.ifPresent(col -> throwOnTypeMismatch(schema, col)); + + return resolved; } /** @@ -123,4 +128,24 @@ public int hashCode() { public String toString() { return "KeyField(" + keyField + ')'; } + + private static void throwOnTypeMismatch(final LogicalSchema schema, final Column keyField) { + if (schema.key().size() != 1) { + throw new UnsupportedOperationException("Only single key column supported"); + } + + final Column keyCol = schema.key().get(0); + final SqlType keyType = keyCol.type(); + final SqlType keyFieldType = keyField.type(); + + if (!keyType.equals(keyFieldType)) { + throw new IllegalArgumentException("The type of the KEY field defined in the WITH clause " + + "does not match the type of the actual row key column." + + System.lineSeparator() + + "KEY column in WITH clause: " + keyField + + System.lineSeparator() + + "actual key column:" + keyCol + ); + } + } } diff --git a/ksql-metastore/src/test/java/io/confluent/ksql/metastore/model/KeyFieldTest.java b/ksql-metastore/src/test/java/io/confluent/ksql/metastore/model/KeyFieldTest.java index 5a2b7bf91cf1..8a6d1c0a6886 100644 --- a/ksql-metastore/src/test/java/io/confluent/ksql/metastore/model/KeyFieldTest.java +++ b/ksql-metastore/src/test/java/io/confluent/ksql/metastore/model/KeyFieldTest.java @@ -137,6 +137,18 @@ public void shouldThrowOnResolveIfSchemaDoesNotContainKeyField() { keyField.resolve(SCHEMA); } + @Test + public void shouldThrowIfKeyColumnTypeDoesNotMatchWithKeyFieldType() { + // Given: + final KeyField keyField = KeyField.of(SCHEMA.value().get(1).ref()); + + // Then: + expectedException.expect(IllegalArgumentException.class); + + // When: + keyField.validateKeyExistsIn(SCHEMA); + } + @SuppressWarnings("OptionalGetWithoutIsPresent") @Test public void shouldResolveKeyField() { diff --git a/ksql-parser/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java b/ksql-parser/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java index 8363ef10e2fb..f4e3630cfffe 100644 --- a/ksql-parser/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java +++ b/ksql-parser/src/test/java/io/confluent/ksql/parser/KsqlParserTest.java @@ -92,6 +92,7 @@ import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.util.MetaStoreFixture; +import io.confluent.ksql.util.SchemaUtil; import java.util.List; import java.util.Objects; import java.util.Optional; @@ -134,6 +135,7 @@ public class KsqlParserTest { .build(); private static final LogicalSchema ORDERS_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.STRING) .valueColumn(ColumnName.of("ORDERTIME"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ORDERID"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING) @@ -154,12 +156,12 @@ public void init() { ValueFormat.of(FormatInfo.of(Format.JSON)) ); - final KsqlStream ksqlStreamOrders = new KsqlStream<>( + final KsqlStream ksqlStreamOrders = new KsqlStream<>( "sqlexpression", SourceName.of("ADDRESS"), ORDERS_SCHEMA, SerdeOption.none(), - KeyField.of(ColumnRef.withoutSource(ColumnName.of("ORDERTIME"))), + KeyField.none(), Optional.empty(), false, ksqlTopicOrders diff --git 
a/ksql-parser/src/test/java/io/confluent/ksql/parser/SqlFormatterTest.java b/ksql-parser/src/test/java/io/confluent/ksql/parser/SqlFormatterTest.java index 7e6bc887344c..92774cffda08 100644 --- a/ksql-parser/src/test/java/io/confluent/ksql/parser/SqlFormatterTest.java +++ b/ksql-parser/src/test/java/io/confluent/ksql/parser/SqlFormatterTest.java @@ -68,6 +68,7 @@ import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.util.MetaStoreFixture; +import io.confluent.ksql.util.SchemaUtil; import java.util.Optional; import java.util.concurrent.TimeUnit; import org.junit.Before; @@ -109,16 +110,18 @@ public class SqlFormatterTest { .build(); private static final LogicalSchema ITEM_INFO_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("ITEMID"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("NAME"), SqlTypes.STRING) .valueColumn(ColumnName.of("CATEGORY"), categorySchema) .build(); - private static final LogicalSchema tableSchema = LogicalSchema.builder() + private static final LogicalSchema TABLE_SCHEMA = LogicalSchema.builder() .valueColumn(ColumnName.of("TABLE"), SqlTypes.STRING) .build(); private static final LogicalSchema ORDERS_SCHEMA = LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.BIGINT) .valueColumn(ColumnName.of("ORDERTIME"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ORDERID"), SqlTypes.BIGINT) .valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING) @@ -200,7 +203,7 @@ public void setUp() { final KsqlTable ksqlTableTable = new KsqlTable<>( "sqlexpression", SourceName.of("TABLE"), - tableSchema, + TABLE_SCHEMA, SerdeOption.none(), KeyField.of(ColumnRef.withoutSource(ColumnName.of("TABLE"))), Optional.empty(), diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionFactoryTest.java index 1160eec09f5c..27fcacf6c4f0 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionFactoryTest.java @@ -86,7 +86,7 @@ private static DataSource buildDataSource( SourceName.of("stream"), schema, SerdeOption.none(), - KeyField.of(schema.value().get(0).ref()), + KeyField.none(), timestampColumn, false, topic diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java index 407dba9999ed..c5c318d3d556 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java @@ -63,12 +63,12 @@ public class RequestHandlerTest { private static final String SOME_STREAM_SQL = "CREATE STREAM x WITH (value_format='json', kafka_topic='x');"; - @Mock KsqlEngine ksqlEngine; - @Mock KsqlConfig ksqlConfig; - @Mock ServiceContext serviceContext; - @Mock DistributingExecutor distributor; - @Mock KsqlEntity entity; - @Mock CommandQueueSync sync; + @Mock private KsqlEngine ksqlEngine; + @Mock private KsqlConfig ksqlConfig; + @Mock private ServiceContext serviceContext; + @Mock private DistributingExecutor distributor; + @Mock private KsqlEntity entity; + @Mock private CommandQueueSync sync; private MetaStore metaStore; private RequestHandler handler; @@ -76,8 +76,6 @@ 
public class RequestHandlerTest { @Before public void setUp() { metaStore = new MetaStoreImpl(new InternalFunctionRegistry()); - when(ksqlEngine.parse(any())) - .thenAnswer(inv -> new DefaultKsqlParser().parse(inv.getArgument(0))); when(ksqlEngine.prepare(any())) .thenAnswer(invocation -> new DefaultKsqlParser().prepare(invocation.getArgument(0), metaStore)); diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java index b4d5eefe7ae6..1fc0bbac5978 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/StepSchemaResolver.java @@ -57,6 +57,7 @@ */ @SuppressWarnings("MethodMayBeStatic") // Methods can not be used in HANDLERS is static. public final class StepSchemaResolver { + @SuppressWarnings("rawtypes") private static final HandlerMaps.ClassHandlerMapR2 HANDLERS = HandlerMaps.forClass(ExecutionStep.class) @@ -66,7 +67,7 @@ public final class StepSchemaResolver { .put(StreamWindowedAggregate.class, StepSchemaResolver::handleStreamWindowedAggregate) .put(StreamFilter.class, StepSchemaResolver::sameSchema) .put(StreamFlatMap.class, StepSchemaResolver::handleStreamFlatMap) - .put(StreamGroupBy.class, StepSchemaResolver::handleGroupBy) + .put(StreamGroupBy.class, StepSchemaResolver::handleStreamGroupBy) .put(StreamGroupByKey.class, StepSchemaResolver::sameSchema) .put(StreamSelect.class, StepSchemaResolver::handleStreamSelect) .put(StreamSelectKey.class, StepSchemaResolver::handleSelectKey) @@ -75,13 +76,14 @@ public final class StepSchemaResolver { .put(WindowedStreamSource.class, StepSchemaResolver::handleSource) .put(TableAggregate.class, StepSchemaResolver::handleTableAggregate) .put(TableFilter.class, StepSchemaResolver::sameSchema) - .put(TableGroupBy.class, StepSchemaResolver::sameSchema) + .put(TableGroupBy.class, StepSchemaResolver::handleTableGroupBy) .put(TableSelect.class, StepSchemaResolver::handleTableSelect) .put(TableSink.class, StepSchemaResolver::sameSchema) .put(TableSource.class, StepSchemaResolver::handleSource) .put(WindowedTableSource.class, StepSchemaResolver::handleSource) .build(); + @SuppressWarnings("rawtypes") private static final HandlerMaps.ClassHandlerMapR2 JOIN_HANDLERS = HandlerMaps.forClass(ExecutionStep.class) @@ -164,7 +166,7 @@ private LogicalSchema handleStreamFlatMap( ); } - private LogicalSchema handleGroupBy( + private LogicalSchema handleStreamGroupBy( final LogicalSchema sourceSchema, final StreamGroupBy streamGroupBy ) { @@ -179,6 +181,21 @@ private LogicalSchema handleGroupBy( return GroupByParamsFactory.build(sourceSchema, compiledGroupBy).getSchema(); } + private LogicalSchema handleTableGroupBy( + final LogicalSchema sourceSchema, + final TableGroupBy tableGroupBy + ) { + final List compiledGroupBy = CodeGenRunner.compileExpressions( + tableGroupBy.getGroupByExpressions().stream(), + "Group By", + sourceSchema, + ksqlConfig, + functionRegistry + ); + + return GroupByParamsFactory.build(sourceSchema, compiledGroupBy).getSchema(); + } + private LogicalSchema handleStreamSelect( final LogicalSchema schema, final StreamSelect streamSelect diff --git a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java index 954f1754e007..d264e2042ae8 100644 --- 
a/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java +++ b/ksql-streams/src/test/java/io/confluent/ksql/execution/streams/StepSchemaResolverTest.java @@ -358,14 +358,17 @@ public void shouldResolveSchemaForTableGroupBy() { PROPERTIES, tableSource, formats, - Collections.emptyList() + ImmutableList.of(new ColumnReferenceExp(Optional.empty(), ORANGE_COL_REF)) ); // When: final LogicalSchema result = resolver.resolve(step, SCHEMA); // Then: - assertThat(result, is(SCHEMA)); + assertThat(result, is(LogicalSchema.builder() + .keyColumn(SchemaUtil.ROWKEY_NAME, SqlTypes.INTEGER) + .valueColumns(SCHEMA.value()) + .build())); } @Test From da570025a0ecb16da6da17a62b9292e7898ec72d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20Pe=C3=B1a?= Date: Tue, 7 Jan 2020 12:43:09 -0600 Subject: [PATCH 076/123] refactor: inject a KsqlSecurityContext to REST requests (#4184) --- .../ksql/security/KsqlSecurityContext.java | 61 +++++++++++++++++++ .../ksql/rest/server/KsqlRestApplication.java | 8 ++- ...er.java => KsqlSecurityContextBinder.java} | 20 +++--- ... => KsqlSecurityContextBinderFactory.java} | 32 +++++----- .../server/resources/HealthCheckResource.java | 5 +- .../rest/server/resources/KsqlResource.java | 11 ++-- .../streaming/StreamedQueryResource.java | 15 ++--- .../services/ServerInternalKsqlClient.java | 10 +-- .../rest/server/KsqlRestApplicationTest.java | 39 ++++++++---- .../ksql/rest/server/TestKsqlRestApp.java | 21 ++++--- .../rest/server/computation/RecoveryTest.java | 9 ++- ...KsqlSecurityContextBinderFactoryTest.java} | 24 +++++--- .../server/resources/KsqlResourceTest.java | 20 +++--- .../streaming/StreamedQueryResourceTest.java | 30 +++++---- .../ServerInternalKsqlClientTest.java | 9 +-- 15 files changed, 210 insertions(+), 104 deletions(-) create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/security/KsqlSecurityContext.java rename ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/{KsqlRestServiceContextBinder.java => KsqlSecurityContextBinder.java} (63%) rename ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/{KsqlRestServiceContextFactory.java => KsqlSecurityContextBinderFactory.java} (80%) rename ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/{KsqlRestServiceContextFactoryTest.java => KsqlSecurityContextBinderFactoryTest.java} (80%) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlSecurityContext.java b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlSecurityContext.java new file mode 100644 index 000000000000..44c03156b532 --- /dev/null +++ b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlSecurityContext.java @@ -0,0 +1,61 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.security; + +import io.confluent.ksql.services.ServiceContext; + +import java.security.Principal; +import java.util.Optional; + +/** + * A class that provides KSQL security related information for KSQL user requests. + */ +public class KsqlSecurityContext { + private final Optional userPrincipal; + private final ServiceContext serviceContext; + + public KsqlSecurityContext( + final Optional userPrincipal, + final ServiceContext serviceContext + ) { + this.userPrincipal = userPrincipal; + this.serviceContext = serviceContext; + } + + /** + * Returns a {@code java.security.Principal} object containing the name of the current + * authenticated user. If the user has not been authenticated, the method returns + * {@code Optional.empty}. + * + * @return a {@code java.security.Principal} containing the name of the user making this request; + * {@code Optional.empty} if the user has not been authenticated + */ + public Optional getUserPrincipal() { + return userPrincipal; + } + + /** + * Returns a {@link ServiceContext} object with injected credentials of the authenticated + * user. If KSQL does not have user authentication configured, the method returns the default + * {@code ServiceContext} containing the KSQL server configuration (with KSQL credentials or not). + * + * @return a {@code ServiceContext} with injected user credentials or default KSQL server + * configuration. + */ + public ServiceContext getServiceContext() { + return serviceContext; + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java index 8bc4574c9dd0..5759222d6429 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java @@ -48,7 +48,7 @@ import io.confluent.ksql.rest.server.computation.CommandRunner; import io.confluent.ksql.rest.server.computation.CommandStore; import io.confluent.ksql.rest.server.computation.InteractiveStatementExecutor; -import io.confluent.ksql.rest.server.context.KsqlRestServiceContextBinder; +import io.confluent.ksql.rest.server.context.KsqlSecurityContextBinder; import io.confluent.ksql.rest.server.filters.KsqlAuthorizationFilter; import io.confluent.ksql.rest.server.resources.HealthCheckResource; import io.confluent.ksql.rest.server.resources.KsqlConfigurable; @@ -72,6 +72,7 @@ import io.confluent.ksql.security.KsqlAuthorizationValidator; import io.confluent.ksql.security.KsqlAuthorizationValidatorFactory; import io.confluent.ksql.security.KsqlDefaultSecurityExtension; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; import io.confluent.ksql.services.LazyServiceContext; import io.confluent.ksql.services.ServiceContext; @@ -468,7 +469,7 @@ static KsqlRestApplication buildApplication( versionCheckerFactory, Integer.MAX_VALUE, serviceContext, - KsqlRestServiceContextBinder::new); + KsqlSecurityContextBinder::new); } static KsqlRestApplication buildApplication( @@ -688,7 +689,8 @@ private static void maybeCreateProcessingLogStream( try { final SimpleKsqlClient internalClient = - new ServerInternalKsqlClient(ksqlResource, serviceContext); + new ServerInternalKsqlClient(ksqlResource, new KsqlSecurityContext( + Optional.empty(), serviceContext)); final URI serverEndpoint = ServerUtil.getServerAddress(restConfig); final RestResponse response = 
internalClient.makeKsqlRequest( diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextBinder.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinder.java similarity index 63% rename from ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextBinder.java rename to ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinder.java index 66c59d81c26f..1692b14fe1f5 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextBinder.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinder.java @@ -15,31 +15,31 @@ package io.confluent.ksql.rest.server.context; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; -import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.util.KsqlConfig; import org.glassfish.hk2.utilities.binding.AbstractBinder; import org.glassfish.jersey.process.internal.RequestScoped; /** - * Configures the {@link ServiceContext} class for dependency injection using the - * {@link javax.ws.rs.core.Context} annotation. + * Configures the {@link KsqlSecurityContext} class for dependency injection using + * the {@link javax.ws.rs.core.Context} annotation. *
- * Inject {@code ServiceContext} on each REST method as follows: - * i.e. myMethod(@Context ServiceContext serviceContext) + * Inject {@code KsqlSecurityContext} on each REST method as follows: + * i.e. myMethod(@Context KsqlSecurityContext securityContext) */ -public class KsqlRestServiceContextBinder extends AbstractBinder { - public KsqlRestServiceContextBinder( +public class KsqlSecurityContextBinder extends AbstractBinder { + public KsqlSecurityContextBinder( final KsqlConfig ksqlConfig, final KsqlSecurityExtension securityExtension ) { - KsqlRestServiceContextFactory.configure(ksqlConfig, securityExtension); + KsqlSecurityContextBinderFactory.configure(ksqlConfig, securityExtension); } @Override protected void configure() { - bindFactory(KsqlRestServiceContextFactory.class) - .to(ServiceContext.class) + bindFactory(KsqlSecurityContextBinderFactory.class) + .to(KsqlSecurityContext.class) .in(RequestScoped.class); } } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextFactory.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactory.java similarity index 80% rename from ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextFactory.java rename to ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactory.java index d19868ed4e65..79c30795b607 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextFactory.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactory.java @@ -21,8 +21,8 @@ import io.confluent.ksql.rest.server.services.RestServiceContextFactory; import io.confluent.ksql.rest.server.services.RestServiceContextFactory.DefaultServiceContextFactory; import io.confluent.ksql.rest.server.services.RestServiceContextFactory.UserServiceContextFactory; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; -import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.util.KsqlConfig; import java.security.Principal; import java.util.Optional; @@ -34,9 +34,9 @@ /** * This class implements {@link Factory}, which allows a REST application to create - * a new {@link ServiceContext} during REST requests. + * a new {@link KsqlSecurityContext} during REST requests. 
*/ -public class KsqlRestServiceContextFactory implements Factory { +public class KsqlSecurityContextBinderFactory implements Factory { private static KsqlConfig ksqlConfig; private static KsqlSecurityExtension securityExtension; @@ -44,8 +44,8 @@ public static void configure( final KsqlConfig ksqlConfig, final KsqlSecurityExtension securityExtension ) { - KsqlRestServiceContextFactory.ksqlConfig = requireNonNull(ksqlConfig, "ksqlConfig"); - KsqlRestServiceContextFactory.securityExtension + KsqlSecurityContextBinderFactory.ksqlConfig = requireNonNull(ksqlConfig, "ksqlConfig"); + KsqlSecurityContextBinderFactory.securityExtension = requireNonNull(securityExtension, "securityExtension"); } @@ -55,7 +55,7 @@ public static void configure( private final HttpServletRequest request; @Inject - public KsqlRestServiceContextFactory( + public KsqlSecurityContextBinderFactory( final SecurityContext securityContext, final HttpServletRequest request ) { @@ -68,7 +68,7 @@ public KsqlRestServiceContextFactory( } @VisibleForTesting - KsqlRestServiceContextFactory( + KsqlSecurityContextBinderFactory( final SecurityContext securityContext, final HttpServletRequest request, final DefaultServiceContextFactory defaultServiceContextFactory, @@ -83,27 +83,31 @@ public KsqlRestServiceContextFactory( } @Override - public ServiceContext provide() { + public KsqlSecurityContext provide() { + final Principal principal = securityContext.getUserPrincipal(); final Optional authHeader = Optional.ofNullable(request.getHeader(HttpHeaders.AUTHORIZATION)); if (!securityExtension.getUserContextProvider().isPresent()) { - return defaultServiceContextFactory.create(ksqlConfig, authHeader); + return new KsqlSecurityContext( + Optional.ofNullable(principal), + defaultServiceContextFactory.create(ksqlConfig, authHeader) + ); } - final Principal principal = securityContext.getUserPrincipal(); return securityExtension.getUserContextProvider() - .map(provider -> + .map(provider -> new KsqlSecurityContext( + Optional.ofNullable(principal), userServiceContextFactory.create( ksqlConfig, authHeader, provider.getKafkaClientSupplier(principal), - provider.getSchemaRegistryClientFactory(principal))) + provider.getSchemaRegistryClientFactory(principal)))) .get(); } @Override - public void dispose(final ServiceContext serviceContext) { - serviceContext.close(); + public void dispose(final KsqlSecurityContext ksqlSecurityContext) { + ksqlSecurityContext.getServiceContext().close(); } } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/HealthCheckResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/HealthCheckResource.java index 75ac76b56565..4c1d05b7ef83 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/HealthCheckResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/HealthCheckResource.java @@ -24,9 +24,11 @@ import io.confluent.ksql.rest.healthcheck.HealthCheckAgent; import io.confluent.ksql.rest.server.KsqlRestConfig; import io.confluent.ksql.rest.server.services.ServerInternalKsqlClient; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.services.ServiceContext; import java.time.Duration; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.TimeUnit; import javax.annotation.Nonnull; import javax.ws.rs.GET; @@ -70,7 +72,8 @@ public static HealthCheckResource create( ) { return new HealthCheckResource( new HealthCheckAgent( - new 
ServerInternalKsqlClient(ksqlResource, serviceContext), + new ServerInternalKsqlClient(ksqlResource, + new KsqlSecurityContext(Optional.empty(), serviceContext)), restConfig), Duration.ofMillis(restConfig.getLong(KsqlRestConfig.KSQL_HEALTHCHECK_INTERVAL_MS_CONFIG)) ); diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java index e41b8a89a14e..3388eee1ea6d 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java @@ -45,6 +45,7 @@ import io.confluent.ksql.rest.util.CommandStoreUtil; import io.confluent.ksql.rest.util.TerminateCluster; import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.services.SandboxedServiceContext; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.Injector; @@ -180,7 +181,7 @@ public void configure(final KsqlConfig config) { @POST @Path("/terminate") public Response terminateCluster( - @Context final ServiceContext serviceContext, + @Context final KsqlSecurityContext securityContext, final ClusterTerminateRequest request ) { LOG.info("Received: " + request); @@ -190,7 +191,7 @@ public Response terminateCluster( ensureValidPatterns(request.getDeleteTopicList()); try { final KsqlEntityList entities = handler.execute( - serviceContext, + securityContext.getServiceContext(), TERMINATE_CLUSTER, request.getStreamsProperties() ); @@ -203,7 +204,7 @@ public Response terminateCluster( @POST public Response handleKsqlStatements( - @Context final ServiceContext serviceContext, + @Context final KsqlSecurityContext securityContext, final KsqlRequest request ) { LOG.info("Received: " + request); @@ -220,14 +221,14 @@ public Response handleKsqlStatements( final List statements = ksqlEngine.parse(request.getKsql()); validator.validate( - SandboxedServiceContext.create(serviceContext), + SandboxedServiceContext.create(securityContext.getServiceContext()), statements, request.getStreamsProperties(), request.getKsql() ); final KsqlEntityList entities = handler.execute( - serviceContext, + securityContext.getServiceContext(), statements, request.getStreamsProperties() ); diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java index cc2558cc23ed..9ba10711c840 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java @@ -36,6 +36,7 @@ import io.confluent.ksql.rest.server.resources.KsqlRestException; import io.confluent.ksql.rest.util.CommandStoreUtil; import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlConfig; @@ -139,7 +140,7 @@ public void configure(final KsqlConfig config) { @POST public Response streamQuery( - @Context final ServiceContext serviceContext, + @Context final KsqlSecurityContext securityContext, final KsqlRequest request ) { throwIfNotConfigured(); @@ 
-151,7 +152,7 @@ public Response streamQuery( CommandStoreUtil.httpWaitForCommandSequenceNumber( commandQueue, request, commandQueueCatchupTimeout); - return handleStatement(serviceContext, request, statement); + return handleStatement(securityContext, request, statement); } private void throwIfNotConfigured() { @@ -175,14 +176,14 @@ private PreparedStatement parseStatement(final KsqlRequest request) { @SuppressWarnings("unchecked") private Response handleStatement( - final ServiceContext serviceContext, + final KsqlSecurityContext securityContext, final KsqlRequest request, final PreparedStatement statement ) { try { final Consumer authValidationConsumer = ksqlAuthorizationValidator -> ksqlAuthorizationValidator.checkAuthorization( - serviceContext, + securityContext.getServiceContext(), ksqlEngine.getMetaStore(), statement.getStatement() ); @@ -202,7 +203,7 @@ private Response handleStatement( } return handlePullQuery( - serviceContext, + securityContext.getServiceContext(), queryStmt, request.getStreamsProperties() ); @@ -210,7 +211,7 @@ private Response handleStatement( authorizationValidator.ifPresent(authValidationConsumer); return handlePushQuery( - serviceContext, + securityContext.getServiceContext(), queryStmt, request.getStreamsProperties() ); @@ -219,7 +220,7 @@ private Response handleStatement( if (statement.getStatement() instanceof PrintTopic) { authorizationValidator.ifPresent(authValidationConsumer); return handlePrintTopic( - serviceContext, + securityContext.getServiceContext(), request.getStreamsProperties(), (PreparedStatement) statement); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java index 7de5260d51a1..d83a2191c0c6 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java @@ -23,7 +23,7 @@ import io.confluent.ksql.rest.entity.KsqlRequest; import io.confluent.ksql.rest.entity.StreamedRow; import io.confluent.ksql.rest.server.resources.KsqlResource; -import io.confluent.ksql.services.ServiceContext; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.services.SimpleKsqlClient; import java.net.URI; import java.util.Collections; @@ -40,14 +40,14 @@ public class ServerInternalKsqlClient implements SimpleKsqlClient { private static final String KSQL_PATH = "/ksql"; private final KsqlResource ksqlResource; - private final ServiceContext serviceContext; + private final KsqlSecurityContext securityContext; public ServerInternalKsqlClient( final KsqlResource ksqlResource, - final ServiceContext serviceContext + final KsqlSecurityContext securityContext ) { this.ksqlResource = requireNonNull(ksqlResource, "ksqlResource"); - this.serviceContext = requireNonNull(serviceContext, "serviceContext"); + this.securityContext = requireNonNull(securityContext, "securityContext"); } @Override @@ -56,7 +56,7 @@ public RestResponse makeKsqlRequest( final String sql ) { final KsqlRequest request = new KsqlRequest(sql, Collections.emptyMap(), null); - final Response response = ksqlResource.handleKsqlStatements(serviceContext, request); + final Response response = ksqlResource.handleKsqlStatements(securityContext, request); return KsqlClientUtil.toRestResponse( response, KSQL_PATH, diff --git 
a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java index 577387493b88..3e6bb79664f9 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java @@ -40,7 +40,7 @@ import io.confluent.ksql.rest.entity.KsqlRequest; import io.confluent.ksql.rest.server.computation.CommandRunner; import io.confluent.ksql.rest.server.computation.CommandStore; -import io.confluent.ksql.rest.server.context.KsqlRestServiceContextBinder; +import io.confluent.ksql.rest.server.context.KsqlSecurityContextBinder; import io.confluent.ksql.rest.server.filters.KsqlAuthorizationFilter; import io.confluent.ksql.rest.server.resources.KsqlResource; import io.confluent.ksql.rest.server.resources.RootDocument; @@ -49,6 +49,7 @@ import io.confluent.ksql.rest.server.state.ServerState; import io.confluent.ksql.rest.util.ProcessingLogServerUtils; import io.confluent.ksql.security.KsqlAuthorizationProvider; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.services.ServiceContext; @@ -68,6 +69,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.Mockito; @@ -126,6 +128,10 @@ public class KsqlRestApplicationTest { private String logCreateStatement; private KsqlRestApplication app; private KsqlRestConfig restConfig; + private KsqlSecurityContext securityContext; + + private ArgumentCaptor securityContextArgumentCaptor = + ArgumentCaptor.forClass(KsqlSecurityContext.class); @SuppressWarnings("unchecked") @Before @@ -149,6 +155,8 @@ public void setUp() { when(precondition1.checkPrecondition(any(), any())).thenReturn(Optional.empty()); when(precondition2.checkPrecondition(any(), any())).thenReturn(Optional.empty()); + securityContext = new KsqlSecurityContext(Optional.empty(), serviceContext); + logCreateStatement = ProcessingLogServerUtils.processingLogStreamCreateStatement( processingLogConfig, ksqlConfig @@ -208,10 +216,11 @@ public void shouldCreateLogStreamThroughKsqlResource() { // Then: verify(ksqlResource).handleKsqlStatements( - serviceContext, - new KsqlRequest(logCreateStatement, Collections.emptyMap(), null) + securityContextArgumentCaptor.capture(), + eq(new KsqlRequest(logCreateStatement, Collections.emptyMap(), null)) ); - + assertThat(securityContextArgumentCaptor.getValue().getUserPrincipal(), is(Optional.empty())); + assertThat(securityContextArgumentCaptor.getValue().getServiceContext(), is(serviceContext)); } @Test @@ -225,7 +234,7 @@ public void shouldNotCreateLogStreamIfAutoCreateNotConfigured() { // Then: verify(ksqlResource, never()).handleKsqlStatements( - serviceContext, + securityContext, new KsqlRequest(logCreateStatement, Collections.emptyMap(), null) ); } @@ -241,9 +250,11 @@ public void shouldStartCommandStoreAndCommandRunnerBeforeCreatingLogStream() { inOrder.verify(commandRunner).processPriorCommands(); inOrder.verify(commandRunner).start(); inOrder.verify(ksqlResource).handleKsqlStatements( - serviceContext, - new KsqlRequest(logCreateStatement, Collections.emptyMap(), null) + securityContextArgumentCaptor.capture(), + eq(new KsqlRequest(logCreateStatement, 
Collections.emptyMap(), null)) ); + assertThat(securityContextArgumentCaptor.getValue().getUserPrincipal(), is(Optional.empty())); + assertThat(securityContextArgumentCaptor.getValue().getServiceContext(), is(serviceContext)); } @Test @@ -255,9 +266,11 @@ public void shouldCreateLogTopicBeforeSendingCreateStreamRequest() { final InOrder inOrder = Mockito.inOrder(topicClient, ksqlResource); inOrder.verify(topicClient).createTopic(eq(LOG_TOPIC_NAME), anyInt(), anyShort()); inOrder.verify(ksqlResource).handleKsqlStatements( - serviceContext, - new KsqlRequest(logCreateStatement, Collections.emptyMap(), null) + securityContextArgumentCaptor.capture(), + eq(new KsqlRequest(logCreateStatement, Collections.emptyMap(), null)) ); + assertThat(securityContextArgumentCaptor.getValue().getUserPrincipal(), is(Optional.empty())); + assertThat(securityContextArgumentCaptor.getValue().getServiceContext(), is(serviceContext)); } @Test @@ -291,9 +304,11 @@ public void shouldSendCreateStreamRequestBeforeSettingReady() { // Then: final InOrder inOrder = Mockito.inOrder(ksqlResource, serverState); verify(ksqlResource).handleKsqlStatements( - serviceContext, - new KsqlRequest(logCreateStatement, Collections.emptyMap(), null) + securityContextArgumentCaptor.capture(), + eq(new KsqlRequest(logCreateStatement, Collections.emptyMap(), null)) ); + assertThat(securityContextArgumentCaptor.getValue().getUserPrincipal(), is(Optional.empty())); + assertThat(securityContextArgumentCaptor.getValue().getServiceContext(), is(serviceContext)); inOrder.verify(serverState).setReady(); } @@ -402,7 +417,7 @@ private void givenAppWithRestConfig(final Map restConfigMap) { streamedQueryResource, ksqlResource, versionCheckerAgent, - KsqlRestServiceContextBinder::new, + KsqlSecurityContextBinder::new, securityExtension, serverState, processingLogContext, diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java index e6cfd89a8c82..d42dad375290 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java @@ -36,8 +36,9 @@ import io.confluent.ksql.rest.entity.SourceInfo; import io.confluent.ksql.rest.entity.StreamsList; import io.confluent.ksql.rest.entity.TablesList; -import io.confluent.ksql.rest.server.context.KsqlRestServiceContextBinder; +import io.confluent.ksql.rest.server.context.KsqlSecurityContextBinder; import io.confluent.ksql.rest.util.KsqlInternalTopicUtils; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; import io.confluent.ksql.services.DisabledKsqlClient; import io.confluent.ksql.services.ServiceContext; @@ -431,8 +432,8 @@ public static final class Builder { private final Map additionalProps = new HashMap<>(); private Supplier serviceContext; - private BiFunction serviceContextBinder - = KsqlRestServiceContextBinder::new; + private BiFunction securityContextBinder + = KsqlSecurityContextBinder::new; private Optional credentials = Optional.empty(); @@ -456,23 +457,23 @@ public Builder withProperties(final Map props) { public Builder withStaticServiceContext(final Supplier serviceContext) { this.serviceContext = serviceContext; - this.serviceContextBinder = (config, extension) -> new AbstractBinder() { + this.securityContextBinder = (config, extension) -> new AbstractBinder() { @Override protected void configure() { - final 
Factory factory = new Factory() { + final Factory factory = new Factory() { @Override - public ServiceContext provide() { - return serviceContext.get(); + public KsqlSecurityContext provide() { + return new KsqlSecurityContext(Optional.empty(), serviceContext.get()); } @Override - public void dispose(final ServiceContext serviceContext) { + public void dispose(final KsqlSecurityContext securityContext) { // do nothing because context is shared } }; bindFactory(factory) - .to(ServiceContext.class) + .to(KsqlSecurityContext.class) .in(RequestScoped.class); } }; @@ -501,7 +502,7 @@ public TestKsqlRestApp build() { bootstrapServers, additionalProps, serviceContext, - serviceContextBinder, + securityContextBinder, credentials ); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java index 6cad7478961c..9fc499be9520 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/RecoveryTest.java @@ -48,6 +48,7 @@ import io.confluent.ksql.rest.server.state.ServerState; import io.confluent.ksql.rest.util.ClusterTerminator; import io.confluent.ksql.schema.ksql.LogicalSchema; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.services.FakeKafkaTopicClient; import io.confluent.ksql.services.ServiceContext; @@ -88,6 +89,8 @@ public class RecoveryTest { private final SpecificQueryIdGenerator queryIdGenerator = new SpecificQueryIdGenerator(); private final ServiceContext serviceContext = TestServiceContext.create(topicClient); + private KsqlSecurityContext securityContext; + @Mock @SuppressWarnings("unchecked") private final Producer transactionalProducer = (Producer) mock(Producer.class); @@ -97,7 +100,9 @@ public class RecoveryTest { @Before - public void setup() { } + public void setup() { + securityContext = new KsqlSecurityContext(Optional.empty(), serviceContext); + } @After public void tearDown() { @@ -238,7 +243,7 @@ void executeCommands() { void submitCommands(final String ...statements) { for (final String statement : statements) { - final Response response = ksqlResource.handleKsqlStatements(serviceContext, + final Response response = ksqlResource.handleKsqlStatements(securityContext, new KsqlRequest(statement, Collections.emptyMap(), null)); assertThat(response.getStatus(), equalTo(200)); executeCommands(); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactoryTest.java similarity index 80% rename from ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextFactoryTest.java rename to ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactoryTest.java index 49a849f17431..4c4c25dbd218 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlRestServiceContextFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactoryTest.java @@ -24,6 +24,7 @@ import io.confluent.ksql.rest.server.services.RestServiceContextFactory.DefaultServiceContextFactory; import io.confluent.ksql.rest.server.services.RestServiceContextFactory.UserServiceContextFactory; +import 
io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; import io.confluent.ksql.security.KsqlUserContextProvider; import io.confluent.ksql.services.ServiceContext; @@ -40,8 +41,8 @@ import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) -public class KsqlRestServiceContextFactoryTest { - private KsqlRestServiceContextFactory serviceContextFactory; +public class KsqlSecurityContextBinderFactoryTest { + private KsqlSecurityContextBinderFactory securityContextBinderFactory; @Mock private SecurityContext securityContext; @@ -66,8 +67,8 @@ public class KsqlRestServiceContextFactoryTest { @Before public void setUp() { - KsqlRestServiceContextFactory.configure(ksqlConfig, securityExtension); - serviceContextFactory = new KsqlRestServiceContextFactory( + KsqlSecurityContextBinderFactory.configure(ksqlConfig, securityExtension); + securityContextBinderFactory = new KsqlSecurityContextBinderFactory( securityContext, request, defaultServiceContextProvider, @@ -84,13 +85,15 @@ public void setUp() { public void shouldCreateDefaultServiceContextIfUserContextProviderIsNotEnabled() { // Given: when(securityExtension.getUserContextProvider()).thenReturn(Optional.empty()); + when(securityContext.getUserPrincipal()).thenReturn(null); // When: - final ServiceContext serviceContext = serviceContextFactory.provide(); + final KsqlSecurityContext ksqlSecurityContext = securityContextBinderFactory.provide(); // Then: verify(defaultServiceContextProvider).create(ksqlConfig, Optional.empty()); - assertThat(serviceContext, is(defaultServiceContext)); + assertThat(ksqlSecurityContext.getUserPrincipal(), is(Optional.empty())); + assertThat(ksqlSecurityContext.getServiceContext(), is(defaultServiceContext)); } @Test @@ -99,11 +102,12 @@ public void shouldCreateUserServiceContextIfUserContextProviderIsEnabled() { when(securityExtension.getUserContextProvider()).thenReturn(Optional.of(userContextProvider)); // When: - final ServiceContext serviceContext = serviceContextFactory.provide(); + final KsqlSecurityContext ksqlSecurityContext = securityContextBinderFactory.provide(); // Then: verify(userServiceContextFactory).create(eq(ksqlConfig), eq(Optional.empty()), any(), any()); - assertThat(serviceContext, is(userServiceContext)); + assertThat(ksqlSecurityContext.getUserPrincipal(), is(Optional.of(user1))); + assertThat(ksqlSecurityContext.getServiceContext(), is(userServiceContext)); } @Test @@ -113,7 +117,7 @@ public void shouldPassAuthHeaderToDefaultFactory() { when(request.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn("some-auth"); // When: - serviceContextFactory.provide(); + securityContextBinderFactory.provide(); // Then: verify(defaultServiceContextProvider).create(any(), eq(Optional.of("some-auth"))); @@ -126,7 +130,7 @@ public void shouldPassAuthHeaderToUserFactory() { when(request.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn("some-auth"); // When: - serviceContextFactory.provide(); + securityContextBinderFactory.provide(); // Then: verify(userServiceContextFactory).create(any(), eq(Optional.of("some-auth")), any(), any()); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java index 16870b922376..ca0a2e48258c 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java +++ 
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -126,6 +126,7 @@ import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.serde.Format; import io.confluent.ksql.serde.FormatInfo; import io.confluent.ksql.serde.KeyFormat; @@ -281,6 +282,7 @@ public class KsqlResourceTest { private QueuedCommandStatus commandStatus1; private MetaStoreImpl metaStore; private ServiceContext serviceContext; + private KsqlSecurityContext securityContext; private String streamName; @@ -309,6 +311,8 @@ public void setUp() throws IOException, RestClientException { metaStore ); + securityContext = new KsqlSecurityContext(Optional.empty(), serviceContext); + when(commandStore.createTransactionalProducer()) .thenReturn(transactionalProducer); @@ -388,7 +392,7 @@ public void shouldThrowOnHandleStatementIfNotConfigured() { // When: ksqlResource.handleKsqlStatements( - serviceContext, + securityContext, new KsqlRequest("query", Collections.emptyMap(), null) ); } @@ -417,7 +421,7 @@ public void shouldThrowOnHandleTerminateIfNotConfigured() { // When: ksqlResource.terminateCluster( - serviceContext, + securityContext, new ClusterTerminateRequest(ImmutableList.of("")) ); } @@ -1706,7 +1710,7 @@ public void shouldReturnServiceUnavailableIfTimeoutWaitingForCommandSequenceNumb @Test public void shouldUpdateTheLastRequestTime() { // When: - ksqlResource.handleKsqlStatements(serviceContext, VALID_EXECUTABLE_REQUEST); + ksqlResource.handleKsqlStatements(securityContext, VALID_EXECUTABLE_REQUEST); // Then: verify(activenessRegistrar).updateLastRequestTime(); @@ -1716,7 +1720,7 @@ public void shouldUpdateTheLastRequestTime() { public void shouldHandleTerminateRequestCorrectly() { // When: final Response response = ksqlResource.terminateCluster( - serviceContext, + securityContext, VALID_TERMINATE_REQUEST ); @@ -1746,7 +1750,7 @@ public void shouldFailIfCannotWriteTerminateCommand() { // When: final Response response = ksqlResource.terminateCluster( - serviceContext, + securityContext, VALID_TERMINATE_REQUEST ); @@ -1770,7 +1774,7 @@ public void shouldFailTerminateOnInvalidDeleteTopicPattern() { "Invalid pattern: [Invalid Regex")))); // When: - ksqlResource.terminateCluster(serviceContext, request); + ksqlResource.terminateCluster(securityContext, request); } @Test @@ -1972,7 +1976,7 @@ private KsqlErrorMessage makeFailingRequestWithSequenceNumber( private KsqlErrorMessage makeFailingRequest(final KsqlRequest ksqlRequest, final Code errorCode) { try { - final Response response = ksqlResource.handleKsqlStatements(serviceContext, ksqlRequest); + final Response response = ksqlResource.handleKsqlStatements(securityContext, ksqlRequest); assertThat(response.getStatus(), is(errorCode.getCode())); assertThat(response.getEntity(), instanceOf(KsqlErrorMessage.class)); return (KsqlErrorMessage) response.getEntity(); @@ -2028,7 +2032,7 @@ private List makeMultipleRequest( final KsqlRequest ksqlRequest, final Class expectedEntityType) { - final Response response = ksqlResource.handleKsqlStatements(serviceContext, ksqlRequest); + final Response response = ksqlResource.handleKsqlStatements(securityContext, ksqlRequest); if (response.getStatus() != Response.Status.OK.getStatusCode()) { throw new KsqlRestException(response); } diff --git 
a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index 2b31f5ef51fb..c6a8db34ed28 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -58,6 +58,7 @@ import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.services.KafkaTopicClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; @@ -148,6 +149,7 @@ public class StreamedQueryResourceTest { private PreparedStatement invalid; private PreparedStatement query; private PreparedStatement print; + private KsqlSecurityContext securityContext; @Before public void setup() { @@ -162,6 +164,8 @@ public void setup() { when(mockStatementParser.parseSingleStatement(PULL_QUERY_STRING)).thenReturn(pullQueryStatement); when(errorsHandler.accessDeniedFromKafkaResponse(any(Exception.class))).thenReturn(AUTHORIZATION_ERROR_RESPONSE); + securityContext = new KsqlSecurityContext(Optional.empty(), serviceContext); + testResource = new StreamedQueryResource( mockKsqlEngine, mockStatementParser, @@ -207,7 +211,7 @@ public void shouldThrowOnHandleStatementIfNotConfigured() { // When: testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest("query", Collections.emptyMap(), null) ); } @@ -227,7 +231,7 @@ public void shouldReturn400OnBadStatement() { // When: testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest("query", Collections.emptyMap(), null) ); } @@ -236,7 +240,7 @@ public void shouldReturn400OnBadStatement() { public void shouldNotWaitIfCommandSequenceNumberSpecified() throws Exception { // When: testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), null) ); @@ -248,7 +252,7 @@ public void shouldNotWaitIfCommandSequenceNumberSpecified() throws Exception { public void shouldWaitIfCommandSequenceNumberSpecified() throws Exception { // When: testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), 3L) ); @@ -273,7 +277,7 @@ public void shouldReturnServiceUnavailableIfTimeoutWaitingForCommandSequenceNumb // When: testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), 3L) ); } @@ -288,7 +292,7 @@ public void shouldNotCreateExternalClientsForPullQuery() { // When: testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PULL_QUERY_STRING, Collections.emptyMap(), null) ); @@ -303,7 +307,7 @@ public void shouldNotCreateExternalClientsForPullQuery() { public void shouldThrowExceptionForPullQueryIfValidating() { // When: final Response response = testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PULL_QUERY_STRING, Collections.emptyMap(), null) ); @@ -327,7 +331,7 @@ public void shouldPassCheckForPullQueryIfNotValidating() { // When: final Response response = testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PULL_QUERY_STRING, Collections.emptyMap(), 
null) ); @@ -399,7 +403,7 @@ public void shouldStreamRowsCorrectly() throws Throwable { final Response response = testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(queryString, requestStreamsProperties, null) ); final PipedOutputStream responseOutputStream = new EOFPipedOutputStream(); @@ -538,7 +542,7 @@ public void write(final int b) throws IOException { public void shouldUpdateTheLastRequestTime() { /// When: testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), null) ); @@ -557,7 +561,7 @@ public void shouldReturnForbiddenKafkaAccessIfKsqlTopicAuthorizationException() // When: final Response response = testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PUSH_QUERY_STRING, Collections.emptyMap(), null) ); @@ -580,7 +584,7 @@ public void shouldReturnForbiddenKafkaAccessIfPrintTopicKsqlTopicAuthorizationEx // When: final Response response = testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PRINT_TOPIC, Collections.emptyMap(), null) ); @@ -626,7 +630,7 @@ public void shouldSuggestAlternativesIfPrintTopicDoesNotExist() { // When: testResource.streamQuery( - serviceContext, + securityContext, new KsqlRequest(PRINT_TOPIC, Collections.emptyMap(), null) ); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClientTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClientTest.java index bd75e367c960..409b11f33b4e 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClientTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClientTest.java @@ -23,7 +23,8 @@ import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.KsqlRequest; import io.confluent.ksql.rest.server.resources.KsqlResource; -import io.confluent.ksql.services.ServiceContext; +import io.confluent.ksql.security.KsqlSecurityContext; + import java.net.URI; import java.util.Collections; import javax.ws.rs.core.Response; @@ -44,7 +45,7 @@ public class ServerInternalKsqlClientTest { @Mock private KsqlResource ksqlResource; @Mock - private ServiceContext serviceContext; + private KsqlSecurityContext securityContext; @Mock private URI unused; @Mock @@ -58,13 +59,13 @@ public void setUp() { when(response.getStatus()).thenReturn(Status.OK.getStatusCode()); when(response.getEntity()).thenReturn(entities); - ksqlClient = new ServerInternalKsqlClient(ksqlResource, serviceContext); + ksqlClient = new ServerInternalKsqlClient(ksqlResource, securityContext); } @Test public void shouldMakeKsqlRequest() { // Given: - when(ksqlResource.handleKsqlStatements(serviceContext, EXPECTED_REQUEST)).thenReturn(response); + when(ksqlResource.handleKsqlStatements(securityContext, EXPECTED_REQUEST)).thenReturn(response); // When: final RestResponse restResponse = From 096b78f8648382c4792411bb83914d1e1d7e0c03 Mon Sep 17 00:00:00 2001 From: Victoria Xia Date: Tue, 7 Jan 2020 11:22:57 -0800 Subject: [PATCH 077/123] fix: CLI commands may be terminated with semicolon+whitespace (MINOR) (#4234) --- .../io/confluent/ksql/cli/console/Console.java | 2 +- .../io/confluent/ksql/cli/console/ConsoleTest.java | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java 
b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java index 47493f2d5a63..7cc90f5c8799 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java @@ -409,7 +409,7 @@ public OutputFormat getOutputFormat() { } private Optional getCliCommand(final String line) { - final List parts = splitByUnquotedWhitespace(StringUtils.stripEnd(line, ";")); + final List parts = splitByUnquotedWhitespace(StringUtils.stripEnd(line.trim(), ";")); if (parts.isEmpty()) { return Optional.empty(); } diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java index 21e196b14faf..aa8f29638bfd 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java @@ -1356,6 +1356,20 @@ public void shouldSupportCmdBeingTerminatedWithSemiColon() { verify(cliCommand).execute(eq(ImmutableList.of("Arg0")), any()); } + @Test + public void shouldSupportCmdBeingTerminatedWithSemiColonAndWhitespace() { + // Given: + when(lineSupplier.get()) + .thenReturn(CLI_CMD_NAME + WHITE_SPACE + "Arg0; \n") + .thenReturn("not a CLI command;"); + + // When: + console.readLine(); + + // Then: + verify(cliCommand).execute(eq(ImmutableList.of("Arg0")), any()); + } + @Test public void shouldSupportCmdWithQuotedArgBeingTerminatedWithSemiColon() { // Given: From 665f2078895c773541b3600d6bb38b1e62f5698b Mon Sep 17 00:00:00 2001 From: Rohan Date: Tue, 7 Jan 2020 11:31:53 -0800 Subject: [PATCH 078/123] feat: add test topology rewriter (#4204) * feat: add test topology rewriter This patch adds a rewriter tool for rewriting test topologies in the event that the streams topology changes in a backwards compatible way. 
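
For reference, the rewriter is driven from the @Ignore'd JUnit entry point
added in this patch. A minimal sketch of a full rewrite run (all names below
are the ones introduced by this patch):

    // Re-plan every saved historical plan while keeping each plan's
    // original version and timestamp: PlannedTestRewriter.FULL delegates
    // to TestCasePlanLoader::rebuiltForTestCase.
    new PlannedTestRewriter(PlannedTestRewriter.FULL)
        .rewriteTestCases(QueryTranslationTest.findTestCases());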
--- .../ksql/test/model/KsqlVersion.java | 7 +- .../confluent/ksql/test/tools/TestCase.java | 3 +- .../ksql/test/PlannedTestRewriterTest.java | 29 ++++++ .../ksql/test/PlannedTestsUpToDateTest.java | 23 +---- .../test/planned/PlannedTestGenerator.java | 59 +------------ .../ksql/test/planned/PlannedTestLoader.java | 29 +----- .../ksql/test/planned/PlannedTestPath.java | 84 ++++++++++++++++++ .../test/planned/PlannedTestRewriter.java | 48 ++++++++++ .../ksql/test/planned/PlannedTestUtils.java | 46 +++++----- .../ksql/test/planned/TestCasePlanLoader.java | 88 +++++++++++++------ .../ksql/test/planned/TestCasePlanWriter.java | 56 ++++++++++++ 11 files changed, 313 insertions(+), 159 deletions(-) create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestRewriterTest.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestPath.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestRewriter.java create mode 100644 ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanWriter.java diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/KsqlVersion.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/KsqlVersion.java index 1f4e2ecbe266..245c2fc75e6b 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/KsqlVersion.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/model/KsqlVersion.java @@ -21,6 +21,7 @@ import io.confluent.ksql.util.Version; import java.util.Comparator; import java.util.Objects; +import java.util.OptionalLong; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -33,7 +34,7 @@ public final class KsqlVersion implements Comparable { @EffectivelyImmutable private static final Comparator COMPARATOR = Comparator.comparing(KsqlVersion::getVersion) - .thenComparingLong(KsqlVersion::getTimestamp); + .thenComparingLong(v -> v.timestamp); private final transient String name; private final SemanticVersion version; @@ -78,8 +79,8 @@ public SemanticVersion getVersion() { return version; } - public long getTimestamp() { - return timestamp; + public OptionalLong getTimestamp() { + return timestamp == Long.MAX_VALUE ? OptionalLong.empty() : OptionalLong.of(timestamp); } @Override diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCase.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCase.java index e47a8855c4b3..a5291ecd0d60 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCase.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestCase.java @@ -114,7 +114,8 @@ public TestCase withExpectedTopology( throw new IllegalArgumentException("Test does not support supplied version: " + version); } - final String newName = name + "-" + version.getName(); + final String newName = name + "-" + version.getName() + + (version.getTimestamp().isPresent() ? 
"-" + version.getTimestamp().getAsLong() : ""); final TestCase copy = new TestCase( testPath, newName, diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestRewriterTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestRewriterTest.java new file mode 100644 index 000000000000..8f6be8f3042c --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestRewriterTest.java @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.test; + +import io.confluent.ksql.test.planned.PlannedTestRewriter; +import org.junit.Ignore; +import org.junit.Test; + +public class PlannedTestRewriterTest { + @Test + @Ignore + public void rewritePlans() { + new PlannedTestRewriter(PlannedTestRewriter.FULL) + .rewriteTestCases(QueryTranslationTest.findTestCases()); + } +} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestsUpToDateTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestsUpToDateTest.java index 6d571de9f0d9..45a4a819280d 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestsUpToDateTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/PlannedTestsUpToDateTest.java @@ -15,20 +15,15 @@ package io.confluent.ksql.test; -import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; import com.fasterxml.jackson.databind.ObjectMapper; import io.confluent.ksql.execution.json.PlanJsonMapper; import io.confluent.ksql.test.planned.TestCasePlan; import io.confluent.ksql.test.planned.TestCasePlanLoader; -import io.confluent.ksql.test.planned.PlannedTestLoader; import io.confluent.ksql.test.planned.PlannedTestUtils; import io.confluent.ksql.test.tools.TestCase; import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.Collection; import java.util.Objects; import java.util.Optional; @@ -69,22 +64,8 @@ public PlannedTestsUpToDateTest(final String name, final TestCase testCase) { @Test public void shouldHaveLatestPlans() { - final Path testCaseDir = Paths.get( - PlannedTestLoader.PLANS_DIR, - PlannedTestUtils.formatName(testCase.getName()) - ); - - assertThat( - String.format( - "Missing test plan directory for: %s. Please re-generate QTT plans." - + " See `ksql-functional-tests/README.md` for more info.", - testCase.getName() - ), - Files.isDirectory(PlannedTestUtils.findBaseDir().resolve(testCaseDir)), is(true) - ); - - final Optional latest = TestCasePlanLoader.fromLatest(testCaseDir); - final TestCasePlan current = TestCasePlanLoader.fromTestCase(testCase); + final Optional latest = TestCasePlanLoader.latestForTestCase(testCase); + final TestCasePlan current = TestCasePlanLoader.currentForTestCase(testCase); assertThat( String.format( "Current query plan differs from latest for: %s. Please re-generate QTT plans." 
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java index 2fa7498cea03..4b5cda6c215c 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestGenerator.java @@ -15,15 +15,7 @@ package io.confluent.ksql.test.planned; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Charsets; -import io.confluent.ksql.execution.json.PlanJsonMapper; import io.confluent.ksql.test.tools.TestCase; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardOpenOption; import java.util.Optional; import java.util.stream.Stream; @@ -31,7 +23,6 @@ * Tool for generating new TestCasePlans and writing them to the local filesystem */ public class PlannedTestGenerator { - private static final ObjectMapper MAPPER = PlanJsonMapper.create(); public static void generatePlans(final Stream testCases) { testCases @@ -40,55 +31,11 @@ public static void generatePlans(final Stream testCases) { } private static void maybeGenerateTestCase(final TestCase testCase) { - final String testCaseName = PlannedTestUtils.formatName(testCase.getName()); - final Path testCaseDir = Paths.get(PlannedTestLoader.PLANS_DIR, testCaseName); - createDirectory(testCaseDir); - final Optional latest = TestCasePlanLoader.fromLatest(testCaseDir); - final TestCasePlan current = TestCasePlanLoader.fromTestCase(testCase); + final Optional latest = TestCasePlanLoader.latestForTestCase(testCase); + final TestCasePlan current = TestCasePlanLoader.currentForTestCase(testCase); if (PlannedTestUtils.isSamePlan(latest, current)) { return; } - dumpTestCase(testCaseDir, current); - } - - private static String getTestDirName(final TestCasePlan planAtVersionNode) { - return String.format("%s_%s", planAtVersionNode.getVersion(), planAtVersionNode.getTimestamp()); - } - - private static void dumpTestCase(final Path dir, final TestCasePlan planAtVersion) { - final Path parent = PlannedTestUtils.findBaseDir() - .resolve(dir) - .resolve(getTestDirName(planAtVersion)); - final Path specPath = parent.resolve(PlannedTestLoader.SPEC_FILE); - final Path topologyPath = parent.resolve(PlannedTestLoader.TOPOLOGY_FILE); - try { - Files.createDirectories(parent); - Files.write( - specPath, - MAPPER.writerWithDefaultPrettyPrinter() - .writeValueAsString(planAtVersion.getNode()) - .getBytes(Charsets.UTF_8), - StandardOpenOption.CREATE, - StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING - ); - Files.write( - topologyPath, - planAtVersion.getTopology().getBytes(Charsets.UTF_8), - StandardOpenOption.CREATE, - StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING - ); - } catch (final IOException e) { - throw new RuntimeException(e); - } - } - - private static void createDirectory(final Path path) { - try { - Files.createDirectories(PlannedTestUtils.findBaseDir().resolve(path)); - } catch (final IOException e) { - throw new RuntimeException(e); - } + TestCasePlanWriter.writeTestCasePlan(testCase, current); } } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java index a0c31cf194e9..05b88cf1ff21 100644 --- 
a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestLoader.java @@ -21,9 +21,7 @@ import io.confluent.ksql.test.tools.TopologyAndConfigs; import io.confluent.ksql.test.tools.VersionedTest; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.Objects; -import java.util.Optional; import java.util.stream.Stream; /** @@ -31,9 +29,6 @@ * against a saved physical plan (according to {@link PlannedTestUtils#isPlannedTestCase}) */ public class PlannedTestLoader implements TestLoader { - public static final String PLANS_DIR = "historical_plans/"; - public static final String SPEC_FILE = "spec.json"; - public static final String TOPOLOGY_FILE = "topology"; private final TestLoader innerLoader; @@ -54,32 +49,12 @@ public Stream load() { private Stream buildHistoricalTestCases(final TestCase testCase) { if (PlannedTestUtils.isPlannedTestCase(testCase)) { - final Path rootforCase - = Paths.get(PLANS_DIR, PlannedTestUtils.formatName(testCase.getName())); - return PlannedTestUtils.findContentsOfDirectory(rootforCase.toString()).stream() - .map(d -> buildHistoricalTestCase(testCase, rootforCase.resolve(d))); + return TestCasePlanLoader.allForTestCase(testCase).stream() + .map(plan -> PlannedTestUtils.buildPlannedTestCase(testCase, plan)); } else if (testCase.getVersionBounds().contains(KsqlVersion.current())) { return Stream.of(testCase); } else { return Stream.empty(); } } - - private VersionedTest buildHistoricalTestCase( - final VersionedTest testCase, - final Path dir - ) { - final TestCasePlan planAtVersionNode = TestCasePlanLoader.fromSpecific(dir); - final KsqlVersion version = KsqlVersion.parse(planAtVersionNode.getVersion()) - .withTimestamp(planAtVersionNode.getTimestamp()); - return testCase.withExpectedTopology( - version, - new TopologyAndConfigs( - Optional.of(planAtVersionNode.getPlan()), - planAtVersionNode.getTopology(), - planAtVersionNode.getSchemas(), - planAtVersionNode.getConfigs() - ) - ); - } } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestPath.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestPath.java new file mode 100644 index 000000000000..f4e4354006a3 --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestPath.java @@ -0,0 +1,84 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.test.planned; + +import io.confluent.ksql.test.tools.TestCase; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Objects; + +public final class PlannedTestPath { + private static final String INVALID_FILENAME_CHARS_PATTERN = "\\s|/|\\\\|:|\\*|\\?|\"|<|>|\\|"; + private static final String BASE_DIRECTORY = "src/test/resources/"; + private static final String PLANS_DIR = "historical_plans/"; + public static final String SPEC_FILE = "spec.json"; + public static final String TOPOLOGY_FILE = "topology"; + + private final Path path; + + private PlannedTestPath(final Path path) { + this.path = Objects.requireNonNull(path, "path"); + } + + public static PlannedTestPath forTestCase(final TestCase testCase) { + return new PlannedTestPath(Paths.get(PLANS_DIR, formatName(testCase.getName()))); + } + + public static PlannedTestPath forTestCasePlan(final TestCase testCase, final TestCasePlan plan) { + return new PlannedTestPath( + forTestCase(testCase).path().resolve( + String.format("%s_%s", plan.getVersion(), plan.getTimestamp())) + ); + } + + public PlannedTestPath resolve(final Path path) { + return new PlannedTestPath(this.path.resolve(path)); + } + + public PlannedTestPath resolve(final String path) { + return new PlannedTestPath(this.path.resolve(path)); + } + + public Path path() { + return path; + } + + public Path relativePath() { + return findBaseDir().resolve(path); + } + + private static Path findBaseDir() { + Path path = Paths.get("./ksql-functional-tests"); + if (Files.exists(path)) { + return path.resolve(BASE_DIRECTORY); + } + path = Paths.get("../ksql-functional-tests"); + if (Files.exists(path)) { + return path.resolve(BASE_DIRECTORY); + } + throw new RuntimeException("Failed to determine location of expected topologies directory. " + + "App should be run with current directory set to either the root of the repo or the " + + "root of the ksql-functional-tests module"); + } + + private static String formatName(final String originalName) { + return originalName + .replaceAll(" - (AVRO|JSON|DELIMITED|KAFKA)$", "") + .replaceAll(INVALID_FILENAME_CHARS_PATTERN, "_"); + } +} + diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestRewriter.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestRewriter.java new file mode 100644 index 000000000000..4c1e33c414e6 --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestRewriter.java @@ -0,0 +1,48 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.test.planned; + +import io.confluent.ksql.test.tools.TestCase; +import java.util.Objects; +import java.util.function.BiFunction; +import java.util.stream.Stream; + +/** + * Tool for rewriting planned test cases + */ +public class PlannedTestRewriter { + private final BiFunction rewriter; + + public static final BiFunction FULL + = TestCasePlanLoader::rebuiltForTestCase; + + public PlannedTestRewriter(final BiFunction rewriter) { + this.rewriter = Objects.requireNonNull(rewriter, "rewriter"); + } + + public void rewriteTestCases(final Stream testCases) { + testCases + .filter(PlannedTestUtils::isPlannedTestCase) + .forEach(this::rewriteTestCase); + } + + private void rewriteTestCase(final TestCase testCase) { + for (final TestCasePlan testCasePlan : TestCasePlanLoader.allForTestCase(testCase)) { + final TestCasePlan rewritten = rewriter.apply(testCase, testCasePlan); + TestCasePlanWriter.writeTestCasePlan(testCase, rewritten); + } + } +} diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestUtils.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestUtils.java index 281c49419d73..d24bebef1cf8 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestUtils.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/PlannedTestUtils.java @@ -17,8 +17,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; +import io.confluent.ksql.test.model.KsqlVersion; import io.confluent.ksql.test.tools.TestCase; +import io.confluent.ksql.test.tools.TopologyAndConfigs; +import io.confluent.ksql.test.tools.VersionedTest; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -26,13 +31,12 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.List; import java.util.Optional; public final class PlannedTestUtils { - private static final String BASE_DIRECTORY = "src/test/resources/"; - private static final String INVALID_FILENAME_CHARS_PATTERN = "\\s|/|\\\\|:|\\*|\\?|\"|<|>|\\|"; // this is temporary private static final List WHITELIST = ImmutableList.of( "average - calculate average in select" @@ -71,28 +75,20 @@ public static Optional> loadContents(final String path) { } } - public static List findContentsOfDirectory(final String path) { - return loadContents(path) - .orElseThrow(() -> new AssertionError("Dir not found: " + path)); - } - - public static String formatName(final String originalName) { - return originalName - .replaceAll(" - (AVRO|JSON|DELIMITED|KAFKA)$", "") - .replaceAll(INVALID_FILENAME_CHARS_PATTERN, "_"); - } - - public static Path findBaseDir() { - Path path = Paths.get("./ksql-functional-tests"); - if (Files.exists(path)) { - return path.resolve(BASE_DIRECTORY); - } - path = Paths.get("../ksql-functional-tests"); - if (Files.exists(path)) { - return path.resolve(BASE_DIRECTORY); - } - throw new RuntimeException("Failed to determine location of expected topologies directory. 
" - + "App should be run with current directory set to either the root of the repo or the " - + "root of the ksql-functional-tests module"); + public static TestCase buildPlannedTestCase( + final TestCase testCase, + final TestCasePlan planAtVersionNode + ) { + final KsqlVersion version = KsqlVersion.parse(planAtVersionNode.getVersion()) + .withTimestamp(planAtVersionNode.getTimestamp()); + return testCase.withExpectedTopology( + version, + new TopologyAndConfigs( + Optional.of(planAtVersionNode.getPlan()), + planAtVersionNode.getTopology(), + planAtVersionNode.getSchemas(), + planAtVersionNode.getConfigs() + ) + ); } } diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java index caaa5bfc75b3..26b3af084397 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java @@ -43,8 +43,10 @@ import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path;; +import java.util.Collections; import java.util.List; import java.util.Optional; +import java.util.stream.Collectors; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import org.w3c.dom.Document; @@ -68,52 +70,84 @@ private TestCasePlanLoader() { * @param testCase the test case to build plans for * @return the built plan. */ - public static TestCasePlan fromTestCase(final TestCase testCase) { + public static TestCasePlan currentForTestCase(final TestCase testCase) { final KsqlConfig configs = BASE_CONFIG.cloneWithPropertyOverwrite(testCase.properties()); try ( final ServiceContext serviceContext = getServiceContext(); final KsqlEngine engine = getKsqlEngine(serviceContext)) { - return buildStatementsInTestCase(testCase, configs, serviceContext, engine); + return buildStatementsInTestCase( + testCase, + configs, + serviceContext, + engine, + CURRENT_VERSION, + System.currentTimeMillis() + ); + } + } + + /** + * Rebuilds a TestCasePlan given a TestCase and a TestCasePlan + * @param testCase the test case to rebuild the plan for + * @param original the plan to rebuild + * @return the rebuilt plan. + */ + public static TestCasePlan rebuiltForTestCase( + final TestCase testCase, + final TestCasePlan original) { + final TestCase withOriginal = PlannedTestUtils.buildPlannedTestCase(testCase, original); + final KsqlConfig configs = BASE_CONFIG.cloneWithPropertyOverwrite(testCase.properties()); + try ( + final ServiceContext serviceContext = getServiceContext(); + final KsqlEngine engine = getKsqlEngine(serviceContext)) { + return buildStatementsInTestCase( + withOriginal, + configs, + serviceContext, + engine, + original.getVersion(), + original.getTimestamp() + ); } } /** * Create a TestCasePlan by loading it from the local filesystem. This factory loads the * most recent plan from a given test case directory. - * @param testCaseDir The directory to load the plan from. + * @param testCase The test case to load the latest plan for * @return the loaded plan. 
 */
-  public static Optional<TestCasePlan> fromLatest(final Path testCaseDir) {
-    final Optional<List<String>> existing = PlannedTestUtils.loadContents(testCaseDir.toString());
-    if (!existing.isPresent()) {
-      return Optional.empty();
-    }
+  public static Optional<TestCasePlan> latestForTestCase(final TestCase testCase) {
     KsqlVersion latestVersion = null;
     TestCasePlan latest = null;
-    for (final String versionDir : existing.get()) {
-      final TestCasePlan planAtVersionNode = parseSpec(testCaseDir.resolve(versionDir));
-      final KsqlVersion version = KsqlVersion.parse(planAtVersionNode.getVersion())
-          .withTimestamp(planAtVersionNode.getTimestamp());
+    for (final TestCasePlan candidate : allForTestCase(testCase)) {
+      final KsqlVersion version = KsqlVersion.parse(candidate.getVersion())
+          .withTimestamp(candidate.getTimestamp());
       if (latestVersion == null || latestVersion.compareTo(version) < 0) {
         latestVersion = version;
-        latest = planAtVersionNode;
+        latest = candidate;
       }
     }
     return Optional.ofNullable(latest);
   }
 
   /**
-   * Create a TestCasePlan by loading a specific plan from the local filesystem.
-   * @param versionDir the directory to load the plan from.
-   * @return the loaded plan.
+   * Create a TestCasePlan for all saved plans for a test case
+   * @param testCase the test case to load saved plans for
+   * @return a list of the loaded plans.
    */
-  public static TestCasePlan fromSpecific(final Path versionDir) {
-    return parseSpec(versionDir);
+  public static List<TestCasePlan> allForTestCase(final TestCase testCase) {
+    final PlannedTestPath rootforCase = PlannedTestPath.forTestCase(testCase);
+    return PlannedTestUtils.loadContents(rootforCase.path().toString())
+        .orElse(Collections.emptyList())
+        .stream()
+        .map(p -> parseSpec(rootforCase.resolve(p)))
+        .collect(Collectors.toList());
   }
 
-  private static TestCasePlan parseSpec(final Path versionDir) {
-    final Path specPath = versionDir.resolve(PlannedTestLoader.SPEC_FILE);
-    final Path topologyPath = versionDir.resolve(PlannedTestLoader.TOPOLOGY_FILE);
+  private static TestCasePlan parseSpec(final PlannedTestPath versionDir) {
+    final PlannedTestPath specPath = versionDir.resolve(PlannedTestPath.SPEC_FILE);
+    final PlannedTestPath topologyPath = versionDir.resolve(PlannedTestPath.TOPOLOGY_FILE);
     try {
       return new TestCasePlan(
           MAPPER.readValue(slurp(specPath), TestCasePlanNode.class),
@@ -124,9 +158,9 @@ private static TestCasePlan parseSpec(final Path versionDir) {
     }
   }
 
-  private static String slurp(final Path path) throws IOException {
+  private static String slurp(final PlannedTestPath path) throws IOException {
     return new String(
-        Files.readAllBytes(PlannedTestUtils.findBaseDir().resolve(path)),
+        Files.readAllBytes(path.relativePath()),
         Charset.defaultCharset()
     );
   }
@@ -135,7 +169,9 @@ private static TestCasePlan buildStatementsInTestCase(
       final TestCase testCase,
       final KsqlConfig ksqlConfig,
       final ServiceContext serviceContext,
-      final KsqlEngine ksqlEngine) {
+      final KsqlEngine ksqlEngine,
+      final String version,
+      final long timestamp) {
     final Iterable<ConfiguredKsqlPlan> configuredPlans = TestExecutorUtil.planTestCase(
         ksqlEngine,
         testCase,
@@ -159,8 +195,8 @@ private static TestCasePlan buildStatementsInTestCase(
       throw new AssertionError("test case does not build a query");
     }
     return new TestCasePlan(
-        CURRENT_VERSION,
-        System.currentTimeMillis(),
+        version,
+        timestamp,
         plansBuilder.build(),
         queryMetadata.getTopologyDescription(),
         queryMetadata.getSchemasDescription(),
diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanWriter.java
b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanWriter.java new file mode 100644 index 000000000000..2e376efae421 --- /dev/null +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanWriter.java @@ -0,0 +1,56 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.test.planned; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Charsets; +import io.confluent.ksql.execution.json.PlanJsonMapper; +import io.confluent.ksql.test.tools.TestCase; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; + +public class TestCasePlanWriter { + private static final ObjectMapper MAPPER = PlanJsonMapper.create(); + + public static void writeTestCasePlan(final TestCase testCase, final TestCasePlan planAtVersion) { + final Path parent = PlannedTestPath.forTestCasePlan(testCase, planAtVersion).relativePath(); + final Path specPath = parent.resolve(PlannedTestPath.SPEC_FILE); + final Path topologyPath = parent.resolve(PlannedTestPath.TOPOLOGY_FILE); + try { + Files.createDirectories(parent); + Files.write( + specPath, + MAPPER.writerWithDefaultPrettyPrinter() + .writeValueAsString(planAtVersion.getNode()) + .getBytes(Charsets.UTF_8), + StandardOpenOption.CREATE, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING + ); + Files.write( + topologyPath, + planAtVersion.getTopology().getBytes(Charsets.UTF_8), + StandardOpenOption.CREATE, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING + ); + } catch (final IOException e) { + throw new RuntimeException(e); + } + } +} From 58ed39c8a1e5f21962dee82ba78ee39b391513f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20Pe=C3=B1a?= Date: Tue, 7 Jan 2020 16:10:59 -0600 Subject: [PATCH 079/123] feat: add a KSQL cache for Kafka authorization validator (#4186) Implement a cache for permission checks done by the KsqlAuthorizationValidator. 
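
In outline, the new validator wiring composes the plain backend check with a
Guava cache keyed by (user principal, topic, ACL operation). The snippet below
is a condensed sketch of createAuthorizationValidator from this patch, not new
API surface; ksqlConfig is assumed to be in scope:

    KsqlAccessValidator accessValidator = new KsqlBackendAccessValidator();

    // A non-positive ksql.authorization.cache.expiry.time.secs disables the
    // cache; otherwise entries expire after the TTL and the cache is capped
    // at ksql.authorization.cache.max.entries. Denials are cached too, and
    // the stored KsqlTopicAuthorizationException is re-thrown until expiry.
    if (ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_EXPIRY_TIME_SECS) > 0) {
      accessValidator = new KsqlCacheAccessValidator(ksqlConfig, accessValidator);
    }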
--- .../io/confluent/ksql/util/KsqlConfig.java | 25 ++ .../ksql/security/KsqlAccessValidator.java | 33 +++ .../security/KsqlAuthorizationValidator.java | 15 +- .../KsqlAuthorizationValidatorFactory.java | 18 +- .../KsqlAuthorizationValidatorImpl.java | 72 +++--- .../security/KsqlBackendAccessValidator.java | 45 ++++ .../security/KsqlCacheAccessValidator.java | 154 +++++++++++++ ...KsqlAuthorizationValidatorFactoryTest.java | 35 ++- .../KsqlAuthorizationValidatorImplTest.java | 215 ++++++------------ .../KsqlBackendAccessValidatorTest.java | 128 +++++++++++ .../KsqlCacheAccessValidatorTest.java | 115 ++++++++++ .../computation/DistributingExecutor.java | 13 +- .../rest/server/execution/RequestHandler.java | 12 +- .../rest/server/resources/KsqlResource.java | 4 +- .../streaming/StreamedQueryResource.java | 2 +- .../resources/streaming/WSQueryEndpoint.java | 56 +++-- .../computation/DistributingExecutorTest.java | 36 ++- .../server/execution/RequestHandlerTest.java | 19 +- .../streaming/WSQueryEndpointTest.java | 8 +- 19 files changed, 741 insertions(+), 264 deletions(-) create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAccessValidator.java create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/security/KsqlBackendAccessValidator.java create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/security/KsqlCacheAccessValidator.java create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/security/KsqlBackendAccessValidatorTest.java create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/security/KsqlCacheAccessValidatorTest.java diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java index 3c2bfb659720..2a3bd7709548 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java +++ b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java @@ -196,6 +196,19 @@ public class KsqlConfig extends AbstractConfig { public static final String KSQL_SHUTDOWN_TIMEOUT_MS_DOC = "Timeout in " + "milliseconds to block waiting for the underlying streams instance to exit"; + public static final String KSQL_AUTH_CACHE_EXPIRY_TIME_SECS = + "ksql.authorization.cache.expiry.time.secs"; + public static final Long KSQL_AUTH_CACHE_EXPIRY_TIME_SECS_DEFAULT = 30L; + public static final String KSQL_AUTH_CACHE_EXPIRY_TIME_SECS_DOC = "Time in " + + "seconds to keep KSQL authorization responses in the cache. (The cache is disabled if " + + "0 or a negative number is set)."; + + public static final String KSQL_AUTH_CACHE_MAX_ENTRIES = + "ksql.authorization.cache.max.entries"; + public static final Long KSQL_AUTH_CACHE_MAX_ENTRIES_DEFAULT = 10000L; + public static final String KSQL_AUTH_CACHE_MAX_ENTRIES_DOC = "Controls the size of the cache " + + "to a maximum number of KSQL authorization responses entries."; + private enum ConfigGeneration { LEGACY, CURRENT @@ -520,6 +533,18 @@ private static ConfigDef buildConfigDef(final ConfigGeneration generation) { KSQL_EXECUTION_PLANS_ENABLE_DEFAULT, Importance.LOW, "Feature flag to enable writing KSQL execution plans. For testing only." 
+    ).define(
+        KSQL_AUTH_CACHE_EXPIRY_TIME_SECS,
+        Type.LONG,
+        KSQL_AUTH_CACHE_EXPIRY_TIME_SECS_DEFAULT,
+        Importance.LOW,
+        KSQL_AUTH_CACHE_EXPIRY_TIME_SECS_DOC
+    ).define(
+        KSQL_AUTH_CACHE_MAX_ENTRIES,
+        Type.LONG,
+        KSQL_AUTH_CACHE_MAX_ENTRIES_DEFAULT,
+        Importance.LOW,
+        KSQL_AUTH_CACHE_MAX_ENTRIES_DOC
     )
     .withClientSslSupport();
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAccessValidator.java b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAccessValidator.java
new file mode 100644
index 000000000000..4f6f3912f093
--- /dev/null
+++ b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAccessValidator.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.security;
+
+import org.apache.kafka.common.acl.AclOperation;
+
+/**
+ * An interface that provides ACL validation on Kafka topics.
+ */
+public interface KsqlAccessValidator {
+  /**
+   * Checks if an authenticated user provided by the {@code securityContext} has authorization
+   * to execute the {@code operation} on the Kafka topic {@code topicName}.
+   *
+   * @param securityContext The context for the authenticated user.
+   * @param topicName The topic name to check access to.
+   * @param operation The {@code AclOperation} to validate against the {@code topicName}.
+   */
+  void checkAccess(KsqlSecurityContext securityContext, String topicName, AclOperation operation);
+}
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidator.java b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidator.java
index b12c5f364394..fd36c312a779 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidator.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidator.java
@@ -20,17 +20,20 @@
 import io.confluent.ksql.services.ServiceContext;
 
 /**
- * Checks if a user (configured in the {@link ServiceContext}) has permissions to execute the
- * specified KSQL {@link Statement}.
+ * Checks if a user or {@link ServiceContext} has permissions to execute the specified KSQL
+ * {@link Statement}.
  */
 public interface KsqlAuthorizationValidator {
   /**
-   * Checks if a user (configured in the {@link ServiceContext}) has permissions to execute the
-   * specified KSQL {@link Statement}.
+   * Checks if a user or {@link ServiceContext} has permissions to execute the specified KSQL
+   * {@link Statement}.
    *
-   * @param serviceContext The service context to validate Kafka/SR authorization.
+   * @param securityContext The security context to validate Kafka/SR authorization.
    * @param metaStore The metastore object to obtain extra statement metadata.
    * @param statement The statement to check for authorization.
  */
-  void checkAuthorization(ServiceContext serviceContext, MetaStore metaStore, Statement statement);
+  void checkAuthorization(
+      KsqlSecurityContext securityContext,
+      MetaStore metaStore,
+      Statement statement);
 }
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorFactory.java b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorFactory.java
index 4acd3f545122..1b567de19488 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorFactory.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorFactory.java
@@ -41,7 +41,7 @@ public static Optional<KsqlAuthorizationValidator> create(
     final String enabled = ksqlConfig.getString(KsqlConfig.KSQL_ENABLE_TOPIC_ACCESS_VALIDATOR);
     if (enabled.equals(KsqlConfig.KSQL_ACCESS_VALIDATOR_ON)) {
       LOG.info("Forcing topic access validator");
-      return Optional.of(new KsqlAuthorizationValidatorImpl());
+      return Optional.of(createAuthorizationValidator(ksqlConfig));
     } else if (enabled.equals(KsqlConfig.KSQL_ACCESS_VALIDATOR_OFF)) {
       return Optional.empty();
     }
@@ -51,7 +51,7 @@ public static Optional<KsqlAuthorizationValidator> create(
     if (isKafkaAuthorizerEnabled(adminClient)) {
       if (KafkaClusterUtil.isAuthorizedOperationsSupported(adminClient)) {
         LOG.info("KSQL topic authorization checks enabled.");
-        return Optional.of(new KsqlAuthorizationValidatorImpl());
+        return Optional.of(createAuthorizationValidator(ksqlConfig));
       }
 
       LOG.warn("The Kafka broker has an authorization service enabled, but the Kafka "
@@ -61,6 +61,20 @@ public static Optional<KsqlAuthorizationValidator> create(
     return Optional.empty();
   }
 
+  private static KsqlAuthorizationValidator createAuthorizationValidator(
+      final KsqlConfig ksqlConfig
+  ) {
+    KsqlAccessValidator accessValidator = new KsqlBackendAccessValidator();
+
+    // The cache expiry time is used to decide whether to enable the cache or not
+    final long expiryTime = ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_EXPIRY_TIME_SECS);
+    if (expiryTime > 0) {
+      accessValidator = new KsqlCacheAccessValidator(ksqlConfig, accessValidator);
+    }
+
+    return new KsqlAuthorizationValidatorImpl(accessValidator);
+  }
+
   private static boolean isKafkaAuthorizerEnabled(final Admin adminClient) {
     try {
       final ConfigEntry configEntry =
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImpl.java b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImpl.java
index 3204f156131e..96ce8a4a4efd 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImpl.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImpl.java
@@ -15,7 +15,6 @@
 package io.confluent.ksql.security;
 
-import io.confluent.ksql.exception.KsqlTopicAuthorizationException;
 import io.confluent.ksql.metastore.MetaStore;
 import io.confluent.ksql.metastore.model.DataSource;
 import io.confluent.ksql.name.SourceName;
@@ -25,11 +24,8 @@
 import io.confluent.ksql.parser.tree.PrintTopic;
 import io.confluent.ksql.parser.tree.Query;
 import io.confluent.ksql.parser.tree.Statement;
-import io.confluent.ksql.services.ServiceContext;
 import io.confluent.ksql.topic.SourceTopicsExtractor;
 import io.confluent.ksql.util.KsqlException;
-import java.util.Collections;
-import java.util.Set;
 import org.apache.kafka.common.acl.AclOperation;
 
 /**
@@ -39,39 +35,49 @@
  * This validator only works on Kafka 2.3 or later.
  */
 public class KsqlAuthorizationValidatorImpl implements KsqlAuthorizationValidator {
+  private final KsqlAccessValidator accessValidator;
+
+  public KsqlAuthorizationValidatorImpl(final KsqlAccessValidator accessValidator) {
+    this.accessValidator = accessValidator;
+  }
+
+  KsqlAccessValidator getAccessValidator() {
+    return accessValidator;
+  }
+
   @Override
   public void checkAuthorization(
-      final ServiceContext serviceContext,
+      final KsqlSecurityContext securityContext,
       final MetaStore metaStore,
       final Statement statement
   ) {
     if (statement instanceof Query) {
-      validateQuery(serviceContext, metaStore, (Query)statement);
+      validateQuery(securityContext, metaStore, (Query)statement);
     } else if (statement instanceof InsertInto) {
-      validateInsertInto(serviceContext, metaStore, (InsertInto)statement);
+      validateInsertInto(securityContext, metaStore, (InsertInto)statement);
     } else if (statement instanceof CreateAsSelect) {
-      validateCreateAsSelect(serviceContext, metaStore, (CreateAsSelect)statement);
+      validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
     } else if (statement instanceof PrintTopic) {
-      validatePrintTopic(serviceContext, (PrintTopic)statement);
+      validatePrintTopic(securityContext, (PrintTopic)statement);
     } else if (statement instanceof CreateSource) {
-      validateCreateSource(serviceContext, (CreateSource)statement);
+      validateCreateSource(securityContext, (CreateSource)statement);
     }
   }
 
   private void validateQuery(
-      final ServiceContext serviceContext,
+      final KsqlSecurityContext securityContext,
       final MetaStore metaStore,
       final Query query
   ) {
     final SourceTopicsExtractor extractor = new SourceTopicsExtractor(metaStore);
     extractor.process(query, null);
-    for (final String kafkaTopic : extractor.getSourceTopics()) {
-      checkAccess(serviceContext, kafkaTopic, AclOperation.READ);
+    for (final String kafkaTopic : extractor.getSourceTopics()) {
+      accessValidator.checkAccess(securityContext, kafkaTopic, AclOperation.READ);
     }
   }
 
   private void validateCreateAsSelect(
-      final ServiceContext serviceContext,
+      final KsqlSecurityContext securityContext,
       final MetaStore metaStore,
       final CreateAsSelect createAsSelect
   ) {
@@ -84,15 +90,15 @@ private void validateCreateAsSelect(
      * the target topic using the same ServiceContext used for validation.
      */
-    validateQuery(serviceContext, metaStore, createAsSelect.getQuery());
+    validateQuery(securityContext, metaStore, createAsSelect.getQuery());
 
     // At this point, the topic should have been created by the TopicCreateInjector
     final String kafkaTopic = getCreateAsSelectSinkTopic(metaStore, createAsSelect);
-    checkAccess(serviceContext, kafkaTopic, AclOperation.WRITE);
+    accessValidator.checkAccess(securityContext, kafkaTopic, AclOperation.WRITE);
   }
 
   private void validateInsertInto(
-      final ServiceContext serviceContext,
+      final KsqlSecurityContext securityContext,
       final MetaStore metaStore,
       final InsertInto insertInto
   ) {
@@ -102,25 +108,25 @@ private void validateInsertInto(
      * Validates Write on the target topic, and Read on the query sources topics.
*/ - validateQuery(serviceContext, metaStore, insertInto.getQuery()); + validateQuery(securityContext, metaStore, insertInto.getQuery()); final String kafkaTopic = getSourceTopicName(metaStore, insertInto.getTarget()); - checkAccess(serviceContext, kafkaTopic, AclOperation.WRITE); + accessValidator.checkAccess(securityContext, kafkaTopic, AclOperation.WRITE); } private void validatePrintTopic( - final ServiceContext serviceContext, + final KsqlSecurityContext securityContext, final PrintTopic printTopic ) { - checkAccess(serviceContext, printTopic.getTopic(), AclOperation.READ); + accessValidator.checkAccess(securityContext, printTopic.getTopic(), AclOperation.READ); } private void validateCreateSource( - final ServiceContext serviceContext, + final KsqlSecurityContext securityContext, final CreateSource createSource ) { final String sourceTopic = createSource.getProperties().getKafkaTopic(); - checkAccess(serviceContext, sourceTopic, AclOperation.READ); + accessValidator.checkAccess(securityContext, sourceTopic, AclOperation.READ); } private String getSourceTopicName(final MetaStore metaStore, final SourceName streamOrTable) { @@ -133,26 +139,6 @@ private String getSourceTopicName(final MetaStore metaStore, final SourceName st return dataSource.getKafkaTopicName(); } - /** - * Checks if the ServiceContext has access to the topic with the specified AclOperation. - */ - private void checkAccess( - final ServiceContext serviceContext, - final String topicName, - final AclOperation operation - ) { - final Set authorizedOperations = serviceContext.getTopicClient() - .describeTopic(topicName).authorizedOperations(); - - // Kakfa 2.2 or lower do not support authorizedOperations(). In case of running on a - // unsupported broker version, then the authorizeOperation will be null. - if (authorizedOperations != null && !authorizedOperations.contains(operation)) { - // This error message is similar to what Kafka throws when it cannot access the topic - // due to an authorization error. I used this message to keep a consistent message. - throw new KsqlTopicAuthorizationException(operation, Collections.singleton(topicName)); - } - } - private String getCreateAsSelectSinkTopic( final MetaStore metaStore, final CreateAsSelect createAsSelect diff --git a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlBackendAccessValidator.java b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlBackendAccessValidator.java new file mode 100644 index 000000000000..ceb7eabf182a --- /dev/null +++ b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlBackendAccessValidator.java @@ -0,0 +1,45 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.security; + +import io.confluent.ksql.exception.KsqlTopicAuthorizationException; +import java.util.Collections; +import java.util.Set; +import org.apache.kafka.common.acl.AclOperation; + +/** + * An implementation of {@link KsqlAccessValidator} that provides authorization checks + * from the Kafka service. + */ +public class KsqlBackendAccessValidator implements KsqlAccessValidator { + @Override + public void checkAccess( + final KsqlSecurityContext securityContext, + final String topicName, + final AclOperation operation + ) { + final Set<AclOperation> authorizedOperations = securityContext.getServiceContext() + .getTopicClient().describeTopic(topicName).authorizedOperations(); + + // Kafka 2.2 or lower does not support authorizedOperations(). When running on an + // unsupported broker version, authorizedOperations() will be null. + if (authorizedOperations != null && !authorizedOperations.contains(operation)) { + // This error message mirrors what Kafka throws when it denies access to a topic, + // so that clients see a consistent message. + throw new KsqlTopicAuthorizationException(operation, Collections.singleton(topicName)); + } + } +} diff --git a/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlCacheAccessValidator.java b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlCacheAccessValidator.java new file mode 100644 index 000000000000..1a4c72e18f28 --- /dev/null +++ b/ksql-engine/src/main/java/io/confluent/ksql/security/KsqlCacheAccessValidator.java @@ -0,0 +1,154 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.security; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Ticker; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import io.confluent.ksql.exception.KsqlTopicAuthorizationException; +import io.confluent.ksql.util.KsqlConfig; + +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import javax.annotation.concurrent.ThreadSafe; +import org.apache.kafka.common.acl.AclOperation; + +/** + * An implementation of {@link KsqlAccessValidator} that provides authorization checks + * from an in-memory cache.
+ */ +@ThreadSafe +public class KsqlCacheAccessValidator implements KsqlAccessValidator { + private static final boolean ALLOW_ACCESS = true; + + static class CacheKey { + private static final String UNKNOWN_USER = ""; + + private final KsqlSecurityContext securityContext; + private final String topicName; + private final AclOperation operation; + + CacheKey( + final KsqlSecurityContext securityContext, + final String topicName, + final AclOperation operation + ) { + this.securityContext = securityContext; + this.topicName = topicName; + this.operation = operation; + } + + @Override + public boolean equals(final Object o) { + if (o == null || !(o instanceof CacheKey)) { + return false; + } + + final CacheKey other = (CacheKey)o; + return getUserName(securityContext).equals(getUserName(other.securityContext)) + && topicName.equals(other.topicName) + && operation.code() == other.operation.code(); + } + + @Override + public int hashCode() { + return Objects.hash( + getUserName(securityContext), + topicName, + operation.code() + ); + } + + private String getUserName(final KsqlSecurityContext securityContext) { + return (securityContext.getUserPrincipal().isPresent()) + ? securityContext.getUserPrincipal().get().getName() + : UNKNOWN_USER; + } + } + + static class CacheValue { + private final boolean allowAccess; + private final Optional<KsqlTopicAuthorizationException> denialReason; + + CacheValue(final boolean allowAccess, final Optional<KsqlTopicAuthorizationException> denialReason) { + this.allowAccess = allowAccess; + this.denialReason = denialReason; + } + } + + private final LoadingCache<CacheKey, CacheValue> cache; + private final KsqlAccessValidator backendValidator; + + public KsqlCacheAccessValidator( + final KsqlConfig ksqlConfig, + final KsqlAccessValidator backendValidator + ) { + this(ksqlConfig, backendValidator, Ticker.systemTicker()); + } + + @VisibleForTesting + KsqlCacheAccessValidator( + final KsqlConfig ksqlConfig, + final KsqlAccessValidator backendValidator, + final Ticker cacheTicker + ) { + this.backendValidator = backendValidator; + + final long expiryTime = ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_EXPIRY_TIME_SECS); + final long maxEntries = ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_MAX_ENTRIES); + + cache = CacheBuilder.newBuilder() + .expireAfterWrite(expiryTime, TimeUnit.SECONDS) + .maximumSize(maxEntries) + .ticker(cacheTicker) + .build(buildCacheLoader()); + } + + private CacheLoader<CacheKey, CacheValue> buildCacheLoader() { + return new CacheLoader<CacheKey, CacheValue>() { + @Override + public CacheValue load(final CacheKey cacheKey) { + try { + backendValidator.checkAccess( + cacheKey.securityContext, + cacheKey.topicName, + cacheKey.operation + ); + } catch (KsqlTopicAuthorizationException e) { + return new CacheValue(!ALLOW_ACCESS, Optional.of(e)); + } + + return new CacheValue(ALLOW_ACCESS, Optional.empty()); + } + }; + } + + @Override + public void checkAccess( + final KsqlSecurityContext securityContext, + final String topicName, + final AclOperation operation + ) { + final CacheKey cacheKey = new CacheKey(securityContext, topicName, operation); + final CacheValue cacheValue = cache.getUnchecked(cacheKey); + if (!cacheValue.allowAccess) { + throw cacheValue.denialReason.get(); + } + } +} diff --git a/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlAuthorizationValidatorFactoryTest.java b/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlAuthorizationValidatorFactoryTest.java index 081ffc2a8753..fb32318d9050 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlAuthorizationValidatorFactoryTest.java +++
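
The factory test changes that follow pin down how these two validators are meant to compose: the backend validator is always the source of truth, and the cache is layered on top only when the configured expiry time is positive. The factory body itself is not part of this diff, so the following is only a sketch of the wiring implied by the tests; the method name and the Optional return shape are assumptions, not the real factory code.

    private static Optional<KsqlAuthorizationValidator> buildValidator(final KsqlConfig ksqlConfig) {
      // Always start from the Kafka-backed validator; it performs the real ACL check.
      KsqlAccessValidator accessValidator = new KsqlBackendAccessValidator();

      // Per KsqlAuthorizationValidatorFactoryTest, the caching decorator is added only
      // when the expiry config is positive; 0L disables caching entirely.
      if (ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_EXPIRY_TIME_SECS) > 0) {
        accessValidator = new KsqlCacheAccessValidator(ksqlConfig, accessValidator);
      }

      return Optional.of(new KsqlAuthorizationValidatorImpl(accessValidator));
    }
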
b/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlAuthorizationValidatorFactoryTest.java @@ -42,12 +42,12 @@ import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.config.ConfigResource; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; +import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.junit.MockitoJUnit; -import org.mockito.junit.MockitoRule; +import org.mockito.junit.MockitoJUnitRunner; +@RunWith(MockitoJUnitRunner.class) public class KsqlAuthorizationValidatorFactoryTest { private static final String KAFKA_AUTHORIZER_CLASS_NAME = "authorizer.class.name"; @@ -60,9 +60,6 @@ public class KsqlAuthorizationValidatorFactoryTest { private Node node; - @Rule - final public MockitoRule mockitoJUnit = MockitoJUnit.rule(); - @Before public void setUp() { node = new Node(1, "host", 9092); @@ -125,6 +122,30 @@ public void shouldReturnAuthorizationValidatorIfEnabled() { // Given: when(ksqlConfig.getString(KsqlConfig.KSQL_ENABLE_TOPIC_ACCESS_VALIDATOR)) .thenReturn(KsqlConfig.KSQL_ACCESS_VALIDATOR_ON); + when(ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_EXPIRY_TIME_SECS)) + .thenReturn(0L); + + // When: + final Optional validator = KsqlAuthorizationValidatorFactory.create( + ksqlConfig, + serviceContext + ); + + // Then: + assertThat("validator should be present", validator.isPresent()); + assertThat(validator.get(), is(instanceOf(KsqlAuthorizationValidatorImpl.class))); + assertThat(((KsqlAuthorizationValidatorImpl)validator.get()).getAccessValidator(), + is(instanceOf(KsqlBackendAccessValidator.class))); + verifyZeroInteractions(adminClient); + } + + @Test + public void shouldReturnAuthorizationValidatorWithCacheExpiryTimeIsPositive() { + // Given: + when(ksqlConfig.getString(KsqlConfig.KSQL_ENABLE_TOPIC_ACCESS_VALIDATOR)) + .thenReturn(KsqlConfig.KSQL_ACCESS_VALIDATOR_ON); + when(ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_EXPIRY_TIME_SECS)) + .thenReturn(1L); // When: final Optional validator = KsqlAuthorizationValidatorFactory.create( @@ -135,6 +156,8 @@ public void shouldReturnAuthorizationValidatorIfEnabled() { // Then: assertThat("validator should be present", validator.isPresent()); assertThat(validator.get(), is(instanceOf(KsqlAuthorizationValidatorImpl.class))); + assertThat(((KsqlAuthorizationValidatorImpl)validator.get()).getAccessValidator(), + is(instanceOf(KsqlCacheAccessValidator.class))); verifyZeroInteractions(adminClient); } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImplTest.java b/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImplTest.java index 92cc8b8c1733..1835dff58297 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImplTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlAuthorizationValidatorImplTest.java @@ -15,11 +15,11 @@ package io.confluent.ksql.security; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.when; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.engine.KsqlEngineTestUtil; -import io.confluent.ksql.exception.KafkaResponseGetFailedException; import io.confluent.ksql.exception.KsqlTopicAuthorizationException; import io.confluent.ksql.execution.ddl.commands.KsqlTopic; import io.confluent.ksql.function.InternalFunctionRegistry; @@ -41,8 +41,6 @@ import io.confluent.ksql.services.ServiceContext; import java.util.Collections; import java.util.Optional; -import java.util.Set; -import 
org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.common.acl.AclOperation; import org.junit.After; import org.junit.Before; @@ -62,17 +60,13 @@ public class KsqlAuthorizationValidatorImplTest { private static final String STREAM_TOPIC_1 = "s1"; private static final String STREAM_TOPIC_2 = "s2"; - private final static String TOPIC_NAME_1 = "topic1"; - private final static String TOPIC_NAME_2 = "topic2"; + private final static String TOPIC_1 = "topic1"; + private final static String TOPIC_2 = "topic2"; @Mock - private ServiceContext serviceContext; - @Mock - private KafkaTopicClient kafkaTopicClient; + private KsqlAccessValidator accessValidator; @Mock - private TopicDescription TOPIC_1; - @Mock - private TopicDescription TOPIC_2; + private ServiceContext serviceContext; @Rule public final ExpectedException expectedException = ExpectedException.none(); @@ -80,19 +74,17 @@ public class KsqlAuthorizationValidatorImplTest { private KsqlAuthorizationValidator authorizationValidator; private KsqlEngine ksqlEngine; private MutableMetaStore metaStore; + private KsqlSecurityContext securityContext; @Before public void setUp() { metaStore = new MetaStoreImpl(new InternalFunctionRegistry()); ksqlEngine = KsqlEngineTestUtil.createKsqlEngine(serviceContext, metaStore); - authorizationValidator = new KsqlAuthorizationValidatorImpl(); - when(serviceContext.getTopicClient()).thenReturn(kafkaTopicClient); + authorizationValidator = new KsqlAuthorizationValidatorImpl(accessValidator); + securityContext = new KsqlSecurityContext(Optional.empty(), serviceContext); - givenTopic(TOPIC_NAME_1, TOPIC_1); givenStreamWithTopic(STREAM_TOPIC_1, TOPIC_1); - - givenTopic(TOPIC_NAME_2, TOPIC_2); givenStreamWithTopic(STREAM_TOPIC_2, TOPIC_2); } @@ -105,36 +97,19 @@ private Statement givenStatement(final String sql) { return ksqlEngine.prepare(ksqlEngine.parse(sql).get(0)).getStatement(); } - @Test - public void shouldAllowAnyOperationIfPermissionsAreNull() { - // Given: - givenTopicPermissions(TOPIC_1, null); - final Statement statement = givenStatement("SELECT * FROM " + STREAM_TOPIC_1 + ";"); - - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - - // Then: - // Above command should not throw any exception - } - @Test public void shouldSingleSelectWithReadPermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); final Statement statement = givenStatement("SELECT * FROM " + STREAM_TOPIC_1 + ";"); - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - - // Then: - // Above command should not throw any exception + // When/Then: + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldThrowWhenSingleSelectWithoutReadPermissionsDenied() { // Given: - givenTopicPermissions(TOPIC_1, Collections.emptySet()); + givenAccessDenied(TOPIC_1, AclOperation.READ); final Statement statement = givenStatement(String.format( "SELECT * FROM %s;", STREAM_TOPIC_1) ); @@ -142,34 +117,28 @@ public void shouldThrowWhenSingleSelectWithoutReadPermissionsDenied() { // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() + "Authorization denied to Read on topic(s): [%s]", TOPIC_1 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + 
authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldJoinSelectWithReadPermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.READ)); final Statement statement = givenStatement(String.format( "SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", STREAM_TOPIC_1, STREAM_TOPIC_2) ); - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - - // Then: - // Above command should not throw any exception + // When/Then: + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldThrowWhenJoinSelectWithoutReadPermissionsDenied() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.WRITE)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.WRITE)); + givenAccessDenied(TOPIC_1, AclOperation.READ); final Statement statement = givenStatement(String.format( "SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", STREAM_TOPIC_1, STREAM_TOPIC_2) ); @@ -177,18 +146,17 @@ public void shouldThrowWhenJoinSelectWithoutReadPermissionsDenied() { // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() + "Authorization denied to Read on topic(s): [%s]", TOPIC_1 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldThrowWhenJoinWithOneRightTopicWithReadPermissionsDenied() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.WRITE)); + givenAccessDenied(TOPIC_2, AclOperation.READ); final Statement statement = givenStatement(String.format( "SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", STREAM_TOPIC_1, STREAM_TOPIC_2) ); @@ -196,18 +164,17 @@ public void shouldThrowWhenJoinWithOneRightTopicWithReadPermissionsDenied() { // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Read on topic(s): [%s]", TOPIC_2.name() + "Authorization denied to Read on topic(s): [%s]", TOPIC_2 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldThrowWhenJoinWitOneLeftTopicWithReadPermissionsDenied() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.WRITE)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.READ)); + givenAccessDenied(TOPIC_1, AclOperation.READ); final Statement statement = givenStatement(String.format( "SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", STREAM_TOPIC_1, STREAM_TOPIC_2) ); @@ -215,34 +182,28 @@ public void shouldThrowWhenJoinWitOneLeftTopicWithReadPermissionsDenied() { // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() + "Authorization denied to Read on topic(s): [%s]", TOPIC_1 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + 
authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldInsertIntoWithAllPermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.WRITE)); final Statement statement = givenStatement(String.format( "INSERT INTO %s SELECT * FROM %s;", STREAM_TOPIC_2, STREAM_TOPIC_1) ); - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - - // Then: - // Above command should not throw any exception + // When/then: + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test - public void shouldThrowWhenInsertIntoWithOnlyReadPermissionsDenied() { + public void shouldThrowWhenInsertIntoWithOnlyReadPermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.READ)); + givenAccessDenied(TOPIC_2, AclOperation.WRITE); final Statement statement = givenStatement(String.format( "INSERT INTO %s SELECT * FROM %s;", STREAM_TOPIC_2, STREAM_TOPIC_1) ); @@ -250,18 +211,17 @@ public void shouldThrowWhenInsertIntoWithOnlyReadPermissionsDenied() { // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Write on topic(s): [%s]", TOPIC_2.name() + "Authorization denied to Write on topic(s): [%s]", TOPIC_2 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test - public void shouldThrowWhenInsertIntoWithOnlyWritePermissionsDenied() { + public void shouldThrowWhenInsertIntoWithOnlyWritePermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.WRITE)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.WRITE)); + givenAccessDenied(TOPIC_1, AclOperation.READ); final Statement statement = givenStatement(String.format( "INSERT INTO %s SELECT * FROM %s;", STREAM_TOPIC_2, STREAM_TOPIC_1) ); @@ -269,17 +229,17 @@ public void shouldThrowWhenInsertIntoWithOnlyWritePermissionsDenied() { // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() + "Authorization denied to Read on topic(s): [%s]", TOPIC_1 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldThrowWhenCreateAsSelectWithoutReadPermissionsDenied() { // Given: - givenTopicPermissions(TOPIC_1, Collections.emptySet()); + givenAccessDenied(TOPIC_1, AclOperation.READ); final Statement statement = givenStatement(String.format( "CREATE STREAM newStream AS SELECT * FROM %s;", STREAM_TOPIC_1) ); @@ -287,34 +247,28 @@ public void shouldThrowWhenCreateAsSelectWithoutReadPermissionsDenied() { // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() + "Authorization denied to Read on topic(s): [%s]", TOPIC_1 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + 
authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldCreateAsSelectExistingTopicWithWritePermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.WRITE)); final Statement statement = givenStatement(String.format( "CREATE STREAM %s AS SELECT * FROM %s;", STREAM_TOPIC_2, STREAM_TOPIC_1) ); - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - - // Then: - // Above command should not throw any exception + // When/Then: + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldThrowWhenCreateAsSelectExistingStreamWithoutWritePermissionsDenied() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.READ)); + givenAccessDenied(TOPIC_2, AclOperation.WRITE); final Statement statement = givenStatement(String.format( "CREATE STREAM %s AS SELECT * FROM %s;", STREAM_TOPIC_2, STREAM_TOPIC_1) ); @@ -322,125 +276,91 @@ public void shouldThrowWhenCreateAsSelectExistingStreamWithoutWritePermissionsDe // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Write on topic(s): [%s]", TOPIC_2.name() + "Authorization denied to Write on topic(s): [%s]", TOPIC_2 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldCreateAsSelectWithTopicAndWritePermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - givenTopicPermissions(TOPIC_2, Collections.singleton(AclOperation.WRITE)); final Statement statement = givenStatement(String.format( "CREATE STREAM newStream WITH (kafka_topic='%s') AS SELECT * FROM %s;", - TOPIC_2.name(), STREAM_TOPIC_1) + TOPIC_2, STREAM_TOPIC_1) ); - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - - // Then: - // Above command should not throw any exception + // When/Then: + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldPrintTopicWithReadPermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - final Statement statement = givenStatement(String.format("Print '%s';", TOPIC_NAME_1)); - - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + final Statement statement = givenStatement(String.format("Print '%s';", TOPIC_1)); - // Then: - // Above command should not throw any exception + // When/Then + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test - public void shouldThrowWhenThrowPrintTopicWithoutReadPermissionsDenied() { + public void shouldThrowWhenPrintTopicWithoutReadPermissionsDenied() { // Given: - givenTopicPermissions(TOPIC_1, Collections.emptySet()); - final Statement statement = givenStatement(String.format("Print '%s';", TOPIC_NAME_1)); + givenAccessDenied(TOPIC_1, AclOperation.READ); + final Statement statement = givenStatement(String.format("Print '%s';", TOPIC_1)); // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - 
"Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() + "Authorization denied to Read on topic(s): [%s]", TOPIC_1 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldCreateSourceWithReadPermissionsAllowed() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); final Statement statement = givenStatement(String.format( - "CREATE STREAM s1 WITH (kafka_topic='%s', value_format='JSON');", TOPIC_NAME_1) + "CREATE STREAM s1 WITH (kafka_topic='%s', value_format='JSON');", TOPIC_1) ); - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - - // Then: - // Above command should not throw any exception + // When/Then: + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } @Test public void shouldThrowWhenCreateSourceWithoutReadPermissionsDenied() { // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.WRITE)); + givenAccessDenied(TOPIC_1, AclOperation.READ); final Statement statement = givenStatement(String.format( - "CREATE STREAM s1 WITH (kafka_topic='%s', value_format='JSON');", TOPIC_NAME_1) + "CREATE STREAM s1 WITH (kafka_topic='%s', value_format='JSON');", TOPIC_1) ); // Then: expectedException.expect(KsqlTopicAuthorizationException.class); expectedException.expectMessage(String.format( - "Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() + "Authorization denied to Read on topic(s): [%s]", TOPIC_1 )); // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - } - - @Test - public void shouldThrowExceptionWhenTopicClientFails() { - // Given: - givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); - final Statement statement = givenStatement("SELECT * FROM " + STREAM_TOPIC_1 + ";"); - givenTopicClientError(TOPIC_1); - - // Then: - expectedException.expect(KafkaResponseGetFailedException.class); - - // When: - authorizationValidator.checkAuthorization(serviceContext, metaStore, statement); - } - - private void givenTopic(final String topicName, final TopicDescription topicDescription) { - when(topicDescription.name()).thenReturn(topicName); - when(kafkaTopicClient.describeTopic(topicDescription.name())).thenReturn(topicDescription); + authorizationValidator.checkAuthorization(securityContext, metaStore, statement); } - private void givenTopicPermissions( - final TopicDescription topicDescription, - final Set operations - ) { - when(topicDescription.authorizedOperations()).thenReturn(operations); + private void givenAccessDenied(final String topicName, final AclOperation operation) { + doThrow(new KsqlTopicAuthorizationException(operation, Collections.singleton(topicName))) + .when(accessValidator).checkAccess(securityContext, topicName, operation); } private void givenStreamWithTopic( final String streamName, - final TopicDescription topicDescription + final String topicName ) { final KsqlTopic sourceTopic = new KsqlTopic( - topicDescription.name(), + topicName, KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA)), ValueFormat.of(FormatInfo.of(Format.JSON)) ); @@ -458,9 +378,4 @@ private void givenStreamWithTopic( metaStore.putSource(streamSource); } - - private void givenTopicClientError(final TopicDescription topic) { - when(kafkaTopicClient.describeTopic(topic.name())) - .thenThrow(KafkaResponseGetFailedException.class); - } } diff --git 
a/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlBackendAccessValidatorTest.java b/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlBackendAccessValidatorTest.java new file mode 100644 index 000000000000..758a9007c899 --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlBackendAccessValidatorTest.java @@ -0,0 +1,128 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.security; + +import io.confluent.ksql.exception.KafkaResponseGetFailedException; +import io.confluent.ksql.exception.KsqlTopicAuthorizationException; +import io.confluent.ksql.services.KafkaTopicClient; +import io.confluent.ksql.services.ServiceContext; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.common.acl.AclOperation; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Collections; +import java.util.Optional; +import java.util.Set; + +import static org.mockito.Mockito.when; + +@RunWith(MockitoJUnitRunner.class) +public class KsqlBackendAccessValidatorTest { + private final static String TOPIC_NAME_1 = "topic1"; + private final static String TOPIC_NAME_2 = "topic2"; + + @Mock + private ServiceContext serviceContext; + @Mock + private KafkaTopicClient kafkaTopicClient; + @Mock + private TopicDescription TOPIC_1; + @Mock + private TopicDescription TOPIC_2; + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + private KsqlSecurityContext securityContext; + private KsqlAccessValidator accessValidator; + + @Before + public void setUp() { + when(serviceContext.getTopicClient()).thenReturn(kafkaTopicClient); + securityContext = new KsqlSecurityContext(Optional.empty(), serviceContext); + + givenTopic(TOPIC_NAME_1, TOPIC_1); + givenTopic(TOPIC_NAME_2, TOPIC_2); + + accessValidator = new KsqlBackendAccessValidator(); + } + + @Test + public void shouldAllowIfAuthorizedOperationsIsNull() { + // Checks compatibility with unsupported Kafka authorization checks + + // Given: + givenTopicPermissions(TOPIC_1, null); + + // When/Then: + accessValidator.checkAccess(securityContext, TOPIC_NAME_1, AclOperation.READ); + } + + @Test + public void shouldAllowIfAuthorizedOperationsContainsREAD() { + // Given: + givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.READ)); + + // When/Then: + accessValidator.checkAccess(securityContext, TOPIC_NAME_1, AclOperation.READ); + } + + @Test + public void shouldDenyIfAuthorizedOperationsDoesNotContainREAD() { + // Given: + givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.WRITE)); + + // Then: + expectedException.expect(KsqlTopicAuthorizationException.class); + expectedException.expectMessage(String.format( + "Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() + )); + + // When: + 
accessValidator.checkAccess(securityContext, TOPIC_NAME_1, AclOperation.READ); + } + + @Test + public void shouldThrowExceptionWhenDescribeTopicFails() { + // Given: + when(kafkaTopicClient.describeTopic(TOPIC_NAME_1)) + .thenThrow(KafkaResponseGetFailedException.class); + + // Then: + expectedException.expect(KafkaResponseGetFailedException.class); + + // When: + accessValidator.checkAccess(securityContext, TOPIC_NAME_1, AclOperation.READ); + } + + private void givenTopic(final String topicName, final TopicDescription topicDescription) { + when(topicDescription.name()).thenReturn(topicName); + when(kafkaTopicClient.describeTopic(topicDescription.name())).thenReturn(topicDescription); + } + + private void givenTopicPermissions( + final TopicDescription topicDescription, + final Set operations + ) { + when(topicDescription.authorizedOperations()).thenReturn(operations); + } +} diff --git a/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlCacheAccessValidatorTest.java b/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlCacheAccessValidatorTest.java new file mode 100644 index 000000000000..4c00b9453142 --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/security/KsqlCacheAccessValidatorTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.security; + +import com.google.common.base.Ticker; +import io.confluent.ksql.exception.KsqlTopicAuthorizationException; +import io.confluent.ksql.util.KsqlConfig; +import org.apache.kafka.common.acl.AclOperation; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +@RunWith(MockitoJUnitRunner.class) +public class KsqlCacheAccessValidatorTest { + private static final String TOPIC_1 = "topic1"; + private static final long ONE_SEC_IN_NS = TimeUnit.NANOSECONDS.convert(1, TimeUnit.SECONDS); + + @Mock + private KsqlSecurityContext securityContext; + @Mock + private KsqlAccessValidator backendValidator; + @Mock + private KsqlConfig ksqlConfig; + @Mock + private Ticker fakeTicker; + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + private KsqlAccessValidator cache; + + @Before + public void setUp() { + when(ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_EXPIRY_TIME_SECS)).thenReturn(1L); + when(ksqlConfig.getLong(KsqlConfig.KSQL_AUTH_CACHE_MAX_ENTRIES)).thenReturn(10L); + when(fakeTicker.read()).thenReturn(System.nanoTime()); + + cache = new KsqlCacheAccessValidator(ksqlConfig, backendValidator, fakeTicker); + } + + @Test + public void shouldCheckBackendValidatorOnFirstRequest() { + // When + cache.checkAccess(securityContext, TOPIC_1, AclOperation.READ); + + // Then + verify(backendValidator, times(1)) + .checkAccess(securityContext, TOPIC_1, AclOperation.READ); + verifyNoMoreInteractions(backendValidator); + } + + @Test + public void shouldCheckCacheValidatorOnSecondRequest() { + // When + cache.checkAccess(securityContext, TOPIC_1, AclOperation.READ); + when(fakeTicker.read()).thenReturn(ONE_SEC_IN_NS); + cache.checkAccess(securityContext, TOPIC_1, AclOperation.READ); + + // Then + verify(backendValidator, times(1)) + .checkAccess(securityContext, TOPIC_1, AclOperation.READ); + verifyNoMoreInteractions(backendValidator); + } + + @Test + public void shouldThrowAuthorizationExceptionWhenBackendValidatorIsDenied() { + // Given + doThrow(KsqlTopicAuthorizationException.class).when(backendValidator) + .checkAccess(securityContext, TOPIC_1, AclOperation.READ); + + // Then + expectedException.expect(KsqlTopicAuthorizationException.class); + + // When + cache.checkAccess(securityContext, TOPIC_1, AclOperation.READ); + } + + @Test + public void shouldThrowExceptionWhenBackendValidatorThrowsAnException() { + // Given + doThrow(RuntimeException.class).when(backendValidator) + .checkAccess(securityContext, TOPIC_1, AclOperation.READ); + + // Then + expectedException.expect(RuntimeException.class); + + // When + cache.checkAccess(securityContext, TOPIC_1, AclOperation.READ); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/DistributingExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/DistributingExecutor.java index ae3c75c0089d..75144e15d21d 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/DistributingExecutor.java +++ 
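
KsqlCacheAccessValidatorTest above drives expiry through an injected Guava Ticker rather than by sleeping, which keeps the test deterministic. The same pattern works for any CacheBuilder-based cache; here is a minimal self-contained sketch with a hand-rolled ticker (guava-testlib's FakeTicker is an off-the-shelf alternative):

    import com.google.common.base.Ticker;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicLong;

    public final class TickerExpiryDemo {
      public static void main(final String[] args) {
        final AtomicLong nanos = new AtomicLong();
        final Ticker ticker = new Ticker() {
          @Override
          public long read() {
            return nanos.get(); // virtual clock, fully controlled by the caller
          }
        };

        final AtomicInteger loads = new AtomicInteger();
        final LoadingCache<String, Boolean> cache = CacheBuilder.newBuilder()
            .expireAfterWrite(1, TimeUnit.SECONDS)
            .ticker(ticker)
            .build(new CacheLoader<String, Boolean>() {
              @Override
              public Boolean load(final String key) {
                loads.incrementAndGet(); // stands in for the backend ACL check
                return true;
              }
            });

        cache.getUnchecked("topic1"); // miss: backend consulted
        cache.getUnchecked("topic1"); // hit: served from cache
        nanos.addAndGet(TimeUnit.SECONDS.toNanos(2)); // jump well past the expiry
        cache.getUnchecked("topic1"); // expired: backend consulted again

        System.out.println("backend loads: " + loads.get()); // prints 2
      }
    }
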
b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/DistributingExecutor.java @@ -22,6 +22,7 @@ import io.confluent.ksql.rest.entity.CommandStatusEntity; import io.confluent.ksql.rest.entity.KsqlEntity; import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.statement.Injector; @@ -82,13 +83,13 @@ public DistributingExecutor( public Optional execute( final ConfiguredStatement statement, final KsqlExecutionContext executionContext, - final ServiceContext serviceContext + final KsqlSecurityContext securityContext ) { final ConfiguredStatement injected = injectorFactory - .apply(executionContext, serviceContext) + .apply(executionContext, securityContext.getServiceContext()) .inject(statement); - checkAuthorization(injected, serviceContext, executionContext); + checkAuthorization(injected, securityContext, executionContext); final Producer transactionalProducer = commandQueue.createTransactionalProducer(); @@ -136,7 +137,7 @@ public Optional execute( private void checkAuthorization( final ConfiguredStatement configured, - final ServiceContext userServiceContext, + final KsqlSecurityContext userSecurityContext, final KsqlExecutionContext serverExecutionContext ) { final Statement statement = configured.getStatement(); @@ -145,13 +146,13 @@ private void checkAuthorization( // Check the User will be permitted to execute this statement authorizationValidator.ifPresent( validator -> - validator.checkAuthorization(userServiceContext, metaStore, statement)); + validator.checkAuthorization(userSecurityContext, metaStore, statement)); try { // Check the KSQL service principal will be permitted too authorizationValidator.ifPresent( validator -> validator.checkAuthorization( - serverExecutionContext.getServiceContext(), + new KsqlSecurityContext(Optional.empty(), serverExecutionContext.getServiceContext()), metaStore, statement)); } catch (final Exception e) { diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java index 1f5c20c9f0de..3640d541870a 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/RequestHandler.java @@ -22,7 +22,7 @@ import io.confluent.ksql.rest.entity.KsqlEntity; import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.server.computation.DistributingExecutor; -import io.confluent.ksql.services.ServiceContext; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlConfig; import java.util.HashMap; @@ -67,7 +67,7 @@ public RequestHandler( } public KsqlEntityList execute( - final ServiceContext serviceContext, + final KsqlSecurityContext securityContext, final List statements, final Map propertyOverrides ) { @@ -79,7 +79,7 @@ public KsqlEntityList execute( prepared, scopedPropertyOverrides, ksqlConfig); executeStatement( - serviceContext, + securityContext, configured, scopedPropertyOverrides, entities @@ -90,7 +90,7 @@ public KsqlEntityList execute( @SuppressWarnings("unchecked") private Optional executeStatement( - final ServiceContext serviceContext, + final KsqlSecurityContext securityContext, final 
ConfiguredStatement configured, final Map mutableScopedProperties, final KsqlEntityList entities @@ -101,14 +101,14 @@ private Optional executeStatement( final StatementExecutor executor = (StatementExecutor) customExecutors.getOrDefault( statementClass, - (stmt, props, ctx, svcCtx) -> distributor.execute(stmt, ctx, svcCtx) + (stmt, props, ctx, svcCtx) -> distributor.execute(stmt, ctx, securityContext) ); return executor.execute( configured, mutableScopedProperties, ksqlEngine, - serviceContext + securityContext.getServiceContext() ); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java index 3388eee1ea6d..f7d0044b99ae 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/KsqlResource.java @@ -191,7 +191,7 @@ public Response terminateCluster( ensureValidPatterns(request.getDeleteTopicList()); try { final KsqlEntityList entities = handler.execute( - securityContext.getServiceContext(), + securityContext, TERMINATE_CLUSTER, request.getStreamsProperties() ); @@ -228,7 +228,7 @@ public Response handleKsqlStatements( ); final KsqlEntityList entities = handler.execute( - securityContext.getServiceContext(), + securityContext, statements, request.getStreamsProperties() ); diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java index 9ba10711c840..95923d549bdf 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java @@ -183,7 +183,7 @@ private Response handleStatement( try { final Consumer authValidationConsumer = ksqlAuthorizationValidator -> ksqlAuthorizationValidator.checkAuthorization( - securityContext.getServiceContext(), + securityContext, ksqlEngine.getMetaStore(), statement.getStatement() ); diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java index 0d9fd46614fc..bfd46f281f80 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java @@ -36,6 +36,7 @@ import io.confluent.ksql.rest.server.state.ServerState; import io.confluent.ksql.rest.util.CommandStoreUtil; import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.statement.ConfiguredStatement; @@ -98,7 +99,7 @@ public class WSQueryEndpoint { private final Errors errorHandler; private WebSocketSubscriber subscriber; - private ServiceContext serviceContext; + private KsqlSecurityContext securityContext; // CHECKSTYLE_RULES.OFF: ParameterNumberCheck public WSQueryEndpoint( @@ -218,7 +219,7 @@ public void onOpen(final Session session, final EndpointConfig unused) { final PreparedStatement preparedStatement = parseStatement(request); - serviceContext = 
createServiceContext(session.getUserPrincipal()); + securityContext = createSecurityContext(session.getUserPrincipal()); final Statement statement = preparedStatement.getStatement(); final Class type = statement.getClass(); @@ -227,7 +228,7 @@ public void onOpen(final Session session, final EndpointConfig unused) { HANDLER_MAP .getOrDefault(type, WSQueryEndpoint::handleUnsupportedStatement) - .handle(this, new RequestContext(session, request, serviceContext), statement); + .handle(this, new RequestContext(session, request, securityContext), statement); } catch (final TopicAuthorizationException e) { log.debug("Error processing request", e); SessionUtil.closeSilently( @@ -246,8 +247,8 @@ public void onClose(final Session session, final CloseReason closeReason) { subscriber.close(); } - if (serviceContext != null) { - serviceContext.close(); + if (securityContext != null) { + securityContext.getServiceContext().close(); } log.debug( @@ -283,22 +284,26 @@ private void checkAuthorization(final Session session) { ); } - private ServiceContext createServiceContext(final Principal principal) { - // Creates a ServiceContext using the user's credentials, so the WS query topics are - // accessed with the user permission context (defaults to KSQL service context) + private KsqlSecurityContext createSecurityContext(final Principal principal) { + final ServiceContext serviceContext; if (!securityExtension.getUserContextProvider().isPresent()) { - return defaultServiceContextFactory.create(ksqlConfig, Optional.empty()); + serviceContext = defaultServiceContextFactory.create(ksqlConfig, Optional.empty()); + } else { + // Creates a ServiceContext using the user's credentials, so the WS query topics are + // accessed with the user permission context (defaults to KSQL service context) + + serviceContext = securityExtension.getUserContextProvider() + .map(provider -> + serviceContextFactory.create( + ksqlConfig, + Optional.empty(), + provider.getKafkaClientSupplier(principal), + provider.getSchemaRegistryClientFactory(principal))) + .get(); } - return securityExtension.getUserContextProvider() - .map(provider -> - serviceContextFactory.create( - ksqlConfig, - Optional.empty(), - provider.getKafkaClientSupplier(principal), - provider.getSchemaRegistryClientFactory(principal))) - .get(); + return new KsqlSecurityContext(Optional.ofNullable(principal), serviceContext); } private void validateVersion(final Session session) { @@ -368,7 +373,7 @@ private void validateKafkaAuthorization(final Statement statement) { } } else { authorizationValidator.ifPresent(validator -> validator.checkAuthorization( - serviceContext, + securityContext, ksqlEngine.getMetaStore(), statement) ); @@ -396,7 +401,7 @@ private void handleQuery(final RequestContext info, final Query query) { queryPublisher.start( ksqlEngine, - info.serviceContext, + info.securityContext.getServiceContext(), exec, configured, streamSubscriber @@ -406,7 +411,7 @@ private void handleQuery(final RequestContext info, final Query query) { private void handlePrintTopic(final RequestContext info, final PrintTopic printTopic) { final String topicName = printTopic.getTopic(); - if (!info.serviceContext.getTopicClient().isTopicExists(topicName)) { + if (!info.securityContext.getServiceContext().getTopicClient().isTopicExists(topicName)) { throw new IllegalArgumentException( "Topic does not exist, or KSQL does not have permission to list the topic: " + topicName); } @@ -417,7 +422,7 @@ private void handlePrintTopic(final RequestContext info, final PrintTopic printT 
topicPublisher.start( exec, - info.serviceContext, + info.securityContext.getServiceContext(), ksqlConfig.getKsqlStreamConfigProps(), printTopic, topicSubscriber @@ -493,13 +498,16 @@ private static final class RequestContext { private final Session session; private final KsqlRequest request; - private final ServiceContext serviceContext; + private final KsqlSecurityContext securityContext; private RequestContext( - final Session session, final KsqlRequest request, final ServiceContext serviceContext) { + final Session session, + final KsqlRequest request, + final KsqlSecurityContext securityContext + ) { this.session = session; this.request = request; - this.serviceContext = serviceContext; + this.securityContext = securityContext; } } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java index f95abe7d0a94..3fa668628ff6 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/computation/DistributingExecutorTest.java @@ -20,6 +20,7 @@ import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -46,6 +47,7 @@ import io.confluent.ksql.rest.entity.CommandStatus.Status; import io.confluent.ksql.rest.entity.CommandStatusEntity; import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.services.SandboxedServiceContext; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.services.TestServiceContext; @@ -65,6 +67,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; +import org.mockito.ArgumentMatchers; import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.Mockito; @@ -124,6 +127,7 @@ CommonCreateConfigs.VALUE_FORMAT_PROPERTY, new StringLiteral("json") private DistributingExecutor distributor; private AtomicLong scnCounter; + private KsqlSecurityContext securityContext; @Before public void setUp() throws InterruptedException { @@ -141,6 +145,8 @@ public void setUp() throws InterruptedException { when(validatedCommandFactory.create(any(), any())).thenReturn(command); when(queue.createTransactionalProducer()).thenReturn(transactionalProducer); + securityContext = new KsqlSecurityContext(Optional.empty(), serviceContext); + distributor = new DistributingExecutor( queue, DURATION_10_MS, @@ -153,7 +159,7 @@ public void setUp() throws InterruptedException { @Test public void shouldEnqueueSuccessfulCommandTransactionally() { // When: - distributor.execute(CONFIGURED_STATEMENT, executionContext, serviceContext); + distributor.execute(CONFIGURED_STATEMENT, executionContext, securityContext); // Then: final InOrder inOrder = Mockito.inOrder(transactionalProducer, queue, validatedCommandFactory); @@ -176,7 +182,7 @@ public void shouldEnqueueSuccessfulCommandTransactionally() { @Test public void shouldInferSchemas() { // When: - distributor.execute(CONFIGURED_STATEMENT, executionContext, serviceContext); + distributor.execute(CONFIGURED_STATEMENT, executionContext, securityContext); // Then: verify(schemaInjector, 
times(1)).inject(eq(CONFIGURED_STATEMENT)); @@ -189,7 +195,7 @@ public void shouldReturnCommandStatus() { (CommandStatusEntity) distributor.execute( CONFIGURED_STATEMENT, executionContext, - serviceContext + securityContext ) .orElseThrow(null); @@ -211,7 +217,7 @@ public void shouldThrowExceptionOnFailureToEnqueue() { expectedException.expectCause(is(cause)); // When: - distributor.execute(CONFIGURED_STATEMENT, executionContext, serviceContext); + distributor.execute(CONFIGURED_STATEMENT, executionContext, securityContext); verify(transactionalProducer, times(1)).abortTransaction(); } @@ -229,43 +235,51 @@ public void shouldThrowFailureIfCannotInferSchema() { expectedException.expectMessage("Could not infer!"); // When: - distributor.execute(configured, executionContext, serviceContext); + distributor.execute(configured, executionContext, securityContext); } @Test public void shouldThrowExceptionIfUserServiceContextIsDeniedAuthorization() { // Given: - final ServiceContext userServiceContext = mock(ServiceContext.class); + final KsqlSecurityContext userSecurityContext = new KsqlSecurityContext( + Optional.empty(), + mock(ServiceContext.class)); final PreparedStatement preparedStatement = PreparedStatement.of("", new ListProperties(Optional.empty())); final ConfiguredStatement configured = ConfiguredStatement.of(preparedStatement, ImmutableMap.of(), KSQL_CONFIG); doThrow(KsqlTopicAuthorizationException.class).when(authorizationValidator) - .checkAuthorization(eq(userServiceContext), any(), eq(configured.getStatement())); + .checkAuthorization(eq(userSecurityContext), any(), eq(configured.getStatement())); // Expect: expectedException.expect(KsqlTopicAuthorizationException.class); // When: - distributor.execute(configured, executionContext, userServiceContext); + distributor.execute(configured, executionContext, userSecurityContext); } @Test public void shouldThrowServerExceptionIfServerServiceContextIsDeniedAuthorization() { // Given: - final ServiceContext userServiceContext = SandboxedServiceContext.create(TestServiceContext.create()); + final KsqlSecurityContext userSecurityContext = new KsqlSecurityContext(Optional.empty(), + SandboxedServiceContext.create(TestServiceContext.create())); final PreparedStatement preparedStatement = PreparedStatement.of("", new ListProperties(Optional.empty())); final ConfiguredStatement configured = ConfiguredStatement.of(preparedStatement, ImmutableMap.of(), KSQL_CONFIG); + doNothing().when(authorizationValidator) + .checkAuthorization(eq(userSecurityContext), any(), any()); doThrow(KsqlTopicAuthorizationException.class).when(authorizationValidator) - .checkAuthorization(eq(serviceContext), any(), eq(configured.getStatement())); + .checkAuthorization( + ArgumentMatchers.argThat(securityContext -> + securityContext.getServiceContext() == serviceContext), + any(), any()); // Expect: expectedException.expect(KsqlServerException.class); expectedException.expectCause(is(instanceOf(KsqlTopicAuthorizationException.class))); // When: - distributor.execute(configured, executionContext, userServiceContext); + distributor.execute(configured, executionContext, userSecurityContext); } } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java index c5c318d3d556..ab1c2bef1a2a 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java +++ 
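
The DistributingExecutorTest above encodes the contract of the two-phase check in DistributingExecutor: the user's security context is validated first and its failures propagate unchanged, then the same statement is re-checked under the KSQL server's own context, whose failures are wrapped so the caller can tell the two apart. A sketch of that flow as the tests describe it; the KsqlServerException wrapping is inferred from shouldThrowServerExceptionIfServerServiceContextIsDeniedAuthorization, and the message string is illustrative:

    // 1. User-level check: a denial here reaches the client as-is.
    authorizationValidator.ifPresent(validator ->
        validator.checkAuthorization(userSecurityContext, metaStore, statement));

    try {
      // 2. Server-level check, run under the KSQL service principal (no user principal).
      authorizationValidator.ifPresent(validator ->
          validator.checkAuthorization(
              new KsqlSecurityContext(Optional.empty(), serverExecutionContext.getServiceContext()),
              metaStore,
              statement));
    } catch (final Exception e) {
      // Per the test, a server-side denial surfaces as a KsqlServerException with the
      // authorization failure as its cause.
      throw new KsqlServerException("The KSQL server was denied access to the topic", e);
    }
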
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/RequestHandlerTest.java @@ -42,6 +42,8 @@ import io.confluent.ksql.rest.entity.KsqlEntity; import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.server.computation.DistributingExecutor; +import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.util.KsqlConfig; import java.util.Arrays; @@ -53,7 +55,9 @@ import org.hamcrest.Matcher; import org.hamcrest.TypeSafeMatcher; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; @@ -72,6 +76,7 @@ public class RequestHandlerTest { private MetaStore metaStore; private RequestHandler handler; + private KsqlSecurityContext securityContext; @Before public void setUp() { @@ -81,6 +86,8 @@ public void setUp() { new DefaultKsqlParser().prepare(invocation.getArgument(0), metaStore)); when(distributor.execute(any(), any(), any())).thenReturn(Optional.of(entity)); doNothing().when(sync).waitFor(any(), any()); + + securityContext = new KsqlSecurityContext(Optional.empty(), serviceContext); } @Test @@ -94,7 +101,7 @@ public void shouldUseCustomExecutor() { // When final List statements = new DefaultKsqlParser().parse(SOME_STREAM_SQL); - final KsqlEntityList entities = handler.execute(serviceContext, statements, ImmutableMap.of()); + final KsqlEntityList entities = handler.execute(securityContext, statements, ImmutableMap.of()); // Then assertThat(entities, contains(entity)); @@ -117,7 +124,7 @@ public void shouldDefaultToDistributor() { // When final List statements = new DefaultKsqlParser().parse(SOME_STREAM_SQL); - final KsqlEntityList entities = handler.execute(serviceContext, statements, ImmutableMap.of()); + final KsqlEntityList entities = handler.execute(securityContext, statements, ImmutableMap.of()); // Then assertThat(entities, contains(entity)); @@ -127,7 +134,7 @@ public void shouldDefaultToDistributor() { ImmutableMap.of(), ksqlConfig))), eq(ksqlEngine), - eq(serviceContext) + eq(securityContext) ); } @@ -140,7 +147,7 @@ public void shouldDistributeProperties() { final List statements = new DefaultKsqlParser().parse(SOME_STREAM_SQL); final KsqlEntityList entities = handler.execute( - serviceContext, + securityContext, statements, ImmutableMap.of("x", "y") ); @@ -154,7 +161,7 @@ public void shouldDistributeProperties() { ImmutableMap.of("x", "y"), ksqlConfig))), eq(ksqlEngine), - eq(serviceContext) + eq(securityContext) ); } @@ -179,7 +186,7 @@ public void shouldWaitForDistributedStatements() { ); // When - handler.execute(serviceContext, statements, ImmutableMap.of()); + handler.execute(securityContext, statements, ImmutableMap.of()); // Then verify(sync).waitFor(argThat(hasItems(entity1, entity2)), any()); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java index 86a312ec679d..d479bb094260 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java @@ -21,6 +21,7 @@ import static org.mockito.ArgumentMatchers.any; import static 
org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -58,6 +59,7 @@ import io.confluent.ksql.rest.server.state.ServerState; import io.confluent.ksql.security.KsqlAuthorizationProvider; import io.confluent.ksql.security.KsqlAuthorizationValidator; +import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; import io.confluent.ksql.security.KsqlUserContextProvider; import io.confluent.ksql.services.ConfiguredKafkaClientSupplier; @@ -402,7 +404,11 @@ public void shouldReturnErrorMessageWhenTopicAuthorizationException() throws Exc when(errorsHandler.kafkaAuthorizationErrorMessage(any(TopicAuthorizationException.class))) .thenReturn(errorMessage); doThrow(new KsqlTopicAuthorizationException(AclOperation.CREATE, Collections.singleton("topic"))) - .when(authorizationValidator).checkAuthorization(serviceContext, metaStore, query); + .when(authorizationValidator).checkAuthorization( + argThat(securityContext -> + securityContext.getServiceContext() == serviceContext), + eq(metaStore), + eq(query)); // When: wsQueryEndpoint.onOpen(session, null); From 5ac46f4784a11133a4aa4c1edbc45124b929d53d Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Wed, 8 Jan 2020 11:34:03 +0000 Subject: [PATCH 080/123] test: fix build issue caused by new register method on SR client (#4247) --- .../SandboxedSchemaRegistryClientTest.java | 17 +++++++++++++++++ .../confluent/ksql/test/util/TestMethods.java | 1 + 2 files changed, 18 insertions(+) diff --git a/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java b/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java index 5daa0fd6681f..2b538af6dd74 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java @@ -21,6 +21,8 @@ import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; +import io.confluent.kafka.schemaregistry.ParsedSchema; +import io.confluent.kafka.schemaregistry.avro.AvroSchema; import io.confluent.kafka.schemaregistry.client.SchemaMetadata; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.test.util.TestMethods; @@ -48,8 +50,12 @@ public static class UnsupportedMethods { @Parameterized.Parameters(name = "{0}") public static Collection> getMethodsToTest() { return TestMethods.builder(SchemaRegistryClient.class) + // Only add methods in here which are NOT handled by the proxy + // when adding, ensure you also add a suitable test to SupportedMethods below. 
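+        // For example, the shouldSwallowRegister test added below (in SupportedMethods) exercises the newly proxied register() overloads, along the lines of: sandboxedClient.register("some subject", new AvroSchema(schema)); followed by verifyZeroInteractions(delegate); — the sandbox must never touch the real client.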
.ignore("register", String.class, Schema.class) + .ignore("register", String.class, ParsedSchema.class) .ignore("register", String.class, Schema.class, int.class, int.class) + .ignore("register", String.class, ParsedSchema.class, int.class, int.class) .ignore("getLatestSchemaMetadata", String.class) .ignore("testCompatibility", String.class, Schema.class) .ignore("deleteSubject", String.class) @@ -127,5 +133,16 @@ public void shouldSwallowDeleteSubject() throws Exception { // Then: verifyZeroInteractions(delegate); } + + @Test + public void shouldSwallowRegister() throws Exception { + // When: + sandboxedClient.register("some subject", schema); + sandboxedClient.register("some subject", new AvroSchema(schema)); + sandboxedClient.register("some subject", new AvroSchema(schema), 1, 1); + + // Then: + verifyZeroInteractions(delegate); + } } } \ No newline at end of file diff --git a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java index f68173234c2c..666a5458d798 100644 --- a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java +++ b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java @@ -41,6 +41,7 @@ public final class TestMethods { private static final Map BUILT_IN_DEFAULTS = ImmutableMap.builder() + .put(boolean.class, true) .put(int.class, 0) .put(long.class, 0L) .put(float.class, 0.0f) From 1213d9367aa79633c3bd076cb46e78bc55d87261 Mon Sep 17 00:00:00 2001 From: Robert Yokota Date: Wed, 8 Jan 2020 08:41:17 -0800 Subject: [PATCH 081/123] test: Fix Schema Registry tests after new SR API changes (#4243) --- .../ksql/services/SandboxedSchemaRegistryClient.java | 3 +++ .../ksql/services/SandboxedSchemaRegistryClientTest.java | 1 + .../rest/server/resources/streaming/TopicStreamTest.java | 8 +++++--- .../java/io/confluent/ksql/test/util/TestMethods.java | 2 +- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/services/SandboxedSchemaRegistryClient.java b/ksql-engine/src/main/java/io/confluent/ksql/services/SandboxedSchemaRegistryClient.java index c6dff540f617..4361017a15fd 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/services/SandboxedSchemaRegistryClient.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/services/SandboxedSchemaRegistryClient.java @@ -18,6 +18,7 @@ import static io.confluent.ksql.util.LimitedProxyBuilder.anyParams; import static io.confluent.ksql.util.LimitedProxyBuilder.methodParams; +import io.confluent.kafka.schemaregistry.ParsedSchema; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.util.LimitedProxyBuilder; import java.util.Collections; @@ -42,6 +43,8 @@ static SchemaRegistryClient createProxy(final SchemaRegistryClient delegate) { .forward("getLatestSchemaMetadata", methodParams(String.class), delegate) .forward("testCompatibility", methodParams(String.class, Schema.class), delegate) + .forward("testCompatibility", + methodParams(String.class, ParsedSchema.class), delegate) .swallow("deleteSubject", methodParams(String.class), Collections.emptyList()) .build(); } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java b/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java index 2b538af6dd74..cde877ef2088 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java +++ 
b/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java @@ -58,6 +58,7 @@ public static Collection> getMethodsToTest() { .ignore("register", String.class, ParsedSchema.class, int.class, int.class) .ignore("getLatestSchemaMetadata", String.class) .ignore("testCompatibility", String.class, Schema.class) + .ignore("testCompatibility", String.class, ParsedSchema.class) .ignore("deleteSubject", String.class) .build(); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamTest.java index 4fbd639abd4e..1cb9f9154ea7 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/TopicStreamTest.java @@ -30,6 +30,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; + +import io.confluent.kafka.schemaregistry.ParsedSchema; +import io.confluent.kafka.schemaregistry.avro.AvroSchema; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.kafka.serializers.KafkaAvroSerializer; import io.confluent.ksql.rest.server.resources.streaming.TopicStream.Format; @@ -66,7 +69,6 @@ public void setUp() { } @Test - @Ignore("Temporarily disable this test until new Schema Registry changes land") public void shouldMatchAvroFormatter() throws Exception { // Given: final Schema schema = parseAvroSchema( @@ -81,8 +83,8 @@ public void shouldMatchAvroFormatter() throws Exception { final GenericData.Record avroRecord = new GenericData.Record(schema); avroRecord.put("str1", "My first string"); - expect(schemaRegistryClient.register(anyString(), anyObject(Schema.class))).andReturn(1); - expect(schemaRegistryClient.getById(anyInt())).andReturn(schema).times(2); + expect(schemaRegistryClient.register(anyString(), anyObject(ParsedSchema.class))).andReturn(1); + expect(schemaRegistryClient.getSchemaById(anyInt())).andReturn(new AvroSchema(schema)).times(2); replay(schemaRegistryClient); diff --git a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java index 666a5458d798..40f3ad5c95e5 100644 --- a/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java +++ b/ksql-test-util/src/main/java/io/confluent/ksql/test/util/TestMethods.java @@ -41,7 +41,7 @@ public final class TestMethods { private static final Map BUILT_IN_DEFAULTS = ImmutableMap.builder() - .put(boolean.class, true) + .put(boolean.class, false) .put(int.class, 0) .put(long.class, 0L) .put(float.class, 0.0f) From 555c573426de896a5f483a9212603d885585d105 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Thu, 9 Jan 2020 14:06:53 +0000 Subject: [PATCH 082/123] chore: ensure sensible error on use of unsupported primitive key type (#4246) fixes: https://github.com/confluentinc/ksql/issues/4113 Added QTT tests to ensure sensible error messages are output from `CT`/`CS` and `CTAS`/`CSAS` statements where the key format (currently always `KAFKA`) does not support the SQL type of the key column, e.g. the `KAFKA` format does not support `BOOLEAN`, `DECIMAL`, `MAP`, `ARRAY` or `STRUCT` types. The error message will look something like: ``` Key format does not support key schema. 
format: KAFKA schema: Persistence{schema=STRUCT> NOT NULL, unwrapped=false} reason: The 'KAFKA' format does not support type 'MAP' ``` --- .../confluent/ksql/serde/FormatInfoTest.java | 1 + .../ddl/commands/CreateSourceFactory.java | 53 +++-- .../confluent/ksql/engine/EngineExecutor.java | 8 +- .../ddl/commands/CreateSourceFactoryTest.java | 205 ++++++++++------- .../query-validation-tests/key-schemas.json | 210 +++++++++++++++++- .../query-validation-tests/serdes.json | 2 +- .../confluent/ksql/serde/GenericKeySerDe.java | 17 +- .../confluent/ksql/serde/GenericRowSerDe.java | 15 ++ .../ksql/serde/KsqlSerdeFactories.java | 5 + .../confluent/ksql/serde/SerdeFactories.java | 11 + .../ksql/serde/GenericKeySerDeTest.java | 35 ++- .../ksql/serde/GenericRowSerDeTest.java | 48 +++- .../ksql/serde/KsqlSerdeFactoriesTest.java | 25 ++- 13 files changed, 504 insertions(+), 131 deletions(-) diff --git a/ksql-common/src/test/java/io/confluent/ksql/serde/FormatInfoTest.java b/ksql-common/src/test/java/io/confluent/ksql/serde/FormatInfoTest.java index f7c0426135df..a40d8503b9ec 100644 --- a/ksql-common/src/test/java/io/confluent/ksql/serde/FormatInfoTest.java +++ b/ksql-common/src/test/java/io/confluent/ksql/serde/FormatInfoTest.java @@ -30,6 +30,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; +@SuppressWarnings("UnstableApiUsage") public class FormatInfoTest { @Rule diff --git a/ksql-engine/src/main/java/io/confluent/ksql/ddl/commands/CreateSourceFactory.java b/ksql-engine/src/main/java/io/confluent/ksql/ddl/commands/CreateSourceFactory.java index b272b21c2e33..9e75682cc685 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/ddl/commands/CreateSourceFactory.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/ddl/commands/CreateSourceFactory.java @@ -37,7 +37,9 @@ import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; import io.confluent.ksql.serde.Format; +import io.confluent.ksql.serde.GenericKeySerDe; import io.confluent.ksql.serde.GenericRowSerDe; +import io.confluent.ksql.serde.KeySerdeFactory; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.serde.SerdeOptions; import io.confluent.ksql.serde.ValueSerdeFactory; @@ -51,24 +53,33 @@ import java.util.Set; public final class CreateSourceFactory { + private final ServiceContext serviceContext; private final SerdeOptionsSupplier serdeOptionsSupplier; - private final ValueSerdeFactory serdeFactory; + private final KeySerdeFactory keySerdeFactory; + private final ValueSerdeFactory valueSerdeFactory; public CreateSourceFactory(final ServiceContext serviceContext) { - this(serviceContext, SerdeOptions::buildForCreateStatement, new GenericRowSerDe()); + this( + serviceContext, + SerdeOptions::buildForCreateStatement, + new GenericKeySerDe(), + new GenericRowSerDe() + ); } @VisibleForTesting CreateSourceFactory( final ServiceContext serviceContext, final SerdeOptionsSupplier serdeOptionsSupplier, - final ValueSerdeFactory serdeFactory + final KeySerdeFactory keySerdeFactory, + final ValueSerdeFactory valueSerdeFactory ) { this.serviceContext = Objects.requireNonNull(serviceContext, "serviceContext"); this.serdeOptionsSupplier = Objects.requireNonNull(serdeOptionsSupplier, "serdeOptionsSupplier"); - this.serdeFactory = Objects.requireNonNull(serdeFactory, "serdeFactory"); + this.keySerdeFactory = Objects.requireNonNull(keySerdeFactory, "keySerdeFactory"); + this.valueSerdeFactory = Objects.requireNonNull(valueSerdeFactory, "valueSerdeFactory"); } public 
CreateStreamCommand createStreamCommand( @@ -84,19 +95,16 @@ public CreateStreamCommand createStreamCommand( statement.getProperties(), schema ); + final Set serdeOptions = serdeOptionsSupplier.build( schema, topic.getValueFormat().getFormat(), statement.getProperties().getWrapSingleValues(), ksqlConfig ); - validateSerdeCanHandleSchemas( - ksqlConfig, - serviceContext, - serdeFactory, - PhysicalSchema.from(schema, serdeOptions), - topic - ); + + validateSerdesCanHandleSchemas(ksqlConfig, PhysicalSchema.from(schema, serdeOptions), topic); + return new CreateStreamCommand( sourceName, schema, @@ -127,13 +135,9 @@ public CreateTableCommand createTableCommand( statement.getProperties().getWrapSingleValues(), ksqlConfig ); - validateSerdeCanHandleSchemas( - ksqlConfig, - serviceContext, - serdeFactory, - PhysicalSchema.from(schema, serdeOptions), - topic - ); + + validateSerdesCanHandleSchemas(ksqlConfig, PhysicalSchema.from(schema, serdeOptions), topic); + return new CreateTableCommand( sourceName, schema, @@ -213,13 +217,20 @@ private static Optional buildTimestampColumn( return timestampColumn; } - private static void validateSerdeCanHandleSchemas( + private void validateSerdesCanHandleSchemas( final KsqlConfig ksqlConfig, - final ServiceContext serviceContext, - final ValueSerdeFactory valueSerdeFactory, final PhysicalSchema physicalSchema, final KsqlTopic topic ) { + keySerdeFactory.create( + topic.getKeyFormat().getFormatInfo(), + physicalSchema.keySchema(), + ksqlConfig, + serviceContext.getSchemaRegistryClientFactory(), + "", + NoopProcessingLogContext.INSTANCE + ).close(); + valueSerdeFactory.create( topic.getValueFormat().getFormatInfo(), physicalSchema.valueSchema(), diff --git a/ksql-engine/src/main/java/io/confluent/ksql/engine/EngineExecutor.java b/ksql-engine/src/main/java/io/confluent/ksql/engine/EngineExecutor.java index d12155d0fda5..67cb66bd270b 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/engine/EngineExecutor.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/engine/EngineExecutor.java @@ -228,17 +228,20 @@ private ExecutorPlans( private Optional maybeCreateSinkDdl( final String sql, final KsqlStructuredDataOutputNode outputNode, - final KeyField keyField) { + final KeyField keyField + ) { if (!outputNode.isDoCreateInto()) { validateExistingSink(outputNode, keyField); return Optional.empty(); } - final CreateSourceCommand ddl; + final Formats formats = Formats.of( outputNode.getKsqlTopic().getKeyFormat(), outputNode.getKsqlTopic().getValueFormat(), outputNode.getSerdeOptions() ); + + final CreateSourceCommand ddl; if (outputNode.getNodeOutputType() == DataSourceType.KSTREAM) { ddl = new CreateStreamCommand( outputNode.getIntoSourceName(), @@ -260,6 +263,7 @@ private Optional maybeCreateSinkDdl( outputNode.getKsqlTopic().getKeyFormat().getWindowInfo() ); } + final SchemaRegistryClient srClient = serviceContext.getSchemaRegistryClient(); AvroUtil.throwOnInvalidSchemaEvolution(sql, ddl, srClient, ksqlConfig); return Optional.of(ddl); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/CreateSourceFactoryTest.java b/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/CreateSourceFactoryTest.java index abbb3cd9541b..cf499e12b6c2 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/CreateSourceFactoryTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/ddl/commands/CreateSourceFactoryTest.java @@ -26,7 +26,6 @@ import static io.confluent.ksql.serde.Format.KAFKA; import static 
io.confluent.ksql.util.SchemaUtil.ROWKEY_NAME; import static io.confluent.ksql.util.SchemaUtil.ROWTIME_NAME; -import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.is; @@ -43,7 +42,6 @@ import io.confluent.ksql.ddl.commands.CreateSourceFactory.SerdeOptionsSupplier; import io.confluent.ksql.execution.ddl.commands.CreateStreamCommand; import io.confluent.ksql.execution.ddl.commands.CreateTableCommand; -import io.confluent.ksql.execution.ddl.commands.DdlCommand; import io.confluent.ksql.execution.expression.tree.BooleanLiteral; import io.confluent.ksql.execution.expression.tree.Literal; import io.confluent.ksql.execution.expression.tree.StringLiteral; @@ -65,6 +63,7 @@ import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.serde.FormatInfo; +import io.confluent.ksql.serde.KeySerdeFactory; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.serde.ValueSerdeFactory; import io.confluent.ksql.serde.WindowInfo; @@ -77,6 +76,7 @@ import java.util.Optional; import java.util.Set; import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.connect.data.Struct; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -87,23 +87,42 @@ @RunWith(MockitoJUnitRunner.class) public class CreateSourceFactoryTest { + private static final SourceName SOME_NAME = SourceName.of("bob"); + + private static final TableElement EXPLICIT_ROWKEY = + tableElement(Namespace.KEY, ROWKEY_NAME.name(), new Type(SqlTypes.INTEGER)); + private static final TableElement ELEMENT1 = tableElement(Namespace.VALUE, "bob", new Type(SqlTypes.STRING)); + private static final TableElement ELEMENT2 = tableElement(Namespace.VALUE, "hojjat", new Type(SqlTypes.BIGINT)); - private static final TableElements SOME_ELEMENTS = TableElements.of(ELEMENT1); - private static final TableElements TWO_ELEMENTS = TableElements.of(ELEMENT1, ELEMENT2); + + private static final TableElements ONE_ELEMENTS = TableElements.of(ELEMENT1); + + private static final TableElements TABLE_ELEMENTS = + TableElements.of(EXPLICIT_ROWKEY, ELEMENT1, ELEMENT2); + + private static final LogicalSchema EXPECTED_SCHEMA = LogicalSchema.builder() + .keyColumn(ROWKEY_NAME, SqlTypes.INTEGER) + .valueColumn(ColumnName.of("bob"), SqlTypes.STRING) + .valueColumn(ColumnName.of("hojjat"), SqlTypes.BIGINT) + .build(); + private static final String TOPIC_NAME = "some topic"; + private static final Map MINIMIM_PROPS = ImmutableMap.of( CommonCreateConfigs.VALUE_FORMAT_PROPERTY, new StringLiteral("JSON"), CommonCreateConfigs.KAFKA_TOPIC_NAME_PROPERTY, new StringLiteral(TOPIC_NAME) ); - private static final TableElements ONE_ELEMENT = TableElements.of( - tableElement(Namespace.VALUE, "bob", new Type(SqlTypes.STRING))); + private static final Set SOME_SERDE_OPTIONS = ImmutableSet .of(SerdeOption.UNWRAP_SINGLE_VALUES); + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + @Mock private KafkaTopicClient topicClient; @Mock @@ -111,24 +130,25 @@ CommonCreateConfigs.KAFKA_TOPIC_NAME_PROPERTY, new StringLiteral(TOPIC_NAME) @Mock private SerdeOptionsSupplier serdeOptionsSupplier; @Mock - private ValueSerdeFactory serdeFactory; + private KeySerdeFactory keySerdeFactory; @Mock - private Serde serde; + private ValueSerdeFactory valueSerdeFactory; + @Mock + private Serde keySerde; + @Mock + private Serde 
valueSerde; private CreateSourceFactory createSourceFactory; private KsqlConfig ksqlConfig = new KsqlConfig(ImmutableMap.of()); private CreateSourceProperties withProperties = CreateSourceProperties.from(MINIMIM_PROPS); - @Rule - public final ExpectedException expectedException = ExpectedException.none(); - @Before - @SuppressWarnings("unchecked") public void before() { when(serviceContext.getTopicClient()).thenReturn(topicClient); when(topicClient.isTopicExists(any())).thenReturn(true); - when(serdeFactory.create(any(), any(), any(), any(), any(), any())).thenReturn(serde); + when(keySerdeFactory.create(any(), any(), any(), any(), any(), any())).thenReturn(keySerde); + when(valueSerdeFactory.create(any(), any(), any(), any(), any(), any())).thenReturn(valueSerde); givenCommandFactories(); } @@ -141,7 +161,8 @@ private void givenCommandFactoriesWithMocks() { createSourceFactory = new CreateSourceFactory( serviceContext, serdeOptionsSupplier, - serdeFactory + keySerdeFactory, + valueSerdeFactory ); } @@ -149,13 +170,15 @@ private void givenCommandFactoriesWithMocks() { public void shouldCreateCommandForCreateStream() { // Given: final CreateStream ddlStatement = - new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, TABLE_ELEMENTS, true, withProperties); // When: - final DdlCommand result = createSourceFactory + final CreateStreamCommand result = createSourceFactory .createStreamCommand(ddlStatement, ksqlConfig); - assertThat(result, instanceOf(CreateStreamCommand.class)); + // Then: + assertThat(result.getSourceName(), is(SOME_NAME)); + assertThat(result.getTopicName(), is(TOPIC_NAME)); } @Test @@ -168,11 +191,12 @@ public void shouldCreateCommandForCreateTable() { true, withProperties); // When: - final DdlCommand result = createSourceFactory + final CreateTableCommand result = createSourceFactory .createTableCommand(ddlStatement, ksqlConfig); // Then: - assertThat(result, instanceOf(CreateTableCommand.class)); + assertThat(result.getSourceName(), is(SOME_NAME)); + assertThat(result.getTopicName(), is(TOPIC_NAME)); } @Test @@ -189,19 +213,17 @@ public void shouldCreateStreamCommandWithSingleValueWrappingFromPropertiesNotCon givenProperty(CommonCreateConfigs.WRAP_SINGLE_VALUE, new BooleanLiteral("false")); final CreateStream statement = - new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: - final DdlCommand cmd = createSourceFactory + final CreateStreamCommand cmd = createSourceFactory .createStreamCommand( statement, ksqlConfig.cloneWithPropertyOverwrite(overrides) ); // Then: - assertThat(cmd, is(instanceOf(CreateStreamCommand.class))); - assertThat(((CreateStreamCommand) cmd).getFormats().getOptions(), - contains(SerdeOption.UNWRAP_SINGLE_VALUES)); + assertThat(cmd.getFormats().getOptions(), contains(SerdeOption.UNWRAP_SINGLE_VALUES)); } @Test @@ -212,32 +234,28 @@ public void shouldCreateStreamCommandWithSingleValueWrappingFromConfig() { )); final CreateStream statement = - new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: - final DdlCommand cmd = createSourceFactory + final CreateStreamCommand cmd = createSourceFactory .createStreamCommand(statement, ksqlConfig); // Then: - assertThat(cmd, is(instanceOf(CreateStreamCommand.class))); - assertThat(((CreateStreamCommand) cmd).getFormats().getOptions(), - contains(SerdeOption.UNWRAP_SINGLE_VALUES)); + 
assertThat(cmd.getFormats().getOptions(), contains(SerdeOption.UNWRAP_SINGLE_VALUES)); } @Test public void shouldCreateStreamCommandWithSingleValueWrappingFromDefaultConfig() { // Given: final CreateStream statement = - new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: - final DdlCommand cmd = createSourceFactory + final CreateStreamCommand cmd = createSourceFactory .createStreamCommand(statement, ksqlConfig); // Then: - assertThat(cmd, is(instanceOf(CreateStreamCommand.class))); - assertThat(((CreateStreamCommand) cmd).getFormats().getOptions(), - not(contains(SerdeOption.UNWRAP_SINGLE_VALUES))); + assertThat(cmd.getFormats().getOptions(), not(contains(SerdeOption.UNWRAP_SINGLE_VALUES))); } @Test @@ -254,18 +272,16 @@ public void shouldCreateTableCommandWithSingleValueWrappingFromPropertiesNotConf givenProperty(CommonCreateConfigs.WRAP_SINGLE_VALUE, new BooleanLiteral("false")); final CreateTable statement = - new CreateTable(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateTable(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: - final DdlCommand cmd = createSourceFactory + final CreateTableCommand cmd = createSourceFactory .createTableCommand( statement, ksqlConfig.cloneWithPropertyOverwrite(overrides)); // Then: - assertThat(cmd, is(instanceOf(CreateTableCommand.class))); - assertThat(((CreateTableCommand) cmd).getFormats().getOptions(), - contains(SerdeOption.UNWRAP_SINGLE_VALUES)); + assertThat(cmd.getFormats().getOptions(), contains(SerdeOption.UNWRAP_SINGLE_VALUES)); } @Test @@ -276,32 +292,28 @@ public void shouldCreateTableCommandWithSingleValueWrappingFromConfig() { )); final CreateTable statement = - new CreateTable(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateTable(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: - final DdlCommand cmd = createSourceFactory + final CreateTableCommand cmd = createSourceFactory .createTableCommand(statement, ksqlConfig); // Then: - assertThat(cmd, is(instanceOf(CreateTableCommand.class))); - assertThat(((CreateTableCommand) cmd).getFormats().getOptions(), - contains(SerdeOption.UNWRAP_SINGLE_VALUES)); + assertThat(cmd.getFormats().getOptions(), contains(SerdeOption.UNWRAP_SINGLE_VALUES)); } @Test public void shouldCreateTableCommandWithSingleValueWrappingFromDefaultConfig() { // Given: final CreateTable statement = - new CreateTable(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateTable(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: - final DdlCommand cmd = createSourceFactory + final CreateTableCommand cmd = createSourceFactory .createTableCommand(statement, ksqlConfig); // Then: - assertThat(cmd, is(instanceOf(CreateTableCommand.class))); - assertThat(((CreateTableCommand) cmd).getFormats().getOptions(), - not(contains(SerdeOption.UNWRAP_SINGLE_VALUES))); + assertThat(cmd.getFormats().getOptions(), not(contains(SerdeOption.UNWRAP_SINGLE_VALUES))); } @Test @@ -338,7 +350,7 @@ public void shouldThrowOnNoElementsInCreateTable() { public void shouldNotThrowWhenThereAreElementsInCreateStream() { // Given: final CreateStream statement = - new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: createSourceFactory.createStreamCommand(statement, ksqlConfig); @@ -350,7 +362,7 @@ public void shouldNotThrowWhenThereAreElementsInCreateStream() { public void shouldNotThrowWhenThereAreElementsInCreateTable() { // 
Given: final CreateTable statement = - new CreateTable(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateTable(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: createSourceFactory.createTableCommand(statement, ksqlConfig); @@ -363,7 +375,7 @@ public void shouldThrowIfTopicDoesNotExistForStream() { // Given: when(topicClient.isTopicExists(any())).thenReturn(false); final CreateStream statement = - new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // Then: expectedException.expect(KsqlException.class); @@ -378,7 +390,7 @@ public void shouldThrowIfTopicDoesNotExistForStream() { public void shouldNotThrowIfTopicDoesExist() { // Given: final CreateStream statement = - new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: createSourceFactory.createStreamCommand(statement, ksqlConfig); @@ -391,7 +403,7 @@ public void shouldNotThrowIfTopicDoesExist() { public void shouldThrowIfKeyFieldNotInSchemaForStream() { // Given: givenProperty(CreateConfigs.KEY_NAME_PROPERTY, new StringLiteral("`will-not-find-me`")); - final CreateStream statement = new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + final CreateStream statement = new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // Then: expectedException.expect(KsqlException.class); @@ -411,7 +423,7 @@ public void shouldThrowIfTimestampColumnDoesNotExistForStream() { new StringLiteral("`will-not-find-me`") ); final CreateStream statement = - new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // Then: expectedException.expect(KsqlException.class); @@ -427,7 +439,7 @@ public void shouldThrowIfTimestampColumnDoesNotExistForStream() { public void shouldBuildSerdeOptionsForStream() { // Given: givenCommandFactoriesWithMocks(); - final CreateStream statement = new CreateStream(SOME_NAME, ONE_ELEMENT, true, withProperties); + final CreateStream statement = new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); final LogicalSchema schema = LogicalSchema.builder() .valueColumn(ColumnName.of("bob"), SqlTypes.STRING) .build(); @@ -457,7 +469,7 @@ public void shouldBuildTimestampColumnForStream() { new StringLiteral(quote(ELEMENT2.getName().name())) ); final CreateStream statement = - new CreateStream(SOME_NAME, TWO_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, TABLE_ELEMENTS, true, withProperties); // When: final CreateStreamCommand cmd = createSourceFactory.createStreamCommand( @@ -482,7 +494,7 @@ public void shouldBuildTimestampColumnForTable() { new StringLiteral(quote(ELEMENT2.getName().name())) ); final CreateTable statement = - new CreateTable(SOME_NAME, TWO_ELEMENTS, true, withProperties); + new CreateTable(SOME_NAME, TABLE_ELEMENTS, true, withProperties); // When: final CreateTableCommand cmd = createSourceFactory.createTableCommand( @@ -509,7 +521,7 @@ public void shouldBuildTimestampColumnWithFormat() { new StringLiteral("%s") )); final CreateStream statement = - new CreateStream(SOME_NAME, TWO_ELEMENTS, true, withProperties); + new CreateStream(SOME_NAME, TABLE_ELEMENTS, true, withProperties); // When: final CreateStreamCommand cmd = createSourceFactory.createStreamCommand( @@ -529,7 +541,8 @@ public void shouldBuildTimestampColumnWithFormat() { @Test public void shouldBuildSchemaWithImplicitKeyFieldForStream() { // Given: - final 
CreateStream statement = new CreateStream(SOME_NAME, TWO_ELEMENTS, true, withProperties); + final CreateStream statement = new CreateStream(SOME_NAME, TABLE_ELEMENTS, true, + withProperties); // When: final CreateStreamCommand result = createSourceFactory.createStreamCommand( @@ -538,12 +551,7 @@ public void shouldBuildSchemaWithImplicitKeyFieldForStream() { ); // Then: - assertThat(result.getSchema(), is(LogicalSchema.builder() - .keyColumn(ColumnName.of("ROWKEY"), SqlTypes.STRING) - .valueColumn(ColumnName.of("bob"), SqlTypes.STRING) - .valueColumn(ColumnName.of("hojjat"), SqlTypes.BIGINT) - .build() - )); + assertThat(result.getSchema(), is(EXPECTED_SCHEMA)); } @Test @@ -576,32 +584,54 @@ public void shouldBuildSchemaWithExplicitKeyFieldForStream() { } @Test - public void shouldCreateSerdeToValidateValueFormatCanHandleValueSchemaForStream() { + public void shouldValidateKeyFormatCanHandleKeySchema() { // Given: givenCommandFactoriesWithMocks(); - final CreateStream statement = new CreateStream(SOME_NAME, TWO_ELEMENTS, true, withProperties); - final LogicalSchema schema = LogicalSchema.builder() - .valueColumn(ColumnName.of("bob"), SqlTypes.STRING) - .valueColumn(ColumnName.of("hojjat"), SqlTypes.BIGINT) - .build(); + final CreateStream statement = new CreateStream(SOME_NAME, TABLE_ELEMENTS, true, + withProperties); + + when(keySerdeFactory.create( + FormatInfo.of(KAFKA, Optional.empty(), Optional.empty()), + PersistenceSchema.from(EXPECTED_SCHEMA.keyConnectSchema(), false), + ksqlConfig, + serviceContext.getSchemaRegistryClientFactory(), + "", + NoopProcessingLogContext.INSTANCE + )).thenThrow(new RuntimeException("Boom!")); + + // Expect: + expectedException.expectMessage("Boom!"); // When: createSourceFactory.createStreamCommand(statement, ksqlConfig); + } - // Then: - verify(serdeFactory).create( + @Test + public void shouldCreateValueSerdeToValidateValueFormatCanHandleValueSchema() { + // Given: + givenCommandFactoriesWithMocks(); + final CreateTable statement = new CreateTable(SOME_NAME, TABLE_ELEMENTS, true, + withProperties); + + when(valueSerdeFactory.create( FormatInfo.of(JSON, Optional.empty(), Optional.empty()), - PersistenceSchema.from(schema.valueConnectSchema(), false), + PersistenceSchema.from(EXPECTED_SCHEMA.valueConnectSchema(), false), ksqlConfig, serviceContext.getSchemaRegistryClientFactory(), "", NoopProcessingLogContext.INSTANCE - ); + )).thenThrow(new RuntimeException("Boom!")); + + // Expect: + expectedException.expectMessage("Boom!"); + + // When: + createSourceFactory.createTableCommand(statement, ksqlConfig); } @Test public void shouldDefaultToKafkaKeySerdeForStream() { - final CreateStream statement = new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + final CreateStream statement = new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: final CreateStreamCommand cmd = createSourceFactory.createStreamCommand( @@ -620,7 +650,7 @@ public void shouldHandleValueAvroSchemaNameForStream() { givenCommandFactoriesWithMocks(); givenProperty("VALUE_FORMAT", new StringLiteral("Avro")); givenProperty("value_avro_schema_full_name", new StringLiteral("full.schema.name")); - final CreateStream statement = new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + final CreateStream statement = new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: final CreateStreamCommand cmd = createSourceFactory.createStreamCommand( @@ -638,7 +668,7 @@ public void shouldHandleValueAvroSchemaNameForStream() { public void 
shouldHandleSessionWindowedKeyForStream() { // Given: givenProperty("window_type", new StringLiteral("session")); - final CreateStream statement = new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + final CreateStream statement = new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: final CreateStreamCommand cmd = createSourceFactory.createStreamCommand( @@ -648,7 +678,7 @@ public void shouldHandleSessionWindowedKeyForStream() { // Then: assertThat(cmd.getFormats().getKeyFormat(), is(FormatInfo.of(KAFKA))); - assertThat(cmd.getWindowInfo().get(), is(WindowInfo.of(SESSION, Optional.empty()))); + assertThat(cmd.getWindowInfo(), is(Optional.of(WindowInfo.of(SESSION, Optional.empty())))); } @Test @@ -658,7 +688,7 @@ public void shouldHandleTumblingWindowedKeyForStream() { "window_type", new StringLiteral("tumbling"), "window_size", new StringLiteral("1 MINUTE") )); - final CreateStream statement = new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + final CreateStream statement = new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: final CreateStreamCommand cmd = createSourceFactory.createStreamCommand( @@ -669,8 +699,9 @@ public void shouldHandleTumblingWindowedKeyForStream() { // Then: assertThat(cmd.getFormats().getKeyFormat(), is(FormatInfo.of(KAFKA))); assertThat( - cmd.getWindowInfo().get(), - is(WindowInfo.of(TUMBLING, Optional.of(Duration.ofMinutes(1))))); + cmd.getWindowInfo(), + is(Optional.of(WindowInfo.of(TUMBLING, Optional.of(Duration.ofMinutes(1))))) + ); } @Test @@ -680,7 +711,7 @@ public void shouldHandleHoppingWindowedKeyForStream() { "window_type", new StringLiteral("Hopping"), "window_size", new StringLiteral("2 SECONDS") )); - final CreateStream statement = new CreateStream(SOME_NAME, SOME_ELEMENTS, true, withProperties); + final CreateStream statement = new CreateStream(SOME_NAME, ONE_ELEMENTS, true, withProperties); // When: final CreateStreamCommand cmd = createSourceFactory.createStreamCommand( @@ -691,7 +722,9 @@ public void shouldHandleHoppingWindowedKeyForStream() { // Then: assertThat(cmd.getFormats().getKeyFormat(), is(FormatInfo.of(KAFKA))); assertThat( - cmd.getWindowInfo().get(), is(WindowInfo.of(HOPPING, Optional.of(Duration.ofSeconds(2))))); + cmd.getWindowInfo(), + is(Optional.of(WindowInfo.of(HOPPING, Optional.of(Duration.ofSeconds(2))))) + ); } @Test @@ -826,7 +859,7 @@ private static TableElement tableElement( return te; } - private String quote(final String identifier) { + private static String quote(final String identifier) { return String.format("`%s`", identifier); } } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/key-schemas.json b/ksql-functional-tests/src/test/resources/query-validation-tests/key-schemas.json index c80206653832..a711eae1a8e5 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/key-schemas.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/key-schemas.json @@ -1,7 +1,7 @@ { "tests": [ { - "name": "stream implicit STRING ROWKEY", + "name": "stream implicit KAFKA STRING ROWKEY", "statements": [ "CREATE STREAM INPUT (ID bigint) WITH (kafka_topic='input',value_format='JSON');", "CREATE STREAM OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" @@ -17,10 +17,20 @@ {"topic": "OUTPUT", "key": "1", "value": {"ID": 2, "KEY": "1"}}, {"topic": "OUTPUT", "key": "", "value": {"ID": 3, "KEY": ""}}, {"topic": "OUTPUT", "key": null, "value": {"ID": 4, "KEY": null}} - ] + ], + "post": { + "sources": [ + { 
+ "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY STRING KEY, ID BIGINT, KEY STRING" + } + ] + } }, { - "name": "table implicit STRING ROWKEY", + "name": "table implicit KAFKA STRING ROWKEY", "statements": [ "CREATE TABLE INPUT (ID bigint) WITH (kafka_topic='input',value_format='JSON');", "CREATE TABLE OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" @@ -34,10 +44,20 @@ {"topic": "OUTPUT", "key": "1", "value": {"ID": 1, "KEY": "1"}}, {"topic": "OUTPUT", "key": "1", "value": {"ID": 2, "KEY": "1"}}, {"topic": "OUTPUT", "key": "", "value": {"ID": 3, "KEY": ""}} - ] + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "table", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY STRING KEY, ID BIGINT, KEY STRING" + } + ] + } }, { - "name": "stream explicit STRING ROWKEY", + "name": "stream explicit KAFKA STRING ROWKEY", "statements": [ "CREATE STREAM INPUT (ROWKEY STRING KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", "CREATE STREAM OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" @@ -53,10 +73,20 @@ {"topic": "OUTPUT", "key": "1", "value": {"ID": 2, "KEY": "1"}}, {"topic": "OUTPUT", "key": "", "value": {"ID": 3, "KEY": ""}}, {"topic": "OUTPUT", "key": null, "value": {"ID": 4, "KEY": null}} - ] + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY STRING KEY, ID BIGINT, KEY STRING" + } + ] + } }, { - "name": "table explicit STRING ROWKEY", + "name": "table explicit KAFKA STRING ROWKEY", "statements": [ "CREATE TABLE INPUT (ROWKEY STRING KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", "CREATE TABLE OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" @@ -70,10 +100,20 @@ {"topic": "OUTPUT", "key": "1", "value": {"ID": 1, "KEY": "1"}}, {"topic": "OUTPUT", "key": "1", "value": {"ID": 2, "KEY": "1"}}, {"topic": "OUTPUT", "key": "", "value": {"ID": 3, "KEY": ""}} - ] + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "table", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY STRING KEY, ID BIGINT, KEY STRING" + } + ] + } }, { - "name": "stream explicit non-STRING ROWKEY", + "name": "stream explicit KAFKA INT ROWKEY", "statements": [ "CREATE STREAM INPUT (ROWKEY INT KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", "CREATE STREAM OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" @@ -100,7 +140,61 @@ } }, { - "name": "table explicit non-STRING ROWKEY", + "name": "table explicit KAFKA INT ROWKEY", + "statements": [ + "CREATE TABLE INPUT (ROWKEY INT KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", + "CREATE TABLE OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" + ], + "inputs": [ + {"topic": "input", "key": 3, "value": {"id": 1}}, + {"topic": "input", "key": 2, "value": {"id": 2}}, + {"topic": "input", "key": 1, "value": {"id": 3}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3, "value": {"ID": 1, "KEY": 3}}, + {"topic": "OUTPUT", "key": 2, "value": {"ID": 2, "KEY": 2}}, + {"topic": "OUTPUT", "key": 1, "value": {"ID": 3, "KEY": 1}} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "table", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY INT KEY, ID BIGINT, KEY INT" + } + ] + } + }, + { + "name": "stream explicit KAFKA BIGINT ROWKEY", + "statements": [ + "CREATE STREAM INPUT (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", + "CREATE STREAM OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" + ], + "inputs": [ + {"topic": "input", 
"key": 3, "value": {"id": 1}}, + {"topic": "input", "key": 2, "value": {"id": 2}}, + {"topic": "input", "key": null, "value": {"id": 3}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3, "value": {"ID": 1, "KEY": 3}}, + {"topic": "OUTPUT", "key": 2, "value": {"ID": 2, "KEY": 2}}, + {"topic": "OUTPUT", "key": null, "value": {"ID": 3, "KEY": null}} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY BIGINT KEY, ID BIGINT, KEY BIGINT" + } + ] + } + }, + { + "name": "table explicit KAFKA BIGINT ROWKEY", "statements": [ "CREATE TABLE INPUT (ROWKEY BIGINT KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", "CREATE TABLE OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" @@ -126,6 +220,102 @@ ] } }, + { + "name": "stream explicit KAFKA DOUBLE ROWKEY", + "statements": [ + "CREATE STREAM INPUT (ROWKEY DOUBLE KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", + "CREATE STREAM OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" + ], + "inputs": [ + {"topic": "input", "key": 3.0, "value": {"id": 1}}, + {"topic": "input", "key": 2.0, "value": {"id": 2}}, + {"topic": "input", "key": null, "value": {"id": 3}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3.0, "value": {"ID": 1, "KEY": 3.0}}, + {"topic": "OUTPUT", "key": 2.0, "value": {"ID": 2, "KEY": 2.0}}, + {"topic": "OUTPUT", "key": null, "value": {"ID": 3, "KEY": null}} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY DOUBLE KEY, ID BIGINT, KEY DOUBLE" + } + ] + } + }, + { + "name": "table explicit KAFKA DOUBLE ROWKEY", + "statements": [ + "CREATE TABLE INPUT (ROWKEY DOUBLE KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');", + "CREATE TABLE OUTPUT as SELECT ID, ROWKEY as KEY FROM INPUT;" + ], + "inputs": [ + {"topic": "input", "key": 3.0, "value": {"id": 1}}, + {"topic": "input", "key": 2.0, "value": {"id": 2}}, + {"topic": "input", "key": 1.0, "value": {"id": 3}} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3.0, "value": {"ID": 1, "KEY": 3.0}}, + {"topic": "OUTPUT", "key": 2.0, "value": {"ID": 2, "KEY": 2.0}}, + {"topic": "OUTPUT", "key": 1.0, "value": {"ID": 3, "KEY": 1.0}} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "table", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY DOUBLE KEY, ID BIGINT, KEY DOUBLE" + } + ] + } + }, + { + "name": "create stream explicit unsupported ROWKEY type", + "statements": [ + "CREATE STREAM INPUT (ROWKEY BOOLEAN KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');" + ], + "expectedException": { + "type": "io.confluent.ksql.util.KsqlStatementException", + "message": "Key format does not support key schema.\nformat: KAFKA\nschema: Persistence{schema=STRUCT NOT NULL, unwrapped=false}\nreason: The 'KAFKA' format does not support type 'BOOLEAN'" + } + }, + { + "name": "create table explicit unsupported ROWKEY type", + "statements": [ + "CREATE TABLE INPUT (ROWKEY DECIMAL(21,19) KEY, ID bigint) WITH (kafka_topic='input',value_format='JSON');" + ], + "expectedException": { + "type": "io.confluent.ksql.util.KsqlStatementException", + "message": "Key format does not support key schema.\nformat: KAFKA\nschema: Persistence{schema=STRUCT NOT NULL, unwrapped=false}\nreason: The 'KAFKA' format does not support type 'DECIMAL'" + } + }, + { + "name": "create stream as explicit unsupported ROWKEY type", + "statements": [ + "CREATE STREAM INPUT (ROWKEY STRING KEY, ID ARRAY) WITH 
(kafka_topic='input',value_format='JSON');", + "CREATE STREAM OUTPUT AS SELECT * FROM INPUT PARTITION BY ID;" + ], + "expectedException": { + "type": "io.confluent.ksql.util.KsqlException", + "message": "Key format does not support key schema.\nformat: KAFKA\nschema: Persistence{schema=STRUCT> NOT NULL, unwrapped=false}\nreason: The 'KAFKA' format does not support type 'ARRAY'" + } + }, + { + "name": "create table as explicit unsupported ROWKEY type", + "statements": [ + "CREATE STREAM INPUT (ROWKEY STRING KEY, ID MAP) WITH (kafka_topic='input',value_format='JSON');", + "CREATE TABLE OUTPUT AS SELECT COUNT() FROM INPUT GROUP BY ID;" + ], + "expectedException": { + "type": "io.confluent.ksql.util.KsqlException", + "message": "Key format does not support key schema.\nformat: KAFKA\nschema: Persistence{schema=STRUCT> NOT NULL, unwrapped=false}\nreason: The 'KAFKA' format does not support type 'MAP'" + } + }, { "name": "explicit key field named other than ROWKEY", "statements": [ diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json b/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json index 5b79961ad44b..dc322587c0fe 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/serdes.json @@ -628,7 +628,7 @@ "format": ["JSON", "AVRO"], "statements": [ "CREATE TABLE INPUT (f0 STRING, f1 STRING) WITH (kafka_topic='input_topic', value_format='{FORMAT}');", - "CREATE TABLE OUTPUT WITH(WRAP_SINGLE_VALUE=true) AS SELECT * FROM INPUT;" + "CREATE TABLE OUTPUT WITH(WRAP_SINGLE_VALUE=false) AS SELECT * FROM INPUT;" ], "expectedException": { "type": "io.confluent.ksql.util.KsqlStatementException", diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericKeySerDe.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericKeySerDe.java index 3df6b9986da0..3c4a20584757 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericKeySerDe.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericKeySerDe.java @@ -25,6 +25,7 @@ import io.confluent.ksql.logging.processing.ProcessingLogger; import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.SchemaUtil; import java.util.Collections; import java.util.Map; @@ -112,6 +113,20 @@ private Serde createInner( final ProcessingLogContext processingLogContext, final Class targetType ) { + try { + serdeFactories.validate(format, schema); + } catch (final Exception e) { + throw new KsqlException("Key format does not support key schema." 
+ + System.lineSeparator() + + "format: " + format.getFormat() + + System.lineSeparator() + + "schema: " + schema + + System.lineSeparator() + + "reason: " + e.getMessage(), + e + ); + } + final Serde serde = serdeFactories .create(format, schema, ksqlConfig, schemaRegistryClientFactory, targetType); @@ -149,7 +164,7 @@ private static Serde unwrapped( return Serdes.serdeFrom(serializer, deserializer); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) private static Serde wrapped( final Serde innerSerde, final Class type diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericRowSerDe.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericRowSerDe.java index b5251d3d4392..a60b6e9ac8c8 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericRowSerDe.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericRowSerDe.java @@ -26,6 +26,7 @@ import io.confluent.ksql.logging.processing.ProcessingLogger; import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.SchemaUtil; import java.util.ArrayList; import java.util.Collections; @@ -102,6 +103,20 @@ private Serde from( final ProcessingLogContext processingLogContext, final Class targetType ) { + try { + serdeFactories.validate(format, schema); + } catch (final Exception e) { + throw new KsqlException("Value format does not support value schema." + + System.lineSeparator() + + "format: " + format.getFormat() + + System.lineSeparator() + + "schema: " + schema + + System.lineSeparator() + + "reason: " + e.getMessage(), + e + ); + } + final Serde serde = serdeFactories .create(format, schema, ksqlConfig, schemaRegistryClientFactory, targetType); diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/KsqlSerdeFactories.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/KsqlSerdeFactories.java index ce0a0a692f89..64926d7a50d4 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/KsqlSerdeFactories.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/KsqlSerdeFactories.java @@ -43,6 +43,11 @@ final class KsqlSerdeFactories implements SerdeFactories { this.factoryMethod = Objects.requireNonNull(factoryMethod, "factoryMethod"); } + @Override + public void validate(final FormatInfo format, final PersistenceSchema schema) { + factoryMethod.apply(format).validate(schema); + } + @Override public Serde create( final FormatInfo format, diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/SerdeFactories.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/SerdeFactories.java index d5ecb7f6b53e..7822ce616bf8 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/SerdeFactories.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/SerdeFactories.java @@ -23,6 +23,17 @@ interface SerdeFactories { + /** + * Validate that supplied {@code format} can handle the supplied {@code schema}. + * @param format the format to validate. + * @param schema the schema to validate. + * @throws RuntimeException if format does not support schema. + */ + void validate( + FormatInfo format, + PersistenceSchema schema + ); + /** * Create {@link Serde} for supported KSQL formats. 
* diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericKeySerDeTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericKeySerDeTest.java index 470727a0879b..76c406b67b84 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericKeySerDeTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericKeySerDeTest.java @@ -19,6 +19,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -32,6 +33,7 @@ import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.serde.GenericKeySerDe.UnwrappedKeySerializer; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; import java.time.Duration; import java.util.Collections; import java.util.Optional; @@ -49,7 +51,9 @@ import org.apache.kafka.streams.kstream.TimeWindowedSerializer; import org.apache.kafka.streams.kstream.Windowed; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; @@ -58,7 +62,6 @@ public class GenericKeySerDeTest { private static final FormatInfo FORMAT = FormatInfo.of(Format.JSON); - private static final WindowInfo WINDOW = WindowInfo.of(WindowType.SESSION, Optional.empty()); private static final KsqlConfig CONFIG = new KsqlConfig(ImmutableMap.of()); private static final String LOGGER_NAME_PREFIX = "bob"; @@ -78,6 +81,9 @@ public class GenericKeySerDeTest { true ); + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + @Mock private SerdeFactories serdeFactories; @Mock @@ -109,6 +115,33 @@ public void setUp() { when(innerSerde.deserializer()).thenReturn(innerDeserializer); } + @Test + public void shouldValidateFormatCanHandleSchema() { + // Given: + doThrow(new RuntimeException("Boom!")) + .when(serdeFactories).validate(FORMAT, WRAPPED_SCHEMA); + + // Expect: + expectedException.expect(KsqlException.class); + expectedException.expectMessage("Key format does not support key schema." 
+ + System.lineSeparator() + + "format: JSON" + + System.lineSeparator() + + "schema: Persistence{schema=STRUCT NOT NULL, unwrapped=false}" + + System.lineSeparator() + + "reason: Boom!"); + + // When: + factory.create( + FORMAT, + WRAPPED_SCHEMA, + CONFIG, + srClientFactory, + LOGGER_NAME_PREFIX, + processingLogCxt + ); + } + @Test public void shouldCreateCorrectInnerSerdeForWrapped() { // When: diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericRowSerDeTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericRowSerDeTest.java index 0875125a08cb..2a9c484fb0e4 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericRowSerDeTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericRowSerDeTest.java @@ -19,6 +19,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -31,6 +32,7 @@ import io.confluent.ksql.logging.processing.ProcessingLoggerFactory; import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.util.KsqlConfig; +import io.confluent.ksql.util.KsqlException; import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.Optional; @@ -56,6 +58,9 @@ public class GenericRowSerDeTest { private static final String LOGGER_PREFIX = "bob"; + private static final FormatInfo FORMAT = + FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()); + private static final PersistenceSchema MUTLI_FIELD_SCHEMA = PersistenceSchema.from( (ConnectSchema) SchemaBuilder.struct() @@ -119,11 +124,38 @@ public void setUp() { valueSerde = new GenericRowSerDe(serdesFactories); } + @Test + public void shouldValidateFormatCanHandleSchema() { + // Given: + doThrow(new RuntimeException("Boom!")) + .when(serdesFactories).validate(FORMAT, MUTLI_FIELD_SCHEMA); + + // Expect: + expectedException.expect(KsqlException.class); + expectedException.expectMessage("Value format does not support value schema." 
+ + System.lineSeparator() + + "format: JSON" + + System.lineSeparator() + + "schema: Persistence{schema=STRUCT NOT NULL, unwrapped=false}" + + System.lineSeparator() + + "reason: Boom!"); + + // When: + valueSerde.create( + FORMAT, + MUTLI_FIELD_SCHEMA, + ksqlConfig, + srClientFactory, + LOGGER_PREFIX, + processingContext + ); + } + @Test public void shouldGetStructSerdeOnConstruction() { // When: valueSerde.create( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), + FORMAT, MUTLI_FIELD_SCHEMA, ksqlConfig, srClientFactory, @@ -133,7 +165,7 @@ public void shouldGetStructSerdeOnConstruction() { // Then: verify(serdesFactories).create( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), + FORMAT, MUTLI_FIELD_SCHEMA, ksqlConfig, srClientFactory, @@ -145,7 +177,7 @@ public void shouldGetStructSerdeOnConstruction() { public void shouldGetStringSerdeOnConstruction() { // When: valueSerde.create( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), + FORMAT, UNWRAPPED_SINGLE_FIELD_SCHEMA, ksqlConfig, srClientFactory, @@ -155,7 +187,7 @@ public void shouldGetStringSerdeOnConstruction() { // Then: verify(serdesFactories).create( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), + FORMAT, UNWRAPPED_SINGLE_FIELD_SCHEMA, ksqlConfig, srClientFactory, @@ -170,7 +202,7 @@ public void shouldThrowOnNullStructSerde() { // When: valueSerde.create( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), + FORMAT, MUTLI_FIELD_SCHEMA, ksqlConfig, srClientFactory, @@ -183,7 +215,7 @@ public void shouldThrowOnNullStructSerde() { public void shouldThrowOnNullSchema() { // When: GenericRowSerDe.from( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), + FORMAT, null, ksqlConfig, srClientFactory, @@ -196,7 +228,7 @@ public void shouldThrowOnNullSchema() { public void shouldCreateProcessingLoggerWithCorrectName() { // When: GenericRowSerDe.from( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), + FORMAT, MUTLI_FIELD_SCHEMA, ksqlConfig, srClientFactory, @@ -494,7 +526,7 @@ public void shouldDeserializeNullUnwrappedSingleFieldGenericRow() { private Serde givenSerdeForSchema(final PersistenceSchema schema) { return valueSerde.create( - FormatInfo.of(Format.JSON, Optional.empty(), Optional.empty()), + FORMAT, schema, ksqlConfig, srClientFactory, diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/KsqlSerdeFactoriesTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/KsqlSerdeFactoriesTest.java index 16cbae5c996f..f401c231476c 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/KsqlSerdeFactoriesTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/KsqlSerdeFactoriesTest.java @@ -23,6 +23,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -76,6 +77,28 @@ public void setUp() { when(factoryMethod.apply(any())).thenReturn(ksqlSerdeFactory); } + @Test + public void shouldValidateOnValidate() { + // When: + factory.validate(formatInfo, schema); + + // Then: + verify(ksqlSerdeFactory).validate(schema); + } + + @Test + public void shouldThrowOnValidateIfValidationFails() { + // Given: + doThrow(new RuntimeException("Boom!")) + .when(ksqlSerdeFactory).validate(any()); + + // Expect: + expectedException.expectMessage("Boom!"); + + // When: + factory.validate(formatInfo, schema); + 
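+    // Then: the stubbed ksqlSerdeFactory.validate throws, and its "Boom!" message propagates unchanged to the caller (asserted via the expectedException set up above).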
} + @Test public void shouldCreateFactory() { // When: @@ -106,7 +129,7 @@ public void shouldValidateSerdeFactoryCanHandleSchema() { verify(ksqlSerdeFactory).validate(schema); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) @Test public void shouldCreateSerde() { // Given: From 508e5a569b393689428e735dbae2496b2bf2cc2d Mon Sep 17 00:00:00 2001 From: elismaga Date: Mon, 6 Jan 2020 15:48:41 -0800 Subject: [PATCH 083/123] Disable building docker images (#4230) --- Jenkinsfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 59d2b3564dd1..c5da3d141b71 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4,6 +4,5 @@ dockerfile { slackChannel = '#ksql-alerts' upstreamProjects = 'confluentinc/schema-registry' dockerRepos = ['confluentinc/ksql-cli'] - extraBuildArgs = '-Dskip.docker.build=false' - extraDeployArgs = '-Dskip.docker.build=true' + extraDeployArgs = '-Ddocker.skip=true' } From 9479fd6eac7bed6e7f6c44781d5424b38f02521f Mon Sep 17 00:00:00 2001 From: Rohan Date: Thu, 9 Jan 2020 10:19:54 -0800 Subject: [PATCH 084/123] fix: don't load current qtt test case from legacy loader (#4245) --- .../ksql/test/loader/ExpectedTopologiesTestLoader.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java index 14b8e02ebba7..895248d34bd0 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/loader/ExpectedTopologiesTestLoader.java @@ -182,9 +182,6 @@ private static Stream buildVersionedTestCases( final List expectedTopologies ) { Stream.Builder builder = Stream.builder(); - if (test.getVersionBounds().contains(CURRENT_VERSION)) { - builder.add(test); - } for (final TopologiesAndVersion topologies : expectedTopologies) { if (!test.getVersionBounds().contains(topologies.getVersion())) { From de906c3155768f3608c303eec7465c97da9ac8a4 Mon Sep 17 00:00:00 2001 From: Andy Coates Date: Thu, 9 Jan 2020 18:45:55 +0000 Subject: [PATCH 085/123] chore: remove upstream docker registry as not public --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 33571806ec2a..a110cb258e6c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -23,7 +23,7 @@ # # 3. build docker images from local changes (Note: access to Confluent docker registry is required): # Change `docker.upstream-tag` if you want to depend on anything other than the latest master upstream, e.g. 5.4.x-latest -# > mvn -Pdocker package -DskipTests -Dspotbugs.skip -Dcheckstyle.skip -Ddockerfile.skip=false -Dskip.docker.build=false -Ddocker.upstream-tag=master-latest -Ddocker.tag=local.build -Ddocker.upstream-registry='368821881613.dkr.ecr.us-west-2.amazonaws.com/' +# > mvn -Pdocker package -DskipTests -Dspotbugs.skip -Dcheckstyle.skip -Ddockerfile.skip=false -Dskip.docker.build=false -Ddocker.upstream-tag=master-latest -Ddocker.tag=local.build # # 4. 
check images build: # > docker image ls | grep ksql.local.build From f9917529b4a0d6eff44c36c6f04aea4268f5381a Mon Sep 17 00:00:00 2001 From: Alan Sheinberg <57688982+AlanConfluent@users.noreply.github.com> Date: Thu, 9 Jan 2020 11:02:02 -0800 Subject: [PATCH 086/123] perf: Improves pull query performance by making the default schema service a singleton (#4216) * perf: Improves pull query performance by making the schema registry client a singleton. --- .../KsqlSchemaRegistryClientFactory.java | 29 ++++++++++++++++--- .../KsqlSchemaRegistryClientFactoryTest.java | 19 +++--------- .../ksql/rest/server/KsqlRestApplication.java | 17 +++++++---- .../context/KsqlSecurityContextBinder.java | 8 +++-- .../KsqlSecurityContextBinderFactory.java | 10 +++++-- .../resources/streaming/WSQueryEndpoint.java | 19 ++++++++---- .../services/RestServiceContextFactory.java | 10 +++---- .../rest/server/KsqlRestApplicationTest.java | 10 ++++++- .../ksql/rest/server/TestKsqlRestApp.java | 9 ++++-- .../KsqlSecurityContextBinderFactoryTest.java | 12 +++++--- .../streaming/WSQueryEndpointTest.java | 8 ++--- 11 files changed, 102 insertions(+), 49 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/schema/registry/KsqlSchemaRegistryClientFactory.java b/ksql-engine/src/main/java/io/confluent/ksql/schema/registry/KsqlSchemaRegistryClientFactory.java index fc433bd28e69..e34617d7cb93 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/schema/registry/KsqlSchemaRegistryClientFactory.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/schema/registry/KsqlSchemaRegistryClientFactory.java @@ -15,6 +15,7 @@ package io.confluent.ksql.schema.registry; +import com.google.common.annotations.VisibleForTesting; import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.kafka.schemaregistry.client.rest.RestService; @@ -43,14 +44,21 @@ CachedSchemaRegistryClient create(RestService service, Map httpHeaders); } + public KsqlSchemaRegistryClientFactory( + final KsqlConfig config, + final Map schemaRegistryHttpHeaders + ) { + this(config, newSchemaRegistrySslFactory(config), schemaRegistryHttpHeaders); + } public KsqlSchemaRegistryClientFactory( final KsqlConfig config, + final SslFactory sslFactory, final Map schemaRegistryHttpHeaders ) { this(config, () -> new RestService(config.getString(KsqlConfig.SCHEMA_REGISTRY_URL_PROPERTY)), - new SslFactory(Mode.CLIENT), + sslFactory, CachedSchemaRegistryClient::new, schemaRegistryHttpHeaders ); @@ -59,6 +67,7 @@ public KsqlSchemaRegistryClientFactory( config.getString(KsqlConfig.SCHEMA_REGISTRY_URL_PROPERTY); } + @VisibleForTesting KsqlSchemaRegistryClientFactory(final KsqlConfig config, final Supplier serviceSupplier, final SslFactory sslFactory, @@ -69,13 +78,25 @@ public KsqlSchemaRegistryClientFactory( this.schemaRegistryClientConfigs = config.originalsWithPrefix( KsqlConfig.KSQL_SCHEMA_REGISTRY_PREFIX); - this.sslFactory - .configure(config.valuesWithPrefixOverride(KsqlConfig.KSQL_SCHEMA_REGISTRY_PREFIX)); - this.schemaRegistryClientFactory = schemaRegistryClientFactory; this.httpHeaders = httpHeaders; } + /** + * Creates an SslFactory configured to be used with the KsqlSchemaRegistryClient. 
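+   *
+   * <p>Pulled out of the constructor so the SSL configuration is applied once
+   * and the configured factory can be shared, rather than being re-applied by
+   * every factory instance as before.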
+ */ + public static SslFactory newSchemaRegistrySslFactory(final KsqlConfig config) { + final SslFactory sslFactory = new SslFactory(Mode.CLIENT); + configureSslFactory(config, sslFactory); + return sslFactory; + } + + @VisibleForTesting + static void configureSslFactory(final KsqlConfig config, final SslFactory sslFactory) { + sslFactory + .configure(config.valuesWithPrefixOverride(KsqlConfig.KSQL_SCHEMA_REGISTRY_PREFIX)); + } + public SchemaRegistryClient get() { final RestService restService = serviceSupplier.get(); final SSLContext sslContext = sslFactory.sslEngineBuilder().sslContext(); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/schema/registry/KsqlSchemaRegistryClientFactoryTest.java b/ksql-engine/src/test/java/io/confluent/ksql/schema/registry/KsqlSchemaRegistryClientFactoryTest.java index 4172235dd559..9a2be33e54a2 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/schema/registry/KsqlSchemaRegistryClientFactoryTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/schema/registry/KsqlSchemaRegistryClientFactoryTest.java @@ -88,14 +88,10 @@ public void shouldSetSocketFactoryWhenNoSpecificSslConfig() { final Map expectedConfigs = defaultConfigs(); // When: - final SchemaRegistryClient client = - new KsqlSchemaRegistryClientFactory(config, restServiceSupplier, sslFactory, - srClientFactory, Collections.emptyMap()).get(); + KsqlSchemaRegistryClientFactory.configureSslFactory(config, sslFactory); // Then: - assertThat(client, is(notNullValue())); verify(sslFactory).configure(expectedConfigs); - verify(restService).setSslSocketFactory(isA(SSL_CONTEXT.getSocketFactory().getClass())); } @Test @@ -109,14 +105,10 @@ public void shouldPickUpNonPrefixedSslConfig() { expectedConfigs.put(SslConfigs.SSL_PROTOCOL_CONFIG, "SSLv3"); // When: - final SchemaRegistryClient client = - new KsqlSchemaRegistryClientFactory(config, restServiceSupplier, sslFactory, - srClientFactory, Collections.emptyMap()).get(); + KsqlSchemaRegistryClientFactory.configureSslFactory(config, sslFactory); // Then: - assertThat(client, is(notNullValue())); verify(sslFactory).configure(expectedConfigs); - verify(restService).setSslSocketFactory(isA(SSL_CONTEXT.getSocketFactory().getClass())); } @Test @@ -130,15 +122,11 @@ public void shouldPickUpPrefixedSslConfig() { expectedConfigs.put(SslConfigs.SSL_PROTOCOL_CONFIG, "SSLv3"); // When: - final SchemaRegistryClient client = - new KsqlSchemaRegistryClientFactory(config, restServiceSupplier, sslFactory, - srClientFactory, Collections.emptyMap()).get(); + KsqlSchemaRegistryClientFactory.configureSslFactory(config, sslFactory); // Then: - assertThat(client, is(notNullValue())); verify(sslFactory).configure(expectedConfigs); - verify(restService).setSslSocketFactory(isA(SSL_CONTEXT.getSocketFactory().getClass())); } @Test @@ -160,6 +148,7 @@ public void shouldPassBasicAuthCredentialsToSchemaRegistryClient() { config, restServiceSupplier, sslFactory, srClientFactory, Collections.emptyMap()).get(); // Then: + verify(restService).setSslSocketFactory(isA(SSL_CONTEXT.getSocketFactory().getClass())); srClientFactory.create(same(restService), anyInt(), eq(expectedConfigs), any()); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java index 5759222d6429..4a96777fed9b 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java +++ 
b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java @@ -27,6 +27,7 @@ import com.google.common.util.concurrent.ListeningScheduledExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.ServiceInfo; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.function.InternalFunctionRegistry; @@ -69,6 +70,7 @@ import io.confluent.ksql.rest.util.KsqlUncaughtExceptionHandler; import io.confluent.ksql.rest.util.ProcessingLogServerUtils; import io.confluent.ksql.rest.util.RocksDBConfigSetterHandler; +import io.confluent.ksql.schema.registry.KsqlSchemaRegistryClientFactory; import io.confluent.ksql.security.KsqlAuthorizationValidator; import io.confluent.ksql.security.KsqlAuthorizationValidatorFactory; import io.confluent.ksql.security.KsqlDefaultSecurityExtension; @@ -96,6 +98,7 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedList; import java.util.List; @@ -444,7 +447,8 @@ public T getEndpointInstance(final Class endpointClass) { authorizationValidator, errorHandler, securityExtension, - serverState + serverState, + serviceContext.getSchemaRegistryClientFactory() ); } }) @@ -460,8 +464,11 @@ static KsqlRestApplication buildApplication( final Function, VersionCheckerAgent> versionCheckerFactory ) { final KsqlConfig ksqlConfig = new KsqlConfig(restConfig.getKsqlConfigProperties()); + final Supplier schemaRegistryClientFactory = + new KsqlSchemaRegistryClientFactory(ksqlConfig, Collections.emptyMap())::get; final ServiceContext serviceContext = new LazyServiceContext(() -> - RestServiceContextFactory.create(ksqlConfig, Optional.empty())); + RestServiceContextFactory.create(ksqlConfig, Optional.empty(), + schemaRegistryClientFactory)); return buildApplication( "", @@ -469,7 +476,8 @@ static KsqlRestApplication buildApplication( versionCheckerFactory, Integer.MAX_VALUE, serviceContext, - KsqlSecurityContextBinder::new); + (config, securityExtension) -> + new KsqlSecurityContextBinder(config, securityExtension, schemaRegistryClientFactory)); } static KsqlRestApplication buildApplication( @@ -478,8 +486,7 @@ static KsqlRestApplication buildApplication( final Function, VersionCheckerAgent> versionCheckerFactory, final int maxStatementRetries, final ServiceContext serviceContext, - final BiFunction serviceContextBinderFactory - ) { + final BiFunction serviceContextBinderFactory) { final String ksqlInstallDir = restConfig.getString(KsqlRestConfig.INSTALL_DIR_CONFIG); final KsqlConfig ksqlConfig = new KsqlConfig(restConfig.getKsqlConfigProperties()); diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinder.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinder.java index 1692b14fe1f5..e5c3f6d0d164 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinder.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinder.java @@ -15,9 +15,11 @@ package io.confluent.ksql.rest.server.context; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; import 
io.confluent.ksql.util.KsqlConfig; +import java.util.function.Supplier; import org.glassfish.hk2.utilities.binding.AbstractBinder; import org.glassfish.jersey.process.internal.RequestScoped; @@ -31,9 +33,11 @@ public class KsqlSecurityContextBinder extends AbstractBinder { public KsqlSecurityContextBinder( final KsqlConfig ksqlConfig, - final KsqlSecurityExtension securityExtension + final KsqlSecurityExtension securityExtension, + final Supplier schemaRegistryClientFactory ) { - KsqlSecurityContextBinderFactory.configure(ksqlConfig, securityExtension); + KsqlSecurityContextBinderFactory.configure(ksqlConfig, securityExtension, + schemaRegistryClientFactory); } @Override diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactory.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactory.java index 79c30795b607..04a98a137fef 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactory.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactory.java @@ -18,6 +18,7 @@ import static java.util.Objects.requireNonNull; import com.google.common.annotations.VisibleForTesting; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.rest.server.services.RestServiceContextFactory; import io.confluent.ksql.rest.server.services.RestServiceContextFactory.DefaultServiceContextFactory; import io.confluent.ksql.rest.server.services.RestServiceContextFactory.UserServiceContextFactory; @@ -26,6 +27,7 @@ import io.confluent.ksql.util.KsqlConfig; import java.security.Principal; import java.util.Optional; +import java.util.function.Supplier; import javax.inject.Inject; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.core.HttpHeaders; @@ -39,14 +41,18 @@ public class KsqlSecurityContextBinderFactory implements Factory { private static KsqlConfig ksqlConfig; private static KsqlSecurityExtension securityExtension; + private static Supplier schemaRegistryClientFactory; public static void configure( final KsqlConfig ksqlConfig, - final KsqlSecurityExtension securityExtension + final KsqlSecurityExtension securityExtension, + final Supplier schemaRegistryClientFactory ) { KsqlSecurityContextBinderFactory.ksqlConfig = requireNonNull(ksqlConfig, "ksqlConfig"); KsqlSecurityContextBinderFactory.securityExtension = requireNonNull(securityExtension, "securityExtension"); + KsqlSecurityContextBinderFactory.schemaRegistryClientFactory + = requireNonNull(schemaRegistryClientFactory, "schemaRegistryClientFactory"); } private final SecurityContext securityContext; @@ -91,7 +97,7 @@ public KsqlSecurityContext provide() { if (!securityExtension.getUserContextProvider().isPresent()) { return new KsqlSecurityContext( Optional.ofNullable(principal), - defaultServiceContextFactory.create(ksqlConfig, authHeader) + defaultServiceContextFactory.create(ksqlConfig, authHeader, schemaRegistryClientFactory) ); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java index bfd46f281f80..04a088e1c5fc 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java @@ -18,6 +18,7 @@ 
import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Iterables; import com.google.common.util.concurrent.ListeningScheduledExecutorService; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.parser.tree.PrintTopic; @@ -53,6 +54,7 @@ import java.util.Objects; import java.util.Optional; import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; import javax.websocket.CloseReason; import javax.websocket.CloseReason.CloseCodes; import javax.websocket.EndpointConfig; @@ -97,6 +99,7 @@ public class WSQueryEndpoint { private final DefaultServiceContextFactory defaultServiceContextFactory; private final ServerState serverState; private final Errors errorHandler; + private final Supplier schemaRegistryClientFactory; private WebSocketSubscriber subscriber; private KsqlSecurityContext securityContext; @@ -115,7 +118,8 @@ public WSQueryEndpoint( final Optional authorizationValidator, final Errors errorHandler, final KsqlSecurityExtension securityExtension, - final ServerState serverState + final ServerState serverState, + final Supplier schemaRegistryClientFactory ) { this(ksqlConfig, mapper, @@ -133,7 +137,8 @@ public WSQueryEndpoint( securityExtension, RestServiceContextFactory::create, RestServiceContextFactory::create, - serverState); + serverState, + schemaRegistryClientFactory); } // CHECKSTYLE_RULES.OFF: ParameterNumberCheck @@ -155,7 +160,8 @@ public WSQueryEndpoint( final KsqlSecurityExtension securityExtension, final UserServiceContextFactory serviceContextFactory, final DefaultServiceContextFactory defaultServiceContextFactory, - final ServerState serverState + final ServerState serverState, + final Supplier schemaRegistryClientFactory ) { this.ksqlConfig = Objects.requireNonNull(ksqlConfig, "ksqlConfig"); this.mapper = Objects.requireNonNull(mapper, "mapper"); @@ -179,7 +185,9 @@ public WSQueryEndpoint( this.defaultServiceContextFactory = Objects.requireNonNull(defaultServiceContextFactory, "defaultServiceContextFactory"); this.serverState = Objects.requireNonNull(serverState, "serverState"); - this.errorHandler = Objects.requireNonNull(errorHandler, "errorHandler");; + this.errorHandler = Objects.requireNonNull(errorHandler, "errorHandler"); + this.schemaRegistryClientFactory = + Objects.requireNonNull(schemaRegistryClientFactory, "schemaRegistryClientFactory"); } @SuppressWarnings("unused") @@ -288,7 +296,8 @@ private KsqlSecurityContext createSecurityContext(final Principal principal) { final ServiceContext serviceContext; if (!securityExtension.getUserContextProvider().isPresent()) { - serviceContext = defaultServiceContextFactory.create(ksqlConfig, Optional.empty()); + serviceContext = defaultServiceContextFactory.create(ksqlConfig, Optional.empty(), + schemaRegistryClientFactory); } else { // Creates a ServiceContext using the user's credentials, so the WS query topics are // accessed with the user permission context (defaults to KSQL service context) diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/RestServiceContextFactory.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/RestServiceContextFactory.java index 2a9d5b89c216..fa108450ac00 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/RestServiceContextFactory.java +++ 
b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/RestServiceContextFactory.java @@ -16,12 +16,10 @@ package io.confluent.ksql.rest.server.services; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; -import io.confluent.ksql.schema.registry.KsqlSchemaRegistryClientFactory; import io.confluent.ksql.services.DefaultConnectClient; import io.confluent.ksql.services.ServiceContext; import io.confluent.ksql.services.ServiceContextFactory; import io.confluent.ksql.util.KsqlConfig; -import java.util.Collections; import java.util.Optional; import java.util.function.Supplier; import org.apache.kafka.streams.KafkaClientSupplier; @@ -36,7 +34,8 @@ public interface DefaultServiceContextFactory { ServiceContext create( KsqlConfig config, - Optional authHeader + Optional authHeader, + Supplier srClientFactory ); } @@ -52,13 +51,14 @@ ServiceContext create( public static ServiceContext create( final KsqlConfig ksqlConfig, - final Optional authHeader + final Optional authHeader, + final Supplier schemaRegistryClientFactory ) { return create( ksqlConfig, authHeader, new DefaultKafkaClientSupplier(), - new KsqlSchemaRegistryClientFactory(ksqlConfig, Collections.emptyMap())::get + schemaRegistryClientFactory ); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java index 3e6bb79664f9..889711990e59 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java @@ -31,6 +31,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.logging.processing.ProcessingLogConfig; import io.confluent.ksql.logging.processing.ProcessingLogContext; @@ -62,6 +63,7 @@ import java.util.Optional; import java.util.Queue; import java.util.function.Consumer; +import java.util.function.Supplier; import javax.ws.rs.core.Configurable; import org.apache.kafka.streams.StreamsConfig; import org.junit.Before; @@ -125,6 +127,10 @@ public class KsqlRestApplicationTest { @Mock private Consumer rocksDBConfigSetterHandler; + @Mock + private SchemaRegistryClient schemaRegistryClient; + + private Supplier schemaRegistryClientFactory; private String logCreateStatement; private KsqlRestApplication app; private KsqlRestConfig restConfig; @@ -136,6 +142,7 @@ public class KsqlRestApplicationTest { @SuppressWarnings("unchecked") @Before public void setUp() { + schemaRegistryClientFactory = () -> schemaRegistryClient; when(processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) .thenReturn(true); when(processingLogConfig.getString(ProcessingLogConfig.STREAM_NAME)) @@ -417,7 +424,8 @@ private void givenAppWithRestConfig(final Map restConfigMap) { streamedQueryResource, ksqlResource, versionCheckerAgent, - KsqlSecurityContextBinder::new, + (config, securityExtension) -> + new KsqlSecurityContextBinder(config, securityExtension, schemaRegistryClientFactory), securityExtension, serverState, processingLogContext, diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java index d42dad375290..7c2dd5cbafbd 100644 --- 
a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java @@ -23,6 +23,7 @@ import com.fasterxml.jackson.datatype.jdk8.Jdk8Module; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.KsqlExecutionContext; import io.confluent.ksql.json.JsonMapper; import io.confluent.ksql.query.QueryId; @@ -37,7 +38,9 @@ import io.confluent.ksql.rest.entity.StreamsList; import io.confluent.ksql.rest.entity.TablesList; import io.confluent.ksql.rest.server.context.KsqlSecurityContextBinder; +import io.confluent.ksql.rest.server.services.RestServiceContextFactory; import io.confluent.ksql.rest.util.KsqlInternalTopicUtils; +import io.confluent.ksql.schema.registry.KsqlSchemaRegistryClientFactory; import io.confluent.ksql.security.KsqlSecurityContext; import io.confluent.ksql.security.KsqlSecurityExtension; import io.confluent.ksql.services.DisabledKsqlClient; @@ -432,8 +435,7 @@ public static final class Builder { private final Map additionalProps = new HashMap<>(); private Supplier serviceContext; - private BiFunction securityContextBinder - = KsqlSecurityContextBinder::new; + private BiFunction securityContextBinder; private Optional credentials = Optional.empty(); @@ -441,6 +443,9 @@ private Builder(final Supplier bootstrapServers) { this.bootstrapServers = requireNonNull(bootstrapServers, "bootstrapServers"); this.serviceContext = () -> defaultServiceContext(bootstrapServers, buildBaseConfig(additionalProps)); + this.securityContextBinder = (config, securityExtension) -> + new KsqlSecurityContextBinder(config, securityExtension, + new KsqlSchemaRegistryClientFactory(config, Collections.emptyMap())::get); } @SuppressWarnings("unused") // Part of public API diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactoryTest.java index 4c4c25dbd218..c6814cc54d97 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/context/KsqlSecurityContextBinderFactoryTest.java @@ -22,6 +22,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.rest.server.services.RestServiceContextFactory.DefaultServiceContextFactory; import io.confluent.ksql.rest.server.services.RestServiceContextFactory.UserServiceContextFactory; import io.confluent.ksql.security.KsqlSecurityContext; @@ -64,10 +65,13 @@ public class KsqlSecurityContextBinderFactoryTest { private ServiceContext userServiceContext; @Mock private HttpServletRequest request; + @Mock + private SchemaRegistryClient schemaRegistryClient; @Before public void setUp() { - KsqlSecurityContextBinderFactory.configure(ksqlConfig, securityExtension); + KsqlSecurityContextBinderFactory.configure(ksqlConfig, securityExtension, + () -> schemaRegistryClient); securityContextBinderFactory = new KsqlSecurityContextBinderFactory( securityContext, request, @@ -76,7 +80,8 @@ public void setUp() { ); when(securityContext.getUserPrincipal()).thenReturn(user1); - when(defaultServiceContextProvider.create(any(), 
any())).thenReturn(defaultServiceContext);
+    when(defaultServiceContextProvider.create(any(), any(), any()))
+        .thenReturn(defaultServiceContext);
     when(userServiceContextFactory.create(any(), any(), any(), any()))
         .thenReturn(userServiceContext);
   }
@@ -91,7 +96,6 @@ public void shouldCreateDefaultServiceContextIfUserContextProviderIsNotEnabled()
     final KsqlSecurityContext ksqlSecurityContext = securityContextBinderFactory.provide();
 
     // Then:
-    verify(defaultServiceContextProvider).create(ksqlConfig, Optional.empty());
     assertThat(ksqlSecurityContext.getUserPrincipal(), is(Optional.empty()));
     assertThat(ksqlSecurityContext.getServiceContext(), is(defaultServiceContext));
   }
@@ -120,7 +124,7 @@ public void shouldPassAuthHeaderToDefaultFactory() {
     securityContextBinderFactory.provide();
 
     // Then:
-    verify(defaultServiceContextProvider).create(any(), eq(Optional.of("some-auth")));
+    verify(defaultServiceContextProvider).create(any(), eq(Optional.of("some-auth")), any());
   }
 
   @Test
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java
index d479bb094260..641572587a3b 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java
@@ -59,7 +59,6 @@
 import io.confluent.ksql.rest.server.state.ServerState;
 import io.confluent.ksql.security.KsqlAuthorizationProvider;
 import io.confluent.ksql.security.KsqlAuthorizationValidator;
-import io.confluent.ksql.security.KsqlSecurityContext;
 import io.confluent.ksql.security.KsqlSecurityExtension;
 import io.confluent.ksql.security.KsqlUserContextProvider;
 import io.confluent.ksql.services.ConfiguredKafkaClientSupplier;
@@ -185,7 +184,6 @@ public void setUp() {
     when(securityExtension.getAuthorizationProvider())
         .thenReturn(Optional.of(authorizationProvider));
     when(serviceContextFactory.create(any(), any(), any(), any())).thenReturn(serviceContext);
-    when(defaultServiceContextProvider.create(any(), any())).thenReturn(serviceContext);
     when(serviceContext.getTopicClient()).thenReturn(topicClient);
     when(serverState.checkReady()).thenReturn(Optional.empty());
     when(ksqlEngine.getMetaStore()).thenReturn(metaStore);
@@ -208,7 +206,8 @@ public void setUp() {
         securityExtension,
         serviceContextFactory,
         defaultServiceContextProvider,
-        serverState
+        serverState,
+        schemaRegistryClientSupplier
     );
   }
 
@@ -490,7 +489,8 @@ public void shouldCreateDefaultServiceContextIfUserContextProviderIsNotEnabled()
     wsQueryEndpoint.onOpen(session, null);
 
     // Then:
-    verify(defaultServiceContextProvider).create(ksqlConfig, Optional.empty());
+    verify(defaultServiceContextProvider).create(ksqlConfig, Optional.empty(),
+        schemaRegistryClientSupplier);
     verifyZeroInteractions(userContextProvider);
   }
 
From 5ee1e9ee32b3db79bce34d13030009ab0dff9b7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20Pe=C3=B1a?=
Date: Thu, 9 Jan 2020 19:47:17 -0600
Subject: [PATCH 087/123] feat: enable Kafka ACL authorization checks for Pull Queries (#4187)

---
 .../java/io/confluent/ksql/cli/CliTest.java   |  1 -
 .../ksql/config/ImmutableProperties.java      |  1 -
 .../io/confluent/ksql/util/KsqlConfig.java    | 13 ------
 .../test/rest/RestQueryTranslationTest.java   |  1 -
 .../streaming/StreamedQueryResource.java      | 21 ++-------
 .../resources/streaming/WSQueryEndpoint.java  | 22 ++-------
.../integration/PullQueryFunctionalTest.java | 2 - .../ksql/rest/integration/RestApiTest.java | 1 - .../streaming/StreamedQueryResourceTest.java | 46 +++++-------------- .../streaming/WSQueryEndpointTest.java | 15 ++++-- 10 files changed, 32 insertions(+), 91 deletions(-) diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java index 6e1c5b01fcdd..c506ebea33f9 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/CliTest.java @@ -128,7 +128,6 @@ public class CliTest { .builder(TEST_HARNESS::kafkaBootstrapServers) .withProperty(KsqlConfig.SINK_WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_MS_PROPERTY, KsqlConstants.defaultSinkWindowChangeLogAdditionalRetention + 1) - .withProperty(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, true) .build(); @ClassRule diff --git a/ksql-common/src/main/java/io/confluent/ksql/config/ImmutableProperties.java b/ksql-common/src/main/java/io/confluent/ksql/config/ImmutableProperties.java index 9d0eaf931b11..77c3f5409523 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/config/ImmutableProperties.java +++ b/ksql-common/src/main/java/io/confluent/ksql/config/ImmutableProperties.java @@ -30,7 +30,6 @@ public final class ImmutableProperties { .add(KsqlConfig.KSQL_EXT_DIR) .add(KsqlConfig.KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_CONFIG) .add(KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG) - .add(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG) .addAll(KsqlConfig.SSL_CONFIG_NAMES) .build(); diff --git a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java index 2a3bd7709548..efef01d3252a 100644 --- a/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java +++ b/ksql-common/src/main/java/io/confluent/ksql/util/KsqlConfig.java @@ -159,13 +159,6 @@ public class KsqlConfig extends AbstractConfig { + "\"off\" disables the validator. If set to \"auto\", KSQL will attempt to discover " + "whether the Kafka cluster supports the required API, and enables the validator if " + "it does."; - public static final String KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG = - "ksql.query.pull.skip.access.validator"; - public static final boolean KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_DEFAULT = false; - public static final String KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_DOC = "If \"true\", KSQL will " - + " NOT enforce access validation checks for pull queries, which could expose Kafka topics" - + " which are secured with ACLs. Please enable only after careful consideration." 
- + " If \"false\", KSQL pull queries will fail against a secure Kafka cluster"; public static final String KSQL_PULL_QUERIES_ENABLE_CONFIG = "ksql.pull.queries.enable"; public static final String KSQL_QUERY_PULL_ENABLE_DOC = @@ -503,12 +496,6 @@ private static ConfigDef buildConfigDef(final ConfigGeneration generation) { KSQL_QUERY_PULL_STREAMSTORE_REBALANCING_TIMEOUT_MS_DEFAULT, Importance.LOW, KSQL_QUERY_PULL_STREAMSTORE_REBALANCING_TIMEOUT_MS_DOC - ).define( - KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, - Type.BOOLEAN, - KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_DEFAULT, - Importance.LOW, - KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_DOC ).define( KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG, Type.STRING, diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestQueryTranslationTest.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestQueryTranslationTest.java index e29bbe6968c7..d32064dfe353 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestQueryTranslationTest.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestQueryTranslationTest.java @@ -69,7 +69,6 @@ public class RestQueryTranslationTest { private static final TestKsqlRestApp REST_APP = TestKsqlRestApp .builder(TEST_HARNESS::kafkaBootstrapServers) .withProperty(KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1) - .withProperty(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, true) .withStaticServiceContext(TEST_HARNESS::getServiceContext) .build(); diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java index 95923d549bdf..6eecb9f2dafd 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResource.java @@ -51,7 +51,6 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.function.Consumer; import java.util.stream.Collectors; import javax.ws.rs.Consumes; import javax.ws.rs.POST; @@ -181,27 +180,17 @@ private Response handleStatement( final PreparedStatement statement ) { try { - final Consumer authValidationConsumer = - ksqlAuthorizationValidator -> ksqlAuthorizationValidator.checkAuthorization( + authorizationValidator.ifPresent(validator -> + validator.checkAuthorization( securityContext, ksqlEngine.getMetaStore(), - statement.getStatement() - ); + statement.getStatement()) + ); if (statement.getStatement() instanceof Query) { final PreparedStatement queryStmt = (PreparedStatement) statement; if (queryStmt.getStatement().isPullQuery()) { - final boolean skipAccessValidation = ksqlConfig.getBoolean( - KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG); - if (authorizationValidator.isPresent() && !skipAccessValidation) { - return Errors.badRequest("Pull queries are not currently supported when " - + "access validation against Kafka is configured. 
If you really want to " - + "bypass this limitation please set " - + KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG + "=true " - + KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_DOC); - } - return handlePullQuery( securityContext.getServiceContext(), queryStmt, @@ -209,7 +198,6 @@ private Response handleStatement( ); } - authorizationValidator.ifPresent(authValidationConsumer); return handlePushQuery( securityContext.getServiceContext(), queryStmt, @@ -218,7 +206,6 @@ private Response handleStatement( } if (statement.getStatement() instanceof PrintTopic) { - authorizationValidator.ifPresent(authValidationConsumer); return handlePrintTopic( securityContext.getServiceContext(), request.getStreamsProperties(), diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java index 04a088e1c5fc..0b380f96abba 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpoint.java @@ -370,23 +370,11 @@ private PreparedStatement parseStatement(final KsqlRequest request) { } private void validateKafkaAuthorization(final Statement statement) { - if (statement instanceof Query && ((Query) statement).isPullQuery()) { - final boolean skipAccessValidation = ksqlConfig.getBoolean( - KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG); - if (authorizationValidator.isPresent() && !skipAccessValidation) { - throw new KsqlException("Pull queries are not currently supported when " - + "access validation against Kafka is configured. If you really want to " - + "bypass this limitation please set " - + KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG + "=true " - + KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_DOC); - } - } else { - authorizationValidator.ifPresent(validator -> validator.checkAuthorization( - securityContext, - ksqlEngine.getMetaStore(), - statement) - ); - } + authorizationValidator.ifPresent(validator -> validator.checkAuthorization( + securityContext, + ksqlEngine.getMetaStore(), + statement) + ); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/PullQueryFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/PullQueryFunctionalTest.java index d186f44ac913..56e69f9b19f4 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/PullQueryFunctionalTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/PullQueryFunctionalTest.java @@ -111,7 +111,6 @@ public class PullQueryFunctionalTest { .withBasicCredentials(USER_WITH_ACCESS, USER_WITH_ACCESS_PWD) .withProperty(KSQL_STREAMS_PREFIX + StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1) .withProperty(KSQL_STREAMS_PREFIX + StreamsConfig.STATE_DIR_CONFIG, getNewStateDir()) - .withProperty(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, true) .withProperty(RestConfig.AUTHENTICATION_METHOD_CONFIG, RestConfig.AUTHENTICATION_METHOD_BASIC) .withProperty(RestConfig.AUTHENTICATION_REALM_CONFIG, PROPS_JAAS_REALM) .withProperty(RestConfig.AUTHENTICATION_ROLES_CONFIG, KSQL_CLUSTER_ID) @@ -123,7 +122,6 @@ public class PullQueryFunctionalTest { .withBasicCredentials(USER_WITH_ACCESS, USER_WITH_ACCESS_PWD) .withProperty(KSQL_STREAMS_PREFIX + StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1) .withProperty(KSQL_STREAMS_PREFIX + 
StreamsConfig.STATE_DIR_CONFIG, getNewStateDir()) - .withProperty(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, true) .withProperty(RestConfig.AUTHENTICATION_METHOD_CONFIG, RestConfig.AUTHENTICATION_METHOD_BASIC) .withProperty(RestConfig.AUTHENTICATION_REALM_CONFIG, PROPS_JAAS_REALM) .withProperty(RestConfig.AUTHENTICATION_ROLES_CONFIG, KSQL_CLUSTER_ID) diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestApiTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestApiTest.java index b44839cca12e..56429741f56f 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestApiTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/RestApiTest.java @@ -161,7 +161,6 @@ public class RestApiTest { .withProperty("security.protocol", "SASL_SSL") .withProperty("sasl.mechanism", "PLAIN") .withProperty("sasl.jaas.config", SecureKafkaHelper.buildJaasConfig(NORMAL_USER)) - .withProperty(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, true) .withProperties(ClientTrustStore.trustStoreProps()) .build(); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index c6a8db34ed28..7f41dd116f26 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -23,7 +23,6 @@ import static javax.ws.rs.core.Response.Status.FORBIDDEN; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertEquals; @@ -286,8 +285,7 @@ public void shouldReturnServiceUnavailableIfTimeoutWaitingForCommandSequenceNumb public void shouldNotCreateExternalClientsForPullQuery() { // Given testResource.configure(new KsqlConfig(ImmutableMap.of( - StreamsConfig.APPLICATION_SERVER_CONFIG, "something:1", - KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, true + StreamsConfig.APPLICATION_SERVER_CONFIG, "something:1" ))); // When: @@ -304,30 +302,13 @@ public void shouldNotCreateExternalClientsForPullQuery() { } @Test - public void shouldThrowExceptionForPullQueryIfValidating() { - // When: - final Response response = testResource.streamQuery( - securityContext, - new KsqlRequest(PULL_QUERY_STRING, Collections.emptyMap(), null) - ); - - // Then: - assertThat(response.getStatus(), is(Errors.badRequest("").getStatus())); - assertThat(response.getEntity(), is(instanceOf(KsqlErrorMessage.class))); - final KsqlErrorMessage expectedEntity = (KsqlErrorMessage) response.getEntity(); - assertThat( - expectedEntity.getMessage(), - containsString(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG) - ); - } - - @Test - public void shouldPassCheckForPullQueryIfNotValidating() { - // Given - testResource.configure(new KsqlConfig(ImmutableMap.of( - StreamsConfig.APPLICATION_SERVER_CONFIG, "something:1", - KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG, true - ))); + public void shouldReturnForbiddenKafkaAccessForPullQueryAuthorizationDenied() { + // Given: + when(mockStatementParser.parseSingleStatement(PULL_QUERY_STRING)) + .thenReturn(query); + doThrow( + new 
KsqlTopicAuthorizationException(AclOperation.READ, Collections.singleton(TOPIC_NAME)))
+        .when(authorizationValidator).checkAuthorization(any(), any(), any());
 
     // When:
     final Response response = testResource.streamQuery(
@@ -335,13 +316,10 @@ public void shouldPassCheckForPullQueryIfNotValidating() {
         new KsqlRequest(PULL_QUERY_STRING, Collections.emptyMap(), null)
     );
 
-    // Then:
-    assertThat(response.getStatus(), is(Errors.badRequest("").getStatus()));
-    final KsqlErrorMessage expectedEntity = (KsqlErrorMessage) response.getEntity();
-    assertThat(
-        expectedEntity.getMessage(),
-        not(containsString(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG))
-    );
+    final KsqlErrorMessage responseEntity = (KsqlErrorMessage) response.getEntity();
+    final KsqlErrorMessage expectedEntity = (KsqlErrorMessage) AUTHORIZATION_ERROR_RESPONSE.getEntity();
+    assertEquals(response.getStatus(), AUTHORIZATION_ERROR_RESPONSE.getStatus());
+    assertEquals(responseEntity.getMessage(), expectedEntity.getMessage());
   }
 
   @Test
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java
index 641572587a3b..3f18d2d328ce 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/WSQueryEndpointTest.java
@@ -422,8 +422,6 @@ public void shouldReturnErrorMessageWhenTopicAuthorizationException() throws Exc
   @Test
   public void shouldHandlePullQuery() {
     // Given:
-    when(ksqlConfig.getBoolean(KsqlConfig.KSQL_PULL_QUERIES_SKIP_ACCESS_VALIDATOR_CONFIG))
-        .thenReturn(true);
     givenQueryIs(QueryType.PULL);
     givenRequestIs(query);
 
@@ -445,17 +443,26 @@ public void shouldHandlePullQuery() {
   }
 
   @Test
-  public void shouldFailPullQueryIfValidating() throws Exception {
+  public void shouldFailPullQueryIfTopicAuthorizationIsDenied() throws Exception {
     // Given:
+    final String errorMessage = "authorization error";
     givenQueryIs(QueryType.PULL);
     givenRequestIs(query);
+    when(errorsHandler.kafkaAuthorizationErrorMessage(any(TopicAuthorizationException.class)))
+        .thenReturn(errorMessage);
+    doThrow(new KsqlTopicAuthorizationException(AclOperation.READ, Collections.singleton("topic")))
+        .when(authorizationValidator).checkAuthorization(
+            argThat(securityContext ->
+                securityContext.getServiceContext() == serviceContext),
+            eq(metaStore),
+            eq(query));
 
     // When:
     wsQueryEndpoint.onOpen(session, null);
 
     // Then:
     verifyClosedContainingReason(
-        "Pull queries are not currently supported",
+        errorMessage,
         CloseCodes.CANNOT_ACCEPT
     );
   }
 
From 1281ab2cfed24ed7c4dc6be8db5a1d64698f479c Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Fri, 10 Jan 2020 13:00:29 +0000
Subject: [PATCH 088/123] fix: better error message on self-join (#4248)

Fixes: https://github.com/confluentinc/ksql/issues/4241

Self-joins are not yet supported. Previously they resulted in a confusing
error message:

> Invalid topology: Topic has already been registered by another source.

They now result in:

> Can not join 'something' to 'something': self joins are not yet supported.
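For example, the new QTT test case added below exercises the check (the
statements are quoted verbatim from that test):

```
CREATE STREAM INPUT (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');
CREATE STREAM OUTPUT as SELECT * FROM INPUT s1 JOIN INPUT s2 WITHIN 1 HOUR ON s1.id = s2.id;
```

Aliasing the two sides (s1, s2) does not avoid the check: the analyzer compares
the names of the underlying sources, not the aliases. The second statement now
fails fast at analysis time with the clearer message above, rather than
surfacing the topology error when the query is started.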
--- .../io/confluent/ksql/analyzer/Analyzer.java | 8 ++++++ .../ksql/analyzer/AnalyzerFunctionalTest.java | 28 ++++++++++++++++--- .../query-validation-tests/joins.json | 11 ++++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java index cb23dec9a5e2..7f0c174529ce 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java @@ -341,6 +341,14 @@ protected AstNode visitJoin(final Join node, final Void context) { throw new KsqlException("Only equality join criteria is supported."); } + if (left.getDataSource().getName().equals(right.getDataSource().getName())) { + throw new KsqlException( + "Can not join '" + left.getDataSource().getName().toString(FormatOptions.noEscape()) + + "' to '" + right.getDataSource().getName().toString(FormatOptions.noEscape()) + + "': self joins are not yet supported." + ); + } + final ColumnRef leftJoinField = getJoinFieldName( comparisonExpression, left.getAlias(), diff --git a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java index e9c5ad74986b..4c8587a61d2c 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java @@ -106,7 +106,7 @@ public class AnalyzerFunctionalTest { public final ExpectedException expectedException = ExpectedException.none(); @Mock - private SerdeOptionsSupplier serdeOptiponsSupplier; + private SerdeOptionsSupplier serdeOptionsSupplier; @Mock private Sink sink; @@ -127,7 +127,7 @@ public void init() { jsonMetaStore, "", DEFAULT_SERDE_OPTIONS, - serdeOptiponsSupplier + serdeOptionsSupplier ); when(sink.getName()).thenReturn(SourceName.of("TEST0")); @@ -388,7 +388,7 @@ public void shouldFailIfExplicitNamespaceIsProvidedButEmpty() { public void shouldGetSerdeOptions() { // Given: final Set serdeOptions = ImmutableSet.of(SerdeOption.UNWRAP_SINGLE_VALUES); - when(serdeOptiponsSupplier.build(any(), any(), any(), any())).thenReturn(serdeOptions); + when(serdeOptionsSupplier.build(any(), any(), any(), any())).thenReturn(serdeOptions); givenSinkValueFormat(Format.AVRO); givenWrapSingleValues(true); @@ -397,7 +397,7 @@ public void shouldGetSerdeOptions() { final Analysis result = analyzer.analyze(query, Optional.of(sink)); // Then: - verify(serdeOptiponsSupplier).build( + verify(serdeOptionsSupplier).build( ImmutableList.of("COL0", "COL1").stream().map(ColumnName::of).collect(Collectors.toList()), Format.AVRO, Optional.of(true), @@ -484,6 +484,26 @@ public void shouldNotIncludeMetaColumnsForSelectStartOnStaticQueries() { ))); } + @Test + public void shouldThrowOnSelfJoin() { + // Given: + final CreateStreamAsSelect createStreamAsSelect = parseSingle( + "CREATE STREAM FOO AS " + + "SELECT * FROM test1 t1 JOIN test1 t2 ON t1.rowkey = t2.rowkey;" + ); + + final Query query = createStreamAsSelect.getQuery(); + + final Analyzer analyzer = new Analyzer(jsonMetaStore, "", DEFAULT_SERDE_OPTIONS); + + // Expect: + expectedException.expect(KsqlException.class); + expectedException.expectMessage("Can not join 'TEST1' to 'TEST1': self joins are not yet supported."); + + // When: + analyzer.analyze(query, Optional.of(createStreamAsSelect.getSink())); + } + @SuppressWarnings("unchecked") 
  private T parseSingle(final String simpleQuery) {
    return (T) Iterables.getOnlyElement(parse(simpleQuery, jsonMetaStore));
diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json
index 7f10d5d5a06d..125764a25f8f 100644
--- a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json
+++ b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json
@@ -1922,6 +1922,17 @@
         {"name": "OUTPUT", "type": "stream", "schema": "ROWKEY STRING KEY, L_ROWKEY STRING, L1 INT, R1 INT"}
       ]
     }
+  },
+  {
+    "name": "self join",
+    "statements": [
+      "CREATE STREAM INPUT (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');",
+      "CREATE STREAM OUTPUT as SELECT * FROM INPUT s1 JOIN INPUT s2 WITHIN 1 HOUR ON s1.id = s2.id;"
+    ],
+    "expectedException": {
+      "type": "io.confluent.ksql.util.KsqlStatementException",
+      "message": "Can not join 'INPUT' to 'INPUT': self joins are not yet supported."
+    }
   }
 ]
}

From 5cc718b91c30473755429835686d4b57b982ff9f Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Fri, 10 Jan 2020 13:00:51 +0000
Subject: [PATCH 089/123] fix: include path of field that causes JSON deserialization error (#4249)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes: https://github.com/confluentinc/ksql/issues/4238

For complex types, the current deserialization error can be a bit lacking, as
it does not include details of which field causes the error.

This commit enhances the JSON deserializer to include the JSON path where the
error occurred in the error message.

The design keeps the happy path quick by using the call stack to maintain the
stack of fields, rather than tracking them in some separate stack in memory.
Figured this was worthwhile for such a performance-sensitive area of the code.
There's no detectable perf hit from this change.

I've also increased the times on the `SerdeBenchmark`, as from my own
experience 10 seconds is not enough time for the JVM to optimise the byte
code.

Perf test on JSON deserialization before changes:

```
# JMH version: 1.21
# VM version: JDK 1.8.0_162, Java HotSpot(TM) 64-Bit Server VM, 25.162-b12
# VM invoker: /Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home/jre/bin/java
# VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=58499:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8
# Warmup: 6 iterations, 30 s each
# Measurement: 3 iterations, 60 s each
# Timeout: 10 min per iteration
# Threads: 1 thread, will synchronize iterations
# Benchmark mode: Average time, time/op
# Benchmark: io.confluent.ksql.benchmark.SerdeBenchmark.deserialize
# Parameters: (schemaName = metrics, serializationFormat = JSON)

# Run progress: 0.00% complete, ETA 00:06:00
# Fork: 1 of 1
# Warmup Iteration   1: 5.188 us/op
# Warmup Iteration   2: 5.124 us/op
# Warmup Iteration   3: 5.193 us/op
# Warmup Iteration   4: 5.529 us/op
# Warmup Iteration   5: 5.378 us/op
# Warmup Iteration   6: 5.266 us/op
Iteration   1: 5.209 us/op
Iteration   2: 5.183 us/op
Iteration   3: 5.274 us/op

Result "io.confluent.ksql.benchmark.SerdeBenchmark.deserialize":
  5.222 ±(99.9%) 0.854 us/op [Average]
  (min, avg, max) = (5.183, 5.222, 5.274), stdev = 0.047
  CI (99.9%): [4.368, 6.075] (assumes normal distribution)

# Run complete. Total time: 00:06:01

REMEMBER: The numbers below are just data.
To gain reusable insights, you need to follow up on why the numbers are the way they are. Use profilers (see -prof, -lprof), design factorial experiments, perform baseline and negative tests that provide experimental control, make sure the benchmarking environment is safe on JVM/OS/HW level, ask for reviews from the domain experts. Do not assume the numbers tell you what you want them to tell. Benchmark (schemaName) (serializationFormat) Mode Cnt Score Error Units SerdeBenchmark.deserialize metrics JSON avgt 3 5.222 ± 0.854 us/op ``` After: ``` # JMH version: 1.21 # VM version: JDK 1.8.0_162, Java HotSpot(TM) 64-Bit Server VM, 25.162-b12 # VM invoker: /Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=60943:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Warmup: 6 iterations, 30 s each # Measurement: 3 iterations, 60 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op # Benchmark: io.confluent.ksql.benchmark.SerdeBenchmark.deserialize # Parameters: (schemaName = metrics, serializationFormat = JSON) # Run progress: 0.00% complete, ETA 00:06:00 # Fork: 1 of 1 # Warmup Iteration 1: 5.284 us/op # Warmup Iteration 2: 5.114 us/op # Warmup Iteration 3: 5.185 us/op # Warmup Iteration 4: 5.105 us/op # Warmup Iteration 5: 5.171 us/op # Warmup Iteration 6: 5.119 us/op Iteration 1: 5.208 us/op Iteration 2: 5.248 us/op Iteration 3: 5.198 us/op Result "io.confluent.ksql.benchmark.SerdeBenchmark.deserialize": 5.218 ±(99.9%) 0.477 us/op [Average] (min, avg, max) = (5.198, 5.218, 5.248), stdev = 0.026 CI (99.9%): [4.740, 5.695] (assumes normal distribution) # Run complete. Total time: 00:06:02 REMEMBER: The numbers below are just data. To gain reusable insights, you need to follow up on why the numbers are the way they are. Use profilers (see -prof, -lprof), design factorial experiments, perform baseline and negative tests that provide experimental control, make sure the benchmarking environment is safe on JVM/OS/HW level, ask for reviews from the domain experts. Do not assume the numbers tell you what you want them to tell. 
Benchmark (schemaName) (serializationFormat) Mode Cnt Score Error Units SerdeBenchmark.deserialize metrics JSON avgt 3 5.218 ± 0.477 us/op ``` --- .../ksql/benchmark/SerdeBenchmark.java | 20 ++- .../src/main/resources/log4j.properties | 52 +++++++ .../ksql/serde/json/KsqlJsonDeserializer.java | 135 ++++++++++-------- .../serde/json/KsqlJsonDeserializerTest.java | 98 +++++++++++-- 4 files changed, 230 insertions(+), 75 deletions(-) create mode 100644 ksql-benchmark/src/main/resources/log4j.properties diff --git a/ksql-benchmark/src/main/java/io/confluent/ksql/benchmark/SerdeBenchmark.java b/ksql-benchmark/src/main/java/io/confluent/ksql/benchmark/SerdeBenchmark.java index 7dabb9ba9a21..3d8b1e7f39d3 100644 --- a/ksql-benchmark/src/main/java/io/confluent/ksql/benchmark/SerdeBenchmark.java +++ b/ksql-benchmark/src/main/java/io/confluent/ksql/benchmark/SerdeBenchmark.java @@ -57,7 +57,7 @@ import org.openjdk.jmh.annotations.Threads; import org.openjdk.jmh.annotations.Warmup; import org.openjdk.jmh.runner.Runner; -import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.CommandLineOptions; import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; @@ -68,8 +68,8 @@ */ @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) -@Warmup(iterations = 3, time = 10) -@Measurement(iterations = 3, time = 10) +@Warmup(iterations = 6, time = 30) +@Measurement(iterations = 3, time = 60) @Threads(4) @Fork(3) public class SerdeBenchmark { @@ -192,20 +192,26 @@ private static Serde getGenericRowSerde( } } + @SuppressWarnings("MethodMayBeStatic") // Tests can not be static @Benchmark public byte[] serialize(final SerdeState serdeState) { return serdeState.serializer.serialize(TOPIC_NAME, serdeState.row); } + @SuppressWarnings("MethodMayBeStatic") // Tests can not be static @Benchmark public GenericRow deserialize(final SerdeState serdeState) { return serdeState.deserializer.deserialize(TOPIC_NAME, serdeState.bytes); } - public static void main(final String[] args) throws RunnerException { - final Options opt = new OptionsBuilder() - .include(SerdeBenchmark.class.getSimpleName()) - .build(); + public static void main(final String[] args) throws Exception { + + final Options opt = args.length != 0 + ? new CommandLineOptions(args) + : new OptionsBuilder() + .include(SerdeBenchmark.class.getSimpleName()) + .shouldFailOnError(true) + .build(); new Runner(opt).run(); } diff --git a/ksql-benchmark/src/main/resources/log4j.properties b/ksql-benchmark/src/main/resources/log4j.properties new file mode 100644 index 000000000000..75bd597e7b9d --- /dev/null +++ b/ksql-benchmark/src/main/resources/log4j.properties @@ -0,0 +1,52 @@ +# +# Copyright 2019 Confluent Inc. +# +# Licensed under the Confluent Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# http://www.confluent.io/confluent-community-license +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. 
+# + +log4j.rootLogger=INFO,stdout + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n + +# Disable INFO logs from Config classes, which log out their config on each creation: +log4j.logger.io.confluent.connect.avro.AvroConverterConfig=WARN +log4j.logger.io.confluent.connect.avro.AvroDataConfig=WARN +log4j.logger.io.confluent.kafka.serializers.KafkaAvroDeserializerConfig=WARN +log4j.logger.io.confluent.kafka.serializers.KafkaAvroSerializerConfig=WARN +log4j.logger.io.confluent.kafka.serializers.KafkaJsonDeserializerConfig=WARN +log4j.logger.io.confluent.kafka.serializers.KafkaJsonSerializerConfig=WARN +log4j.logger.io.confluent.ksql.logging.processing.ProcessingLogConfig=WARN +log4j.logger.io.confluent.ksql.rest.server.KsqlRestConfig=WARN +log4j.logger.io.confluent.ksql.util.KsqlConfig=WARN +log4j.logger.io.confluent.ksql.cli.console.CliConfig=WARN +log4j.logger.kafka.server.KafkaConfig=WARN +log4j.logger.org.apache.kafka.clients.admin.AdminClientConfig=WARN +log4j.logger.org.apache.kafka.clients.consumer.ConsumerConfig=WARN +log4j.logger.org.apache.kafka.clients.producer.ProducerConfig=WARN +log4j.logger.org.apache.kafka.connect.json.JsonConverterConfig=WARN +log4j.logger.org.apache.kafka.streams.StreamsConfig=WARN + +# Disable INFO logging from the UDF loader, which logs every UDF every time it runs: +log4j.logger.io.confluent.ksql.function.UdfLoader=WARN + +# Disable logging of state transitions in KS: +log4j.logger.org.apache.kafka.streams.KafkaStreams=WARN +log4j.logger.org.apache.kafka.streams.processor.internals.StreamThread=WARN +log4j.logger.org.apache.kafka.streams.state.internals.RocksDBTimestampedStore=WARN + +# Disable logging of App info +log4j.logger.org.apache.kafka.common.utils.AppInfoParser=WARN + +# Disable logging from reflections warning for connect classpath scans +log4j.logger.org.reflections=ERROR diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java index 39a62b458c2b..bdebce895ce8 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/json/KsqlJsonDeserializer.java @@ -25,12 +25,10 @@ import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Streams; import io.confluent.ksql.schema.connect.SqlSchemaFormatter; import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.util.DecimalUtil; import io.confluent.ksql.util.KsqlException; -import java.io.IOException; import java.math.BigDecimal; import java.util.ArrayList; import java.util.HashMap; @@ -46,12 +44,12 @@ import org.apache.kafka.connect.data.Field; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.Schema.Type; +import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; // CHECKSTYLE_RULES.OFF: ClassDataAbstractionCoupling -@SuppressWarnings("UnstableApiUsage") public class KsqlJsonDeserializer implements Deserializer { // CHECKSTYLE_RULES.ON: ClassDataAbstractionCoupling @@ -60,6 +58,9 @@ public class KsqlJsonDeserializer implements Deserializer { private static final ObjectMapper MAPPER = new 
ObjectMapper() .enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS); + private static final Schema STRING_ARRAY = SchemaBuilder + .array(Schema.OPTIONAL_STRING_SCHEMA).build(); + + private static final Map> HANDLERS = ImmutableMap .>builder() .put(Type.BOOLEAN, context -> JsonSerdeUtils.toBoolean(context.val)) @@ -74,6 +75,7 @@ public class KsqlJsonDeserializer implements Deserializer { .build(); private final PersistenceSchema physicalSchema; + private String target = "?"; public KsqlJsonDeserializer( final PersistenceSchema physicalSchema @@ -82,55 +84,53 @@ public KsqlJsonDeserializer( } @Override - public void configure(final Map map, final boolean b) { + public void configure(final Map map, final boolean isKey) { + this.target = isKey ? "key" : "value"; } @Override public Object deserialize(final String topic, final byte[] bytes) { try { - final Object value = deserialize(bytes); + final JsonNode value = bytes == null + ? null + : MAPPER.readTree(bytes); + + final Object coerced = enforceFieldType( + "$", + new JsonValueContext(value, physicalSchema.serializedSchema()) + ); + if (LOG.isTraceEnabled()) { - LOG.trace("Deserialized value. topic:{}, row:{}", topic, value); + LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } - return value; + + return coerced; } catch (final Exception e) { throw new SerializationException( - "Error deserializing JSON message from topic: " + topic, e); - } - } - - private Object deserialize(final byte[] bytes) { try { - if (bytes == null) { - return null; - } - - final JsonNode value = MAPPER.readTree(bytes); - return enforceFieldType(this, physicalSchema.serializedSchema(), value); - } catch (final IOException e) { - throw new SerializationException(e); + "Failed to deserialize " + target + " from topic: " + topic, e); } } private static Object enforceFieldType( - final KsqlJsonDeserializer deserializer, - final Schema schema, - final JsonNode columnVal + final String pathPart, + final JsonValueContext context ) { - return enforceFieldType(new JsonValueContext(deserializer, schema, columnVal)); - } - - private static Object enforceFieldType(final JsonValueContext context) { if (context.val == null || context.val instanceof NullNode) { return null; } - final Function handler = HANDLERS.getOrDefault( - context.schema.type(), - type -> { - throw new KsqlException("Type is not supported: " + type); - }); - return handler.apply(context); + try { + final Function handler = HANDLERS.getOrDefault( + context.schema.type(), + type -> { + throw new KsqlException("Type is not supported: " + type); + }); + return handler.apply(context); + } catch (final CoercionException e) { + throw new CoercionException(e.getRawMessage(), pathPart + e.getPath(), e); + } catch (final Exception e) { + throw new CoercionException(e.getMessage(), pathPart, e); + } } private static String processString(final JsonValueContext context) { @@ -142,10 +142,8 @@ private static String processString(final JsonValueContext context) { } } if (context.val instanceof ArrayNode) { - return Streams.stream(context.val.elements()) - .map(val -> processString( - new JsonValueContext(context.deserializer, context.schema, val) - )) + return enforceElementTypeForArray(new JsonValueContext(context.val, STRING_ARRAY)).stream() + .map(Objects::toString) .collect(Collectors.joining(", ", "[", "]")); } return context.val.asText(); @@ -171,10 +169,16 @@ private static List enforceElementTypeForArray(final JsonValueContext context throw invalidConversionException(context.val, context.schema); } + int 
idx = 0; final ArrayNode list = (ArrayNode) context.val; final List array = new ArrayList<>(list.size()); for (final JsonNode item : list) { - array.add(enforceFieldType(context.deserializer, context.schema.valueSchema(), item)); + final Object element = enforceFieldType( + "[" + idx++ + "]", + new JsonValueContext(item, context.schema.valueSchema()) + ); + + array.add(element); } return array; } @@ -188,15 +192,18 @@ private static Map enforceKeyAndValueTypeForMap(final JsonValueC final Map ksqlMap = new HashMap<>(map.size()); for (final Iterator> it = map.fields(); it.hasNext(); ) { final Entry e = it.next(); - ksqlMap.put( - enforceFieldType( - context.deserializer, - Schema.OPTIONAL_STRING_SCHEMA, - new TextNode(e.getKey())) - .toString(), - enforceFieldType( - context.deserializer, context.schema.valueSchema(), e.getValue()) + + final String key = (String) enforceFieldType( + "." + e.getKey() + ".key", + new JsonValueContext(new TextNode(e.getKey()), Schema.OPTIONAL_STRING_SCHEMA) ); + + final Object value = enforceFieldType( + "." + e.getKey() + ".value", + new JsonValueContext(e.getValue(), context.schema.valueSchema()) + ); + + ksqlMap.put(key, value); } return ksqlMap; } @@ -222,9 +229,8 @@ private static Struct enforceFieldTypesForStruct(final JsonValueContext context) } final Object coerced = enforceFieldType( - context.deserializer, - ksqlField.schema(), - fieldValue + "." + ksqlField.name(), + new JsonValueContext(fieldValue, ksqlField.schema()) ); columnStruct.put(ksqlField.name(), coerced); @@ -257,20 +263,37 @@ private static IllegalArgumentException invalidConversionException( ); } - private static class JsonValueContext { + private static final class JsonValueContext { - private final KsqlJsonDeserializer deserializer; private final Schema schema; private final JsonNode val; JsonValueContext( - final KsqlJsonDeserializer deserializer, - final Schema schema, - final JsonNode val + final JsonNode val, + final Schema schema ) { - this.deserializer = Objects.requireNonNull(deserializer); this.schema = Objects.requireNonNull(schema, "schema"); this.val = val; } } + + private static final class CoercionException extends RuntimeException { + + private final String path; + private final String message; + + CoercionException(final String message, final String path, final Throwable cause) { + super(message + ", path: " + path, cause); + this.message = Objects.requireNonNull(message, "message"); + this.path = Objects.requireNonNull(path, "path"); + } + + public String getRawMessage() { + return message; + } + + public String getPath() { + return path; + } + } } \ No newline at end of file diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/json/KsqlJsonDeserializerTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/json/KsqlJsonDeserializerTest.java index adf3ad624d56..3813fb76fd54 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/json/KsqlJsonDeserializerTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/json/KsqlJsonDeserializerTest.java @@ -17,9 +17,11 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.fail; import static org.junit.internal.matchers.ThrowableMessageMatcher.hasMessage; @@ -179,7 +181,7 @@ public void 
shouldThrowIfFieldCanNotBeCoerced() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. sourceType: BooleanNode, requiredType: BIGINT"))); // When: @@ -226,13 +228,17 @@ public void shouldDeserializeNullAsNull() { @Test public void shouldTreatNullAsNull() { // Given: + final HashMap mapValue = new HashMap<>(); + mapValue.put("a", 1.0); + mapValue.put("b", null); + final Map row = new HashMap<>(); row.put("ordertime", null); row.put("@orderid", null); row.put("itemid", null); row.put("orderunits", null); row.put("arrayCol", new Double[]{0.0, null}); - row.put("mapCol", null); + row.put("mapCol", mapValue); final byte[] bytes = serializeJson(row); @@ -246,7 +252,7 @@ public void shouldTreatNullAsNull() { .put(ITEMID, null) .put(ORDERUNITS, null) .put(ARRAYCOL, Arrays.asList(0.0, null)) - .put(MAPCOL, null) + .put(MAPCOL, mapValue) .put(CASE_SENSITIVE_FIELD, null) )); } @@ -303,7 +309,7 @@ public void shouldThrowIfCanNotCoerceToBoolean() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. sourceType: IntNode, requiredType: BOOLEAN"))); // When: @@ -342,7 +348,7 @@ public void shouldThrowIfCanNotCoerceToInt() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. sourceType: BooleanNode, requiredType: INTEGER"))); // When: @@ -382,7 +388,7 @@ public void shouldThrowIfCanNotCoerceToBigInt() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. sourceType: BooleanNode, requiredType: BIGINT"))); // When: @@ -422,7 +428,7 @@ public void shouldThrowIfCanNotCoerceToDouble() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. sourceType: BooleanNode, requiredType: DOUBLE"))); // When: @@ -487,7 +493,7 @@ public void shouldThrowIfCanNotCoerceToBigDecimal() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. sourceType: BooleanNode, requiredType: DECIMAL(20, 19)"))); // When: @@ -523,7 +529,7 @@ public void shouldThrowIfNotAnArray() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. sourceType: BooleanNode, requiredType: ARRAY"))); // When: @@ -544,7 +550,7 @@ public void shouldThrowIfCanNotCoerceArrayElement() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't coerce string to type. targetType: INTEGER"))); // When: @@ -580,7 +586,7 @@ public void shouldThrowIfNotAnMap() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. 
sourceType: BooleanNode, requiredType: MAP"))); // When: @@ -599,7 +605,7 @@ public void shouldThrowIfCanNotCoerceMapValue() { // Then: expectedException.expect(SerializationException.class); - expectedException.expectCause(hasMessage(is( + expectedException.expectCause(hasMessage(startsWith( "Can't convert type. sourceType: BooleanNode, requiredType: INTEGER"))); // When: @@ -686,6 +692,74 @@ public void shouldNotIncludeBadValueInExceptionAsThatWouldBeASecurityIssue() { } } + @Test + public void shouldIncludePathForErrorsInRootNode() { + // Given: + givenDeserializerForSchema(Schema.OPTIONAL_FLOAT64_SCHEMA); + + final byte[] bytes = "true".getBytes(StandardCharsets.UTF_8); + + // Then: + expectedException.expectCause(hasMessage(endsWith(", path: $"))); + + // When: + deserializer.deserialize(SOME_TOPIC, bytes); + } + + @Test + public void shouldIncludePathForErrorsInObjectFieldsValue() { + // Given: + final Map value = new HashMap<>(AN_ORDER); + value.put("ordertime", true); + + final byte[] bytes = serializeJson(value); + + // Then: + expectedException.expect(SerializationException.class); + expectedException.expectCause(hasMessage(endsWith(", path: $.ORDERTIME"))); + + // When: + deserializer.deserialize(SOME_TOPIC, bytes); + } + + @Test + public void shouldIncludePathForErrorsInArrayElements() { + // Given: + givenDeserializerForSchema(SchemaBuilder + .array(Schema.OPTIONAL_INT32_SCHEMA) + .build() + ); + + final List expected = ImmutableList.of(0, "not", "numbers"); + + final byte[] bytes = serializeJson(expected); + + // Then: + expectedException.expect(SerializationException.class); + expectedException.expectCause(hasMessage(endsWith("path: $[1]"))); + + // When: + deserializer.deserialize(SOME_TOPIC, bytes); + } + + @Test + public void shouldIncludePathForErrorsInMapValues() { + // Given: + givenDeserializerForSchema(SchemaBuilder + .map(Schema.OPTIONAL_STRING_SCHEMA, Schema.INT32_SCHEMA) + .build() + ); + + final byte[] bytes = serializeJson(ImmutableMap.of("a", 1, "b", true)); + + // Then: + expectedException.expect(SerializationException.class); + expectedException.expectCause(hasMessage(endsWith("path: $.b.value"))); + + // When: + deserializer.deserialize(SOME_TOPIC, bytes); + } + private void givenDeserializerForSchema(final Schema serializedSchema) { final boolean unwrap = serializedSchema.type() != Type.STRUCT; final Schema ksqlSchema = unwrap From e032ea9aad60a63fec54416d68a7e60d7350ad3a Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Fri, 10 Jan 2020 11:03:38 -0800 Subject: [PATCH 090/123] feat: allow environment variables to configure embedded connect (#4260) --- config/connect.properties.template | 19 ++++++++++ ksql-rest-app/Dockerfile | 2 + ksql-rest-app/src/include/docker/configure | 43 ++++++++++++++++++++++ ksql-rest-app/src/include/docker/run | 12 ++++++ 4 files changed, 76 insertions(+) create mode 100644 config/connect.properties.template create mode 100644 ksql-rest-app/src/include/docker/configure diff --git a/config/connect.properties.template b/config/connect.properties.template new file mode 100644 index 000000000000..d2ef1d22178e --- /dev/null +++ b/config/connect.properties.template @@ -0,0 +1,19 @@ +# +# Copyright 2020 Confluent Inc. +# +# Licensed under the Confluent Community License (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the +# License at +# +# http://www.confluent.io/confluent-community-license +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. +# + +{% set kr_props = env_to_props('KSQL_CONNECT_', '') -%} +{% for name, value in kr_props.iteritems() -%} +{{name}}={{value}} +{% endfor -%} \ No newline at end of file diff --git a/ksql-rest-app/Dockerfile b/ksql-rest-app/Dockerfile index 78a603accf4d..347fab182468 100644 --- a/ksql-rest-app/Dockerfile +++ b/ksql-rest-app/Dockerfile @@ -12,5 +12,7 @@ ADD target/${ARTIFACT_ID}-${PROJECT_VERSION}-package/bin/docker/* /usr/bin/docke ADD target/${ARTIFACT_ID}-${PROJECT_VERSION}-package/etc/* /etc/ksql/ ADD target/${ARTIFACT_ID}-${PROJECT_VERSION}-package/share/doc/* /usr/share/doc/${ARTIFACT_ID}/ +RUN chmod +x /usr/bin/docker/configure RUN chmod +x /usr/bin/docker/run + CMD ["/usr/bin/docker/run"] diff --git a/ksql-rest-app/src/include/docker/configure b/ksql-rest-app/src/include/docker/configure new file mode 100644 index 000000000000..c8eeaaca2900 --- /dev/null +++ b/ksql-rest-app/src/include/docker/configure @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# +# Copyright 2020 Confluent Inc. +# +# Licensed under the Confluent Community License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at +# +# http://www.confluent.io/confluent-community-license +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. + + +dub ensure KSQL_CONNECT_BOOTSTRAP_SERVERS +dub ensure KSQL_CONNECT_GROUP_ID +dub ensure KSQL_CONNECT_CONFIG_STORAGE_TOPIC +dub ensure KSQL_CONNECT_OFFSET_STORAGE_TOPIC +dub ensure KSQL_CONNECT_STATUS_STORAGE_TOPIC +dub ensure KSQL_CONNECT_KEY_CONVERTER +dub ensure KSQL_CONNECT_VALUE_CONVERTER +# This is required to avoid config bugs. You should set this to a value that is +# resolvable by all containers. +dub ensure KSQL_CONNECT_REST_ADVERTISED_HOST_NAME + +# Default to 8083, which matches the mesos-overrides. This is here in case we extend the containers to remove the mesos overrides. +if [ -z "$KSQL_CONNECT_REST_PORT" ]; then + export KSQL_CONNECT_REST_PORT=8083 +fi + +if [[ $KSQL_CONNECT_KEY_CONVERTER == "io.confluent.connect.avro.AvroConverter" ]] +then + dub ensure KSQL_CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL +fi + +if [[ $KSQL_CONNECT_VALUE_CONVERTER == "io.confluent.connect.avro.AvroConverter" ]] +then + dub ensure KSQL_CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL +fi + diff --git a/ksql-rest-app/src/include/docker/run b/ksql-rest-app/src/include/docker/run index 50741435692e..e35d4e3c0930 100644 --- a/ksql-rest-app/src/include/docker/run +++ b/ksql-rest-app/src/include/docker/run @@ -17,6 +17,18 @@ echo "===> Configuring ksqlDB..." +dub template "/etc/ksql/connect.properties.template" "/etc/ksql/connect.properties" + +# KSQL_CONNECT_GROUP_ID is a required configuration for running embedded connect, +# so we can proxy it to check whether or not to start an embedded connect worker +if ! 
[[ -z "${KSQL_CONNECT_GROUP_ID}" ]]; then + echo "===> Configuring Embedded Connect" + /usr/bin/docker/configure + + echo "===> Enabling Embedded Connect" + export KSQL_KSQL_CONNECT_WORKER_CONFIG="/etc/ksql/connect.properties" +fi + dub template "/etc/ksql/ksqldb-server.properties.template" "/etc/ksql/ksqldb-server.properties" echo "===> Launching ksqlDB Server..." From 41cbbea753076b017e533657d63ef6470612e72e Mon Sep 17 00:00:00 2001 From: Alberto Santini Date: Fri, 10 Jan 2020 21:52:25 +0100 Subject: [PATCH 091/123] chore: remove unused import (#4273) --- .../java/io/confluent/ksql/test/planned/TestCasePlanLoader.java | 1 - 1 file changed, 1 deletion(-) diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java index 26b3af084397..422350f32b0b 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/planned/TestCasePlanLoader.java @@ -42,7 +42,6 @@ import java.io.IOException; import java.nio.charset.Charset; import java.nio.file.Files; -import java.nio.file.Path;; import java.util.Collections; import java.util.List; import java.util.Optional; From 0b135aa9eaa51ecfbef1e08f19c63d8e8156c756 Mon Sep 17 00:00:00 2001 From: Confluent Jenkins Bot Date: Sat, 11 Jan 2020 02:27:51 +0000 Subject: [PATCH 092/123] Set Confluent to 5.4.0, Kafka to 5.4.0. --- build-tools/pom.xml | 2 +- docs/conf.py | 2 +- ksql-benchmark/pom.xml | 2 +- ksql-cli/pom.xml | 2 +- ksql-clickstream-demo/pom.xml | 2 +- ksql-common/pom.xml | 2 +- ksql-console-scripts/pom.xml | 2 +- ksql-engine/pom.xml | 2 +- ksql-etc/pom.xml | 2 +- ksql-examples/pom.xml | 2 +- ksql-execution/pom.xml | 2 +- ksql-functional-tests/pom.xml | 2 +- ksql-metastore/pom.xml | 2 +- ksql-package/pom.xml | 2 +- ksql-parser/pom.xml | 2 +- ksql-rest-app/pom.xml | 2 +- ksql-rest-client/pom.xml | 2 +- ksql-rest-model/pom.xml | 2 +- ksql-rocksdb-config-setter/pom.xml | 2 +- ksql-serde/pom.xml | 2 +- ksql-streams/pom.xml | 2 +- ksql-test-util/pom.xml | 2 +- ksql-tools/pom.xml | 2 +- ksql-udf-quickstart/pom.xml | 2 +- ksql-udf/pom.xml | 2 +- ksql-version-metrics-client/pom.xml | 2 +- licenses/licenses.html | 12 ++++++------ pom.xml | 4 ++-- 28 files changed, 34 insertions(+), 34 deletions(-) diff --git a/build-tools/pom.xml b/build-tools/pom.xml index 2622ad368bcc..0b4a0fcf553f 100644 --- a/build-tools/pom.xml +++ b/build-tools/pom.xml @@ -19,6 +19,6 @@ 4.0.0 io.confluent build-tools - 5.4.0-SNAPSHOT + 5.4.0 Build Tools diff --git a/docs/conf.py b/docs/conf.py index 430a6ad12a4f..65c2eacd144a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -56,7 +56,7 @@ def setup(app): # The short X.Y version. version = '5.4' # The full version, including alpha/beta/rc tags. -release = '5.4.0-SNAPSHOT' +release = '5.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/ksql-benchmark/pom.xml b/ksql-benchmark/pom.xml index 1551e5f56c45..a5deddf2db68 100644 --- a/ksql-benchmark/pom.xml +++ b/ksql-benchmark/pom.xml @@ -47,7 +47,7 @@ questions. 
io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-benchmark diff --git a/ksql-cli/pom.xml b/ksql-cli/pom.xml index 6bb1a00a74cb..da8c09e8bebb 100644 --- a/ksql-cli/pom.xml +++ b/ksql-cli/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-cli diff --git a/ksql-clickstream-demo/pom.xml b/ksql-clickstream-demo/pom.xml index 54f28a966969..620a14dfa778 100644 --- a/ksql-clickstream-demo/pom.xml +++ b/ksql-clickstream-demo/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 io.confluent.ksql diff --git a/ksql-common/pom.xml b/ksql-common/pom.xml index 78affa0a0da4..73f25411a3ae 100644 --- a/ksql-common/pom.xml +++ b/ksql-common/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-common diff --git a/ksql-console-scripts/pom.xml b/ksql-console-scripts/pom.xml index c1152d3b111b..dda21ce94a8f 100644 --- a/ksql-console-scripts/pom.xml +++ b/ksql-console-scripts/pom.xml @@ -22,7 +22,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 io.confluent.ksql diff --git a/ksql-engine/pom.xml b/ksql-engine/pom.xml index ecef4f9c1781..7f84088c836c 100644 --- a/ksql-engine/pom.xml +++ b/ksql-engine/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-engine diff --git a/ksql-etc/pom.xml b/ksql-etc/pom.xml index 6b1ff175473f..8fbc770c3f9d 100644 --- a/ksql-etc/pom.xml +++ b/ksql-etc/pom.xml @@ -22,7 +22,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 io.confluent.ksql diff --git a/ksql-examples/pom.xml b/ksql-examples/pom.xml index ffcf96b127d4..9e503662fa8b 100644 --- a/ksql-examples/pom.xml +++ b/ksql-examples/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-examples diff --git a/ksql-execution/pom.xml b/ksql-execution/pom.xml index cfb83030eade..6afb708b48da 100644 --- a/ksql-execution/pom.xml +++ b/ksql-execution/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-execution diff --git a/ksql-functional-tests/pom.xml b/ksql-functional-tests/pom.xml index c1f7cc813d4a..d0c8ff329056 100644 --- a/ksql-functional-tests/pom.xml +++ b/ksql-functional-tests/pom.xml @@ -21,7 +21,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 4.0.0 diff --git a/ksql-metastore/pom.xml b/ksql-metastore/pom.xml index e11aeb3b6deb..90c2696a47a7 100644 --- a/ksql-metastore/pom.xml +++ b/ksql-metastore/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-metastore diff --git a/ksql-package/pom.xml b/ksql-package/pom.xml index 5d15789cd406..e3d92fc550c3 100644 --- a/ksql-package/pom.xml +++ b/ksql-package/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-package diff --git a/ksql-parser/pom.xml b/ksql-parser/pom.xml index 7f659a747375..5e12bec61a01 100644 --- a/ksql-parser/pom.xml +++ b/ksql-parser/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-parser diff --git a/ksql-rest-app/pom.xml b/ksql-rest-app/pom.xml index 7afdd6f74831..3a5fadf8dc6b 100644 --- a/ksql-rest-app/pom.xml +++ b/ksql-rest-app/pom.xml @@ -23,7 +23,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-rest-app diff --git a/ksql-rest-client/pom.xml b/ksql-rest-client/pom.xml index 2a4f27562c68..95c041ff6e82 100644 --- a/ksql-rest-client/pom.xml +++ b/ksql-rest-client/pom.xml @@ -23,7 +23,7 @@ io.confluent.ksql 
ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-rest-client diff --git a/ksql-rest-model/pom.xml b/ksql-rest-model/pom.xml index c2a1ba6f6b58..e839abdd48d3 100644 --- a/ksql-rest-model/pom.xml +++ b/ksql-rest-model/pom.xml @@ -23,7 +23,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-rest-model diff --git a/ksql-rocksdb-config-setter/pom.xml b/ksql-rocksdb-config-setter/pom.xml index de5d625c7e83..54abe8ddd8f4 100644 --- a/ksql-rocksdb-config-setter/pom.xml +++ b/ksql-rocksdb-config-setter/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-rocksdb-config-setter diff --git a/ksql-serde/pom.xml b/ksql-serde/pom.xml index 5cf9292115de..ebdb4de45b94 100644 --- a/ksql-serde/pom.xml +++ b/ksql-serde/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-serde diff --git a/ksql-streams/pom.xml b/ksql-streams/pom.xml index 40d9b26c9be5..82051d2de42e 100644 --- a/ksql-streams/pom.xml +++ b/ksql-streams/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-streams diff --git a/ksql-test-util/pom.xml b/ksql-test-util/pom.xml index abebddcf37a0..0bdc5e11e61b 100644 --- a/ksql-test-util/pom.xml +++ b/ksql-test-util/pom.xml @@ -20,7 +20,7 @@ ksql-parent io.confluent.ksql - 5.4.0-SNAPSHOT + 5.4.0 4.0.0 diff --git a/ksql-tools/pom.xml b/ksql-tools/pom.xml index f4a34f94ffbd..411f396cf3f5 100644 --- a/ksql-tools/pom.xml +++ b/ksql-tools/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-tools diff --git a/ksql-udf-quickstart/pom.xml b/ksql-udf-quickstart/pom.xml index c4dc1908ac3d..b6562dbf0660 100644 --- a/ksql-udf-quickstart/pom.xml +++ b/ksql-udf-quickstart/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-udf-quickstart diff --git a/ksql-udf/pom.xml b/ksql-udf/pom.xml index 773a2d1ef312..81b4852980cb 100644 --- a/ksql-udf/pom.xml +++ b/ksql-udf/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-udf diff --git a/ksql-version-metrics-client/pom.xml b/ksql-version-metrics-client/pom.xml index 26d539c4684d..2866ed17b8dd 100644 --- a/ksql-version-metrics-client/pom.xml +++ b/ksql-version-metrics-client/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 ksql-version-metrics-client diff --git a/licenses/licenses.html b/licenses/licenses.html index ba684079d4ff..9fc760d602d2 100644 --- a/licenses/licenses.html +++ b/licenses/licenses.html @@ -67,15 +67,15 @@

License Report


slice-0.29jar0.29 -common-config-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +common-config-5.4.0jar5.4.0 -common-utils-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +common-utils-5.4.0jar5.4.0 -kafka-avro-serializer-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +kafka-avro-serializer-5.4.0jar5.4.0 -kafka-connect-avro-converter-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +kafka-connect-avro-converter-5.4.0jar5.4.0 -kafka-schema-registry-client-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +kafka-schema-registry-client-5.4.0jar5.4.0 ksql-engine-0.1-SNAPSHOTjar0.1-SNAPSHOT @@ -123,7 +123,7 @@

License Report


kafka-streams-0.11.0.0-cp1jarincluded file -kafka_2.11-5.4.0-ccs-SNAPSHOTjarincluded file +kafka_2.11-5.4.0-ccsjarincluded file lz4-1.3.0jar1.3.0 diff --git a/pom.xml b/pom.xml index e746ce812706..b38849ff1c98 100644 --- a/pom.xml +++ b/pom.xml @@ -22,14 +22,14 @@ io.confluent rest-utils-parent - 5.4.0-SNAPSHOT + 5.4.0 io.confluent.ksql ksql-parent pom ksql-parent - 5.4.0-SNAPSHOT + 5.4.0 Confluent Community License From 8326151d70808444995db4850b1667ce0036490b Mon Sep 17 00:00:00 2001 From: Tim Fox Date: Sat, 11 Jan 2020 18:21:15 +0900 Subject: [PATCH 093/123] docs: klip-15 New client and API (#4069) --- .../klip-15-new-api-and-client.md | 325 ++++++++++++++++++ 1 file changed, 325 insertions(+) create mode 100644 design-proposals/klip-15-new-api-and-client.md diff --git a/design-proposals/klip-15-new-api-and-client.md b/design-proposals/klip-15-new-api-and-client.md new file mode 100644 index 000000000000..0b973dfe8424 --- /dev/null +++ b/design-proposals/klip-15-new-api-and-client.md @@ -0,0 +1,325 @@ +# KLIP 15 - ksqlDB Client and New Server API + +**Author**: Tim Fox (purplefox) | +**Release Target**: ? | +**Status**: _In Discussion_ | +**Discussion**: + +Please read the initial discussion on the ksqlDB developer group first: + +https://groups.google.com/forum/#!topic/ksql-dev/yxcRlsOsNmo + +And there is a partially working prototype for this work described here: + +https://groups.google.com/forum/#!topic/ksql-dev/5mLKvtZFs4Y + + For ksqlDB to be a successful project we need to provide an awesome out of the box developer experience + and it should be super easy and fun to write powerful event streaming applications. + Currently this is somewhat difficult to do, and it requires the use of multiple clients and multiple moving parts in order to build + a simple event sourcing / CQRS style application. + We should provide a new ksqlDB client that provides that currently missing single interaction point with the event streaming + platform so that we can improve the application development process. + To support the client we should provide an updated HTTP2 based API that allows streaming use cases to be handled better. + +## Motivation and background + +In order to increase adoption of ksqlDB we need the developer experience to be as slick as possible. Currently, in order to write +a simple event streaming application with ksqlDB, up to 4 clients may be needed: + +1. An HTTP client to access the current HTTP/REST API +2. A WebSocket client to access the streaming query WebSockets endpoint +3. A JDBC client to access data in JDBC tables that are synced with KSQL via connect, as the current support for +pull queries does not make the data otherwise easily accessible. +4. A Kafka client for producing and consuming messages from topics. + +This is a lot for an application to handle and creates a steep learning curve for application developers. + +Moreover, our current HTTP API is lacking, especially in the area of streaming query results, which makes it difficult to write an application +that uses this API. 
This KLIP proposes that we: + +* Create a ksqlDB client (initially in Java and JavaScript) as the primary interaction point to the event streaming platform +for an application +* Create a new HTTP2 based server API using Vert.x that supports the client and enables the functionality of the client to be delivered in a straightforward +and efficient way +* Migrate the functionality of the current HTTP/REST API over to the new server implementation and retire the parts that are no longer needed. + +## What is in scope + +### The client + +* We will create a new client, initially in Java (and potentially in JavaScript). Clients for other languages such as Python and Go will follow later. +* The client will initially support execution and streaming of queries (both push and pull), inserting of rows into streams, DDL operations and admin operations +such as list and describe. +* The client will support a reactive / async interaction style using Java CompletableFuture and Reactive Streams / JDK9 Flow API (or a copy thereof if we cannot upgrade from Java 8) +* The client will also support a direct / synchronous interaction style. +* The server API will be very simple, based on HTTP2 with a simple text/JSON based encoding, therefore it will be easy to use directly from vanilla +HTTP2 clients if a ksqlDB client is not available + +### The Server API + +We will create a new, simple HTTP2 API for streaming query results from server to client and +for streaming inserts from client to server. We will use a simple text/JSON encoding for data. +Please note, this is not a REST API, it's a streaming HTTP API. The API does not follow REST principles. REST is inherently designed for request/response (RPC) +style interactions, not streaming. + +The API will have the following characteristics: + +* Multiplexed (because of HTTP2 multiplexing) +* Back-pressure (because of HTTP2 flow control) +* Text based so easy to use and view results using command line tools such as curl +* Can be used from any vanilla HTTP2 client for any language +* Simple newline based delimitation so easy to parse results on client side +* JSON encoding of data so very easy to parse as most languages have good JSON support +* CORS support +* HTTP basic auth support +* TLS +* Easy to create new clients using the protocol + +#### Query streaming + +The request method will be a POST. + +Requests will be made to a specific URL, e.g. "/query-stream" (this can be configurable) + +The body of the request is a JSON object UTF-8 encoded as text, containing the arguments for the +operation (newlines have been added here for the sake of clarity but the real JSON must not contain newlines) + +```` +{ +"query": "select * from foo", <----- the SQL of the query to execute +"push": true, <----- if true then push query, else pull query +"limit": 10 <---- If specified return at most this many rows, +"properties": { <----- Optional properties for the query + "prop1": "val1", + "prop2": "val2" + } +} + +```` + +Please note the parameters are not necessarily exhaustive. The description here is an outline, not a detailed +low level design. The low level design will evolve during development. 
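As a quick illustration of the "usable from any vanilla HTTP2 client" goal, here is a minimal sketch of issuing such a request with JDK 11's built-in `java.net.http.HttpClient`. The host, port and endpoint path are assumptions based on the outline above, not a finalized contract:

````java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.stream.Stream;

public final class QueryStreamSketch {

  public static void main(final String[] args) throws Exception {
    // An HTTP2-capable client; this API requires JDK 11+.
    final HttpClient client = HttpClient.newBuilder()
        .version(HttpClient.Version.HTTP_2)
        .build();

    // The request body outlined above, as a single line of JSON.
    final String body = "{\"query\": \"select * from foo\", \"push\": true}";

    final HttpRequest request = HttpRequest.newBuilder(
            URI.create("http://localhost:8088/query-stream")) // assumed host/port/path
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build();

    // Because the protocol is newline delimited, a plain line-based body
    // handler is enough: the first line is the metadata object, and each
    // subsequent line is a JSON array representing one row.
    final HttpResponse<Stream<String>> response =
        client.send(request, HttpResponse.BodyHandlers.ofLines());

    response.body().forEach(System.out::println);
  }
}
````

The same request could equally be made from curl or any other HTTP2-capable tool; nothing in the protocol requires a ksqlDB-specific client. The format of the response lines this sketch prints is described next.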
In the case of a successful query, the response will begin with a metadata object: + +```` +{ +"query-id": "xyz123", <---- unique ID of the query, used when terminating the query +"columns":["col", "col2", "col3"], <---- the names of the columns +"column_types":["BIGINT", "STRING", "BOOLEAN"], <---- The types of the columns +"row_count": 101 <---- The number of rows - only set in case of pull query +} +```` + +Followed by zero or more JSON arrays: + +```` +[123, "blah", true] +[432, "foo", true] +[765, "whatever", false] +```` + +Each JSON array or row will be delimited by a newline. + +For a pull query the response will be ended by the server once all rows have been written, for +a push query the response will remain open until the connection is closed or the query is explicitly +terminated. + +#### Terminating queries + +Push queries can be explicitly terminated by the client by making a request to this endpoint. + +The request method will be a POST. + +Requests will be made to a specific URL, e.g. "/query-terminate" (this can be configurable) + +The body of the request is a JSON object UTF-8 encoded as text, containing the arguments for the +operation (newlines have been added here for the sake of clarity but the real JSON must not contain newlines) + +```` +{ +"query-id": "xyz123" <----- the ID of the query to terminate +} + +```` + + +#### Inserts + +The request method will be a POST. + +Requests will be made to a specific URL, e.g. "/insert-stream" (this can be configurable) + +The body of the request is a JSON object UTF-8 encoded as text, containing the arguments for the +operation (newlines have been added for clarity, the real JSON must not contain newlines): + +```` +{ +"stream": "my-stream", <----- The name of the KSQL stream to insert into +"acks": true <----- If true then a stream of acks will be returned in the response +} + +```` + +Followed by zero or more JSON objects representing the values to insert: + +```` +{ +"col1": "val1", +"col2": 2.3, +"col3": true +} +```` +Each JSON object will be separated by a new line. + +To terminate the insert stream the client should end the request. + +If acks are requested then an ack will be written to the response when each row has been +successfully committed to the underlying topic. Rows are committed in the order they are provided. +Each ack in the response is just an empty JSON object, separated by newlines: + +```` +{} +{} +{} +{} +```` + +#### Errors + +Appropriate status codes will be returned from failed requests. The response will also contain JSON +with further information on the error: + +{ + "error_code": <error code>, + "message": <error message> +} + +#### Non streaming results + +The API is designed for efficiently streaming rows from client to server or from server to client. + +The amount of data that is streamed in any specific query or insert stream can be huge so we want to +avoid any solution that buffers it all in memory at any one time or requires specialised parsers to +parse. + +For this reason we do not provide query results (or accept streams of inserts) by default as a single +JSON object. If we did so we would force users to find and use a streaming JSON parser in order to +parse the results as they arrive. If no parser is available on their platform they would be forced +to parse the entire result set as a single JSON object in memory - this might not be feasible or +desirable due to memory and latency constraints. Moreover, in the world of streaming it's quite common +to want to pipe the stream from one place to another without looking into it. 
In that case it's very +inefficient to deserialize the bytes as JSON simply to serialize them back to bytes to write them +to the output stream. For these reasons the results, by default, are sent as a set of JSON arrays +delimited by newline. Newlines are very easy to parse by virtually every target platform without +having to rely on specialist libraries. + +There are, however, some use cases where we can guarantee the results of the query are small, e.g. +when using a limit clause. In this case, the more general streaming use case degenerates into an RPC +use case. In this situation it can be convenient to accept the results as a single JSON object as we +may always want to parse them as a single object in memory. To support this use case we can allow the request +to contain an accept-header specifying the content-type of the response. To receive a response as +a single JSON object, content-type should be specified as 'text/json'; for our delimited format we +will specify our own custom content type. The delimited format will be the default, as the API +is primarily a streaming API. + +### Migration of existing "REST" API + +We will migrate the existing Jetty based "REST" API to the new Vert.x based implementation as-is or with +minor modifications. + +We will migrate the existing Jetty specific plug-ins to Vert.x. + +### Server implementation + +The new server API will be implemented using Vert.x. + +The implementation will be designed in a reactive / non blocking way in order to provide the best performance / scalability characteristics with +low resource usage. This will also influence the overall threading model of the server and position us with a better, more scalable internal +server architecture that will help future proof the ksqlDB server. + +## What is not in scope + +* We are not creating a new RESTful API. +* We are not currently looking to write clients for languages other than Java and JavaScript (although those may follow later) +* We will not initially allow for consuming directly from Kafka topics using the client (although that may follow later) + +## Value/Return + +We hope that by providing a delightful, easy to use client and new HTTP2 based server API, it will enable application developers to easily write powerful +applications that take advantage of their data plane / event streaming platform more effectively. + +We hope that this could be transformative for the project in terms of adoption as it would position ksqlDB as a great choice for +writing typical event sourcing / CQRS / DDD style applications, which are currently hard to write using ksqlDB alone. + +There are also further incidental advantages gained by this work. By using a modern server side implementation such as Vert.x +there are benefits in relation to performance, scalability, simplicity of implementation, +and threading model. Not to mention reduced dependencies and better resource usage. This will set us up better for the kinds of high +throughput operations that we will need to implement efficiently now that the project has pivoted to a more active application +facing server rather than a more passive DDL engine. + +## Public APIs + +The following changes / additions to APIs will occur: + +* We will provide a new HTTP2 based streaming API. This will not be accessible using HTTP1.1 +* The current chunked streaming and websockets streaming endpoints will be retired. 
* The old API will be migrated to Vert.x possibly with some modifications and be accessible over HTTP1.1 and HTTP2 +* The Java client will provide a new public API + +## Design + +### The client + +* The Java client will provide an API based on Reactive Streams and CompletableFuture. We can also consider providing a JDK 9 shim using the Flow API for those +users using Java 9+ +* The networking will be handled by Vert.x (which uses Netty). +* The client will have minimal transitive jar dependencies - this is important as the client will be embedded in end user applications. +* Client connections are designed to be re-used. +* The client will be thread-safe. + +A purely illustrative sketch of what such a client API might look like is included at the end of this document. + +### The server + +* The toolkit used on the server side will be Vert.x +* The current Jetty / JAX-RS usage will be retired. +* The current non streaming HTTP/REST endpoints will be migrated to Vert.x - this should modernise and radically simplify and +clarify the server side implementation, resulting in a cleaner implementation and reduced lines of code. +* The current query streaming endpoints (chunked response and Websockets) will be retired. +* Any current Jetty specific plugins (e.g. security plugins) will be migrated to Vert.x +* Vert.x has great support for working with various different network protocols and has [unrivalled performance/scalability](https://www.techempower.com/benchmarks/) +characteristics for a JVM toolkit. It will also set us up well for a fully async / reactive internal threading model in the server that we +should aim towards for the future. + +## Test plan + +We will require unit/module level tests and integration tests for all the new or changed components as per standard best practice. + +## Documentation Updates + +* We will produce new guide(s) for the Java and JavaScript clients. +* We will provide a new guide for the new HTTP API and retire the existing "REST" API documentation. +* We will produce example applications showing how to use the client in a real app. E.g. using Spring Boot / Vert.x (Java) and Node.js (JavaScript) +* There may be some server side configuration changes due to the new server implementation. + +## Compatibility Implications + +The current chunked response query streaming endpoint will be removed so any users currently using that will have to upgrade. + +The current websockets query streaming endpoint will be removed so any users currently using that will have to upgrade. +This endpoint is currently undocumented so it's not expected a lot of users are using it. It is used by C3 (?) +so that will need to be migrated to the new API. + +There may be some minor incompatible changes on the migrated old server API. + +## Performance Implications + +* Streaming query results with the new client and server side implementation should provide significantly better performance than +the current websockets or HTTP streaming APIs +* Using Vert.x in general for hosting the API will provide excellent performance and scalability with very low resource usage. + +## Security Implications + +The new protocol should be available over TLS and we should support the same auth approach as we do with the current API +so there should be no extra security implications. 
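To make the proposed client interaction style concrete, the following is the purely illustrative sketch referenced in the Design section above of what the reactive query API might look like in Java. None of these names (`KsqlDBClient`, `Row`, `streamQuery`) are defined by this KLIP; the real API will be designed during development:

````java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Flow;

// Hypothetical client-side types; all names here are assumptions, not proposed API.
public interface KsqlDBClient {

  // Reactive / async style: the future completes once the server accepts the
  // query; rows then arrive with back-pressure via a Reactive Streams style
  // publisher (JDK 9+ Flow, or a copy of it if the client must stay on Java 8).
  CompletableFuture<Flow.Publisher<Row>> streamQuery(String sql);

  interface Row {
    // Column access by name; a row would mirror the 'columns' metadata
    // returned at the start of each query response.
    Object get(String columnName);
  }
}
````

A direct / synchronous variant would simply block on the future and iterate the rows, matching the synchronous interaction style listed under "What is in scope".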
From 4e32da666aa35bf4e1390698b6e2ca4d67bab66e Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Mon, 13 Jan 2020 12:19:47 +0000 Subject: [PATCH 094/123] fix: add logging during restore (#4270) Fixes: https://github.com/confluentinc/ksql/issues/4269 --- .../server/computation/CommandRunner.java | 87 +++++++++++-------- 1 file changed, 51 insertions(+), 36 deletions(-) diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandRunner.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandRunner.java index 5adb297d554a..fc02c6b00762 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandRunner.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/computation/CommandRunner.java @@ -16,7 +16,6 @@ package io.confluent.ksql.rest.server.computation; import com.google.common.annotations.VisibleForTesting; -import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.rest.entity.ClusterTerminateRequest; import io.confluent.ksql.rest.server.state.ServerState; import io.confluent.ksql.rest.util.ClusterTerminator; @@ -42,12 +41,13 @@ /** * Handles the logic of reading distributed commands, including pre-existing commands that were - * issued before being initialized, and then delegating their execution to a - * {@link InteractiveStatementExecutor}. - * Also responsible for taking care of any exceptions that occur in the process. + * issued before being initialized, and then delegating their execution to a {@link + * InteractiveStatementExecutor}. Also responsible for taking care of any exceptions that occur in + * the process. */ public class CommandRunner implements Closeable { - private static final Logger log = LoggerFactory.getLogger(CommandRunner.class); + + private static final Logger LOG = LoggerFactory.getLogger(CommandRunner.class); private static final int STATEMENT_RETRY_MS = 100; private static final int MAX_STATEMENT_RETRY_MS = 5 * 1000; @@ -152,27 +152,46 @@ public void close() { * Read and execute all commands on the command topic, starting at the earliest offset. 
*/ public void processPriorCommands() { - final List restoreCommands = commandStore.getRestoreCommands(); - final Optional terminateCmd = findTerminateCommand(restoreCommands); - if (terminateCmd.isPresent()) { - terminateCluster(terminateCmd.get().getCommand()); - return; + try { + final List restoreCommands = commandStore.getRestoreCommands(); + + LOG.info("Restoring previous state from {} commands.", restoreCommands.size()); + + final Optional terminateCmd = findTerminateCommand(restoreCommands); + if (terminateCmd.isPresent()) { + LOG.info("Cluster previously terminated: terminating."); + terminateCluster(terminateCmd.get().getCommand()); + return; + } + + restoreCommands.forEach( + command -> { + currentCommandRef.set(new Pair<>(command, clock.instant())); + RetryUtil.retryWithBackoff( + maxRetries, + STATEMENT_RETRY_MS, + MAX_STATEMENT_RETRY_MS, + () -> statementExecutor.handleRestore(command), + WakeupException.class + ); + currentCommandRef.set(null); + } + ); + + final List queries = statementExecutor + .getKsqlEngine() + .getPersistentQueries(); + + LOG.info("Restarting {} queries.", queries.size()); + + queries.forEach(PersistentQueryMetadata::start); + + LOG.info("Restore complete"); + + } catch (final Exception e) { + LOG.error("Error during restore", e); + throw e; } - restoreCommands.forEach( - command -> { - currentCommandRef.set(new Pair<>(command, clock.instant())); - RetryUtil.retryWithBackoff( - maxRetries, - STATEMENT_RETRY_MS, - MAX_STATEMENT_RETRY_MS, - () -> statementExecutor.handleRestore(command), - WakeupException.class - ); - currentCommandRef.set(null); - } - ); - final KsqlEngine ksqlEngine = statementExecutor.getKsqlEngine(); - ksqlEngine.getPersistentQueries().forEach(PersistentQueryMetadata::start); } void fetchAndRunCommands() { @@ -187,7 +206,7 @@ void fetchAndRunCommands() { return; } - log.trace("Found {} new writes to command topic", commands.size()); + LOG.debug("Found {} new writes to command topic", commands.size()); for (final QueuedCommand command : commands) { if (closed) { return; @@ -198,14 +217,14 @@ void fetchAndRunCommands() { } private void executeStatement(final QueuedCommand queuedCommand) { - log.info("Executing statement: " + queuedCommand.getCommand().getStatement()); + LOG.info("Executing statement: " + queuedCommand.getCommand().getStatement()); final Runnable task = () -> { if (closed) { - log.info("Execution aborted as system is closing down"); + LOG.info("Execution aborted as system is closing down"); } else { statementExecutor.handleStatement(queuedCommand); - log.info("Executed statement: " + queuedCommand.getCommand().getStatement()); + LOG.info("Executed statement: " + queuedCommand.getCommand().getStatement()); } }; @@ -232,13 +251,13 @@ private static Optional findTerminateCommand( @SuppressWarnings("unchecked") private void terminateCluster(final Command command) { serverState.setTerminating(); - log.info("Terminating the KSQL server."); + LOG.info("Terminating the KSQL server."); this.close(); final List deleteTopicList = (List) command.getOverwriteProperties() .getOrDefault(ClusterTerminateRequest.DELETE_TOPIC_LIST_PROP, Collections.emptyList()); clusterTerminator.terminateCluster(deleteTopicList); - log.info("The KSQL server was terminated."); + LOG.info("The KSQL server was terminated."); } CommandRunnerStatus checkCommandRunnerStatus() { @@ -252,17 +271,13 @@ CommandRunnerStatus checkCommandRunnerStatus() { ? 
CommandRunnerStatus.RUNNING : CommandRunnerStatus.ERROR; } - Pair getCurrentCommand() { - return currentCommandRef.get(); - } - private class Runner implements Runnable { @Override public void run() { try { while (!closed) { - log.debug("Polling for new writes to command topic"); + LOG.trace("Polling for new writes to command topic"); fetchAndRunCommands(); } } catch (final WakeupException wue) { From 7a83bbfbcec33314b35b71e0d6cbea014a3204f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20Pe=C3=B1a?= Date: Mon, 13 Jan 2020 09:20:01 -0600 Subject: [PATCH 095/123] feat: ask for password if -p is not provided (#4153) --- .../src/main/java/io/confluent/ksql/Ksql.java | 17 +++++++++++++++++ .../java/io/confluent/ksql/cli/Options.java | 12 ++++++++++++ .../ksql/cli/commands/OptionsTest.java | 17 +++++++++++++++++ 3 files changed, 46 insertions(+) diff --git a/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java b/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java index 51c184989f25..d4ee1d79555d 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java @@ -25,6 +25,8 @@ import io.confluent.ksql.util.ErrorMessageUtil; import io.confluent.ksql.version.metrics.KsqlVersionCheckerAgent; import io.confluent.ksql.version.metrics.collector.KsqlModuleType; + +import java.io.Console; import java.io.File; import java.io.IOException; import java.util.Collections; @@ -67,6 +69,11 @@ public static void main(final String[] args) throws IOException { System.exit(-1); } + // ask for password if not set through command parameters + if (!options.getUserName().isEmpty() && !options.isPasswordSet()) { + options.setPassword(readPassword()); + } + try { new Ksql(options, System.getProperties(), KsqlRestClient::create, Cli::build).run(); } catch (final Exception e) { @@ -77,6 +84,16 @@ public static void main(final String[] args) throws IOException { } } + private static String readPassword() { + final Console console = System.console(); + if (console == null) { + System.err.println("Could not get console for enter password; use -p option instead."); + System.exit(-1); + } + + return new String(console.readPassword("Enter password: ")); + } + void run() { final Map configProps = options.getConfigFile() .map(Ksql::loadProperties) diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/Options.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/Options.java index 770c4f2732c8..38467697ef7d 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/Options.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/Options.java @@ -158,6 +158,18 @@ public OutputFormat getOutputFormat() { return OutputFormat.valueOf(outputFormat); } + public String getUserName() { + return userName; + } + + public void setPassword(final String password) { + this.password = password; + } + + public boolean isPasswordSet() { + return (password != null && !password.trim().isEmpty()); + } + public Optional getUserNameAndPassword() { if ((userName == null && password != null) || (password == null && userName != null)) { throw new ConfigException( diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/commands/OptionsTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/commands/OptionsTest.java index 49149805e2bc..d45e704e575f 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/commands/OptionsTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/commands/OptionsTest.java @@ -49,4 +49,21 @@ public void shouldReturnEmptyOptionWhenUserAndPassNotPresent() throws 
Exception assertFalse(options.getUserNameAndPassword().isPresent()); } + @Test + public void shouldReturnPasswordNotSetIfPasswordIsNull() throws Exception { + final Options options = Options.parse("http://foobar"); + assertFalse(options.isPasswordSet()); + } + + @Test + public void shouldReturnPasswordNotSetIfPasswordIsEmpty() throws Exception { + final Options options = Options.parse("http://foobar", "-u", "joe", "-p", ""); + assertFalse(options.isPasswordSet()); + } + + @Test + public void shouldReturnPasswordSetIfPasswordIsNotEmpty() throws Exception { + final Options options = Options.parse("http://foobar", "-u", "joe", "-p", "joe"); + assertTrue(options.isPasswordSet()); + } } From 307bf4d9ff4b9839d2f4c2191d0053decf0c051e Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Mon, 13 Jan 2020 17:29:33 +0000 Subject: [PATCH 096/123] fix: report clearer error message when AVG used with DELIMITED (#4295) See https://github.com/confluentinc/ksql/issues/4294 `AVG` doesn't work with `DELIMITED` format and the error message isn't great. Example statements that cause the error: ```sql -- Given: CREATE STREAM INPUT (VALUE integer) WITH (kafka_topic='test_topic', value_format='DELIMITED'); -- When: CREATE TABLE OUTPUT AS SELECT avg(value) AS avg FROM INPUT group by ROWKEY; ``` Old error message: ``` ksql> CREATE TABLE OUTPUT AS SELECT avg(value) AS avg FROM INPUT group by ROWKEY; CREATE TABLE OUTPUT AS SELECT avg(value)Value format does not support value schema. format: DELIMITED schema: Persistence{schema=STRUCT> NOT NULL, unwrapped=false} reason: The 'DELIMITED' format does not support type 'STRUCT' Caused by: The 'DELIMITED' format does not support type 'STRUCT' ``` This PR improves the error message a bit: New error message: ``` One of the functions used in the statement has an intermediate type that the value format can not handle. Please remove the function or change the format. Consider up-voting https://github.com/confluentinc/ksql/issues/3950, which will resolve this limitation Caused by: Value format does not support value schema. format: DELIMITED schema: Persistence{schema=STRUCT> NOT NULL, unwrapped=false} reason: The 'DELIMITED' format does not support type 'STRUCT' Caused by: The 'DELIMITED' format does not support type 'STRUCT' ``` --- .../ksql/SchemaNotSupportedException.java | 28 +++++++ .../query-validation-tests/average-udaf.json | 12 +++ .../confluent/ksql/serde/GenericKeySerDe.java | 4 +- .../confluent/ksql/serde/GenericRowSerDe.java | 4 +- .../ksql/serde/GenericKeySerDeTest.java | 4 +- .../ksql/serde/GenericRowSerDeTest.java | 4 +- .../streams/AggregateBuilderUtils.java | 75 ++++++++++++++++--- 7 files changed, 111 insertions(+), 20 deletions(-) create mode 100644 ksql-common/src/main/java/io/confluent/ksql/SchemaNotSupportedException.java diff --git a/ksql-common/src/main/java/io/confluent/ksql/SchemaNotSupportedException.java b/ksql-common/src/main/java/io/confluent/ksql/SchemaNotSupportedException.java new file mode 100644 index 000000000000..2c2954b269a0 --- /dev/null +++ b/ksql-common/src/main/java/io/confluent/ksql/SchemaNotSupportedException.java @@ -0,0 +1,28 @@ +/* + * Copyright 2020 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql; + +import io.confluent.ksql.util.KsqlException; + +/** + * Thrown to indicate the schema is not supported. + */ +public class SchemaNotSupportedException extends KsqlException { + + public SchemaNotSupportedException(final String message, final Throwable cause) { + super(message, cause); + } +} diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/average-udaf.json b/ksql-functional-tests/src/test/resources/query-validation-tests/average-udaf.json index 940de096c401..41bec458d0c8 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/average-udaf.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/average-udaf.json @@ -77,6 +77,18 @@ {"topic": "OUTPUT", "key": "bob", "value": {"AVG": -66666.1}}, {"topic": "OUTPUT", "key": "alice", "value": {"AVG": 3.074457345651058E12}} ] + }, + { + "name": "average - DELIMITED", + "comment": "DELIMITED does not support STRUCT, so can't support AVG until we use a different internal format", + "statements": [ + "CREATE STREAM INPUT (VALUE integer) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT AS SELECT avg(value) AS avg FROM INPUT group by ROWKEY;" + ], + "expectedException": { + "type": "io.confluent.ksql.SchemaNotSupportedException", + "message": "One of the functions used in the statement has an intermediate type that the value format can not handle. Please remove the function or change the format." + } } ] } diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericKeySerDe.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericKeySerDe.java index 3c4a20584757..7b3a3de4e0a5 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericKeySerDe.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericKeySerDe.java @@ -20,12 +20,12 @@ import com.google.common.annotations.VisibleForTesting; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; +import io.confluent.ksql.SchemaNotSupportedException; import io.confluent.ksql.logging.processing.LoggingDeserializer; import io.confluent.ksql.logging.processing.ProcessingLogContext; import io.confluent.ksql.logging.processing.ProcessingLogger; import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.SchemaUtil; import java.util.Collections; import java.util.Map; @@ -116,7 +116,7 @@ private Serde createInner( try { serdeFactories.validate(format, schema); } catch (final Exception e) { - throw new KsqlException("Key format does not support key schema." + throw new SchemaNotSupportedException("Key format does not support key schema." 
+ System.lineSeparator() + "format: " + format.getFormat() + System.lineSeparator() diff --git a/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericRowSerDe.java b/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericRowSerDe.java index a60b6e9ac8c8..e30810eceb0f 100644 --- a/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericRowSerDe.java +++ b/ksql-serde/src/main/java/io/confluent/ksql/serde/GenericRowSerDe.java @@ -21,12 +21,12 @@ import com.google.common.annotations.VisibleForTesting; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.GenericRow; +import io.confluent.ksql.SchemaNotSupportedException; import io.confluent.ksql.logging.processing.LoggingDeserializer; import io.confluent.ksql.logging.processing.ProcessingLogContext; import io.confluent.ksql.logging.processing.ProcessingLogger; import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.SchemaUtil; import java.util.ArrayList; import java.util.Collections; @@ -106,7 +106,7 @@ private Serde from( try { serdeFactories.validate(format, schema); } catch (final Exception e) { - throw new KsqlException("Value format does not support value schema." + throw new SchemaNotSupportedException("Value format does not support value schema." + System.lineSeparator() + "format: " + format.getFormat() + System.lineSeparator() diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericKeySerDeTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericKeySerDeTest.java index 76c406b67b84..3b842ba774ea 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericKeySerDeTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericKeySerDeTest.java @@ -25,6 +25,7 @@ import com.google.common.collect.ImmutableMap; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; +import io.confluent.ksql.SchemaNotSupportedException; import io.confluent.ksql.logging.processing.LoggingDeserializer; import io.confluent.ksql.logging.processing.ProcessingLogContext; import io.confluent.ksql.logging.processing.ProcessingLogger; @@ -33,7 +34,6 @@ import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.serde.GenericKeySerDe.UnwrappedKeySerializer; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlException; import java.time.Duration; import java.util.Collections; import java.util.Optional; @@ -122,7 +122,7 @@ public void shouldValidateFormatCanHandleSchema() { .when(serdeFactories).validate(FORMAT, WRAPPED_SCHEMA); // Expect: - expectedException.expect(KsqlException.class); + expectedException.expect(SchemaNotSupportedException.class); expectedException.expectMessage("Key format does not support key schema." 
+ System.lineSeparator() + "format: JSON" diff --git a/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericRowSerDeTest.java b/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericRowSerDeTest.java index 2a9c484fb0e4..1a00523808c3 100644 --- a/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericRowSerDeTest.java +++ b/ksql-serde/src/test/java/io/confluent/ksql/serde/GenericRowSerDeTest.java @@ -27,12 +27,12 @@ import com.google.common.collect.ImmutableMap; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.GenericRow; +import io.confluent.ksql.SchemaNotSupportedException; import io.confluent.ksql.logging.processing.ProcessingLogContext; import io.confluent.ksql.logging.processing.ProcessingLogger; import io.confluent.ksql.logging.processing.ProcessingLoggerFactory; import io.confluent.ksql.schema.ksql.PersistenceSchema; import io.confluent.ksql.util.KsqlConfig; -import io.confluent.ksql.util.KsqlException; import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.Optional; @@ -131,7 +131,7 @@ public void shouldValidateFormatCanHandleSchema() { .when(serdesFactories).validate(FORMAT, MUTLI_FIELD_SCHEMA); // Expect: - expectedException.expect(KsqlException.class); + expectedException.expect(SchemaNotSupportedException.class); expectedException.expectMessage("Value format does not support value schema." + System.lineSeparator() + "format: JSON" diff --git a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateBuilderUtils.java b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateBuilderUtils.java index c39ac1a1c28f..3f39217f632e 100644 --- a/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateBuilderUtils.java +++ b/ksql-streams/src/main/java/io/confluent/ksql/execution/streams/AggregateBuilderUtils.java @@ -16,6 +16,7 @@ package io.confluent.ksql.execution.streams; import io.confluent.ksql.GenericRow; +import io.confluent.ksql.SchemaNotSupportedException; import io.confluent.ksql.execution.builder.KsqlQueryBuilder; import io.confluent.ksql.execution.context.QueryContext; import io.confluent.ksql.execution.context.QueryContext.Stacker; @@ -32,6 +33,7 @@ import org.apache.kafka.streams.state.KeyValueStore; final class AggregateBuilderUtils { + private static final String MATERIALIZE_OP = "Materialize"; private static final String WINDOW_SELECT_OP = "WindowSelect"; private static final String TO_OUTPUT_SCHEMA_OP = "ToOutputSchema"; @@ -62,23 +64,23 @@ static Materialized> buildMater final LogicalSchema aggregateSchema, final Formats formats, final KsqlQueryBuilder queryBuilder, - final MaterializedFactory materializedFactory) { + final MaterializedFactory materializedFactory + ) { final PhysicalSchema physicalAggregationSchema = PhysicalSchema.from( aggregateSchema, formats.getOptions() ); + final QueryContext queryContext = materializeContext(step); - final Serde keySerde = queryBuilder.buildKeySerde( - formats.getKeyFormat(), - physicalAggregationSchema, - queryContext - ); - final Serde valueSerde = queryBuilder.buildValueSerde( - formats.getValueFormat(), - physicalAggregationSchema, - queryContext - ); - return materializedFactory.create(keySerde, valueSerde, StreamsUtil.buildOpName(queryContext)); + + final Serde keySerde = + buildKeySerde(formats, queryBuilder, physicalAggregationSchema, queryContext); + + final Serde valueSerde = + buildValueSerde(formats, queryBuilder, physicalAggregationSchema, queryContext); + + return materializedFactory + 
.create(keySerde, valueSerde, StreamsUtil.buildOpName(queryContext)); } static MaterializationInfo.Builder materializationInfoBuilder( @@ -91,4 +93,53 @@ static MaterializationInfo.Builder materializationInfoBuilder( return MaterializationInfo.builder(StreamsUtil.buildOpName(queryContext), aggregationSchema) .map(pl -> aggregator.getResultMapper(), outputSchema, queryContext); } + + private static Serde buildKeySerde( + final Formats formats, + final KsqlQueryBuilder queryBuilder, + final PhysicalSchema physicalAggregationSchema, + final QueryContext queryContext + ) { + try { + return queryBuilder.buildKeySerde( + formats.getKeyFormat(), + physicalAggregationSchema, + queryContext + ); + } catch (final SchemaNotSupportedException e) { + throw schemaNotSupportedException(e, "key"); + } + } + + private static Serde buildValueSerde( + final Formats formats, + final KsqlQueryBuilder queryBuilder, + final PhysicalSchema physicalAggregationSchema, + final QueryContext queryContext + ) { + try { + return queryBuilder.buildValueSerde( + formats.getValueFormat(), + physicalAggregationSchema, + queryContext + ); + } catch (final SchemaNotSupportedException e) { + throw schemaNotSupportedException(e, "value"); + } + } + + private static SchemaNotSupportedException schemaNotSupportedException( + final SchemaNotSupportedException e, + final String type + ) { + return new SchemaNotSupportedException( + "One of the functions used in the statement has an intermediate type that the " + + type + " format can not handle. " + + "Please remove the function or change the format." + + System.lineSeparator() + + "Consider up-voting https://github.com/confluentinc/ksql/issues/3950, " + + "which will resolve this limitation", + e + ); + } } From 4dcab06ee8f7462193b7b42b06d251aa06d179f0 Mon Sep 17 00:00:00 2001 From: elismaga Date: Mon, 13 Jan 2020 10:55:55 -0800 Subject: [PATCH 097/123] build: Do not run twist lock scan and other docker image operations since we no longer build images in this repo (#4299) --- Jenkinsfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index c5da3d141b71..f6160b915d1e 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -3,6 +3,8 @@ dockerfile { slackChannel = '#ksql-alerts' upstreamProjects = 'confluentinc/schema-registry' - dockerRepos = ['confluentinc/ksql-cli'] extraDeployArgs = '-Ddocker.skip=true' + dockerPush = false + dockerScan = false + dockerImageClean = false } From 81f96dcaa885b8d55fa293faa2e92664aea678e2 Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Mon, 13 Jan 2020 11:39:26 -0800 Subject: [PATCH 098/123] chore: migrate to use ParsedSchema for SchemaRegistryClient (#4259) --- .../SchemaRegistryTopicSchemaSupplier.java | 57 +++++++++++-------- .../SandboxedSchemaRegistryClient.java | 1 + .../DefaultSchemaInjectorFunctionalTest.java | 7 +++ ...SchemaRegistryTopicSchemaSupplierTest.java | 25 +++++--- .../SandboxedSchemaRegistryClientTest.java | 1 + 5 files changed, 59 insertions(+), 32 deletions(-) diff --git a/ksql-engine/src/main/java/io/confluent/ksql/schema/ksql/inference/SchemaRegistryTopicSchemaSupplier.java b/ksql-engine/src/main/java/io/confluent/ksql/schema/ksql/inference/SchemaRegistryTopicSchemaSupplier.java index eed72ab36a2b..4572a656448f 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/schema/ksql/inference/SchemaRegistryTopicSchemaSupplier.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/schema/ksql/inference/SchemaRegistryTopicSchemaSupplier.java @@ -18,7 +18,8 @@ import 
com.google.common.annotations.VisibleForTesting; import io.confluent.connect.avro.AvroData; import io.confluent.connect.avro.AvroDataConfig; -import io.confluent.kafka.schemaregistry.client.SchemaMetadata; +import io.confluent.kafka.schemaregistry.ParsedSchema; +import io.confluent.kafka.schemaregistry.avro.AvroSchema; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException; import io.confluent.ksql.links.DocumentationLinks; @@ -66,39 +67,24 @@ public SchemaRegistryTopicSchemaSupplier(final SchemaRegistryClient srClient) { @Override public SchemaResult getValueSchema(final String topicName, final Optional schemaId) { - - final Optional metadata = getSchema(topicName, schemaId); - if (!metadata.isPresent()) { - return notFound(topicName); - } - - try { - final Schema connectSchema = toConnectSchema(metadata.get().getSchema()); - - return SchemaResult.success(SchemaAndId.schemaAndId(connectSchema, metadata.get().getId())); - } catch (final Exception e) { - return notCompatible(topicName, metadata.get().getSchema(), e); - } - } - - private Optional getSchema( - final String topicName, - final Optional schemaId - ) { try { final String subject = topicName + KsqlConstants.SCHEMA_REGISTRY_VALUE_SUFFIX; + final int id; if (schemaId.isPresent()) { - return Optional.of(srClient.getSchemaMetadata(subject, schemaId.get())); + id = schemaId.get(); + } else { + id = srClient.getLatestSchemaMetadata(subject).getId(); } - return Optional.of(srClient.getLatestSchemaMetadata(subject)); + final ParsedSchema schema = srClient.getSchemaBySubjectAndId(subject, id); + return fromParsedSchema(topicName, id, schema); } catch (final RestClientException e) { switch (e.getStatus()) { case HttpStatus.SC_NOT_FOUND: case HttpStatus.SC_UNAUTHORIZED: case HttpStatus.SC_FORBIDDEN: - return Optional.empty(); + return notFound(topicName); default: throw new KsqlException("Schema registry fetch for topic " + topicName + " request failed.", e); @@ -109,6 +95,31 @@ private Optional getSchema( } } + public SchemaResult fromParsedSchema( + final String topic, + final int id, + final ParsedSchema parsedSchema + ) { + + try { + final Schema connectSchema; + + switch (parsedSchema.schemaType()) { + case AvroSchema.TYPE: + connectSchema = toConnectSchema(parsedSchema.canonicalString()); + break; + case "JSON": + case "PROTOBUF": + default: + throw new KsqlException("Unsupported schema type: " + parsedSchema.schemaType()); + } + + return SchemaResult.success(SchemaAndId.schemaAndId(connectSchema, id)); + } catch (final Exception e) { + return notCompatible(topic, parsedSchema.canonicalString(), e); + } + } + private Schema toConnectSchema(final String avroSchemaString) { final org.apache.avro.Schema avroSchema = toAvroTranslator.apply(avroSchemaString); final Schema connectSchema = toConnectTranslator.apply(avroSchema); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/services/SandboxedSchemaRegistryClient.java b/ksql-engine/src/main/java/io/confluent/ksql/services/SandboxedSchemaRegistryClient.java index 4361017a15fd..b68b205b2dc0 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/services/SandboxedSchemaRegistryClient.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/services/SandboxedSchemaRegistryClient.java @@ -41,6 +41,7 @@ static SchemaRegistryClient createProxy(final SchemaRegistryClient delegate) { return LimitedProxyBuilder.forClass(SchemaRegistryClient.class) .swallow("register", anyParams(), 123) 
.forward("getLatestSchemaMetadata", methodParams(String.class), delegate) + .forward("getSchemaBySubjectAndId", methodParams(String.class, int.class), delegate) .forward("testCompatibility", methodParams(String.class, Schema.class), delegate) .forward("testCompatibility", diff --git a/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/DefaultSchemaInjectorFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/DefaultSchemaInjectorFunctionalTest.java index 3514cd5b061e..84481a6917b2 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/DefaultSchemaInjectorFunctionalTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/DefaultSchemaInjectorFunctionalTest.java @@ -17,10 +17,12 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.when; import com.google.common.collect.ImmutableMap; import io.confluent.connect.avro.AvroData; +import io.confluent.kafka.schemaregistry.ParsedSchema; import io.confluent.kafka.schemaregistry.client.SchemaMetadata; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.ksql.metastore.MetaStore; @@ -67,6 +69,8 @@ public class DefaultSchemaInjectorFunctionalTest { @Mock private SchemaRegistryClient srClient; @Mock + private ParsedSchema schema; + @Mock private MetaStore metaStore; private DefaultSchemaInjector schemaInjector; @@ -495,6 +499,9 @@ private void shouldInferSchema( try { when(srClient.getLatestSchemaMetadata(any())) .thenReturn(new SchemaMetadata(1, 1, avroSchema.toString())); + when(srClient.getSchemaBySubjectAndId(any(), anyInt())).thenReturn(schema); + when(schema.schemaType()).thenReturn("AVRO"); + when(schema.canonicalString()).thenReturn(avroSchema.toString()); } catch (final Exception e) { throw new AssertionError(e); } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/SchemaRegistryTopicSchemaSupplierTest.java b/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/SchemaRegistryTopicSchemaSupplierTest.java index 6d132676aaee..86ea3f2c6fe1 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/SchemaRegistryTopicSchemaSupplierTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/schema/ksql/inference/SchemaRegistryTopicSchemaSupplierTest.java @@ -24,6 +24,8 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import io.confluent.kafka.schemaregistry.ParsedSchema; +import io.confluent.kafka.schemaregistry.avro.AvroSchema; import io.confluent.kafka.schemaregistry.client.SchemaMetadata; import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException; @@ -42,7 +44,6 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -@SuppressWarnings("ConstantConditions") @RunWith(MockitoJUnitRunner.class) public class SchemaRegistryTopicSchemaSupplierTest { @@ -64,6 +65,8 @@ public class SchemaRegistryTopicSchemaSupplierTest { @Mock private org.apache.avro.Schema avroSchema; @Mock + private ParsedSchema parsedSchema; + @Mock private Schema connectSchema; @Mock private Schema ksqlSchema; @@ -78,8 +81,12 @@ public void setUp() throws Exception { when(srClient.getLatestSchemaMetadata(any())) .thenReturn(new SchemaMetadata(SCHEMA_ID, -1, AVRO_SCHEMA)); - when(srClient.getSchemaMetadata(any(), 
anyInt())) - .thenReturn(new SchemaMetadata(SCHEMA_ID, -1, AVRO_SCHEMA)); + when(srClient.getSchemaBySubjectAndId(any(), anyInt())) + .thenReturn(parsedSchema); + + when(parsedSchema.schemaType()).thenReturn(AvroSchema.TYPE); + + when(parsedSchema.canonicalString()).thenReturn(AVRO_SCHEMA); when(toAvroTranslator.apply(any())) .thenReturn(avroSchema); @@ -111,7 +118,7 @@ public void shouldReturnErrorFromGetValueSchemaIfNotFound() throws Exception { @Test public void shouldReturnErrorFromGetValueWithIdSchemaIfNotFound() throws Exception { // Given: - when(srClient.getSchemaMetadata(any(), anyInt())) + when(srClient.getSchemaBySubjectAndId(any(), anyInt())) .thenThrow(notFoundException()); // When: @@ -128,7 +135,7 @@ public void shouldReturnErrorFromGetValueWithIdSchemaIfNotFound() throws Excepti @Test public void shouldReturnErrorFromGetValueIfUnauthorized() throws Exception { // Given: - when(srClient.getSchemaMetadata(any(), anyInt())) + when(srClient.getSchemaBySubjectAndId(any(), anyInt())) .thenThrow(unauthorizedException()); // When: @@ -145,7 +152,7 @@ public void shouldReturnErrorFromGetValueIfUnauthorized() throws Exception { @Test public void shouldReturnErrorFromGetValueIfForbidden() throws Exception { // Given: - when(srClient.getSchemaMetadata(any(), anyInt())) + when(srClient.getSchemaBySubjectAndId(any(), anyInt())) .thenThrow(forbiddenException()); // When: @@ -177,7 +184,7 @@ public void shouldThrowFromGetValueSchemaOnOtherRestExceptions() throws Exceptio @Test public void shouldThrowFromGetValueWithIdSchemaOnOtherRestExceptions() throws Exception { // Given: - when(srClient.getSchemaMetadata(any(), anyInt())) + when(srClient.getSchemaBySubjectAndId(any(), anyInt())) .thenThrow(new RestClientException("failure", 1, 1)); // Then: @@ -207,7 +214,7 @@ public void shouldThrowFromGetValueSchemaOnOtherException() throws Exception { @Test public void shouldThrowFromGetValueWithIdSchemaOnOtherException() throws Exception { // Given: - when(srClient.getSchemaMetadata(any(), anyInt())) + when(srClient.getSchemaBySubjectAndId(any(), anyInt())) .thenThrow(new IOException("boom")); // Then: @@ -288,7 +295,7 @@ public void shouldRequestCorrectSchemaOnGetValueSchemaWithId() throws Exception supplier.getValueSchema(TOPIC_NAME, Optional.of(42)); // Then: - verify(srClient).getSchemaMetadata(TOPIC_NAME + "-value", 42); + verify(srClient).getSchemaBySubjectAndId(TOPIC_NAME + "-value", 42); } @Test diff --git a/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java b/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java index cde877ef2088..e6995ea8aa25 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/services/SandboxedSchemaRegistryClientTest.java @@ -57,6 +57,7 @@ public static Collection> getMethodsToTest() { .ignore("register", String.class, Schema.class, int.class, int.class) .ignore("register", String.class, ParsedSchema.class, int.class, int.class) .ignore("getLatestSchemaMetadata", String.class) + .ignore("getSchemaBySubjectAndId", String.class, int.class) .ignore("testCompatibility", String.class, Schema.class) .ignore("testCompatibility", String.class, ParsedSchema.class) .ignore("deleteSubject", String.class) From cfeafe467c63cd0f35a9f953add6658395aa227f Mon Sep 17 00:00:00 2001 From: Derek Nelson Date: Mon, 13 Jan 2020 14:36:01 -0800 Subject: [PATCH 099/123] docs: add embedded Connect tutorial (#4212) --- 
docs-md/tutorials/embedded-connect.md | 433 ++++++++++++++++++++++++++
 docs-md/tutorials/index.md | 6 +
 mkdocs.yml | 1 +
 3 files changed, 440 insertions(+)
 create mode 100644 docs-md/tutorials/embedded-connect.md

diff --git a/docs-md/tutorials/embedded-connect.md b/docs-md/tutorials/embedded-connect.md
new file mode 100644
index 000000000000..f12b3500d0bf
--- /dev/null
+++ b/docs-md/tutorials/embedded-connect.md
@@ -0,0 +1,433 @@
+---
+layout: page
+title: ksqlDB with Embedded Connect
+tagline: Run Kafka Connect embedded within ksqlDB
+description: Learn how to use ksqlDB with embedded Connect to integrate with external data sources and sinks
+keywords: ksqlDB, connect, PostgreSQL, jdbc
+---
+
+Overview
+==============
+
+This tutorial will demonstrate how to integrate ksqlDB with an external data
+source to power a simple ride sharing app. Our external source will be a
+PostgreSQL database containing relatively static data describing each driver’s
+vehicle. By combining this human-friendly static data with a continuous stream
+of computer-friendly driver and rider location events, we derive an enriched
+output stream that the ride sharing app may use to facilitate a rendezvous in
+real time.
+
+When to use embedded Connect
+------------------------------
+
+ksqlDB natively integrates with {{ site.kconnect }} by either communicating
+with an external {{ site.kconnect }} cluster or by running {{ site.kconnect }}
+embedded within the KSQL server process. Each of these modes is best suited
+for the following environments:
+
+* **Embedded** - Suitable for development, testing, and simpler production
+workloads at lower throughputs when there is no need to scale ksqlDB
+independently of {{ site.kconnect }}.
+* **External** - Suitable for all production workloads.
+
+!!! note
+    The {{ site.kconnect }} integration mode is a deployment configuration
+    option. The {{ site.kconnect }} integration interface is identical for both
+    modes, so your `CREATE SOURCE` and `CREATE SINK` statements are independent
+    of the integration mode.
+
+1. Get ksqlDB
+--------------
+
+Since ksqlDB runs natively on {{ site.aktm }}, you need a running {{ site.ak }}
+installation that ksqlDB is configured to use. The following docker-compose
+file runs everything for you via Docker, including ksqlDB running
+[Kafka Connect](https://docs.confluent.io/current/connect/index.html) in
+embedded mode. Embedded Connect enables you to leverage the power of
+{{ site.kconnect }} without having to manage a separate {{ site.kconnect }}
+cluster, because ksqlDB manages one for you. Also, this tutorial uses PostgreSQL
+as an external datastore to integrate with ksqlDB.
+
+In an empty local working directory, copy and paste the following
+`docker-compose` content into a file named `docker-compose.yml`. You will
+create and add a number of other files to this directory during this tutorial.
+ +```yaml +--- +version: '2' + +services: + zookeeper: + image: confluentinc/cp-zookeeper:5.3.2 + hostname: zookeeper + container_name: zookeeper + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + + broker: + image: confluentinc/cp-enterprise-kafka:5.3.2 + hostname: broker + container_name: broker + depends_on: + - zookeeper + ports: + - "29092:29092" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:9092,PLAINTEXT_HOST://localhost:29092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + + ksqldb-server: + image: confluentinc/ksqldb-server:0.6.0 + hostname: ksqldb-server + container_name: ksqldb-server + depends_on: + - broker + ports: + - "8088:8088" + environment: + KSQL_LISTENERS: http://0.0.0.0:8088 + KSQL_BOOTSTRAP_SERVERS: broker:9092 + KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true" + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true" + KSQL_KSQL_CONNECT_WORKER_CONFIG: "/connect/connect.properties" + volumes: + - ./confluentinc-kafka-connect-jdbc-5.3.2:/usr/share/kafka/plugins/jdbc + - ./connect.properties:/connect/connect.properties + + ksqldb-cli: + image: confluentinc/ksqldb-cli:0.6.0 + container_name: ksqldb-cli + depends_on: + - broker + - ksqldb-server + entrypoint: /bin/sh + tty: true + + postgres: + image: postgres:12 + hostname: postgres + container_name: postgres + ports: + - "5432:5432" +``` + +2. Get the JDBC connector +------------------------- + +[Download the JDBC connector](https://www.confluent.io/hub/confluentinc/kafka-connect-jdbc) +to your local working directory. Next, unzip the downloaded archive: + +```bash +unzip confluentinc-kafka-connect-jdbc-5.3.2.zip +``` + +3. Configure Connect +-------------------- + +In order to tell ksqlDB to run Connect in embedded mode, we must point ksqlDB +to a separate Connect configuration file. In our docker-compose file, this is +done via the `KSQL_KSQL_CONNECT_WORKER_CONFIG` environment variable. From +within your local working directory, run this command to generate the Connect +configuration file: + +```bash +cat << EOF > ./connect.properties +bootstrap.servers=broker:9092 +plugin.path=/usr/share/kafka/plugins +group.id=ksql-connect-cluster +key.converter=org.apache.kafka.connect.json.JsonConverter +value.converter=org.apache.kafka.connect.json.JsonConverter +value.converter.schemas.enable=false +config.storage.topic=ksql-connect-configs +offset.storage.topic=ksql-connect-offsets +status.storage.topic=ksql-connect-statuses +config.storage.replication.factor=1 +offset.storage.replication.factor=1 +status.storage.replication.factor=1 +EOF +``` + +4. Start ksqlDB and PostgreSQL +------------------------------ + +In the directory containing the `docker-compose.yml` file you created in the +first step, run the following command to start all services in the correct +order. + +```bash +docker-compose up +``` + +5. Connect to PostgreSQL +------------------------ + +Run the following command to establish an interactive session with PostgreSQL. + +```bash +docker exec -it postgres psql -U postgres +``` + +6. Populate PostgreSQL with vehicle/driver data +----------------------------------------------- + +In the PostgreSQL session, run the following SQL statements to set up the +driver data. You will join this PostgreSQL data with event streams in ksqlDB. 
+
+```sql
+CREATE TABLE driver_profiles (
+  driver_id integer PRIMARY KEY,
+  make text,
+  model text,
+  year integer,
+  license_plate text,
+  rating float
+);
+
+INSERT INTO driver_profiles (driver_id, make, model, year, license_plate, rating) VALUES
+  (0, 'Toyota', 'Prius', 2019, 'KAFKA-1', 5.00),
+  (1, 'Kia', 'Sorento', 2017, 'STREAMS', 4.89),
+  (2, 'Tesla', 'Model S', 2019, 'CNFLNT', 4.92),
+  (3, 'Toyota', 'Camry', 2018, 'ILVKSQL', 4.85);
+```
+
+7. Start ksqlDB's interactive CLI
+---------------------------------
+
+ksqlDB runs as a server which clients connect to in order to issue queries.
+
+Run the following command to connect to the ksqlDB server and start an
+interactive command-line interface (CLI) session.
+
+```bash
+docker exec -it ksqldb-cli ksql http://ksqldb-server:8088
+```
+
+8. Create source connector
+--------------------------
+
+Make your PostgreSQL data accessible to ksqlDB by creating a *source*
+connector. In the ksqlDB CLI, run the following command.
+
+```sql
+CREATE SOURCE CONNECTOR jdbc_source WITH (
+  'connector.class' = 'io.confluent.connect.jdbc.JdbcSourceConnector',
+  'connection.url' = 'jdbc:postgresql://postgres:5432/postgres',
+  'connection.user' = 'postgres',
+  'topic.prefix' = 'jdbc_',
+  'table.whitelist' = 'driver_profiles',
+  'mode' = 'incrementing',
+  'numeric.mapping' = 'best_fit',
+  'incrementing.column.name' = 'driver_id',
+  'key' = 'driver_id');
+```
+
+When the source connector is created, it imports any PostgreSQL tables matching
+the specified `table.whitelist`. Tables are imported via {{ site.ak }} topics,
+with one topic per imported table. Once these topics are created, you can
+interact with them just like any other {{ site.ak }} topic used by ksqlDB.
+
+9. View imported topic
+----------------------
+
+In the ksqlDB CLI session, run the following command to verify that the
+`driver_profiles` table has been imported as a Kafka topic. Because you
+specified `jdbc_` as the topic prefix, you should see a `jdbc_driver_profiles`
+topic in the output.
+
+```bash
+SHOW TOPICS;
+```
+
+10. Create drivers table in ksqlDB
+----------------------------------
+
+The driver data is now integrated as a {{ site.ak }} topic, but you need to
+create a ksqlDB table over this topic to begin referencing it from ksqlDB
+queries. Streams and tables in ksqlDB essentially associate a schema with a
+{{ site.ak }} topic, breaking each message in the topic into strongly typed
+columns.
+
+```sql
+CREATE TABLE driverProfiles (
+  driver_id INTEGER,
+  make STRING,
+  model STRING,
+  year INTEGER,
+  license_plate STRING,
+  rating DOUBLE
+)
+WITH (kafka_topic='jdbc_driver_profiles', value_format='json', key='driver_id');
+```
+
+Tables in ksqlDB support update semantics, where each message in the
+underlying topic represents a row in the table. For messages in the topic with
+the same key, the latest message associated with a given key represents the
+latest value for the corresponding row in the table.
+
+!!! note
+    When the data is ingested from the database, it's being written
+    to the {{ site.ak }} topic using JSON serialization. Since JSON itself doesn't
+    declare a schema, you need to declare it again when you run `CREATE TABLE`.
+    In practice, you would normally use Avro, since this supports the retention
+    of schemas, ensuring compatibility between producers and consumers. This means
+    that you don't have to enter it each time you want to use the data in ksqlDB.
+
+11. 
Create streams for driver locations and rider locations +----------------------------------------------------- + +In this step, you create streams over new topics to encapsulate location pings that are sent +every few seconds by drivers’ and riders’ phones. In contrast to tables, +ksqlDB streams are append-only collections of events, so they're suitable for a +continuous stream of location updates. + +```sql +CREATE STREAM driverLocations ( + driver_id INTEGER, + latitude DOUBLE, + longitude DOUBLE, + speed DOUBLE +) +WITH (kafka_topic='driver_locations', value_format='json', partitions=1, key='driver_id'); + +CREATE STREAM riderLocations ( + driver_id INTEGER, + latitude DOUBLE, + longitude DOUBLE +) +WITH (kafka_topic='rider_locations', value_format='json', partitions=1, key='driver_id'); +``` + +12. Enrich driverLocations stream by joining with PostgreSQL data +----------------------------------------------------------------- + +The `driverLocations` stream has a relatively compact schema, and it doesn’t +contain much data that a human would find particularly useful. You can *enrich* +the stream of driver location events by joining them with the human-friendly +vehicle information stored in the PostgreSQL database. This enriched data can +be presented by the rider’s mobile application, ultimately helping the rider to +safely identify the driver’s vehicle. + +You can achieve this result easily by joining the `driverLocations` stream with +the `driver_profiles` table stored in PostgreSQL. + +```sql +CREATE STREAM enrichedDriverLocations AS + SELECT + dl.driver_id AS driver_id, + dl.latitude AS latitude, + dl.longitude AS longitude, + dl.speed AS speed, + jdbc.make AS make, + jdbc.model AS model, + jdbc.year AS year, + jdbc.license_plate AS license_plate, + jdbc.rating AS rating + FROM driverLocations dl JOIN driverProfiles jdbc + ON dl.driver_id = jdbc.driver_id + EMIT CHANGES; +``` + +13. Create the rendezvous stream +---------------------------- + +To put all of this together, create a final stream that the ridesharing app can +use to facilitate a driver-rider rendezvous in real time. This stream is +defined by a query that joins together rider and driver location updates, +resulting in a contextualized output that the app can use to show the rider +their driver’s position as the rider waits to be picked up. + +The rendezvous stream includes human-friendly information describing the +driver’s vehicle for the rider. Also, the rendezvous stream computes +(albeit naively) the driver’s estimated time of arrival (ETA) at the rider’s +location. + +```sql +CREATE STREAM rendezvous AS + SELECT + e.license_plate AS license_plate, + e.make AS make, + e.model AS model, + e.year AS year, + e.latitude AS vehicle_lat, + e.longitude AS vehicle_long, + GEO_DISTANCE(e.latitude, e.longitude, r.latitude, r.longitude) / e.speed AS eta + FROM enrichedDriverLocations e JOIN riderLocations r WITHIN 1 MINUTE + ON e.driver_id = r.driver_id + EMIT CHANGES; +``` + +14. Start two ksqlDB CLI sessions +--------------------------------- + +Run the following command twice to open two separate ksqlDB CLI sessions. If +you still have a CLI session open from a previous step, you can reuse that +session. + +```bash +docker exec -it ksqldb-cli ksql http://ksqldb-server:8088 +``` + +15. Run a continuous query +-------------------------- + +In this step, you run a continuous query over the rendezvous stream. + +This may feel a bit unfamiliar, because the query never returns until you +terminate it. 
The query perpetually pushes output rows to the client as events +are written to the rendezvous stream. Leave the query running in your CLI +session for now. It will begin producing output as soon as events are written +into ksqlDB. + +```sql +SELECT * FROM rendezvous EMIT CHANGES; +``` + +16. Write data to input streams +------------------------------- + +Your continuous query reads from the `rendezvous` stream, which takes its input +from the `enrichedDriverLocations` and `riderLocations` streams. And +`enrichedDriverLocations` takes its input from the `driverLocations` stream, +so you need to write data into `driverLocations` and `riderLocations` before +`rendezvous` produces the joined output that the continuous query reads. + +```sql +INSERT INTO driverLocations (driver_id, latitude, longitude, speed) VALUES (0, 37.3965, -122.0818, 23.2); +INSERT INTO riderLocations (driver_id, latitude, longitude) VALUES (0, 37.3952, -122.0813); + +INSERT INTO driverLocations (driver_id, latitude, longitude, speed) VALUES (1, 37.7850, -122.40270, 12.0); +INSERT INTO riderLocations (driver_id, latitude, longitude) VALUES (1, 37.7887, -122.4074); + +INSERT INTO driverLocations (driver_id, latitude, longitude, speed) VALUES (2, 37.7925, -122.4148, 11.2); +INSERT INTO riderLocations (driver_id, latitude, longitude) VALUES (2, 37.7876, -122.4235); + +INSERT INTO driverLocations (driver_id, latitude, longitude, speed) VALUES (3, 37.4471, -122.1625, 14.7); +INSERT INTO riderLocations (driver_id, latitude, longitude) VALUES (3, 37.4442, -122.1658); +``` + +As soon as you start writing rows to the input streams, your continuous query +from the previous step starts producing joined output. The rider's location +pings are joined with their inbound driver's location pings in real time, +providing the rider with driver ETA, rating, and additional information +describing the driver's vehicle. + +Next steps +------------- + +This tutorial shows how to run ksqlDB in embedded {{ site.kconnect }} mode +using Docker. It uses the JDBC connector to integrate ksqlDB with PostgreSQL +data, but this is just one of many connectors that are available to help you +integrate ksqlDB with external systems. Check out +[Confluent Hub](https://www.confluent.io/hub/) to learn more about all of the +various connectors that enable integration with a wide variety of external +systems. + +You may also want to take a look at our +[examples](https://ksqldb.io/examples.html) to better understand how you can +use ksqlDB for your specific workload. diff --git a/docs-md/tutorials/index.md b/docs-md/tutorials/index.md index 732a3d6ee037..23bffca7e5f8 100644 --- a/docs-md/tutorials/index.md +++ b/docs-md/tutorials/index.md @@ -11,6 +11,7 @@ keywords: ksqldb, query, application, quickstart, tutorial, walkthrough, how to - [Write Streaming Queries Against {{ site.aktm }} Using ksqlDB (Local)](basics-local.md) - [Write Streaming Queries Against {{ site.aktm }} Using ksqlDB and {{ site.c3 }}](basics-control-center.md) - [Clickstream Data Analysis Pipeline Using ksqlDB (Docker)](clickstream-docker.md) +- [ksqlDB with Embedded Connect](embedded-connect.md) - [Integrating with PostgreSQL](connect-integration.md) - [ksqlDB Examples](examples.md) @@ -101,6 +102,11 @@ installs. Running the Clickstream demo locally without Docker requires that you have {{ site.cp }} installed locally, along with Elasticsearch and Grafana. +ksqlDB with Embedded Connect +------------------------------- + +ksqlDB has native integration with {{ site.kconnect }}. 
While ksqlDB can integrate with a separate [Kafka Connect](https://docs.confluent.io/current/connect/index.html) cluster, it can also run {{ site.kconnect }} embedded within the ksqlDB server, making it unnecessary to run a separate {{ site.kconnect }} cluster. The [embedded Connect tutorial](embedded-connect.md) shows how you can configure ksqlDB to run {{ site.kconnect }} in embedded mode.
+
 ksqlDB Examples
 ---------------

diff --git a/mkdocs.yml b/mkdocs.yml
index bbe8d70757cf..26c3f2cd343b 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -136,6 +136,7 @@ nav:
     - Write Streaming Queries Against Apache Kafka® Using ksqlDB and Confluent Control Center (Docker): tutorials/basics-docker.md # Could be the canonical quickstart
     # - Write Streaming Queries Against Apache Kafka® Using ksqlDB and Confluent Control Center (Local): tutorials/basics-local.md # leave in docs.confluent.io
     - Clickstream Data Analysis Pipeline Using ksqlDB (Docker): tutorials/clickstream-docker.md
+    - ksqlDB with Embedded Connect: tutorials/embedded-connect.md
     - Integrate with PostgreSQL: tutorials/connect-integration.md
     - Troubleshooting: troubleshoot-ksqldb.md
     - Frequently Asked Questions: faq.md

From af8498eb46bae6ae82e39a3ff88f8557677a35b1 Mon Sep 17 00:00:00 2001
From: Vicky Papavasileiou
Date: Mon, 13 Jan 2020 14:50:02 -0800
Subject: [PATCH 100/123] docs: KLIP 12 - Implement High-Availability for Pull queries (#4022)

* Adding doc for klip-12

Co-authored-by: vinoth chandar
---
 .../klip-12-pull-high-availability.md | 357 ++++++++++++++++++
 1 file changed, 357 insertions(+)
 create mode 100644 design-proposals/klip-12-pull-high-availability.md

diff --git a/design-proposals/klip-12-pull-high-availability.md b/design-proposals/klip-12-pull-high-availability.md
new file mode 100644
index 000000000000..7cd590f88285
--- /dev/null
+++ b/design-proposals/klip-12-pull-high-availability.md
@@ -0,0 +1,357 @@
+# KLIP 12 - Implement High-Availability for Pull queries
+
+**Author**: Vicky Papavasileiou, Vinoth Chandar |
+**Release Target**: 5.5 |
+**Status**: _In Discussion_
+**Discussion**: _link to the design discussion PR_
+
+**tl;dr:** Enables high availability for ksqlDB pull queries in case of server failures. The current
+design for handling failure of an active KSQL server incurs an unavailability period of several
+seconds (10+) to a few minutes due to the Streams rebalancing procedure. This work is based on new
+Kafka Streams APIs, introduced as a part of [KIP-535](https://cwiki.apache.org/confluence/display/KAFKA/KIP-535%3A+Allow+state+stores+to+serve+stale+reads+during+rebalance),
+which allow serving stale data from standby replicas to achieve high availability, while providing
+eventual consistency.
+
+## Motivation and background
+Stateful persistent queries persist their state in state stores. These state stores are partitioned
+and replicated across a number of standbys for faster recovery in case of
+[failures](https://docs.confluent.io/current/streams/architecture.html#fault-tolerance).
+A KSQL server may host partitions from multiple state stores, serving as the active host for some partitions
+and the standby for others. Without loss of generality, we will focus on one partition and one state
+store for the remainder of this document. The active KSQL server is the server that hosts the
+active partition whereas the standby servers are the ones that host the replicas.
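+
+For illustration, the new Kafka Streams surface this work builds on looks roughly as follows
+(method names as introduced by KIP-535, targeting Apache Kafka 2.5; this sketch assumes a running
+`kafkaStreams` instance, a `storeName`, and a `keySerializer`, with error handling omitted):
+
+```java
+// Which server is the active and which are the standbys for a given key?
+KeyQueryMetadata metadata = kafkaStreams.queryMetadataForKey(storeName, key, keySerializer);
+HostInfo active = metadata.getActiveHost();
+Set<HostInfo> standbys = metadata.getStandbyHosts();
+int partition = metadata.getPartition();
+
+// How far behind is each locally hosted store partition?
+// store name -> partition -> lag info
+Map<String, Map<Integer, LagInfo>> localLags = kafkaStreams.allLocalStorePartitionLags();
+LagInfo lagInfo = localLags.get(storeName).get(partition);
+long currentPosition = lagInfo.currentOffsetPosition();
+long endOffset = lagInfo.endOffsetPosition();
+long lag = lagInfo.offsetLag();
+```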
+
+Assume we have a load balancer (LB) and a cluster of three KSQL servers (`A`,
+`B`, `C`) where `A`
+is the active server and `B`, `C` are the standby replicas. `A` receives updates from the source topic
+partition and writes these updates to its state store and changelog topic. The changelog topic is
+replicated into the state stores of servers `B` and `C`.
+
+Now assume LB receives a pull query (1) and sends the query to `B` (2). `B` determines that `A`
+is the active server and forwards the request to `A` (3). `A` executes the query successfully and
+returns the response.
+
+Failure scenario: Assume that `A` goes down. `B` receives the request, tries to forward it to `A`
+and fails. What to do now? The current implementation tries to forward the request to `A`
+(in a busy loop) for a configurable timeout `KSQL_QUERY_PULL_ROUTING_TIMEOUT_MS`. If it has not
+succeeded within this timeout, the request fails. When `B` receives the next request, it will again try to
+forward it to `A`, since `A` is still the active, and it will fail again. This will happen until
+rebalancing completes and a new active is elected for the partition.
+
+There are two steps in the rebalancing procedure that are expensive:
+ 1) One of `B` or `C` is picked as the new active; this takes ~10 seconds based on default configs.
+ 2) Once a new active is chosen, depending on how far behind (lag) it is with respect to the changelog,
+ it may take a few seconds or even minutes before the standby has fully caught up to the last committed offset.
+
+In total, it takes >>10 seconds before a query can start succeeding again. What can we do until a
+new active is ready to serve requests?
+[KIP-535](https://cwiki.apache.org/confluence/display/KAFKA/KIP-535%3A+Allow+state+stores+to+serve+stale+reads+during+rebalance)
+has laid the groundwork to allow us to serve requests from standbys, even if they are still in
+rebalancing state or have not caught up to the last committed offset of the active.
+
+Every KSQL server (active or standby) now has the information of:
+
+1. Local current offset position per partition: The current offset position of a partition of the changelog topic that has been successfully written into the state store
+2. Local end offset position per partition: The last offset written to a partition of the changelog topic
+3. Routing Metadata for a key: The partition, active host and standby hosts per key
+
+This information allows each server (with some communication, discussed below) to compute the global
+lag information of every topic partition, whether it is the standby or active. This enables us to
+implement a policy where `B`, having established that `A` is down (after trying to send a request
+to `A` and seeing it fail), decides whether it (`B`) or `C` will serve the request. This effectively
+means that there will be little to no downtime in serving requests, at the cost of consistency, as
+`B` or `C` may serve stale (not caught up) data. Hence, we achieve high availability at the cost of
+consistency. Eventual consistency for the win!
+
+
+## What is in scope
+Ensure availability for pull queries when KSQL servers fail (with `ksql.streams.num.standby.replicas=N`,
+we can tolerate up to N such failures).
+
+## What is not in scope
+
+- Address unavailability caused by cold starts of a KSQL server, i.e., when a new server is added to a
+KSQL cluster, it must first rebuild its state off the changelog topics and that process still could
+take a long time.
+- Try to improve consistency guarantees provided by KSQL, i.e., reduce the amount of time it takes to
+rebalance or for standby replication to catch up.
+
+## Value/Return
+
+The cluster of KSQL servers will be able to serve requests even when the active is down. This mitigates
+a large current gap in deploying KSQL pull queries for mission-critical use-cases, by significantly
+reducing the failure rate of pull queries during server failures.
+
+## Public APIs
+We will support two ways to set the maximum acceptable lag (`acceptable.offset.lag`): via each pull query and via a configuration parameter to the `WITH` clause of CTAS statements. These configs can also be set in the JSON request payload.
+
+## Design
+
+The problem at hand is how to determine if a server is down and where to route a request when it is
+down. Every KSQL server must have available the information of what other KSQL servers exist in the
+cluster, their status and their lag per topic partition. There are three components to this:
+healthchecking, lag reporting and lag-aware routing.
+
+### Failure detection/Healthcheck
+
+Failure detection/healthcheck is implemented via a periodic heartbeat. KSQL servers either broadcast
+their heartbeat (N^2 interconnections with N KSQL servers) or we implement a gossip protocol. In
+the initial implementation, we will use the REST API to send heartbeats, leveraging the N^2 mesh that
+already exists between KSQL servers. Hence, we will implement a new REST endpoint, `/heartbeat`, that
+will register the heartbeats a server receives from other servers.
+
+Cluster membership is determined using the information provided from the
+[Kafka Streams](https://kafka.apache.org/20/javadoc/org/apache/kafka/streams/KafkaStreams.html)
+instance and specifically [`StreamsMetadata`](https://kafka.apache.org/20/javadoc/org/apache/kafka/streams/state/StreamsMetadata.html)
+from which we can obtain the information of the host and port of the instance. A server periodically
+polls for all currently running KS instances (local and remote) and updates the list of remote servers
+it has seen. If no KS instances are currently running, cluster membership is not performed. Moreover,
+this policy depends on the current design where a new KSQL server replays every command in the
+command topic and hence runs every query.
+
+We want to guarantee 99% uptime. This means that in a 5-minute window, it must take no longer than
+2 seconds to determine the active server is down and to forward the request to one of the standbys.
+The heartbeats must be lightweight so that we can send multiple heartbeats per second, which will
+provide us with more datapoints required to implement a well-informed policy for determining when a
+server is up and when it is down.
+
+Configuration parameters:
+1) Heartbeat SEND interval (e.g., send a heartbeat every 100 ms)
+2) Heartbeat WINDOW size (e.g., the valid heartbeats to consider when counting missed/received heartbeats
+to determine if a server is down/up)
+3) Heartbeat CHECK interval (e.g., process received heartbeats and determine server status every 200 ms)
+4) MISSED_THRESHOLD: How many missed heartbeats in a row mark a node as down (e.g., 3 missed heartbeats = server is down)
+5) RECEIVED_THRESHOLD: How many received heartbeats in a row mark a node as up (e.g., 2 received heartbeats = server is up)
+
+Description of how these configuration parameters are used:
+
+Every server sends a heartbeat every SEND interval. Every server processes its received heartbeats via the
+`decide-status` process every CHECK interval. The `decide-status` process considers only heartbeats that are
+received from `windowStart = now - WINDOW` to `windowEnd = now`. Heartbeats that were received before `windowStart`
+are expunged. The `decide-status` process counts the number of missed and received heartbeats in one window by
+checking whether there was a heartbeat received every SEND interval starting from `windowStart`. For example,
+with `windowStart=0` and `SEND=100` it will check if there was a heartbeat received at timestamp 0, 100, 200, 300
+etc. until `windowEnd`. If there is a timestamp for which no heartbeat was received, the process increases the
+missed count. If there are more than MISSED_THRESHOLD heartbeats missed in a row, then a server is marked as down.
+
+We will provide sane defaults out-of-box, to achieve a 99% uptime.
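+
+For illustration only, a minimal sketch of the `decide-status` check described above. This is not
+part of the proposed public API; it assumes received heartbeat timestamps are kept sorted per server
+and takes the MISSED_THRESHOLD parameter from the configuration list:
+
+```java
+// Returns true if the server is considered up within [windowStart, windowEnd].
+boolean isServerUp(final NavigableSet<Long> heartbeats, final long windowStart,
+    final long windowEnd, final long sendIntervalMs, final int missedThreshold) {
+  int missedInARow = 0;
+  for (long ts = windowStart; ts <= windowEnd; ts += sendIntervalMs) {
+    // A heartbeat "covers" ts if one was received in [ts, ts + SEND).
+    final Long received = heartbeats.ceiling(ts);
+    if (received == null || received >= ts + sendIntervalMs) {
+      if (++missedInARow >= missedThreshold) {
+        return false;
+      }
+    } else {
+      missedInARow = 0;
+    }
+  }
+  return true;
+}
+```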
+
+Pseudocode for REST endpoint for `/heartbeat`:
+```java
+Map<KsqlServer, List<Long>> receivedHeartbeats;
+
+@POST
+public Response receiveHeartbeat(Request request) {
+  return Response.ok(processRequest(request)).build();
+}
+
+private HeartbeatResponse processRequest(Request request) {
+  KsqlServer server = request.getServer();
+  receivedHeartbeats.get(server).add(request.getTimestamp());
+}
+```
+
+Additionally, we will implement a REST endpoint `/clusterStatus` that provides the current status of the cluster,
+i.e., which servers are up and which are down (from the viewpoint of the server that receives the
+request).
+
+Pseudocode for REST endpoint for `/clusterStatus`:
+```java
+Map<KsqlServer, Boolean> hostStatus;
+
+@GET
+public Response checkClusterStatus(Request request) {
+  return Response.ok(processRequest(request)).build();
+}
+
+private ClusterStatusResponse processRequest(Request request) {
+  return hostStatus;
+}
+```
+### Lag reporting
+
+Every server needs to periodically broadcast its local current offset and end offset positions. We will implement a new
+REST endpoint `/reportlag`. The local offsets information at a server is obtained via
+`Map<String, Map<Integer, LagInfo>> localPositionsMap = kafkaStreams.allLocalStorePartitionLags();`.
+
+Pseudocode for REST endpoint for `/reportlag`:
+```java
+Map<KsqlServer, Map<String, Map<Integer, LagInfo>>> globalPositionsMap;
+
+@POST
+public Response receiveLagInfo(Request request) {
+  return Response.ok(processRequest(request)).build();
+}
+
+private LagReportResponse processRequest(Request request) {
+  KsqlServer server = request.getServer();
+  Map<String, Map<Integer, LagInfo>> localPositionsMap = request.getPositions();
+  globalPositionsMap.put(server, localPositionsMap);
+}
+```
+
+### Lag-aware routing
+
+Given the above information (alive + offsets), how does a KSQL server decide to which standby to route
+the request? Every server knows the offsets (current and end) of all partitions hosted by all servers in the cluster.
+Given this information, a server can compute the lag of itself and others by determining the maximum end offset
+per partition as reported by all servers and subtracting from it each server's current offset.
+This allows us to implement lag-aware routing where server `B` can a) determine that server `A` is
+down and b) decide whether it will serve the request itself or forward it to `C`, depending on who
+has the smallest lag for the given key in the pull query.
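+
+For example, suppose the maximum end offset reported for the key's store partition is 1000, `B`'s
+current offset position is 995, and `C`'s is 850. Then `B`'s lag is 5 and `C`'s is 150, so `B` is
+the preferred standby to serve the query.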
+
+Pseudocode for routing:
+```java
+// Map populated periodically through heartbeat information
+Map<KsqlNode, Boolean> aliveNodes;
+
+// Map populated periodically through lag reporting
+// KsqlServer to store name to partition to lag information
+Map<KsqlServer, Map<String, Map<Integer, LagInfo>>> globalPositionsMap;
+
+// Fetch the metadata related to the key
+KeyQueryMetadata queryMetadata = queryMetadataForKey(store, key, serializer);
+
+// Acceptable lag for the query
+final long acceptableOffsetLag = 10000;
+
+// Max end offset position per store per partition
+Map<String, Map<Integer, Long>> maxEndOffsetPerStorePerPartition;
+for (KsqlServer server : globalPositionsMap.keySet()) {
+  for (String store : globalPositionsMap.get(server).keySet()) {
+    for (Integer partition : globalPositionsMap.get(server).get(store).keySet()) {
+      long offset = globalPositionsMap.get(server).get(store).get(partition).endOffsetPosition();
+      maxEndOffsetPerStorePerPartition.computeIfAbsent(store, s -> new HashMap<>());
+      maxEndOffsetPerStorePerPartition.get(store).putIfAbsent(partition, -1L);
+      long currentMax = maxEndOffsetPerStorePerPartition.get(store).get(partition);
+      maxEndOffsetPerStorePerPartition.get(store).put(partition, Math.max(offset, currentMax));
+    }
+  }
+}
+
+// Ordered list of servers, in order of most caught-up to least
+List<KsqlNode> nodesToQuery;
+KsqlNode active = queryMetadata.getActiveHost();
+if (aliveNodes.getOrDefault(active, false)) {
+  nodesToQuery.add(active);
+}
+
+// add standbys
+nodesToQuery.addAll(queryMetadata.getStandbyHosts().stream()
+    // filter out all the standbys that are down
+    .filter(standbyHost -> aliveNodes.getOrDefault(standbyHost, false))
+    // get the lag at each standby host for the key's store partition
+    .map(standbyHost -> new Pair<>(standbyHost,
+        maxEndOffsetPerStorePerPartition.get(storeName).get(queryMetadata.partition())
+            - globalPositionsMap.get(standbyHost).get(storeName)
+                .get(queryMetadata.partition()).currentOffsetPosition()))
+    // Sort by offset lag, i.e., smallest lag first
+    .sorted(Comparator.comparing(Pair::getRight))
+    .filter(standbyHostLagPair -> standbyHostLagPair.getRight() < acceptableOffsetLag)
+    .map(standbyHostLagPair -> standbyHostLagPair.getLeft())
+    .collect(Collectors.toList()));
+
+// Query available nodes, starting from active if up
+List<Row> result = null;
+for (KsqlNode server : nodesToQuery) {
+  try {
+    result = query(store, key, server);
+  } catch (Exception e) {
+    System.err.printf("Querying server %s failed%n", server);
+  }
+  if (result != null) {
+    break;
+  }
+}
+
+if (result == null) {
+  throw new Exception("Unable to serve request. All nodes are down or too far behind");
+}
+
+return result;
+```
+
+We will also introduce a per-query configuration parameter `acceptable.offset.lag`, which gives
+applications the ability to control how much stale data they are willing to tolerate on a per-query
+basis. If a standby lags behind by more than the tolerable limit, pull queries will fail.
+This parameter can be configured either as part of the `WITH` clause of CTAS queries or passed in
+the request's JSON payload. This is a very
+useful knob to handle the scenario of cold start explained above. In such a case, a newly added KSQL
+server could be lagging by a lot as it rebuilds the entire state and thus the usefulness of the data
+returned by pull queries may diminish significantly.
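+
+As an illustration only (the exact syntax will be finalized during implementation), a query that
+tolerates up to 10,000 offsets of staleness might be declared as
+`CREATE TABLE foo WITH (acceptable.offset.lag=10000) AS SELECT ...;`, or the same value could be
+passed under an `acceptable.offset.lag` key in the pull query's JSON request payload.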
+
+### Lag-aware routing
+
+Given the above information (alive servers + offsets), how does a KSQL server decide which
+standby to route a request to? Every server knows the offsets (current and end) of all
+partitions hosted by all servers in the cluster. Given this information, a server can compute
+the lag of itself and of the others by determining the maximum end offset per partition, as
+reported by all servers, and subtracting from it each server's current offset. This allows us
+to implement lag-aware routing, where server `B` can a) determine that server `A` is down and
+b) decide whether it will serve the request itself or forward it to `C`, depending on who has
+the smallest lag for the given key in the pull query.
+
+Pseudocode for routing:
+```java
+// Map populated periodically through heartbeat information
+Map<KsqlNode, Boolean> aliveNodes;
+
+// Map populated periodically through lag reporting:
+// KsqlServer to store name to partition to lag information
+Map<KsqlServer, Map<String, Map<Integer, LagInfo>>> globalPositionsMap;
+
+// Fetch the metadata related to the key
+KeyQueryMetadata queryMetadata = queryMetadataForKey(store, key, serializer);
+
+// Acceptable lag for the query
+final long acceptableOffsetLag = 10000;
+
+// Max end offset position per store and partition, across all servers
+Map<String, Map<Integer, Long>> maxEndOffsetPerStorePerPartition = new HashMap<>();
+for (KsqlServer server : globalPositionsMap.keySet()) {
+  for (String store : globalPositionsMap.get(server).keySet()) {
+    for (Integer partition : globalPositionsMap.get(server).get(store).keySet()) {
+      long endOffset =
+          globalPositionsMap.get(server).get(store).get(partition).endOffsetPosition();
+      maxEndOffsetPerStorePerPartition
+          .computeIfAbsent(store, s -> new HashMap<>())
+          .merge(partition, endOffset, Math::max);
+    }
+  }
+}
+
+// Ordered list of servers, in order of most caught-up to least
+List<KsqlNode> nodesToQuery = new ArrayList<>();
+KsqlNode active = queryMetadata.getActiveHost();
+if (aliveNodes.getOrDefault(active, false)) {
+  nodesToQuery.add(active);
+}
+
+// Add standbys
+nodesToQuery.addAll(queryMetadata.getStandbyHosts().stream()
+    // Filter out all the standbys that are down
+    .filter(standbyHost -> aliveNodes.getOrDefault(standbyHost, false))
+    // Get the lag at each standby host for the key's store partition
+    .map(standbyHost -> new Pair<>(standbyHost,
+        maxEndOffsetPerStorePerPartition.get(storeName).get(queryMetadata.getPartition())
+            - globalPositionsMap.get(standbyHost).get(storeName)
+                .get(queryMetadata.getPartition()).currentOffsetPosition()))
+    // Drop standbys whose lag exceeds the acceptable limit
+    .filter(standbyHostLagPair -> standbyHostLagPair.getRight() < acceptableOffsetLag)
+    // Sort by offset lag, i.e., smallest lag first
+    .sorted(Comparator.comparing(Pair::getRight))
+    .map(Pair::getLeft)
+    .collect(Collectors.toList()));
+
+// Query available nodes, starting from the active if it is up
+List<Row> result = null;
+for (KsqlNode server : nodesToQuery) {
+  try {
+    result = query(store, key, server);
+  } catch (Exception e) {
+    System.err.printf("Querying server %s failed%n", server);
+  }
+  if (result != null) {
+    break;
+  }
+}
+
+if (result == null) {
+  throw new Exception("Unable to serve request. All nodes are down or too far behind");
+}
+
+return result;
+```
+
+We will also introduce a per-query configuration parameter, `acceptable.offset.lag`, that gives
+applications the ability to control how much stale data they are willing to tolerate on a
+per-query basis. If a standby lags behind by more than the tolerable limit, pull queries will
+fail. This parameter can be configured either as part of the `WITH` clause of CTAS queries or
+passed as an argument in the request's JSON payload. This is a very useful knob for handling
+the cold-start scenario explained above: a newly added KSQL server could be lagging by a lot
+as it rebuilds the entire state, and thus the usefulness of the data returned by pull queries
+may diminish significantly.
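+
+For illustration, resolving the effective limit for a request might look as follows (the
+`request` accessor and the server-wide default are assumptions; only the property name comes
+from this proposal):
+```java
+// Per-request override, falling back to the server-wide default
+final Object overridden = request.getStreamsProperties().get("acceptable.offset.lag");
+final long acceptableOffsetLag = overridden == null
+    ? defaultAcceptableOffsetLag
+    : Long.parseLong(overridden.toString());
+// Standbys whose lag exceeds this value are filtered out, as in the routing pseudocode above,
+// and the pull query fails if no candidate server remains.
+```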
+
+### Tradeoffs
+1. We decouple the failure detection mechanism from lag reporting to keep heartbeats
+lightweight and achieve a smaller heartbeat interval. This way, heartbeats can be sent more
+frequently than lag information (which is much larger in size). As our goal is to achieve high
+availability, receiving less frequent lag updates is acceptable, since this affects consistency
+and not availability.
+2. We decouple the routing decision from health checking. The decision of where to route a
+query is local (i.e., it does not require remote calls), as the information about the status
+of other servers is already there. This provides flexibility to change the lag reporting
+mechanism down the line more easily.
+3. We choose to keep the initial design simple (no request-based failure detection, no gossip
+protocols) and closer to the choices made in Kafka/Kafka Streams (no ZooKeeper-based failure
+detection), for ease of deployment and troubleshooting.
+
+### Rejected/Alternate approaches
+
+#### Retrieve lag information on-demand
+In this alternative, servers don't explicitly send their lag information. Rather, communication
+happens only when needed, i.e., when a server tries to forward a request to another server.
+Once server `B` has determined that server `A` is down, it needs to determine which other
+server should evaluate the query. At this point, `B` doesn't have any knowledge of the lag
+information of the other standbys. So, in addition to evaluating the query locally, `B` also
+sends the request to `C`. `B` then has both its own result and `C`'s result of the query
+evaluation and decides which one is the freshest to include in the response. `B` can make this
+decision because the lag information is piggybacked on the query evaluation result. The
+advantage of this approach is that it results in less communication overhead: lag information
+is exchanged only when the active is down, and it is piggybacked on the request. On the other
+hand, all standbys need to evaluate the same query. Moreover, the communication between `B`
+and `C` involves large messages, as they contain the query result (which can be many rows).
+Finally, latency for a request increases, as `B` needs to wait to receive a response from `C`
+with the query result and lag information; only then can `B` send a response back to the
+client.
+
+#### More efficient lag propagation
+Instead of broadcasting lag information, we could also build a gossip protocol to disseminate
+this information, with more round trips but lower network bandwidth consumption. While we
+concede that this is an effective and proven technique, debugging such protocols is hard in
+practice. So, we decided to keep things simple, learn, and iterate. Similarly, we could also
+encode lag information in the regular pull query responses themselves, providing very
+up-to-date lag estimates. However, sending lag information for all local stores in every
+response would be prohibitively expensive, and we would need a more intelligent, selective
+propagation that only piggybacks a few stores' lag in each response. We may pursue both of
+these approaches in the future, based on initial experience.
+
+#### Failure detection based on actual request failures
+In this proposal, we have argued for a separate health checking mechanism (i.e., a separate
+control plane), while we could have used the pull query requests between servers themselves to
+gauge whether another server is up or down. But any scheme like that would require a fallback
+mechanism that periodically probes other servers anyway, to keep the availability information
+up to date. While we recognize that such an approach could provide much quicker failure
+detection (and potentially a higher number of samples to base failure detection on) and less
+communication overhead, it also requires significant tuning to handle transient application
+pauses and other corner cases.
+
+We intend to use the simple heartbeat mechanism proposed here as a baseline implementation
+that can be extended to more advanced schemes like these down the line.
+
+## Test plan
+
+We will write unit tests and integration tests with failure scenarios that cover the cases:
+1. The request is forwarded to a standby.
+2. The request is forwarded to the most caught-up standby.
+3. The request fails if the lag of the standbys exceeds the acceptable lag configuration.
+
+We will look into muckrake or cc-system-tests.
+
+## Documentation Updates
+
+We need to add documentation about the configuration parameters for the failure detection
+policy and the acceptable lag, and about the new endpoints.
+
+## Compatibility Implications
+
+N/A
+
+## Performance Implications
+
+Improves the performance of pull queries in the case of failures.
+
+## Security Implications
+
+N/A

From 6b5ce0c1a6f34891dcac88ad7454a06f3ed08f37 Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Tue, 14 Jan 2020 12:52:37 +0000
Subject: [PATCH 101/123] fix: deadlock when closing transient push query (#4297)

* fix: deadlock when closing transient push query

fixes: https://github.com/confluentinc/ksql/issues/4296

The produce side now calls `offer` in a loop, with a short timeout, to try and put the row
into the blocking queue. When the consume side closes the query, e.g. on an `EOFException` if
the user has closed the connection, the query first closes the queue, setting a flag that the
producers check on each loop and causing any producers to exit the loop. Then it can safely
close the KS topology.
---
 .../ksql/query/BlockingRowQueue.java | 68 ++++++++++++
 .../confluent/ksql/query/QueryExecutor.java | 8 +-
 .../ksql/query/TransientQueryQueue.java | 76 ++++++++-----
 .../ksql/util/PersistentQueryMetadata.java | 2 +-
 .../io/confluent/ksql/util/QueryMetadata.java | 5 +-
 .../ksql/util/TransientQueryMetadata.java | 25 ++---
 .../integration/EndToEndIntegrationTest.java | 6 +-
 .../ksql/query/TransientQueryQueueTest.java | 102 ++++++++++++++----
 .../ksql/util/TransientQueryMetadataTest.java | 90 ++++++++++++++++
 .../entity/QueryDescriptionFactoryTest.java | 14 +--
 .../streaming/QueryStreamWriterTest.java | 21 ++--
 .../streaming/StreamedQueryResourceTest.java | 46 +++++++-
 12 files changed, 376 insertions(+), 87 deletions(-)
 create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java
 create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java

diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java b/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java
new file mode 100644
index 000000000000..6512ec9e15ce
--- /dev/null
+++ b/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2020 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.query; + +import io.confluent.ksql.GenericRow; +import java.util.Collection; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import org.apache.kafka.streams.KeyValue; + +/** + * The queue between the Kafka-streams topology and the client connection. + * + *

The KS topology writes to the queue from its {@code StreamThread}, while the KSQL server + * thread that is servicing the client request reads from the queue and writes to the client + * socket. + */ +public interface BlockingRowQueue { + + /** + * Sets the limit handler that will be called when any row limit is reached. + * + *

Replaces any previous handler. + * + * @param limitHandler the handler. + */ + void setLimitHandler(LimitHandler limitHandler); + + /** + * Poll the queue for a single row + * + * @see BlockingQueue#poll(long, TimeUnit) + */ + KeyValue poll(long timeout, TimeUnit unit) + throws InterruptedException; + + /** + * Drain the queue to the supplied {@code collection}. + * + * @see BlockingQueue#drainTo(Collection) + */ + void drainTo(Collection> collection); + + /** + * The size of the queue. + * + * @see BlockingQueue#size() + */ + int size(); + + /** + * Close the queue. + */ + void close(); +} diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java index 34301d205afd..199f009108b3 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java @@ -151,7 +151,7 @@ public TransientQueryMetadata buildTransientQuery( final LogicalSchema schema, final OptionalInt limit ) { - final TransientQueryQueue queue = buildTransientQueryQueue(queryId, physicalPlan, limit); + final BlockingRowQueue queue = buildTransientQueryQueue(queryId, physicalPlan, limit); final String applicationId = addTimeSuffix(getQueryApplicationId( getServiceId(), @@ -171,15 +171,15 @@ public TransientQueryMetadata buildTransientQuery( built.kafkaStreams, transientSchema, sources, - queue::setLimitHandler, planSummary, - queue.getQueue(), + queue, applicationId, built.topology, streamsProperties, overrides, queryCloseCallback, - ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG)); + ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG) + ); } private static Optional getMaterializationInfo(final Object result) { diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java b/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java index 2d308e374a9a..eb983e560dcc 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java @@ -15,12 +15,15 @@ package io.confluent.ksql.query; +import com.google.common.annotations.VisibleForTesting; import io.confluent.ksql.GenericRow; import io.confluent.ksql.util.KsqlException; +import java.util.Collection; import java.util.Objects; import java.util.OptionalInt; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.kstream.ForeachAction; import org.apache.kafka.streams.kstream.KStream; @@ -29,41 +32,62 @@ /** * A queue of rows for transient queries. */ -class TransientQueryQueue { +class TransientQueryQueue implements BlockingRowQueue { private final LimitQueueCallback callback; - private final BlockingQueue> rowQueue = - new LinkedBlockingQueue<>(100); + private final BlockingQueue> rowQueue; + private final int offerTimeoutMs; + private volatile boolean closed = false; TransientQueryQueue(final KStream kstream, final OptionalInt limit) { + this(kstream, limit, 100, 100); + } + + @VisibleForTesting + TransientQueryQueue( + final KStream kstream, + final OptionalInt limit, + final int queueSizeLimit, + final int offerTimeoutMs + ) { this.callback = limit.isPresent() ? 
new LimitedQueueCallback(limit.getAsInt()) : new UnlimitedQueueCallback(); + this.rowQueue = new LinkedBlockingQueue<>(queueSizeLimit); + this.offerTimeoutMs = offerTimeoutMs; - kstream.foreach(new TransientQueryQueue.QueuePopulator<>(rowQueue, callback)); + kstream.foreach(new QueuePopulator<>()); } - BlockingQueue> getQueue() { - return rowQueue; + @Override + public void setLimitHandler(final LimitHandler limitHandler) { + callback.setLimitHandler(limitHandler); } - void setLimitHandler(final LimitHandler limitHandler) { - callback.setLimitHandler(limitHandler); + @Override + public KeyValue poll(final long timeout, final TimeUnit unit) + throws InterruptedException { + return rowQueue.poll(timeout, unit); } - @SuppressWarnings("OptionalUsedAsFieldOrParameterType") - static final class QueuePopulator implements ForeachAction { + @Override + public void drainTo(final Collection> collection) { + rowQueue.drainTo(collection); + } - private final BlockingQueue> queue; - private final QueueCallback callback; + @Override + public int size() { + return rowQueue.size(); + } - QueuePopulator( - final BlockingQueue> queue, - final QueueCallback callback - ) { - this.queue = Objects.requireNonNull(queue, "queue"); - this.callback = Objects.requireNonNull(callback, "callback"); - } + @Override + public void close() { + closed = true; + } + + @VisibleForTesting + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") + final class QueuePopulator implements ForeachAction { @Override public void apply(final K key, final GenericRow row) { @@ -76,18 +100,22 @@ public void apply(final K key, final GenericRow row) { return; } - final String keyString = getStringKey(key); - queue.put(new KeyValue<>(keyString, row)); + final KeyValue kv = new KeyValue<>(getStringKey(key), row); - callback.onQueued(); - } catch (final InterruptedException exception) { + while (!closed) { + if (rowQueue.offer(kv, offerTimeoutMs, TimeUnit.MILLISECONDS)) { + callback.onQueued(); + break; + } + } + } catch (final InterruptedException e) { throw new KsqlException("InterruptedException while enqueueing:" + key); } } private String getStringKey(final K key) { if (key instanceof Windowed) { - final Windowed windowedKey = (Windowed) key; + final Windowed windowedKey = (Windowed) key; return String.format("%s : %s", windowedKey.key(), windowedKey.window()); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java index 2dea992ffafd..6036ec1e78f3 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/util/PersistentQueryMetadata.java @@ -64,7 +64,7 @@ public PersistentQueryMetadata( final Map streamsProperties, final Map overriddenProperties, final Consumer closeCallback, - final Long closeTimeout) { + final long closeTimeout) { // CHECKSTYLE_RULES.ON: ParameterNumberCheck super( statementString, diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java index 47414605ab6f..3d24c3c53c40 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java @@ -62,7 +62,8 @@ protected QueryMetadata( final Map streamsProperties, final Map overriddenProperties, final Consumer closeCallback, - final Long closeTimeout) { + final long closeTimeout + ) { // 
CHECKSTYLE_RULES.ON: ParameterNumberCheck this.statementString = Objects.requireNonNull(statementString, "statementString"); this.kafkaStreams = Objects.requireNonNull(kafkaStreams, "kafkaStreams"); @@ -78,7 +79,7 @@ protected QueryMetadata( this.closeCallback = Objects.requireNonNull(closeCallback, "closeCallback"); this.sourceNames = Objects.requireNonNull(sourceNames, "sourceNames"); this.logicalSchema = Objects.requireNonNull(logicalSchema, "logicalSchema"); - this.closeTimeout = Objects.requireNonNull(closeTimeout, "closeTimeout"); + this.closeTimeout = closeTimeout; } protected QueryMetadata(final QueryMetadata other, final Consumer closeCallback) { diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java index 8f3f33be693f..3c83876d073e 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java @@ -15,18 +15,16 @@ package io.confluent.ksql.util; -import io.confluent.ksql.GenericRow; import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.query.BlockingRowQueue; import io.confluent.ksql.query.LimitHandler; import io.confluent.ksql.schema.ksql.LogicalSchema; import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.Topology; /** @@ -34,9 +32,8 @@ */ public class TransientQueryMetadata extends QueryMetadata { - private final BlockingQueue> rowQueue; + private final BlockingRowQueue rowQueue; private final AtomicBoolean isRunning = new AtomicBoolean(true); - private final Consumer limitHandlerSetter; // CHECKSTYLE_RULES.OFF: ParameterNumberCheck public TransientQueryMetadata( @@ -44,15 +41,14 @@ public TransientQueryMetadata( final KafkaStreams kafkaStreams, final LogicalSchema logicalSchema, final Set sourceNames, - final Consumer limitHandlerSetter, final String executionPlan, - final BlockingQueue> rowQueue, + final BlockingRowQueue rowQueue, final String queryApplicationId, final Topology topology, final Map streamsProperties, final Map overriddenProperties, final Consumer closeCallback, - final Long closeTimeout) { + final long closeTimeout) { // CHECKSTYLE_RULES.ON: ParameterNumberCheck super( statementString, @@ -65,8 +61,8 @@ public TransientQueryMetadata( streamsProperties, overriddenProperties, closeCallback, - closeTimeout); - this.limitHandlerSetter = Objects.requireNonNull(limitHandlerSetter, "limitHandlerSetter"); + closeTimeout + ); this.rowQueue = Objects.requireNonNull(rowQueue, "rowQueue"); if (!logicalSchema.metadata().isEmpty() || !logicalSchema.key().isEmpty()) { @@ -78,7 +74,7 @@ public boolean isRunning() { return isRunning.get(); } - public BlockingQueue> getRowQueue() { + public BlockingRowQueue getRowQueue() { return rowQueue; } @@ -99,11 +95,16 @@ public int hashCode() { } public void setLimitHandler(final LimitHandler limitHandler) { - limitHandlerSetter.accept(limitHandler); + rowQueue.setLimitHandler(limitHandler); } @Override public void close() { + // To avoid deadlock, close the queue first to ensure producer side isn't blocked trying to + // write to the blocking queue, otherwise super.close call can deadlock: + rowQueue.close(); + + // Now safe to close: 
super.close(); isRunning.set(false); } diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java index ae3edbcfb317..580ccdf3b16b 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java @@ -30,6 +30,7 @@ import io.confluent.ksql.GenericRow; import io.confluent.ksql.function.udf.Udf; import io.confluent.ksql.function.udf.UdfDescription; +import io.confluent.ksql.query.BlockingRowQueue; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.serde.Format; import io.confluent.ksql.util.KsqlConstants; @@ -44,7 +45,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -216,7 +216,7 @@ public void shouldSelectAllFromDerivedStream() throws Exception { "SELECT * from pageviews_female EMIT CHANGES;"); final List> results = new ArrayList<>(); - final BlockingQueue> rowQueue = queryMetadata.getRowQueue(); + final BlockingRowQueue rowQueue = queryMetadata.getRowQueue(); // From the mock data, we expect exactly 3 page views from female users. final List expectedPages = ImmutableList.of("PAGE_2", "PAGE_5", "PAGE_5"); @@ -402,7 +402,7 @@ private static List verifyAvailableRows( final TransientQueryMetadata queryMetadata, final int expectedRows ) throws Exception { - final BlockingQueue> rowQueue = queryMetadata.getRowQueue(); + final BlockingRowQueue rowQueue = queryMetadata.getRowQueue(); TestUtils.waitForCondition( () -> rowQueue.size() >= expectedRows, diff --git a/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java b/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java index aea22e2c4e53..e59167da08ed 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java @@ -16,9 +16,9 @@ package io.confluent.ksql.query; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -26,47 +26,58 @@ import io.confluent.ksql.GenericRow; import io.confluent.ksql.query.TransientQueryQueue.QueuePopulator; +import java.util.ArrayList; +import java.util.List; import java.util.OptionalInt; -import java.util.Queue; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.stream.IntStream; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.kstream.KStream; +import org.junit.After; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -@SuppressWarnings("ConstantConditions") +@SuppressWarnings("unchecked") 
@RunWith(MockitoJUnitRunner.class) public class TransientQueryQueueTest { private static final int SOME_LIMIT = 4; + private static final int MAX_LIMIT = SOME_LIMIT * 2; private static final GenericRow ROW_ONE = mock(GenericRow.class); private static final GenericRow ROW_TWO = mock(GenericRow.class); + @Rule + public final Timeout timeout = Timeout.seconds(10); + @Mock private LimitHandler limitHandler; @Mock private KStream kStreamsApp; @Captor private ArgumentCaptor> queuePopulatorCaptor; - private Queue> queue; private QueuePopulator queuePopulator; + private TransientQueryQueue queue; + private ScheduledExecutorService executorService; @Before public void setUp() { - final TransientQueryQueue queuer = - new TransientQueryQueue(kStreamsApp, OptionalInt.of(SOME_LIMIT)); - - queuer.setLimitHandler(limitHandler); - - queue = queuer.getQueue(); + givenQueue(OptionalInt.of(SOME_LIMIT)); + } - verify(kStreamsApp).foreach(queuePopulatorCaptor.capture()); - queuePopulator = queuePopulatorCaptor.getValue(); + @After + public void tearDown() { + if (executorService != null) { + executorService.shutdownNow(); + } } @Test @@ -76,11 +87,10 @@ public void shouldQueue() { queuePopulator.apply("key2", ROW_TWO); // Then: - assertThat(queue, hasSize(2)); - assertThat(queue.peek().key, is("key1")); - assertThat(queue.remove().value, is(ROW_ONE)); - assertThat(queue.peek().key, is("key2")); - assertThat(queue.remove().value, is(ROW_TWO)); + assertThat(drainValues(), contains( + new KeyValue<>("key1", ROW_ONE), + new KeyValue<>("key2", ROW_TWO) + )); } @Test @@ -89,7 +99,7 @@ public void shouldNotQueueNullValues() { queuePopulator.apply("key1", null); // Then: - assertThat(queue, is(empty())); + assertThat(queue.size(), is(0)); } @Test @@ -99,7 +109,21 @@ public void shouldQueueUntilLimitReached() { .forEach(idx -> queuePopulator.apply("key1", ROW_ONE)); // Then: - assertThat(queue, hasSize(SOME_LIMIT)); + assertThat(queue.size(), is(SOME_LIMIT)); + } + + @Test + public void shouldPoll() throws Exception { + // Given: + queuePopulator.apply("key1", ROW_ONE); + queuePopulator.apply("key2", ROW_TWO); + + // When: + final KeyValue result = queue.poll(1, TimeUnit.SECONDS); + + // Then: + assertThat(result, is(new KeyValue<>("key1", ROW_ONE))); + assertThat(drainValues(), contains(new KeyValue<>("key2", ROW_TWO))); } @Test @@ -131,4 +155,42 @@ public void shouldCallLimitHandlerOnlyOnce() { // Then: verify(limitHandler, times(1)).limitReached(); } + + @Test + public void shouldBlockOnProduceOnceQueueLimitReachedAndUnblockOnClose() { + // Given: + givenQueue(OptionalInt.empty()); + + IntStream.range(0, MAX_LIMIT) + .forEach(idx -> queuePopulator.apply("key1", ROW_ONE)); + + givenWillCloseQueueAsync(); + + // When: + queuePopulator.apply("should not be queued", ROW_TWO); + + // Then: did not block and: + assertThat(queue.size(), is(MAX_LIMIT)); + } + + private void givenWillCloseQueueAsync() { + executorService = Executors.newSingleThreadScheduledExecutor(); + executorService.schedule(queue::close, 200, TimeUnit.MILLISECONDS); + } + + private void givenQueue(final OptionalInt limit) { + clearInvocations(kStreamsApp); + queue = new TransientQueryQueue(kStreamsApp, limit, MAX_LIMIT, 1); + + queue.setLimitHandler(limitHandler); + + verify(kStreamsApp).foreach(queuePopulatorCaptor.capture()); + queuePopulator = queuePopulatorCaptor.getValue(); + } + + private List> drainValues() { + final List> entries = new ArrayList<>(); + queue.drainTo(entries); + return entries; + } } \ No newline at end of file diff --git 
a/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java b/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java new file mode 100644 index 000000000000..198529e24342 --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java @@ -0,0 +1,90 @@ +/* + * Copyright 2020 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.util; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.inOrder; + +import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.query.BlockingRowQueue; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.Topology; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TransientQueryMetadataTest { + + private static final String QUERY_ID = "queryId"; + private static final String EXECUTION_PLAN = "execution plan"; + private static final String SQL = "sql"; + private static final long CLOSE_TIMEOUT = 10L; + + @Mock + private KafkaStreams kafkaStreams; + @Mock + private LogicalSchema logicalSchema; + @Mock + private Set sourceNames; + @Mock + private BlockingRowQueue rowQueue; + @Mock + private Topology topology; + @Mock + private Map props; + @Mock + private Map overrides; + @Mock + private Consumer closeCallback; + private TransientQueryMetadata query; + + @Before + public void setUp() { + query = new TransientQueryMetadata( + SQL, + kafkaStreams, + logicalSchema, + sourceNames, + EXECUTION_PLAN, + rowQueue, + QUERY_ID, + topology, + props, + overrides, + closeCallback, + CLOSE_TIMEOUT + ); + } + + @Test + public void shouldCloseQueueBeforeTopologyToAvoidDeadLock() { + // When: + query.close(); + + // Then: + final InOrder inOrder = inOrder(rowQueue, kafkaStreams); + inOrder.verify(rowQueue).close(); + inOrder.verify(kafkaStreams).close(any()); + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java index a77244684401..79c11ad3f9ff 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java @@ -26,7 +26,7 @@ import io.confluent.ksql.metastore.model.DataSource.DataSourceType; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; -import io.confluent.ksql.query.LimitHandler; +import io.confluent.ksql.query.BlockingRowQueue; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.schema.ksql.LogicalSchema; import 
io.confluent.ksql.schema.ksql.PhysicalSchema; @@ -42,7 +42,6 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Optional; -import java.util.concurrent.LinkedBlockingQueue; import java.util.function.Consumer; import java.util.stream.Collectors; import org.apache.kafka.streams.KafkaStreams; @@ -86,7 +85,7 @@ public class QueryDescriptionFactoryTest { @Mock(name = TOPOLOGY_TEXT) private TopologyDescription topologyDescription; @Mock - private Consumer limitHandler; + private BlockingRowQueue queryQueue; @Mock private KsqlTopic sinkTopic; private QueryMetadata transientQuery; @@ -103,9 +102,8 @@ public void setUp() { queryStreams, TRANSIENT_SCHEMA, SOURCE_NAMES, - limitHandler, "execution plan", - new LinkedBlockingQueue<>(), + queryQueue, "app id", topology, STREAMS_PROPS, @@ -218,9 +216,8 @@ public void shouldHandleRowTimeInValueSchemaForTransientQuery() { queryStreams, schema, SOURCE_NAMES, - limitHandler, "execution plan", - new LinkedBlockingQueue<>(), + queryQueue, "app id", topology, STREAMS_PROPS, @@ -253,9 +250,8 @@ public void shouldHandleRowKeyInValueSchemaForTransientQuery() { queryStreams, schema, SOURCE_NAMES, - limitHandler, "execution plan", - new LinkedBlockingQueue<>(), + queryQueue, "app id", topology, STREAMS_PROPS, diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java index 1ac874e28b9a..1a2f73cf58c9 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java @@ -32,6 +32,7 @@ import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.json.JsonMapper; import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.query.BlockingRowQueue; import io.confluent.ksql.query.LimitHandler; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; @@ -42,7 +43,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.apache.kafka.streams.KafkaStreams; @@ -59,7 +59,7 @@ import org.junit.rules.Timeout; import org.junit.runner.RunWith; -@SuppressWarnings({"unchecked", "ConstantConditions"}) +@SuppressWarnings("unchecked") @RunWith(EasyMockRunner.class) public class QueryStreamWriterTest { @@ -74,7 +74,7 @@ public class QueryStreamWriterTest { @Mock(MockType.NICE) private TransientQueryMetadata queryMetadata; @Mock(MockType.NICE) - private BlockingQueue> rowQueue; + private BlockingRowQueue rowQueue; private Capture ehCapture; private Capture>> drainCapture; private Capture limitHandlerCapture; @@ -115,10 +115,11 @@ public void setUp() { } @Test - public void shouldWriteAnyPendingRowsBeforeReportingException() throws Exception { + public void shouldWriteAnyPendingRowsBeforeReportingException() { // Given: expect(queryMetadata.isRunning()).andReturn(true).anyTimes(); - expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); + rowQueue.drainTo(capture(drainCapture)); + expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); @@ -136,10 +137,11 @@ public void shouldWriteAnyPendingRowsBeforeReportingException() throws Exception } @Test - public void 
shouldExitAndDrainIfQueryStopsRunning() throws Exception { + public void shouldExitAndDrainIfQueryStopsRunning() { // Given: expect(queryMetadata.isRunning()).andReturn(true).andReturn(false); - expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); + rowQueue.drainTo(capture(drainCapture)); + expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); @@ -155,10 +157,11 @@ public void shouldExitAndDrainIfQueryStopsRunning() throws Exception { } @Test - public void shouldExitAndDrainIfLimitReached() throws Exception { + public void shouldExitAndDrainIfLimitReached() { // Given: expect(queryMetadata.isRunning()).andReturn(true).anyTimes(); - expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); + rowQueue.drainTo(capture(drainCapture)); + expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index 7f41dd116f26..d0935b7bdafd 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -24,7 +24,6 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; @@ -47,6 +46,8 @@ import io.confluent.ksql.parser.tree.PrintTopic; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.query.BlockingRowQueue; +import io.confluent.ksql.query.LimitHandler; import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.entity.KsqlErrorMessage; import io.confluent.ksql.rest.entity.KsqlRequest; @@ -70,12 +71,15 @@ import java.io.PipedInputStream; import java.io.PipedOutputStream; import java.time.Duration; +import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Scanner; import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -365,9 +369,8 @@ public void shouldStreamRowsCorrectly() throws Throwable { mockKafkaStreams, SOME_SCHEMA, Collections.emptySet(), - limitHandler -> {}, "", - rowQueue, + new TestRowQueue(rowQueue), "", mock(Topology.class), Collections.emptyMap(), @@ -612,4 +615,41 @@ public void shouldSuggestAlternativesIfPrintTopicDoesNotExist() { new KsqlRequest(PRINT_TOPIC, Collections.emptyMap(), null) ); } + + private static class TestRowQueue implements BlockingRowQueue { + + private final SynchronousQueue> rowQueue; + + TestRowQueue( + final SynchronousQueue> rowQueue + ) { + this.rowQueue = Objects.requireNonNull(rowQueue, "rowQueue"); + } + + @Override + public void setLimitHandler(final LimitHandler limitHandler) { + + } + + @Override + public KeyValue poll(final long timeout, final TimeUnit unit) + throws InterruptedException { + return rowQueue.poll(timeout, 
unit); + } + + @Override + public void drainTo(final Collection> collection) { + rowQueue.drainTo(collection); + } + + @Override + public int size() { + return rowQueue.size(); + } + + @Override + public void close() { + + } + } } From 5911fafe9969b34ca97c09f8a567b3e44784a679 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Tue, 14 Jan 2020 12:52:52 +0000 Subject: [PATCH 102/123] fix: log4j properties files (#4293) * fix: log4j properties files This PR removes the log4j.properties files from our production jars. The presence of such files will make it more tricky for clients to correctly configure log4j with their own properties files as log4j picks the first it finds on the class path by default. Also, changed many log levels from `ERROR` to `WARN` as important warnings are being excluded, which could be hiding issues. --- config/log4j-rolling.properties | 28 +++++++++++++------ config/log4j.properties | 25 ++++++++++++----- .../src/main/resources/log4j.properties | 22 --------------- .../src/main/resources/log4j.properties | 22 --------------- 4 files changed, 38 insertions(+), 59 deletions(-) delete mode 100644 ksql-engine/src/main/resources/log4j.properties delete mode 100644 ksql-rest-app/src/main/resources/log4j.properties diff --git a/config/log4j-rolling.properties b/config/log4j-rolling.properties index 9bc1e0887122..5167f35433b9 100644 --- a/config/log4j-rolling.properties +++ b/config/log4j-rolling.properties @@ -46,22 +46,34 @@ log4j.appender.kafka_appender.BrokerList=localhost:9092 log4j.appender.kafka_appender.Topic=default_ksql_processing_log # loggers + +log4j.logger.org.reflections=ERROR, main + +# Uncomment the following line to stop KSQL from logging out each request it receives: +#log4j.logger.io.confluent.ksql.rest.server.resources.KsqlResource=WARN + +# And this one to avoid the logs being spammed with KsqlConfig values. +# Though these can be useful for debugging / investigations. 
+#log4j.logger.io.confluent.ksql.util.KsqlConfig=WARN + +## KSQL Processing logs: +log4j.logger.processing=WARN, kafka_appender +log4j.additivity.processing=false + +## Kafka Streams logs: log4j.logger.org.apache.kafka.streams=INFO, streams log4j.additivity.org.apache.kafka.streams=false -log4j.logger.kafka=ERROR, kafka +## Kafka client logs: +log4j.logger.kafka=WARN, kafka log4j.additivity.kafka=false -log4j.logger.org.apache.zookeeper=ERROR, kafka +log4j.logger.org.apache.zookeeper=WARN, kafka log4j.additivity.org.apache.zookeeper=false -log4j.logger.org.apache.kafka=ERROR, kafka +log4j.logger.org.apache.kafka=WARN, kafka log4j.additivity.org.apache.kafka=false -log4j.logger.org.I0Itec.zkclient=ERROR, kafka +log4j.logger.org.I0Itec.zkclient=WARN, kafka log4j.additivity.org.I0Itec.zkclient=false -log4j.logger.processing=ERROR, kafka_appender -log4j.additivity.processing=false - -log4j.logger.org.reflections=ERROR, main diff --git a/config/log4j.properties b/config/log4j.properties index 07a11c170b72..c1a9178c1fa3 100644 --- a/config/log4j.properties +++ b/config/log4j.properties @@ -15,6 +15,7 @@ log4j.rootLogger=INFO, stdout +# appenders log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n @@ -23,13 +24,23 @@ log4j.appender.streams=org.apache.log4j.ConsoleAppender log4j.appender.streams.layout=org.apache.log4j.PatternLayout log4j.appender.streams.layout.ConversionPattern=[%d] %p %m (%c:%L)%n -log4j.logger.kafka=ERROR, stdout -log4j.logger.org.apache.kafka.streams=INFO, streams -log4j.additivity.org.apache.kafka.streams=false -log4j.logger.org.apache.zookeeper=ERROR, stdout -log4j.logger.org.apache.kafka=ERROR, stdout -log4j.logger.org.I0Itec.zkclient=ERROR, stdout +# loggers + log4j.logger.org.reflections=ERROR, stdout # Uncomment the following line to stop KSQL from logging out each request it receives: -#log4j.logger.io.confluent.ksql.rest.server.resources.KsqlResource=WARN, stdout +#log4j.logger.io.confluent.ksql.rest.server.resources.KsqlResource=WARN + +# And this one to avoid the logs being spammed with KsqlConfig values. +# Though these can be useful for debugging / investigations. +#log4j.logger.io.confluent.ksql.util.KsqlConfig=WARN + +## Kafka Streams logs: +log4j.logger.org.apache.kafka.streams=INFO, streams +log4j.additivity.org.apache.kafka.streams=false + +## Kafka Client logs: +log4j.logger.kafka=WARN, stdout +log4j.logger.org.apache.zookeeper=WARN, stdout +log4j.logger.org.apache.kafka=WARN, stdout +log4j.logger.org.I0Itec.zkclient=WARN, stdout diff --git a/ksql-engine/src/main/resources/log4j.properties b/ksql-engine/src/main/resources/log4j.properties deleted file mode 100644 index eef0378911d1..000000000000 --- a/ksql-engine/src/main/resources/log4j.properties +++ /dev/null @@ -1,22 +0,0 @@ -# -# Copyright 2018 Confluent Inc. -# -# Licensed under the Confluent Community License (the "License"); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at -# -# http://www.confluent.io/confluent-community-license -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. 
-# - -log4j.rootLogger=WARN,stdout - -log4j.logger.io.confluent.ksql=INFO -log4j.logger.org.apache.kafka.clients.NetworkClient=ERROR -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n diff --git a/ksql-rest-app/src/main/resources/log4j.properties b/ksql-rest-app/src/main/resources/log4j.properties deleted file mode 100644 index eef0378911d1..000000000000 --- a/ksql-rest-app/src/main/resources/log4j.properties +++ /dev/null @@ -1,22 +0,0 @@ -# -# Copyright 2018 Confluent Inc. -# -# Licensed under the Confluent Community License (the "License"); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at -# -# http://www.confluent.io/confluent-community-license -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. -# - -log4j.rootLogger=WARN,stdout - -log4j.logger.io.confluent.ksql=INFO -log4j.logger.org.apache.kafka.clients.NetworkClient=ERROR -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n From 50b4c1ccd2c2ba769d522d84338fdcd1c7b064c8 Mon Sep 17 00:00:00 2001 From: Alan Sheinberg <57688982+AlanConfluent@users.noreply.github.com> Date: Tue, 14 Jan 2020 11:55:10 -0800 Subject: [PATCH 103/123] perf: Avoids logging INFO for rest-util requests, since it hurts pull query performance (#4302) * perf: Avoids logging INFO for rest-util requests, since it hurts pull query performance --- config/log4j-rolling.properties | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config/log4j-rolling.properties b/config/log4j-rolling.properties index 5167f35433b9..13102c692733 100644 --- a/config/log4j-rolling.properties +++ b/config/log4j-rolling.properties @@ -77,3 +77,5 @@ log4j.additivity.org.apache.kafka=false log4j.logger.org.I0Itec.zkclient=WARN, kafka log4j.additivity.org.I0Itec.zkclient=false +# To achieve high throughput on pull queries, avoid logging every request from Jetty +log4j.logger.io.confluent.rest-utils.requests=WARN From 0b6da0bd9e2c4b6c9325a02fe33dfbe53b424fd2 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Tue, 14 Jan 2020 21:25:15 +0000 Subject: [PATCH 104/123] fix: fix NPE in CLI if not username supplied (#4312) Fixes NPE when running the CLI with no arguments. Was throwing NPE on line: ```java if (!options.getUserName().isEmpty() && !options.isPasswordSet()) { ``` in Ksql.java, as `options.getUserName()` was returning `null`. Refactored so that username and password are never null, only empty. Also refactored so that server will default to `http://localhost:8088`, even if other params are supplied. Previously, it only defaulted to this if no params supplied. 
--- .../src/main/java/io/confluent/ksql/Ksql.java | 19 +-- .../java/io/confluent/ksql/cli/Options.java | 32 +++-- .../ksql/cli/commands/OptionsTest.java | 133 ++++++++++++++---- 3 files changed, 137 insertions(+), 47 deletions(-) diff --git a/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java b/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java index d4ee1d79555d..5fc4c52c7514 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/Ksql.java @@ -25,7 +25,6 @@ import io.confluent.ksql.util.ErrorMessageUtil; import io.confluent.ksql.version.metrics.KsqlVersionCheckerAgent; import io.confluent.ksql.version.metrics.collector.KsqlModuleType; - import java.io.Console; import java.io.File; import java.io.IOException; @@ -61,16 +60,13 @@ public final class Ksql { } public static void main(final String[] args) throws IOException { - final Options options = args.length == 0 - ? Options.parse("http://localhost:8088") - : Options.parse(args); - + final Options options = Options.parse(args); if (options == null) { System.exit(-1); } // ask for password if not set through command parameters - if (!options.getUserName().isEmpty() && !options.isPasswordSet()) { + if (options.requiresPassword()) { options.setPassword(readPassword()); } @@ -90,8 +86,15 @@ private static String readPassword() { System.err.println("Could not get console for enter password; use -p option instead."); System.exit(-1); } - - return new String(console.readPassword("Enter password: ")); + + String password = ""; + while (password.isEmpty()) { + password = new String(console.readPassword("Enter password: ")); + if (password.isEmpty()) { + console.writer().println("Error: password can not be empty"); + } + } + return password; } void run() { diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/Options.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/Options.java index 38467697ef7d..4f349a07a40a 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/Options.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/Options.java @@ -21,7 +21,6 @@ import com.github.rvesse.airline.annotations.Command; import com.github.rvesse.airline.annotations.Option; import com.github.rvesse.airline.annotations.restrictions.Once; -import com.github.rvesse.airline.annotations.restrictions.Required; import com.github.rvesse.airline.annotations.restrictions.ranges.LongRange; import com.github.rvesse.airline.help.Help; import com.github.rvesse.airline.parser.errors.ParseException; @@ -47,13 +46,12 @@ public class Options { @Inject public HelpOption help; - @SuppressWarnings("unused") // Accessed via reflection + @SuppressWarnings({"unused", "FieldMayBeFinal", "FieldCanBeLocal"}) // Accessed via reflection @Once - @Required @Arguments( title = "server", description = "The address of the Ksql server to connect to (ex: http://confluent.io:9098)") - private String server; + private String server = "http://localhost:8088"; private static final String CONFIGURATION_FILE_OPTION_NAME = "--config-file"; @@ -65,7 +63,7 @@ public class Options { private String configFile; - @SuppressWarnings("unused") // Accessed via reflection + @SuppressWarnings({"unused", "FieldMayBeFinal"}) // Accessed via reflection @Option( name = {USERNAME_OPTION, USERNAME_SHORT_OPTION}, description = @@ -75,7 +73,7 @@ public class Options { + "/" + PASSWORD_OPTION + " flag") - private String userName; + private String userName = ""; @SuppressWarnings("unused") // Accessed via reflection @Option( @@ -87,7 +85,7 @@ public class 
Options { + "/" + USERNAME_OPTION + " flag") - private String password; + private String password = ""; @SuppressWarnings("unused") // Accessed via reflection @Option( @@ -158,20 +156,24 @@ public OutputFormat getOutputFormat() { return OutputFormat.valueOf(outputFormat); } - public String getUserName() { - return userName; + public boolean requiresPassword() { + if (userName.isEmpty()) { + return false; + } + + return password.trim().isEmpty(); } public void setPassword(final String password) { - this.password = password; - } + if (password.isEmpty()) { + throw new IllegalArgumentException("Password must not be empty"); + } - public boolean isPasswordSet() { - return (password != null && !password.trim().isEmpty()); + this.password = password; } public Optional getUserNameAndPassword() { - if ((userName == null && password != null) || (password == null && userName != null)) { + if (userName.isEmpty() != password.isEmpty()) { throw new ConfigException( "You must specify both a username and a password. If you don't want to use an " + "authenticated session, don't specify either of the " @@ -181,7 +183,7 @@ public Optional getUserNameAndPassword() { + " flags on the command line"); } - if (userName == null) { + if (userName.isEmpty()) { return Optional.empty(); } diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/commands/OptionsTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/commands/OptionsTest.java index d45e704e575f..09065d7e6924 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/commands/OptionsTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/commands/OptionsTest.java @@ -15,55 +15,140 @@ package io.confluent.ksql.cli.commands; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; import io.confluent.ksql.cli.Options; - +import io.confluent.ksql.rest.client.BasicCredentials; +import java.util.Optional; +import org.apache.commons.lang3.StringUtils; import org.apache.kafka.common.config.ConfigException; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.ExpectedException; public class OptionsTest { - @Test(expected = ConfigException.class) - public void shouldThrowConfigExceptionIfOnlyUsernameIsProvided() throws Exception { - final Options options = Options.parse("http://foobar", "-u", "joe"); + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + @Test + public void shouldUseDefaultServerIfNoneSupplied() { + // When: + final Options options = parse(); + + // Then: + assertThat(options.getServer(), is("http://localhost:8088")); + } + + @Test + public void shouldWorkWithUserSuppliedServer() { + // When: + final Options options = parse("custom server"); + + // Then: + assertThat(options.getServer(), is("custom server")); + } + + @Test + public void shouldThrowConfigExceptionIfOnlyUsernameIsProvided() { + // Given: + final Options options = parse("-u", "joe"); + + // Expect: + expectedException.expect(ConfigException.class); + expectedException.expectMessage("You must specify both a username and a password"); + + // When: options.getUserNameAndPassword(); } - @Test(expected = ConfigException.class) - public void shouldThrowConfigExceptionIfOnlyPasswordIsProvided() throws Exception { - final Options options = Options.parse("http://foobar", "-p", "joe"); + @Test + public void 
shouldThrowConfigExceptionIfOnlyPasswordIsProvided() { + // Given: + final Options options = parse("http://foobar", "-p", "joe"); + + // Expect: + expectedException.expect(ConfigException.class); + expectedException.expectMessage("You must specify both a username and a password"); + + // When: options.getUserNameAndPassword(); } @Test - public void shouldReturnUserPasswordPairWhenBothProvided() throws Exception { - final Options options = Options.parse("http://foobar", "-u", "joe", "-p", "joe"); - assertTrue(options.getUserNameAndPassword().isPresent()); + public void shouldReturnUserPasswordPairWhenBothProvided() { + // When: + final Options options = parse("http://foobar", "-u", "joe", "-p", "pp"); + + // Then: + assertThat(options.getUserNameAndPassword(), + is(Optional.of(BasicCredentials.of("joe", "pp")))); + } + + @Test + public void shouldReturnEmptyOptionWhenUserAndPassNotPresent() { + // When: + final Options options = parse(); + + // Then: + assertThat(options.getUserNameAndPassword(), is(Optional.empty())); } @Test - public void shouldReturnEmptyOptionWhenUserAndPassNotPresent() throws Exception { - final Options options = Options.parse("http://foobar"); - assertFalse(options.getUserNameAndPassword().isPresent()); + public void shouldNotRequirePasswordIfUserNameNotSet() { + // When: + final Options options = parse(); + + // Then: + assertThat(options.requiresPassword(), is(false)); } @Test - public void shouldReturnPasswordNotSetIfPasswordIsNull() throws Exception { - final Options options = Options.parse("http://foobar"); - assertFalse(options.isPasswordSet()); + public void shouldNotRequirePasswordIfUserNameAndPasswordSupplied() { + // When: + final Options options = parse("-u", "joe", "-p", "oo"); + + // Then: + assertThat(options.requiresPassword(), is(false)); } @Test - public void shouldReturnPasswordNotSetIfPasswordIsEmpty() throws Exception { - final Options options = Options.parse("http://foobar", "-u", "joe", "-p", ""); - assertFalse(options.isPasswordSet()); + public void shouldRequirePasswordIfUserNameSuppliedButNotPassword() { + // When: + final Options options = parse("-u", "joe"); + + // Then: + assertThat(options.requiresPassword(), is(true)); } @Test - public void shouldReturnPasswordSetIfPasswordIsNotEmpty() throws Exception { - final Options options = Options.parse("http://foobar", "-u", "joe", "-p", "joe"); - assertTrue(options.isPasswordSet()); + public void shouldNotRequirePasswordIfUserNameAndPasswordSuppliedButEmpty() { + // When: + final Options options = parse("-u", "joe", "-p", ""); + + // Then: + assertThat(options.requiresPassword(), is(true)); + } + + @Test + public void shouldNotTrimPasswords() { + // When: + final Options options = parse("-u", "joe", "-p", " "); + + // Then: + assertThat(options.getUserNameAndPassword().map(BasicCredentials::password), + is(Optional.of(" "))); + } + + private static Options parse(final String... args) { + try { + final Options parsed = Options.parse(args); + assertThat(parsed, is(notNullValue())); + return parsed; + } catch (final Exception e) { + throw new AssertionError("Failed to parse options: " + StringUtils.join(args, ","), e); + } } } From 0bd49975e903a23800ac2d728ab95d45093fa0f0 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Tue, 14 Jan 2020 21:26:11 +0000 Subject: [PATCH 105/123] test: rQTT should fail if expecting more responses than statements (#4309) This just improves the error message. Better this than an `IndexOutOfBoundsException`. 
--- .../io/confluent/ksql/test/rest/RestTestExecutor.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java index d55148b45024..307c2792e748 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java @@ -102,6 +102,14 @@ public class RestTestExecutor implements Closeable { void buildAndExecuteQuery(final RestTestCase testCase) { topicInfoCache.clear(); + if (testCase.getStatements().size() < testCase.getExpectedResponses().size()) { + throw new AssertionError("Invalid test case: more expected responses than statements. " + + System.lineSeparator() + + "statementCount: " + testCase.getStatements().size() + + System.lineSeparator() + + "responsesCount: " + testCase.getExpectedResponses().size()); + } + initializeTopics(testCase.getTopics()); final StatementSplit statements = splitStatements(testCase); From fcfe2b9a5d6cbda9c9f89a2c0f0c92ccbe1d9573 Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Tue, 14 Jan 2020 13:31:43 -0800 Subject: [PATCH 106/123] docs: refactor installation docs around Docker (DOCS-3009) (#4235) * docs: start refactoring installation docs (DOCS-3009) * docs: add docker command lines for starting ksqldb-cli * docs: fully dockerize the main installation topic * docs: refactor around Docker stacks * docs: rework installation topic around one stack file * docs: add section for PostgreSQL stack * docs: add Compose output * docs: incorporate copy edits --- .../install-ksqldb-with-docker.md | 71 +-- .../installation/installing.md | 507 ++++++++++++++---- docs/tutorials/docker-compose.yml | 8 +- mkdocs.yml | 2 +- 4 files changed, 430 insertions(+), 158 deletions(-) diff --git a/docs-md/operate-and-deploy/installation/install-ksqldb-with-docker.md b/docs-md/operate-and-deploy/installation/install-ksqldb-with-docker.md index a2bb69e1b0c6..c2451389ba35 100644 --- a/docs-md/operate-and-deploy/installation/install-ksqldb-with-docker.md +++ b/docs-md/operate-and-deploy/installation/install-ksqldb-with-docker.md @@ -1,45 +1,23 @@ --- layout: page -title: Install ksqlDB with Docker -tagline: Run ksqlDB by using Docker containers +title: Configure ksqlDB with Docker +tagline: Configure ksqlDB by using Docker containers description: Learn how to install ksqlDB in various configurations by using Docker containers -keywords: ksqldb, docker, install +keywords: ksqldb, docker, container, install, configure, production --- -You can deploy ksqlDB by using Docker containers. Starting with {{ site.cp }} -4.1.2, Confluent maintains images at [Docker Hub](https://hub.docker.com/u/confluentinc) -for [ksqlDB Server](https://hub.docker.com/r/confluentinc/ksqldb-server/) and the +You can deploy ksqlDB by using Docker containers. Confluent maintains images at +[Docker Hub](https://hub.docker.com/u/confluentinc) for +[ksqlDB Server](https://hub.docker.com/r/confluentinc/ksqldb-server/) and the [ksqlDB command-line interface (CLI)](https://hub.docker.com/r/confluentinc/ksqldb-cli/). -ksqlDB runs separately from your {{ site.aktm }} cluster, so you specify -the IP addresses of the cluster's bootstrap servers when you start a -container for ksqlDB Server. 
To set up {{ site.cp }} by using containers, see -[Confluent Platform Quick Start (Docker)](https://docs.confluent.io/current/quickstart/ce-docker-quickstart.html). - Use the following settings to start containers that run ksqlDB in various configurations. -When your ksqlDB processes are running in containers, you can -[interact](#interact-with-ksqldb-running-in-a-docker-container) with -them by using shell scripts and Docker Compose files. - -- [Wait for an HTTP Endpoint to Be Available](#ksqldb-wait-for-http-endpoint) -- [Wait for a Particular Phrase in a Container's Log](#ksqldb-wait-for-message-in-container-log) -- [Run Custom Code Before Launching a Container's Program](#ksqldb-run-custom-code-before-launch) -- [Execute a SQL script in the ksqlDB CLI](#ksqldb-execute-script-in-cli) - -Scale Your ksqlDB Server Deployment ---------------------------------- - -You can scale ksqlDB by adding more capacity per server (vertically) or by -adding more servers (horizontally). Also, you can scale ksqlDB clusters -during live operations without loss of data. For more information, see -[Scaling ksqlDB](../capacity-planning.md#scaling-ksqldb). - -Assign Configuration Settings in the Docker Run Command +Assign configuration settings in the Docker run command ------------------------------------------------------- -You can dynamically pass configuration settings into containers by using +You can pass configuration settings dynamically into containers by using environment variables. When you start a container, set up the configuration with the `-e` or `--env` flags in the `docker run` command. @@ -68,10 +46,11 @@ Properties set with `KSQL_OPTS` take precedence over values specified in the ksqlDB configuration file. For more information, see [Setting ksqlDB Server Parameters](server-config/index.md#setting-ksqldb-server-parameters). -ksqlDB Server -------------- +ksqlDB Server configurations +---------------------------- -The following commands show how to run ksqlDB Server in a container. +The following commands show how to run ksqlDB Server in different +configurations. ### ksqlDB Headless Server Settings (Production) @@ -142,13 +121,13 @@ docker run -d \ In interactive mode, a ksqlDB CLI instance running outside of Docker can connect to the ksqlDB server running in Docker. -Connect ksqlDB Server to a Secure Kafka Cluster, Like Confluent Cloud -=================================================================== +### Connect ksqlDB Server to a secure Kafka Cluster, like Confluent Cloud -ksqlDB Server runs outside of your Kafka clusters, so you need specify in -the container environment how ksqlDB Server connects with a Kafka cluster. +ksqlDB Server runs outside of your {{ site.ak }} clusters, so you need to +specify in the container environment how ksqlDB Server connects with a +{{ site.ak }} cluster. -Run a ksqlDB Server that uses a secure connection to a Kafka cluster: +Run a ksqlDB Server that uses a secure connection to a {{ site.ak }} cluster: ```bash docker run -d \ @@ -275,7 +254,7 @@ KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true" ``` ksqlDB Command-line Interface (CLI) ---------------------------------- +----------------------------------- Develop the SQL queries and statements for your real-time streaming applications by using the ksqlDB CLI, or the graphical interface in @@ -355,8 +334,8 @@ Having trouble? Type 'help' (case-insensitive) for a rundown of how things work! 
ksql> ``` -Interact With ksqlDB Running in a Docker Container ------------------------------------------------- +Interact with ksqlDB running in a Docker container +-------------------------------------------------- You can communicate with ksqlDB Server and the ksqlDB CLI when they run in Docker containers. The following examples show common tasks with ksqlDB @@ -367,7 +346,7 @@ processes that run in containers. - [Run Custom Code Before Launching a Container's Program](#run-custom-code-before-launching-a-containers-program) - [Execute a ksqlDB script in the ksqlDB CLI](#execute-a-ksql-script-in-the-ksql-cli) -### Wait for an HTTP Endpoint to Be Available +### Wait for an HTTP endpoint to be available Sometimes, a container reports its state as `up` before it's actually running. In this case, the docker-compose `depends_on` dependencies @@ -401,7 +380,7 @@ docker-compose exec ksql-cli bash -c \ 'echo -e "\n\n⏳ Waiting for ksqlDB to be available before launching CLI\n"; while [ $(curl -s -o /dev/null -w %{http_code} http://:8088/) -eq 000 ] ; do echo -e $(date) "ksqlDB Server HTTP state: " $(curl -s -o /dev/null -w %{http_code} http://:8088/) " (waiting for 200)" ; sleep 5 ; done; ksql http://:8088' ``` -### Wait for a Particular Phrase in a Container's Log +### Wait for a particular phrase in a container's log Use the `grep` command and [bash process substitution](http://tldp.org/LDP/abs/html/process-sub.html) @@ -413,13 +392,13 @@ echo -e "\n--\n\nWaiting for Kafka Connect to start on $CONNECT_HOST … ⏳" grep -q "Kafka Connect started" <(docker-compose logs -f $CONNECT_HOST) ``` -### Run Custom Code Before Launching a Container's Program +### Run custom code before launching a container's program You can run custom code, like downloading a dependency or moving a file, before a ksqlDB process starts in a container. Use Docker Compose to overlay a change on an existing image. -#### Get the Container's Default Command +#### Get the container's default command Discover the default command that the container runs when it launches, which is either `Entrypoint` or `Cmd`: @@ -440,7 +419,7 @@ Your output should resemble: In this example, the default command is `/etc/confluent/docker/run`. -#### Run Custom Commands Before the ksqlDB Process Starts +#### Run custom commands before the ksqlDB process starts In a Docker Compose file, add the commands that you want to run before the main process starts. Use the `command` option to override the diff --git a/docs-md/operate-and-deploy/installation/installing.md b/docs-md/operate-and-deploy/installation/installing.md index dcdcc524f223..b9881120a0fb 100644 --- a/docs-md/operate-and-deploy/installation/installing.md +++ b/docs-md/operate-and-deploy/installation/installing.md @@ -3,144 +3,223 @@ layout: page title: Install ksqlDB tagline: Install ksqlDB on-premises description: Learn how to install ksqlDB on-premises -keywords: ksql, install, on-prem +keywords: ksql, install, docker, docker-compose, container, docker image, on-prem --- -ksqlDB is a component of {{ site.cp }} and the ksqlDB binaries are located -at [Confluent Downloads](https://www.confluent.io/download/) -as a part of the {{ site.cp }} bundle. +ksqlDB and Docker containers +---------------------------- -ksqlDB must have access to a running {{ site.aktm }} cluster, which can -be in your data center, in a public cloud, {{ site.ccloud }}, etc. +You can run ksqlDB locally by using Docker containers, and you can define a +ksqlDB application by creating a *stack* of containers. 
A stack is a group of +containers that run interrelated services. For more information on stacks, see +[Describing Apps Using Stack Files](https://docs.docker.com/get-started/part4/#describing-apps-using-stack-files). -Docker support --------------- - -You can deploy ksqlDB by using -[Docker containers](install-ksqldb-with-docker.md). -Starting with {{ site.cp }} 4.1.2, Confluent maintains images at -[Docker Hub](https://hub.docker.com/r/confluentinc/ksqldb-server/). -To start ksqlDB containers in configurations like "ksqlDB Headless -Server" and "Interactive Server with Interceptors", see -[Docker Configuration Parameters](https://docs.confluent.io/current/installation/docker/config-reference.html). - -Watch the -[screencast of Installing and Running KSQL](https://www.youtube.com/embed/icwHpPm-TCA). - -Supported Versions and Interoperability ---------------------------------------- +The minimal ksqlDB stack has containers for {{ site.aktm }}, {{ site.zk }}, and +ksqlDB Server. More sophisticated ksqlDB stacks can have {{ site.sr }}, +{{ site.kconnect }}, and other third-party services, like Elasticsearch. -You can use ksqlDB with compatible {{ site.cp }} and {{ site.aktm }} -versions. +Stacks that have {{ site.sr }} can use Avro-encoded events in ksqlDB +applications. Without {{ site.sr }}, your ksqlDB applications can use only JSON +or delimited formats. -| ksqlDB version | {{ site.version }} | -| --------------------- | ------------------ | -| Apache Kafka version | 0.11.0 and later | -| {{ site.cp }} version | > 3.3.0 and later | +!!! note + ksqlDB Server can connect to a remote {{ site.ak }} cluster that isn't + defined in a local stack. In this case, you can run ksqlDB in a standalone + container and pass in the connection parameters on the command line. -Installation Instructions -------------------------- +Docker images for ksqlDB +------------------------ -Follow the instructions at -[Confluent Platform Quick Start (Local)](https://docs.confluent.io/current/quickstart/ce-quickstart.html). +ksqlDB has a server component and a separate command-line interface (CLI) +component. Both components have their own Docker images. -Also, you can install ksqlDB individually by using the -[confluent-ksql package](https://docs.confluent.io/current/installation/available_packages.html#confluent-ksql). -For more information, see -[Confluent Platform Packages](https://docs.confluent.io/current/installation/available_packages.html). +Confluent maintains images on [Docker Hub](https://hub.docker.com/u/confluentinc) +for ksqlDB components. -Scale Your ksqlDB Server Deployment ---------------------------------- +- [ksqldb-server](https://hub.docker.com/r/confluentinc/ksqldb-server/): + ksqlDB Server image +- [ksqldb-cli](https://hub.docker.com/r/confluentinc/ksqldb-cli/): + ksqlDB command-line interface (CLI) image +- [cp-zookeeper](https://hub.docker.com/r/confluentinc/cp-zookeeper): + {{ site.zk }} image (Community Version) +- [cp-schema-registry](https://hub.docker.com/r/confluentinc/cp-schema-registry): + {{ site.sr }} image (Community Version) +- [cp-kafka](https://hub.docker.com/r/confluentinc/cp-kafka): + {{ site.aktm }} image (Community Version) -You can scale ksqlDB by adding more capacity per server (vertically) or by -adding more servers (horizontally). Also, you can scale ksqlDB clusters -during live operations without loss of data. For more information, see -[Scaling ksqlDB](../capacity-planning.md#scaling-ksqldb). 
+Install ksqlDB and {{ site.aktm }} by starting a +[Docker Compose](https://docs.docker.com/compose/) stack that runs containers +based on these images. -Start the ksqlDB Server ------------------------ +The following sections show how to install Docker and use the docker-compose +tool to download and run the ksqlDB and related images. -The ksqlDB servers are run separately from the ksqlDB CLI client and {{ site.ak }} -brokers. You can deploy servers on remote machines, VMs, or containers, -and the CLI connects to these remote servers. - -You can add or remove servers from the same resource pool during live -operations, to scale query processing. You can use different resource pools -to support workload isolation. For example, you could deploy separate pools -for production and for testing. +Install Docker +-------------- -You can only connect to one ksqlDB server at a time. The ksqlDB CLI does not -support automatic failover to another ksqlDB Server. +Install the Docker distribution that's compatible with your operating system. -![image](../../img/client-server.png) +!!! important + For macOS and Windows, Docker runs in a virtual machine, and you must + allocate at least 8 GB of RAM for the Docker VM to run the {{ site.ak }} + stack. The default is 2 GB. + +- For macOS, use + [Docker Desktop for Mac](https://docs.docker.com/docker-for-mac/install/). + Change the **Memory** setting on the + [Resources](https://docs.docker.com/docker-for-mac/#resources) page to 8 GB. +- For Windows, use + [Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/install/). + Change the **Memory** setting on the + [Advanced](https://docs.docker.com/docker-for-windows/#advanced) settings + page to 8 GB. +- For Linux, follow the [instructions](https://docs.docker.com/install/) + for your Linux distribution. No memory change is necessary, because Docker + runs natively and not in a VM. + +Create a stack file to define your ksqlDB application +----------------------------------------------------- + +When you've decided on the services that you want in the stack, you define a +[Compose file, or "stack" file](https://docs.docker.com/compose/compose-file/), +which is a YAML file, to configure your ksqlDB application's services. The +stack file is frequently named `docker-compose.yml`. + +To start the ksqlDB application, use the +[docker-compose CLI](https://docs.docker.com/compose/) to +run the stack for the application. Run `docker-compose up` to start the +application and `docker-compose down` to stop it. + +!!! note + If the stack file is compatible with version 3 or higher, + you can use the `docker stack deploy` command: + `docker stack deploy -c docker-compose.yml your-ksqldb-app`. + For more information, see + [docker stack deploy](https://docs.docker.com/engine/reference/commandline/stack_deploy/). + +Build a ksqlDB application +-------------------------- + +The following steps describe how to define and deploy a stack for a ksqlDB +application. + +### 1. Define the services for your ksqlDB application + +Decide which services you need for your ksqlDB application. + +For a local installation, include one or more {{ site.ak }} brokers in the +stack and one or more ksqlDB Server instances. + +- {{ site.zk }} -- one, for cluster metadata +- {{ site.ak }} -- one or more +- {{ site.sr }} -- optional, but required for Avro +- ksqlDB Server -- one or more +- ksqlDB CLI -- optional +- Other services -- like Elasticsearch, optional + +!!! 
note + A stack that runs {{ site.sr }} can handle Avro-encoded events. Without + {{ site.sr }}, ksqlDB handles only JSON or delimited schemas for events. + +You can declare a container for the ksqlDB CLI in the stack, or you can attach +the CLI to a ksqlDB Server instance later, from a separate container. + +### 2. Build the stack + +Build a stack of services and deploy them by using +[Docker Compose](https://docs.docker.com/compose/). + +Define the configuration of your local ksqlDB installation by creating a +[Compose file](https://docs.docker.com/compose/compose-file/), which by +convention is named `docker-compose.yml`. + +### 3. Bring up the stack and run ksqlDB + +To bring up the stack and run ksqlDB, use the +[docker-compose](https://docs.docker.com/compose/reference/overview/) tool, +which reads your `docker-compose.yml` file and runs containers for your +{{ site.ak }} and ksqlDB services. + +ksqlDB Tutorial stack +--------------------- + +Many `docker-compose.yml` files exist for different configurations, and this +topic shows a simple stack that you can extend for your use cases. The +stack for the [ksqlDB Tutorial](../../tutorials/basics-docker.md) brings up +these services: + +- {{ site.zk }} +- {{ site.ak }} -- one broker +- {{ site.sr }} -- enables Avro +- ksqlDB Server -- one instance + +Download the [docker-compose.yml file](https://github.com/confluentinc/ksql/blob/master/docs/tutorials/docker-compose.yml) +for the [ksqlDB Tutorial](../../tutorials/basics-docker.md) to get started with +a local installation of ksqlDB. + +Start the stack +--------------- + +Navigate to the directory where you saved `docker-compose.yml` and start the +stack by using the `docker-compose up` command: -Follow these instructions to start ksqlDB Server using the -`ksql-server-start` script. +```bash +docker-compose up -d +``` !!! tip - These instructions assume you are installing {{ site.cp }} by using ZIP - or TAR archives. For more information, see [On-Premises - Deployments](https://docs.confluent.io/current/installation/installing_cp/index.html). - -### Specify your ksqlDB server configuration parameters - -Specify the configuration parameters for your ksqlDB server. You can also set -any property for the Kafka Streams API, the Kafka producer, or the Kafka -consumer. The required parameters are `bootstrap.servers` and `listeners`. -You can specify the parameters in the ksqlDB properties file or the `KSQL_OPTS` -environment variable. Properties set with `KSQL_OPTS` take precedence over -those specified in the properties file. - -A recommended approach is to configure a common set of properties -using the ksqlDB configuration file and override specific properties -as needed, using the `KSQL_OPTS` environment variable. + The `-d` option specifies detached mode, so containers run in the background. -Here are the default settings: +Your output should resemble: ``` - bootstrap.servers=localhost:9092 - listeners=http://0.0.0.0:8088 +Creating network "tutorials_default" with the default driver +Creating tutorials_zookeeper_1 ... done +Creating tutorials_kafka_1 ... done +Creating tutorials_schema-registry_1 ... done +Creating tutorials_ksql-server_1 ... done ``` -For more information, see [Configuring ksqlDB Server](server-config/index.md). - -### Start a ksqlDB Server node - -Start a server node by using the following command: +Run the following command to check the status of the stack. ```bash -/bin/ksql-server-start /etc/ksql/ksql-server.properties +docker-compose ps ``` -!!! 
tip - You can view the ksqlDB server help text by running - `/bin/ksql-server-start --help`. +Your output should resemble: + +``` + Name Command State Ports +---------------------------------------------------------------------------------------------------- +tutorials_kafka_1 /etc/confluent/docker/run Up 0.0.0.0:39092->39092/tcp, 9092/tcp +tutorials_ksql-server_1 /usr/bin/docker/run Up +tutorials_schema-registry_1 /etc/confluent/docker/run Up 8081/tcp +tutorials_zookeeper_1 /etc/confluent/docker/run Up 2181/tcp, 2888/tcp, 3888/tcp +``` -Have a look at [this page](server-config/index.md#non-interactive-headless-ksqldb-usage) -for instructions on running ksqlDB in non-interactive, "headless" -mode. +When all of the containers have the `Up` state, the ksqlDB stack is ready +to use. Start the ksqlDB CLI -------------------- -The ksqlDB CLI is a client that connects to the ksqlDB servers. +When all of the services in the stack are `Up`, run the following command +to start the ksqlDB CLI and connect to a ksqlDB Server. -You can start the ksqlDB CLI by providing the connection information to -the ksqlDB server. +For the ksqlDB Tutorial stack, run the following command to start a container +from the `ksqldb-cli:latest` image that runs the ksqlDB CLI: ```bash -LOG_DIR=./ksql_logs /bin/ksql http://localhost:8088 +docker run --network tutorials_default --rm --interactive --tty \ + confluentinc/ksqldb-cli:latest ksql \ + http://ksql-server:8088 ``` -!!! important - By default ksqlDB attempts to store its logs in a directory called `logs` - that is relative to the location of the `ksql` executable. For example, - if `ksql` is installed at `/usr/local/bin/ksql`, then it would attempt - to store its logs in `/usr/local/logs`. If you are running `ksql` from - the default {{ site.cp }} location, `/bin`, you must - override this default behavior by using the `LOG_DIR` variable. +The `--interactive` and `--tty` options together enable the ksqlDB CLI process +to communicate with the console. For more information, see +[docker run](https://docs.docker.com/engine/reference/run/#foreground). -After ksqlDB is started, your terminal should resemble this. +After the ksqlDB CLI starts, your terminal should resemble the following. ``` =========================================== @@ -163,15 +242,229 @@ Having trouble? Type 'help' (case-insensitive) for a rundown of how things work! ksql> ``` -!!! tip - You can view the ksqlDB CLI help text by running - `/bin/ksql --help`. +With the ksqlDB CLI running, you can issue SQL statements and queries on the +`ksql>` command line. + +!!! note + The ksqlDB CLI connects to one ksqlDB Server at a time. The ksqlDB CLI + doesn't support automatic failover to another ksqlDB Server. + +### Stacks with ksqlDB CLI containers + +Some stacks declare a container for the ksqlDB CLI but don't specify the +process that runs in the container. This kind of stack declares a generic +shell entry point: + +```yaml +entrypoint: /bin/sh +``` + +To interact with a CLI container that's defined this way, use the +`docker exec` command to start the `ksql` process within the container. + +```bash +docker exec ksqldb-cli ksql http://: +``` + +Stop your ksqlDB application +---------------------------- + +Run the following command to stop the containers in your stack. + +```bash +docker-compose down +``` + +Your output should resemble: + +``` +Stopping tutorials_ksql-server_1 ... done +Stopping tutorials_schema-registry_1 ... done +Stopping tutorials_kafka_1 ... done +Stopping tutorials_zookeeper_1 ... 
done +Removing tutorials_ksql-server_1 ... done +Removing tutorials_schema-registry_1 ... done +Removing tutorials_kafka_1 ... done +Removing tutorials_zookeeper_1 ... done +Removing network tutorials_default +``` + +Specify ksqlDB Server configuration parameters +---------------------------------------------- + +You can specify the configuration for your ksqlDB Server instances by using +these approaches: + +- **The `environment` key:** In the stack file, populate the `environment` key with + your settings. By convention, the ksqlDB setting names are prepended with + `KSQL_`. +- **`--env` option:** On the + [docker run](https://docs.docker.com/engine/reference/commandline/run/) + command line, specify your settings by using the `--env` option once for each + parameter. For more information, see + [Configure ksqlDB with Docker](install-ksqldb-with-docker.md). +- **ksqlDB Server config file:** Add settings to the `ksql-server.properties` + file. This requires building your own Docker image for ksqlDB Server. For + more information, see [Configuring ksqlDB Server](server-config/index.md). + +For a complete list of ksqlDB parameters, see the +[Configuration Parameter Reference](server-config/config-reference.md). + +You can also set any property for the {{ site.kstreams }} API, the +{{ site.ak }} producer, or the {{ site.ak }} consumer. + +A recommended approach is to configure a common set of properties +using the ksqlDB Server configuration file and override specific properties +as needed, using the environment variables. + +ksqlDB must have access to a running {{ site.ak }} cluster, which can be on +your local machine, in a data center, a public cloud, or {{ site.ccloud }}. +For ksqlDB Server to connect to a {{ site.ak }} cluster, the required +parameters are `KSQL_LISTENERS` and `KSQL_BOOTSTRAP_SERVERS`, which have the +following default values: + +```yaml +environment: + KSQL_LISTENERS: http://0.0.0.0:8088 + KSQL_BOOTSTRAP_SERVERS: localhost:9092 +``` -Configure ksqlDB for Confluent Cloud ------------------------------------- +ksqlDB runs separately from your {{ site.ak }} cluster, so you specify +the IP addresses of the cluster's bootstrap servers when you start a +container for ksqlDB Server. For more information, see +[Configuring ksqlDB Server](server-config/index.md). -You can use ksqlDB with a {{ site.ak }} cluster in {{ site.ccloud }}. For more -information, see +To start ksqlDB containers in configurations like "ksqlDB Headless Server" +and "ksqlDB Interactive Server (Development)", see +[Configure ksqlDB with Docker](install-ksqldb-with-docker.md). + +Supported versions and interoperability +--------------------------------------- + +You can use ksqlDB with compatible {{ site.aktm }} and {{ site.cp }} +versions. + +| ksqlDB version | {{ site.release }} | +| --------------------- | ------------------ | +| Apache Kafka version | 0.11.0 and later | +| {{ site.cp }} version | > 3.3.0 and later | + +Scale your ksqlDB Server deployment +----------------------------------- + +You can scale ksqlDB by adding more capacity per server (vertically) or by +adding more servers (horizontally). Also, you can scale ksqlDB clusters +during live operations without loss of data. For more information, see +[Scaling ksqlDB](../capacity-planning.md#scaling-ksqldb). + +The ksqlDB servers are run separately from the ksqlDB CLI client and {{ site.ak }} +brokers. You can deploy servers on remote machines, VMs, or containers, +and the CLI connects to these remote servers. 
+ +![image](../../img/client-server.png) + +You can add or remove servers from the same resource pool during live +operations, to scale query processing. You can use different resource pools +to support workload isolation. For example, you could deploy separate pools +for production and for testing. + +Next steps +---------- + +### Configure ksqlDB for Confluent Cloud + +You can use ksqlDB with a {{ site.ak }} cluster hosted in {{ site.ccloud }}. +For more information, see [Connect ksqlDB to Confluent Cloud](https://docs.confluent.io/current/cloud/connect/ksql-cloud-config.html). +### Experiment with other stacks + +You can try out other stacks that have different configurations, like the +"Quickstart" and "reference" stacks. + +#### ksqlDB Quickstart stack + +Download the `docker-compose.yml` file from the **Include Kafka** tab of the +[ksqlDB Quickstart](https://ksqldb.io/quickstart.html). + +This `docker-compose.yml` file defines a stack with these features: + +- Start one ksqlDB Server instance. +- Does not start {{ site.sr }}, so Avro schemas aren't available. +- Start the ksqlDB CLI container automatically. + +Use the following command to start the ksqlDB CLI in the running `ksqldb-cli` +container. + +```bash +docker exec -it ksqldb-cli ksql http://ksqldb-server:8088 +``` + +#### ksqlDB reference stack + +Download the [docker-compose.yml file](https://github.com/confluentinc/ksql/blob/master/docker-compose.yml) +for the reference stack in the ksqlDB repo. + +This `docker-compose.yml` file defines a stack with these features: + +- Start two or more ksqlDB Server instances. +- Start {{ site.sr }}. +- Start the ksqlDB CLI container automatically. + +Use the following command to start the ksqlDB CLI in the running `ksqldb-cli` +container. + +```bash +docker exec ksqldb-cli ksql http://primary-ksqldb-server:8088 +``` + +#### PostgreSQL stack + +The [ksqlDB with Embedded Connect](../../tutorials/embedded-connect.md) tutorial +shows how to integrate ksqlDB with an external PostgreSQL database to power a +simple ride sharing app. The `docker-compose.yml` file defines a stack with +these features: + +- Start one ksqlDB Server instance. +- Start PostgreSQL on port 5432. +- Start the ksqlDB CLI container automatically. + +Use the following command to start the ksqlDB CLI in the running `ksqldb-cli` +container. + +```bash +docker exec ksqldb-cli ksql http://ksqldb-server:8088 +``` + +#### Full ksqlDB event processing application + +[The Confluent Platform Demo](https://github.com/confluentinc/cp-demo/) +shows how to build an event streaming application that processes live edits to +real Wikipedia pages. The +[docker-compose.yml](https://github.com/confluentinc/cp-demo/blob/master/docker-compose.yml) +file shows how to configure a stack with these features: + +- Start a {{ site.ak }} cluster with two brokers. +- Start a {{ site.kconnect }} instance. +- Start {{ site.sr }}. +- Start containers running Elasticsearch and Kibana. +- Start ksqlDB Server and ksqlDB CLI containers. + +!!! note + You must install + [Confluent Platform](https://docs.confluent.io/current/installation/docker/installation/index.html) + to run this application. The {{ site.cp }} images are distinct from the + images that are used in this topic. + +#### Confluent examples repo + +There are numerous other stack files to explore in the +[Confluent examples repo](https://github.com/confluentinc/examples). + +!!! 
note
    You must install
    [Confluent Platform](https://docs.confluent.io/current/installation/docker/installation/index.html)
    to run these applications. The {{ site.cp }} images are distinct from the
    images that are used in this topic.

Page last revised on: {{ git_revision_date }}
diff --git a/docs/tutorials/docker-compose.yml b/docs/tutorials/docker-compose.yml
index b98031a0d87e..cf4582fb3f52 100644
--- a/docs/tutorials/docker-compose.yml
+++ b/docs/tutorials/docker-compose.yml
@@ -2,13 +2,13 @@ version: '2'
 services:
 
   zookeeper:
-    image: "confluentinc/cp-zookeeper:5.3.0"
+    image: "confluentinc/cp-zookeeper:latest"
     environment:
       ZOOKEEPER_CLIENT_PORT: 32181
       ZOOKEEPER_TICK_TIME: 2000
 
   kafka:
-    image: "confluentinc/cp-enterprise-kafka:5.3.0"
+    image: "confluentinc/cp-enterprise-kafka:latest"
     ports:
     - '39092:39092'
     depends_on:
@@ -30,7 +30,7 @@ services:
       CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
 
   schema-registry:
-    image: "confluentinc/cp-schema-registry:5.3.0"
+    image: "confluentinc/cp-schema-registry:latest"
     depends_on:
     - zookeeper
     - kafka
@@ -39,7 +39,7 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:32181
 
   ksql-server:
-    image: "confluentinc/cp-ksql-server:5.3.0"
+    image: "confluentinc/ksqldb-server:latest"
     depends_on:
     - kafka
     - schema-registry
diff --git a/mkdocs.yml b/mkdocs.yml
index 26c3f2cd343b..96a48c56e0d4 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -116,7 +116,7 @@ nav:
   - Deploy:
     - Install ksqlDB: operate-and-deploy/installation/index.md
     - Configure ksqlDB CLI: operate-and-deploy/installation/cli-config.md
-    - Install ksqlDB with Docker: operate-and-deploy/installation/install-ksqldb-with-docker.md
+    - Configure ksqlDB with Docker: operate-and-deploy/installation/install-ksqldb-with-docker.md
 #      - Install ksqlDB locally: installation/installing.md # leave in docs.confluent.io
 #      - Upgrade ksqlDB: installation/upgrading.md # possibly unnecessary in a Dockerized world
     - Check the Health of a ksqlDB Server: operate-and-deploy/installation/check-ksqldb-server-health.md

From ac8fb6309eef4263dfbea9dcd1383ebbcde703ae Mon Sep 17 00:00:00 2001
From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com>
Date: Tue, 14 Jan 2020 12:52:37 +0000
Subject: [PATCH 107/123] fix: deadlock when closing transient push query
 (#4297)

fixes: https://github.com/confluentinc/ksql/issues/4296

The produce side now calls `offer` in a loop, with a short timeout, to try
and put the row into the blocking queue. When the consume side closes the
query, e.g. on an `EOFException` if the user has closed the connection, the
query first closes the queue, setting a flag that the producers check on
each loop iteration and causing any blocked producers to exit the loop.
Then it can safely close the KS topology.
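In outline, the producer side follows this pattern (a minimal, self-contained
sketch of the offer-and-check-flag approach for illustration only; names such
as `OfferLoopSketch` and `OFFER_TIMEOUT_MS` are illustrative, not the actual
ksqlDB types):

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public final class OfferLoopSketch {

  private static final int OFFER_TIMEOUT_MS = 100; // illustrative timeout

  private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(10);
  private volatile boolean closed = false;

  /** Producer side: keep offering until the row is accepted or the queue is closed. */
  public void produce(final String row) throws InterruptedException {
    while (!closed) {
      // Unlike put(), offer() with a timeout returns control periodically,
      // so the producer notices when the consumer has closed the queue:
      if (queue.offer(row, OFFER_TIMEOUT_MS, TimeUnit.MILLISECONDS)) {
        break;
      }
    }
  }

  /** Consumer side: set the flag first, so any blocked producer exits its loop. */
  public void close() {
    closed = true;
  }
}
```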
(cherry picked from commit 6b5ce0c1a6f34891dcac88ad7454a06f3ed08f37) --- .../ksql/query/BlockingRowQueue.java | 68 ++++++++++++ .../confluent/ksql/query/QueryExecutor.java | 5 +- .../ksql/query/TransientQueryQueue.java | 76 ++++++++----- .../ksql/util/TransientQueryMetadata.java | 24 ++--- .../integration/EndToEndIntegrationTest.java | 6 +- .../ksql/query/TransientQueryQueueTest.java | 102 ++++++++++++++---- .../ksql/util/TransientQueryMetadataTest.java | 87 +++++++++++++++ .../entity/QueryDescriptionFactoryTest.java | 14 +-- .../streaming/QueryStreamWriterTest.java | 21 ++-- .../streaming/StreamedQueryResourceTest.java | 46 +++++++- 10 files changed, 366 insertions(+), 83 deletions(-) create mode 100644 ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java create mode 100644 ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java b/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java new file mode 100644 index 000000000000..6512ec9e15ce --- /dev/null +++ b/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java @@ -0,0 +1,68 @@ +/* + * Copyright 2020 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.query; + +import io.confluent.ksql.GenericRow; +import java.util.Collection; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; +import org.apache.kafka.streams.KeyValue; + +/** + * The queue between the Kafka-streams topology and the client connection. + * + *
+ * <p>The KS topology writes to the queue from its {@code StreamThread}, while the KSQL server
+ * thread that is servicing the client request reads from the queue and writes to the client
+ * socket.
+ */
+public interface BlockingRowQueue {
+
+  /**
+   * Sets the limit handler that will be called when any row limit is reached.
+   *
+   * <p>Replaces any previous handler.
+   *
+   * @param limitHandler the handler.
+   */
+  void setLimitHandler(LimitHandler limitHandler);
+
+  /**
+   * Poll the queue for a single row
+   *
+   * @see BlockingQueue#poll(long, TimeUnit)
+   */
+  KeyValue<String, GenericRow> poll(long timeout, TimeUnit unit)
+      throws InterruptedException;
+
+  /**
+   * Drain the queue to the supplied {@code collection}.
+   *
+   * @see BlockingQueue#drainTo(Collection)
+   */
+  void drainTo(Collection<? super KeyValue<String, GenericRow>> collection);
+
+  /**
+   * The size of the queue.
+   *
+   * @see BlockingQueue#size()
+   */
+  int size();
+
+  /**
+   * Close the queue.
+   */
+  void close();
+}
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java
index 638ca2d6748b..93ffd3e0ad53 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java
@@ -154,7 +154,7 @@ public TransientQueryMetadata buildTransientQuery(
       final LogicalSchema schema,
       final OptionalInt limit
   ) {
-    final TransientQueryQueue queue = buildTransientQueryQueue(queryId, physicalPlan, limit);
+    final BlockingRowQueue queue = buildTransientQueryQueue(queryId, physicalPlan, limit);
     final String transientQueryPrefix =
         ksqlConfig.getString(KsqlConfig.KSQL_TRANSIENT_QUERY_NAME_PREFIX_CONFIG);
     final String applicationId = addTimeSuffix(getQueryApplicationId(
@@ -171,9 +171,8 @@ public TransientQueryMetadata buildTransientQuery(
         streams,
         schema,
         sources,
-        queue::setLimitHandler,
         planSummary,
-        queue.getQueue(),
+        queue,
         applicationId,
         streamsBuilder.build(),
         streamsProperties,
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java b/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java
index 2d308e374a9a..eb983e560dcc 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java
@@ -15,12 +15,15 @@
 
 package io.confluent.ksql.query;
 
+import com.google.common.annotations.VisibleForTesting;
 import io.confluent.ksql.GenericRow;
 import io.confluent.ksql.util.KsqlException;
+import java.util.Collection;
 import java.util.Objects;
 import java.util.OptionalInt;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.kstream.ForeachAction;
 import org.apache.kafka.streams.kstream.KStream;
@@ -29,41 +32,62 @@
 /**
  * A queue of rows for transient queries.
  */
-class TransientQueryQueue {
+class TransientQueryQueue implements BlockingRowQueue {
 
   private final LimitQueueCallback callback;
-  private final BlockingQueue<KeyValue<String, GenericRow>> rowQueue =
-      new LinkedBlockingQueue<>(100);
+  private final BlockingQueue<KeyValue<String, GenericRow>> rowQueue;
+  private final int offerTimeoutMs;
+  private volatile boolean closed = false;
 
   TransientQueryQueue(final KStream<?, GenericRow> kstream, final OptionalInt limit) {
+    this(kstream, limit, 100, 100);
+  }
+
+  @VisibleForTesting
+  TransientQueryQueue(
+      final KStream<?, GenericRow> kstream,
+      final OptionalInt limit,
+      final int queueSizeLimit,
+      final int offerTimeoutMs
+  ) {
    this.callback = limit.isPresent()
        ? new LimitedQueueCallback(limit.getAsInt())
        : new UnlimitedQueueCallback();
+    this.rowQueue = new LinkedBlockingQueue<>(queueSizeLimit);
+    this.offerTimeoutMs = offerTimeoutMs;
 
-    kstream.foreach(new TransientQueryQueue.QueuePopulator<>(rowQueue, callback));
+    kstream.foreach(new QueuePopulator<>());
   }
 
-  BlockingQueue<KeyValue<String, GenericRow>> getQueue() {
-    return rowQueue;
+  @Override
+  public void setLimitHandler(final LimitHandler limitHandler) {
+    callback.setLimitHandler(limitHandler);
   }
 
-  void setLimitHandler(final LimitHandler limitHandler) {
-    callback.setLimitHandler(limitHandler);
+  @Override
+  public KeyValue<String, GenericRow> poll(final long timeout, final TimeUnit unit)
+      throws InterruptedException {
+    return rowQueue.poll(timeout, unit);
   }
 
-  @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
-  static final class QueuePopulator<K> implements ForeachAction<K, GenericRow> {
+  @Override
+  public void drainTo(final Collection<? super KeyValue<String, GenericRow>> collection) {
+    rowQueue.drainTo(collection);
+  }
 
-    private final BlockingQueue<KeyValue<String, GenericRow>> queue;
-    private final QueueCallback callback;
+  @Override
+  public int size() {
+    return rowQueue.size();
+  }
 
-    QueuePopulator(
-        final BlockingQueue<KeyValue<String, GenericRow>> queue,
-        final QueueCallback callback
-    ) {
-      this.queue = Objects.requireNonNull(queue, "queue");
-      this.callback = Objects.requireNonNull(callback, "callback");
-    }
+  @Override
+  public void close() {
+    closed = true;
+  }
+
+  @VisibleForTesting
+  @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
+  final class QueuePopulator<K> implements ForeachAction<K, GenericRow> {
 
     @Override
     public void apply(final K key, final GenericRow row) {
@@ -76,18 +100,22 @@ public void apply(final K key, final GenericRow row) {
           return;
         }
 
-        final String keyString = getStringKey(key);
-        queue.put(new KeyValue<>(keyString, row));
+        final KeyValue<String, GenericRow> kv = new KeyValue<>(getStringKey(key), row);
 
-        callback.onQueued();
-      } catch (final InterruptedException exception) {
+        while (!closed) {
+          if (rowQueue.offer(kv, offerTimeoutMs, TimeUnit.MILLISECONDS)) {
+            callback.onQueued();
+            break;
+          }
+        }
+      } catch (final InterruptedException e) {
        throw new KsqlException("InterruptedException while enqueueing:" + key);
       }
     }
 
     private String getStringKey(final K key) {
       if (key instanceof Windowed) {
-        final Windowed windowedKey = (Windowed) key;
+        final Windowed<?> windowedKey = (Windowed<?>) key;
        return String.format("%s : %s", windowedKey.key(), windowedKey.window());
       }
 
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java
index 89004e7a42ad..68e9c4ce8bd4 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java
@@ -15,18 +15,16 @@
 
 package io.confluent.ksql.util;
 
-import io.confluent.ksql.GenericRow;
 import io.confluent.ksql.name.SourceName;
+import io.confluent.ksql.query.BlockingRowQueue;
 import io.confluent.ksql.query.LimitHandler;
 import io.confluent.ksql.schema.ksql.LogicalSchema;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
 import org.apache.kafka.streams.KafkaStreams;
-import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.Topology;
 
 /**
@@ -34,9 +32,8 @@
 */
 public class TransientQueryMetadata extends QueryMetadata {
 
-  private final BlockingQueue<KeyValue<String, GenericRow>> rowQueue;
+  private final BlockingRowQueue rowQueue;
   private final AtomicBoolean isRunning = new
AtomicBoolean(true);
-  private final Consumer<LimitHandler> limitHandlerSetter;
 
   // CHECKSTYLE_RULES.OFF: ParameterNumberCheck
   public TransientQueryMetadata(
@@ -44,9 +41,8 @@ public TransientQueryMetadata(
       final KafkaStreams kafkaStreams,
       final LogicalSchema logicalSchema,
       final Set<SourceName> sourceNames,
-      final Consumer<LimitHandler> limitHandlerSetter,
       final String executionPlan,
-      final BlockingQueue<KeyValue<String, GenericRow>> rowQueue,
+      final BlockingRowQueue rowQueue,
       final String queryApplicationId,
       final Topology topology,
       final Map<String, Object> streamsProperties,
@@ -63,9 +59,8 @@ public TransientQueryMetadata(
         topology,
         streamsProperties,
         overriddenProperties,
-        closeCallback
-    );
-    this.limitHandlerSetter = Objects.requireNonNull(limitHandlerSetter, "limitHandlerSetter");
+        closeCallback);
+
     this.rowQueue = Objects.requireNonNull(rowQueue, "rowQueue");
   }
 
@@ -73,7 +68,7 @@ public boolean isRunning() {
     return isRunning.get();
   }
 
-  public BlockingQueue<KeyValue<String, GenericRow>> getRowQueue() {
+  public BlockingRowQueue getRowQueue() {
     return rowQueue;
   }
 
@@ -94,11 +89,16 @@ public int hashCode() {
   }
 
   public void setLimitHandler(final LimitHandler limitHandler) {
-    limitHandlerSetter.accept(limitHandler);
+    rowQueue.setLimitHandler(limitHandler);
   }
 
   @Override
   public void close() {
+    // To avoid deadlock, close the queue first to ensure producer side isn't blocked trying to
+    // write to the blocking queue, otherwise super.close call can deadlock:
+    rowQueue.close();
+
+    // Now safe to close:
     super.close();
     isRunning.set(false);
   }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java
index ae3edbcfb317..580ccdf3b16b 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java
@@ -30,6 +30,7 @@
 import io.confluent.ksql.GenericRow;
 import io.confluent.ksql.function.udf.Udf;
 import io.confluent.ksql.function.udf.UdfDescription;
+import io.confluent.ksql.query.BlockingRowQueue;
 import io.confluent.ksql.query.QueryId;
 import io.confluent.ksql.serde.Format;
 import io.confluent.ksql.util.KsqlConstants;
@@ -44,7 +45,6 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
@@ -216,7 +216,7 @@ public void shouldSelectAllFromDerivedStream() throws Exception {
         "SELECT * from pageviews_female EMIT CHANGES;");
 
     final List<List<Object>> results = new ArrayList<>();
-    final BlockingQueue<KeyValue<String, GenericRow>> rowQueue = queryMetadata.getRowQueue();
+    final BlockingRowQueue rowQueue = queryMetadata.getRowQueue();
 
     // From the mock data, we expect exactly 3 page views from female users.
final List expectedPages = ImmutableList.of("PAGE_2", "PAGE_5", "PAGE_5"); @@ -402,7 +402,7 @@ private static List verifyAvailableRows( final TransientQueryMetadata queryMetadata, final int expectedRows ) throws Exception { - final BlockingQueue> rowQueue = queryMetadata.getRowQueue(); + final BlockingRowQueue rowQueue = queryMetadata.getRowQueue(); TestUtils.waitForCondition( () -> rowQueue.size() >= expectedRows, diff --git a/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java b/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java index aea22e2c4e53..e59167da08ed 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java @@ -16,9 +16,9 @@ package io.confluent.ksql.query; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -26,47 +26,58 @@ import io.confluent.ksql.GenericRow; import io.confluent.ksql.query.TransientQueryQueue.QueuePopulator; +import java.util.ArrayList; +import java.util.List; import java.util.OptionalInt; -import java.util.Queue; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.stream.IntStream; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.kstream.KStream; +import org.junit.After; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -@SuppressWarnings("ConstantConditions") +@SuppressWarnings("unchecked") @RunWith(MockitoJUnitRunner.class) public class TransientQueryQueueTest { private static final int SOME_LIMIT = 4; + private static final int MAX_LIMIT = SOME_LIMIT * 2; private static final GenericRow ROW_ONE = mock(GenericRow.class); private static final GenericRow ROW_TWO = mock(GenericRow.class); + @Rule + public final Timeout timeout = Timeout.seconds(10); + @Mock private LimitHandler limitHandler; @Mock private KStream kStreamsApp; @Captor private ArgumentCaptor> queuePopulatorCaptor; - private Queue> queue; private QueuePopulator queuePopulator; + private TransientQueryQueue queue; + private ScheduledExecutorService executorService; @Before public void setUp() { - final TransientQueryQueue queuer = - new TransientQueryQueue(kStreamsApp, OptionalInt.of(SOME_LIMIT)); - - queuer.setLimitHandler(limitHandler); - - queue = queuer.getQueue(); + givenQueue(OptionalInt.of(SOME_LIMIT)); + } - verify(kStreamsApp).foreach(queuePopulatorCaptor.capture()); - queuePopulator = queuePopulatorCaptor.getValue(); + @After + public void tearDown() { + if (executorService != null) { + executorService.shutdownNow(); + } } @Test @@ -76,11 +87,10 @@ public void shouldQueue() { queuePopulator.apply("key2", ROW_TWO); // Then: - assertThat(queue, hasSize(2)); - assertThat(queue.peek().key, is("key1")); - assertThat(queue.remove().value, is(ROW_ONE)); - assertThat(queue.peek().key, is("key2")); - assertThat(queue.remove().value, 
is(ROW_TWO)); + assertThat(drainValues(), contains( + new KeyValue<>("key1", ROW_ONE), + new KeyValue<>("key2", ROW_TWO) + )); } @Test @@ -89,7 +99,7 @@ public void shouldNotQueueNullValues() { queuePopulator.apply("key1", null); // Then: - assertThat(queue, is(empty())); + assertThat(queue.size(), is(0)); } @Test @@ -99,7 +109,21 @@ public void shouldQueueUntilLimitReached() { .forEach(idx -> queuePopulator.apply("key1", ROW_ONE)); // Then: - assertThat(queue, hasSize(SOME_LIMIT)); + assertThat(queue.size(), is(SOME_LIMIT)); + } + + @Test + public void shouldPoll() throws Exception { + // Given: + queuePopulator.apply("key1", ROW_ONE); + queuePopulator.apply("key2", ROW_TWO); + + // When: + final KeyValue result = queue.poll(1, TimeUnit.SECONDS); + + // Then: + assertThat(result, is(new KeyValue<>("key1", ROW_ONE))); + assertThat(drainValues(), contains(new KeyValue<>("key2", ROW_TWO))); } @Test @@ -131,4 +155,42 @@ public void shouldCallLimitHandlerOnlyOnce() { // Then: verify(limitHandler, times(1)).limitReached(); } + + @Test + public void shouldBlockOnProduceOnceQueueLimitReachedAndUnblockOnClose() { + // Given: + givenQueue(OptionalInt.empty()); + + IntStream.range(0, MAX_LIMIT) + .forEach(idx -> queuePopulator.apply("key1", ROW_ONE)); + + givenWillCloseQueueAsync(); + + // When: + queuePopulator.apply("should not be queued", ROW_TWO); + + // Then: did not block and: + assertThat(queue.size(), is(MAX_LIMIT)); + } + + private void givenWillCloseQueueAsync() { + executorService = Executors.newSingleThreadScheduledExecutor(); + executorService.schedule(queue::close, 200, TimeUnit.MILLISECONDS); + } + + private void givenQueue(final OptionalInt limit) { + clearInvocations(kStreamsApp); + queue = new TransientQueryQueue(kStreamsApp, limit, MAX_LIMIT, 1); + + queue.setLimitHandler(limitHandler); + + verify(kStreamsApp).foreach(queuePopulatorCaptor.capture()); + queuePopulator = queuePopulatorCaptor.getValue(); + } + + private List> drainValues() { + final List> entries = new ArrayList<>(); + queue.drainTo(entries); + return entries; + } } \ No newline at end of file diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java b/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java new file mode 100644 index 000000000000..aab26c6fb146 --- /dev/null +++ b/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java @@ -0,0 +1,87 @@ +/* + * Copyright 2020 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"); you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.util; + +import static org.mockito.Mockito.inOrder; + +import io.confluent.ksql.name.SourceName; +import io.confluent.ksql.query.BlockingRowQueue; +import io.confluent.ksql.schema.ksql.LogicalSchema; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.Topology; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class TransientQueryMetadataTest { + + private static final String QUERY_ID = "queryId"; + private static final String EXECUTION_PLAN = "execution plan"; + private static final String SQL = "sql"; + + @Mock + private KafkaStreams kafkaStreams; + @Mock + private LogicalSchema logicalSchema; + @Mock + private Set sourceNames; + @Mock + private BlockingRowQueue rowQueue; + @Mock + private Topology topology; + @Mock + private Map props; + @Mock + private Map overrides; + @Mock + private Consumer closeCallback; + private TransientQueryMetadata query; + + @Before + public void setUp() { + query = new TransientQueryMetadata( + SQL, + kafkaStreams, + logicalSchema, + sourceNames, + EXECUTION_PLAN, + rowQueue, + QUERY_ID, + topology, + props, + overrides, + closeCallback + ); + } + + @Test + public void shouldCloseQueueBeforeTopologyToAvoidDeadLock() { + // When: + query.close(); + + // Then: + final InOrder inOrder = inOrder(rowQueue, kafkaStreams); + inOrder.verify(rowQueue).close(); + inOrder.verify(kafkaStreams).close(); + } +} \ No newline at end of file diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java index f0a24bb21811..34a29d981b5e 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java @@ -26,7 +26,7 @@ import io.confluent.ksql.metastore.model.DataSource.DataSourceType; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; -import io.confluent.ksql.query.LimitHandler; +import io.confluent.ksql.query.BlockingRowQueue; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; @@ -41,7 +41,6 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Optional; -import java.util.concurrent.LinkedBlockingQueue; import java.util.function.Consumer; import java.util.stream.Collectors; import org.apache.kafka.streams.KafkaStreams; @@ -78,7 +77,7 @@ public class QueryDescriptionFactoryTest { @Mock(name = TOPOLOGY_TEXT) private TopologyDescription topologyDescription; @Mock - private Consumer limitHandler; + private BlockingRowQueue queryQueue; @Mock private KsqlTopic sinkTopic; private QueryMetadata transientQuery; @@ -95,9 +94,8 @@ public void setUp() { queryStreams, SOME_SCHEMA, SOURCE_NAMES, - limitHandler, "execution plan", - new LinkedBlockingQueue<>(), + queryQueue, "app id", topology, STREAMS_PROPS, @@ -207,9 +205,8 @@ public void shouldHandleRowTimeInValueSchemaForTransientQuery() { queryStreams, schema, SOURCE_NAMES, - limitHandler, "execution plan", - new LinkedBlockingQueue<>(), + queryQueue, "app id", topology, STREAMS_PROPS, @@ -240,9 +237,8 @@ 
public void shouldHandleRowKeyInValueSchemaForTransientQuery() { queryStreams, schema, SOURCE_NAMES, - limitHandler, "execution plan", - new LinkedBlockingQueue<>(), + queryQueue, "app id", topology, STREAMS_PROPS, diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java index 1ac874e28b9a..1a2f73cf58c9 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java @@ -32,6 +32,7 @@ import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.json.JsonMapper; import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.query.BlockingRowQueue; import io.confluent.ksql.query.LimitHandler; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; @@ -42,7 +43,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.apache.kafka.streams.KafkaStreams; @@ -59,7 +59,7 @@ import org.junit.rules.Timeout; import org.junit.runner.RunWith; -@SuppressWarnings({"unchecked", "ConstantConditions"}) +@SuppressWarnings("unchecked") @RunWith(EasyMockRunner.class) public class QueryStreamWriterTest { @@ -74,7 +74,7 @@ public class QueryStreamWriterTest { @Mock(MockType.NICE) private TransientQueryMetadata queryMetadata; @Mock(MockType.NICE) - private BlockingQueue> rowQueue; + private BlockingRowQueue rowQueue; private Capture ehCapture; private Capture>> drainCapture; private Capture limitHandlerCapture; @@ -115,10 +115,11 @@ public void setUp() { } @Test - public void shouldWriteAnyPendingRowsBeforeReportingException() throws Exception { + public void shouldWriteAnyPendingRowsBeforeReportingException() { // Given: expect(queryMetadata.isRunning()).andReturn(true).anyTimes(); - expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); + rowQueue.drainTo(capture(drainCapture)); + expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); @@ -136,10 +137,11 @@ public void shouldWriteAnyPendingRowsBeforeReportingException() throws Exception } @Test - public void shouldExitAndDrainIfQueryStopsRunning() throws Exception { + public void shouldExitAndDrainIfQueryStopsRunning() { // Given: expect(queryMetadata.isRunning()).andReturn(true).andReturn(false); - expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); + rowQueue.drainTo(capture(drainCapture)); + expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); @@ -155,10 +157,11 @@ public void shouldExitAndDrainIfQueryStopsRunning() throws Exception { } @Test - public void shouldExitAndDrainIfLimitReached() throws Exception { + public void shouldExitAndDrainIfLimitReached() { // Given: expect(queryMetadata.isRunning()).andReturn(true).anyTimes(); - expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); + rowQueue.drainTo(capture(drainCapture)); + expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java 
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index e21246e54641..e5bfed96aa27 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -23,7 +23,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; @@ -47,6 +46,8 @@ import io.confluent.ksql.parser.tree.PrintTopic; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.Statement; +import io.confluent.ksql.query.BlockingRowQueue; +import io.confluent.ksql.query.LimitHandler; import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.entity.KsqlErrorMessage; import io.confluent.ksql.rest.entity.KsqlRequest; @@ -70,12 +71,15 @@ import java.io.PipedInputStream; import java.io.PipedOutputStream; import java.time.Duration; +import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Scanner; import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -371,9 +375,8 @@ public void shouldStreamRowsCorrectly() throws Throwable { mockKafkaStreams, SOME_SCHEMA, Collections.emptySet(), - limitHandler -> {}, "", - rowQueue, + new TestRowQueue(rowQueue), "", mock(Topology.class), Collections.emptyMap(), @@ -653,4 +656,41 @@ public void shouldSuggestAlternativesIfPrintTopicDoesNotExist() { new KsqlRequest(PRINT_TOPIC, Collections.emptyMap(), null) ); } + + private static class TestRowQueue implements BlockingRowQueue { + + private final SynchronousQueue> rowQueue; + + TestRowQueue( + final SynchronousQueue> rowQueue + ) { + this.rowQueue = Objects.requireNonNull(rowQueue, "rowQueue"); + } + + @Override + public void setLimitHandler(final LimitHandler limitHandler) { + + } + + @Override + public KeyValue poll(final long timeout, final TimeUnit unit) + throws InterruptedException { + return rowQueue.poll(timeout, unit); + } + + @Override + public void drainTo(final Collection> collection) { + rowQueue.drainTo(collection); + } + + @Override + public int size() { + return rowQueue.size(); + } + + @Override + public void close() { + + } + } } From 22aaaa79e1ee27f99f2757552edf4d4e822f78bc Mon Sep 17 00:00:00 2001 From: Andy Coates Date: Tue, 14 Jan 2020 21:35:21 +0000 Subject: [PATCH 108/123] Revert "fix: deadlock when closing transient push query (#4297)" This reverts commit ac8fb630 --- .../ksql/query/BlockingRowQueue.java | 68 ------------ .../confluent/ksql/query/QueryExecutor.java | 5 +- .../ksql/query/TransientQueryQueue.java | 76 +++++-------- .../ksql/util/TransientQueryMetadata.java | 24 ++--- .../integration/EndToEndIntegrationTest.java | 6 +- .../ksql/query/TransientQueryQueueTest.java | 102 ++++-------------- .../ksql/util/TransientQueryMetadataTest.java | 87 --------------- .../entity/QueryDescriptionFactoryTest.java | 14 ++- .../streaming/QueryStreamWriterTest.java | 21 ++-- .../streaming/StreamedQueryResourceTest.java 
| 46 +------- 10 files changed, 83 insertions(+), 366 deletions(-) delete mode 100644 ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java delete mode 100644 ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java b/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java deleted file mode 100644 index 6512ec9e15ce..000000000000 --- a/ksql-engine/src/main/java/io/confluent/ksql/query/BlockingRowQueue.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2020 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package io.confluent.ksql.query; - -import io.confluent.ksql.GenericRow; -import java.util.Collection; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.TimeUnit; -import org.apache.kafka.streams.KeyValue; - -/** - * The queue between the Kafka-streams topology and the client connection. - * - *
<p>The KS topology writes to the queue from its {@code StreamThread}, while the KSQL server
- * thread that is servicing the client request reads from the queue and writes to the client
- * socket.
- */
-public interface BlockingRowQueue {
-
-  /**
-   * Sets the limit handler that will be called when any row limit is reached.
-   *
-   * <p>Replaces any previous handler.
-   *
-   * @param limitHandler the handler.
-   */
-  void setLimitHandler(LimitHandler limitHandler);
-
-  /**
-   * Poll the queue for a single row
-   *
-   * @see BlockingQueue#poll(long, TimeUnit)
-   */
-  KeyValue<String, GenericRow> poll(long timeout, TimeUnit unit)
-      throws InterruptedException;
-
-  /**
-   * Drain the queue to the supplied {@code collection}.
-   *
-   * @see BlockingQueue#drainTo(Collection)
-   */
-  void drainTo(Collection<? super KeyValue<String, GenericRow>> collection);
-
-  /**
-   * The size of the queue.
-   *
-   * @see BlockingQueue#size()
-   */
-  int size();
-
-  /**
-   * Close the queue.
-   */
-  void close();
-}
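Context for the revert: the interface deleted above sits between the Kafka Streams topology and the client connection for transient push queries, and the limit handler it exposes is what terminates a statement such as the following (a minimal sketch, assuming a hypothetical `pageviews` stream has been registered):

    SELECT * FROM pageviews EMIT CHANGES LIMIT 5;

With the revert, the producer side returns to a blocking put into the queue; the comment removed from TransientQueryMetadata.close() further below records the deadlock that behaviour had been guarding against.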
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java
index 93ffd3e0ad53..638ca2d6748b 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/query/QueryExecutor.java
@@ -154,7 +154,7 @@ public TransientQueryMetadata buildTransientQuery(
       final LogicalSchema schema,
       final OptionalInt limit
   ) {
-    final BlockingRowQueue queue = buildTransientQueryQueue(queryId, physicalPlan, limit);
+    final TransientQueryQueue queue = buildTransientQueryQueue(queryId, physicalPlan, limit);
     final String transientQueryPrefix =
         ksqlConfig.getString(KsqlConfig.KSQL_TRANSIENT_QUERY_NAME_PREFIX_CONFIG);
     final String applicationId = addTimeSuffix(getQueryApplicationId(
@@ -171,8 +171,9 @@ public TransientQueryMetadata buildTransientQuery(
         streams,
         schema,
         sources,
+        queue::setLimitHandler,
         planSummary,
-        queue,
+        queue.getQueue(),
         applicationId,
         streamsBuilder.build(),
         streamsProperties,
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java b/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java
index eb983e560dcc..2d308e374a9a 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/query/TransientQueryQueue.java
@@ -15,15 +15,12 @@
 package io.confluent.ksql.query;
 
-import com.google.common.annotations.VisibleForTesting;
 import io.confluent.ksql.GenericRow;
 import io.confluent.ksql.util.KsqlException;
-import java.util.Collection;
 import java.util.Objects;
 import java.util.OptionalInt;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.kstream.ForeachAction;
 import org.apache.kafka.streams.kstream.KStream;
@@ -32,62 +29,41 @@
 /**
  * A queue of rows for transient queries.
  */
-class TransientQueryQueue implements BlockingRowQueue {
+class TransientQueryQueue {
 
   private final LimitQueueCallback callback;
-  private final BlockingQueue<KeyValue<String, GenericRow>> rowQueue;
-  private final int offerTimeoutMs;
-  private volatile boolean closed = false;
+  private final BlockingQueue<KeyValue<String, GenericRow>> rowQueue =
+      new LinkedBlockingQueue<>(100);
 
   TransientQueryQueue(final KStream<?, GenericRow> kstream, final OptionalInt limit) {
-    this(kstream, limit, 100, 100);
-  }
-
-  @VisibleForTesting
-  TransientQueryQueue(
-      final KStream<?, GenericRow> kstream,
-      final OptionalInt limit,
-      final int queueSizeLimit,
-      final int offerTimeoutMs
-  ) {
     this.callback = limit.isPresent()
         ? new LimitedQueueCallback(limit.getAsInt())
        : new UnlimitedQueueCallback();
-    this.rowQueue = new LinkedBlockingQueue<>(queueSizeLimit);
-    this.offerTimeoutMs = offerTimeoutMs;
-
-    kstream.foreach(new QueuePopulator<>());
-  }
 
-  @Override
-  public void setLimitHandler(final LimitHandler limitHandler) {
-    callback.setLimitHandler(limitHandler);
+    kstream.foreach(new TransientQueryQueue.QueuePopulator<>(rowQueue, callback));
   }
 
-  @Override
-  public KeyValue<String, GenericRow> poll(final long timeout, final TimeUnit unit)
-      throws InterruptedException {
-    return rowQueue.poll(timeout, unit);
+  BlockingQueue<KeyValue<String, GenericRow>> getQueue() {
+    return rowQueue;
   }
 
-  @Override
-  public void drainTo(final Collection<? super KeyValue<String, GenericRow>> collection) {
-    rowQueue.drainTo(collection);
+  void setLimitHandler(final LimitHandler limitHandler) {
+    callback.setLimitHandler(limitHandler);
   }
 
-  @Override
-  public int size() {
-    return rowQueue.size();
-  }
+  @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
+  static final class QueuePopulator<K> implements ForeachAction<K, GenericRow> {
 
-  @Override
-  public void close() {
-    closed = true;
-  }
+    private final BlockingQueue<KeyValue<String, GenericRow>> queue;
+    private final QueueCallback callback;
 
-  @VisibleForTesting
-  @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
-  final class QueuePopulator<K> implements ForeachAction<K, GenericRow> {
+    QueuePopulator(
+        final BlockingQueue<KeyValue<String, GenericRow>> queue,
+        final QueueCallback callback
+    ) {
+      this.queue = Objects.requireNonNull(queue, "queue");
+      this.callback = Objects.requireNonNull(callback, "callback");
+    }
 
     @Override
     public void apply(final K key, final GenericRow row) {
@@ -100,22 +76,18 @@ public void apply(final K key, final GenericRow row) {
         return;
       }
 
-      final KeyValue<String, GenericRow> kv = new KeyValue<>(getStringKey(key), row);
+      final String keyString = getStringKey(key);
+      queue.put(new KeyValue<>(keyString, row));
 
-      while (!closed) {
-        if (rowQueue.offer(kv, offerTimeoutMs, TimeUnit.MILLISECONDS)) {
-          callback.onQueued();
-          break;
-        }
-      }
-    } catch (final InterruptedException e) {
+      callback.onQueued();
+    } catch (final InterruptedException exception) {
       throw new KsqlException("InterruptedException while enqueueing:" + key);
     }
   }
 
   private String getStringKey(final K key) {
     if (key instanceof Windowed) {
-      final Windowed<?> windowedKey = (Windowed<?>) key;
+      final Windowed windowedKey = (Windowed) key;
       return String.format("%s : %s", windowedKey.key(), windowedKey.window());
     }
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java
index 68e9c4ce8bd4..89004e7a42ad 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/util/TransientQueryMetadata.java
@@ -15,16 +15,18 @@
 package io.confluent.ksql.util;
 
+import io.confluent.ksql.GenericRow;
 import io.confluent.ksql.name.SourceName;
-import io.confluent.ksql.query.BlockingRowQueue;
 import io.confluent.ksql.query.LimitHandler;
 import io.confluent.ksql.schema.ksql.LogicalSchema;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
 import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.Topology;
 
 /**
@@ -32,8 +34,9 @@
  */
 public class TransientQueryMetadata extends QueryMetadata {
 
-  private final BlockingRowQueue rowQueue;
+  private final BlockingQueue<KeyValue<String, GenericRow>> rowQueue;
   private final AtomicBoolean isRunning = new AtomicBoolean(true);
+  private final Consumer<LimitHandler> limitHandlerSetter;
 
   // CHECKSTYLE_RULES.OFF: ParameterNumberCheck
   public TransientQueryMetadata(
@@ -41,8 +44,9 @@ public TransientQueryMetadata(
       final KafkaStreams kafkaStreams,
       final LogicalSchema logicalSchema,
       final Set<SourceName> sourceNames,
+      final Consumer<LimitHandler> limitHandlerSetter,
       final String executionPlan,
-      final BlockingRowQueue rowQueue,
+      final BlockingQueue<KeyValue<String, GenericRow>> rowQueue,
       final String queryApplicationId,
       final Topology topology,
       final Map<String, Object> streamsProperties,
@@ -59,8 +63,9 @@ public TransientQueryMetadata(
         topology,
         streamsProperties,
         overriddenProperties,
-        closeCallback);
-
+        closeCallback
+    );
+    this.limitHandlerSetter = Objects.requireNonNull(limitHandlerSetter, "limitHandlerSetter");
     this.rowQueue = Objects.requireNonNull(rowQueue, "rowQueue");
   }
 
@@ -68,7 +73,7 @@ public boolean isRunning() {
     return isRunning.get();
  }
 
-  public BlockingRowQueue getRowQueue() {
+  public BlockingQueue<KeyValue<String, GenericRow>> getRowQueue() {
     return rowQueue;
   }
 
@@ -89,16 +94,11 @@ public int hashCode() {
   }
 
   public void setLimitHandler(final LimitHandler limitHandler) {
-    rowQueue.setLimitHandler(limitHandler);
+    limitHandlerSetter.accept(limitHandler);
   }
 
   @Override
   public void close() {
-    // To avoid deadlock, close the queue first to ensure producer side isn't blocked trying to
-    // write to the blocking queue, otherwise super.close call can deadlock:
-    rowQueue.close();
-
-    // Now safe to close:
     super.close();
     isRunning.set(false);
   }
diff --git a/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java b/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java
index 580ccdf3b16b..ae3edbcfb317 100644
--- a/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java
+++ b/ksql-engine/src/test/java/io/confluent/ksql/integration/EndToEndIntegrationTest.java
@@ -30,7 +30,6 @@
 import io.confluent.ksql.GenericRow;
 import io.confluent.ksql.function.udf.Udf;
 import io.confluent.ksql.function.udf.UdfDescription;
-import io.confluent.ksql.query.BlockingRowQueue;
 import io.confluent.ksql.query.QueryId;
 import io.confluent.ksql.serde.Format;
 import io.confluent.ksql.util.KsqlConstants;
@@ -45,6 +44,7 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
@@ -216,7 +216,7 @@ public void shouldSelectAllFromDerivedStream() throws Exception {
         "SELECT * from pageviews_female EMIT CHANGES;");
 
     final List<KeyValue<String, GenericRow>> results = new ArrayList<>();
-    final BlockingRowQueue rowQueue = queryMetadata.getRowQueue();
+    final BlockingQueue<KeyValue<String, GenericRow>> rowQueue = queryMetadata.getRowQueue();
 
     // From the mock data, we expect exactly 3 page views from female users.
final List expectedPages = ImmutableList.of("PAGE_2", "PAGE_5", "PAGE_5"); @@ -402,7 +402,7 @@ private static List verifyAvailableRows( final TransientQueryMetadata queryMetadata, final int expectedRows ) throws Exception { - final BlockingRowQueue rowQueue = queryMetadata.getRowQueue(); + final BlockingQueue> rowQueue = queryMetadata.getRowQueue(); TestUtils.waitForCondition( () -> rowQueue.size() >= expectedRows, diff --git a/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java b/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java index e59167da08ed..aea22e2c4e53 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/query/TransientQueryQueueTest.java @@ -16,9 +16,9 @@ package io.confluent.ksql.query; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -26,58 +26,47 @@ import io.confluent.ksql.GenericRow; import io.confluent.ksql.query.TransientQueryQueue.QueuePopulator; -import java.util.ArrayList; -import java.util.List; import java.util.OptionalInt; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; +import java.util.Queue; import java.util.stream.IntStream; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.kstream.KStream; -import org.junit.After; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.Timeout; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -@SuppressWarnings("unchecked") +@SuppressWarnings("ConstantConditions") @RunWith(MockitoJUnitRunner.class) public class TransientQueryQueueTest { private static final int SOME_LIMIT = 4; - private static final int MAX_LIMIT = SOME_LIMIT * 2; private static final GenericRow ROW_ONE = mock(GenericRow.class); private static final GenericRow ROW_TWO = mock(GenericRow.class); - @Rule - public final Timeout timeout = Timeout.seconds(10); - @Mock private LimitHandler limitHandler; @Mock private KStream kStreamsApp; @Captor private ArgumentCaptor> queuePopulatorCaptor; + private Queue> queue; private QueuePopulator queuePopulator; - private TransientQueryQueue queue; - private ScheduledExecutorService executorService; @Before public void setUp() { - givenQueue(OptionalInt.of(SOME_LIMIT)); - } + final TransientQueryQueue queuer = + new TransientQueryQueue(kStreamsApp, OptionalInt.of(SOME_LIMIT)); + + queuer.setLimitHandler(limitHandler); - @After - public void tearDown() { - if (executorService != null) { - executorService.shutdownNow(); - } + queue = queuer.getQueue(); + + verify(kStreamsApp).foreach(queuePopulatorCaptor.capture()); + queuePopulator = queuePopulatorCaptor.getValue(); } @Test @@ -87,10 +76,11 @@ public void shouldQueue() { queuePopulator.apply("key2", ROW_TWO); // Then: - assertThat(drainValues(), contains( - new KeyValue<>("key1", ROW_ONE), - new KeyValue<>("key2", ROW_TWO) - )); + assertThat(queue, hasSize(2)); + assertThat(queue.peek().key, is("key1")); + 
assertThat(queue.remove().value, is(ROW_ONE)); + assertThat(queue.peek().key, is("key2")); + assertThat(queue.remove().value, is(ROW_TWO)); } @Test @@ -99,7 +89,7 @@ public void shouldNotQueueNullValues() { queuePopulator.apply("key1", null); // Then: - assertThat(queue.size(), is(0)); + assertThat(queue, is(empty())); } @Test @@ -109,21 +99,7 @@ public void shouldQueueUntilLimitReached() { .forEach(idx -> queuePopulator.apply("key1", ROW_ONE)); // Then: - assertThat(queue.size(), is(SOME_LIMIT)); - } - - @Test - public void shouldPoll() throws Exception { - // Given: - queuePopulator.apply("key1", ROW_ONE); - queuePopulator.apply("key2", ROW_TWO); - - // When: - final KeyValue result = queue.poll(1, TimeUnit.SECONDS); - - // Then: - assertThat(result, is(new KeyValue<>("key1", ROW_ONE))); - assertThat(drainValues(), contains(new KeyValue<>("key2", ROW_TWO))); + assertThat(queue, hasSize(SOME_LIMIT)); } @Test @@ -155,42 +131,4 @@ public void shouldCallLimitHandlerOnlyOnce() { // Then: verify(limitHandler, times(1)).limitReached(); } - - @Test - public void shouldBlockOnProduceOnceQueueLimitReachedAndUnblockOnClose() { - // Given: - givenQueue(OptionalInt.empty()); - - IntStream.range(0, MAX_LIMIT) - .forEach(idx -> queuePopulator.apply("key1", ROW_ONE)); - - givenWillCloseQueueAsync(); - - // When: - queuePopulator.apply("should not be queued", ROW_TWO); - - // Then: did not block and: - assertThat(queue.size(), is(MAX_LIMIT)); - } - - private void givenWillCloseQueueAsync() { - executorService = Executors.newSingleThreadScheduledExecutor(); - executorService.schedule(queue::close, 200, TimeUnit.MILLISECONDS); - } - - private void givenQueue(final OptionalInt limit) { - clearInvocations(kStreamsApp); - queue = new TransientQueryQueue(kStreamsApp, limit, MAX_LIMIT, 1); - - queue.setLimitHandler(limitHandler); - - verify(kStreamsApp).foreach(queuePopulatorCaptor.capture()); - queuePopulator = queuePopulatorCaptor.getValue(); - } - - private List> drainValues() { - final List> entries = new ArrayList<>(); - queue.drainTo(entries); - return entries; - } } \ No newline at end of file diff --git a/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java b/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java deleted file mode 100644 index aab26c6fb146..000000000000 --- a/ksql-engine/src/test/java/io/confluent/ksql/util/TransientQueryMetadataTest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2020 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.util; - -import static org.mockito.Mockito.inOrder; - -import io.confluent.ksql.name.SourceName; -import io.confluent.ksql.query.BlockingRowQueue; -import io.confluent.ksql.schema.ksql.LogicalSchema; -import java.util.Map; -import java.util.Set; -import java.util.function.Consumer; -import org.apache.kafka.streams.KafkaStreams; -import org.apache.kafka.streams.Topology; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class TransientQueryMetadataTest { - - private static final String QUERY_ID = "queryId"; - private static final String EXECUTION_PLAN = "execution plan"; - private static final String SQL = "sql"; - - @Mock - private KafkaStreams kafkaStreams; - @Mock - private LogicalSchema logicalSchema; - @Mock - private Set sourceNames; - @Mock - private BlockingRowQueue rowQueue; - @Mock - private Topology topology; - @Mock - private Map props; - @Mock - private Map overrides; - @Mock - private Consumer closeCallback; - private TransientQueryMetadata query; - - @Before - public void setUp() { - query = new TransientQueryMetadata( - SQL, - kafkaStreams, - logicalSchema, - sourceNames, - EXECUTION_PLAN, - rowQueue, - QUERY_ID, - topology, - props, - overrides, - closeCallback - ); - } - - @Test - public void shouldCloseQueueBeforeTopologyToAvoidDeadLock() { - // When: - query.close(); - - // Then: - final InOrder inOrder = inOrder(rowQueue, kafkaStreams); - inOrder.verify(rowQueue).close(); - inOrder.verify(kafkaStreams).close(); - } -} \ No newline at end of file diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java index 34a29d981b5e..f0a24bb21811 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java @@ -26,7 +26,7 @@ import io.confluent.ksql.metastore.model.DataSource.DataSourceType; import io.confluent.ksql.name.ColumnName; import io.confluent.ksql.name.SourceName; -import io.confluent.ksql.query.BlockingRowQueue; +import io.confluent.ksql.query.LimitHandler; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.PhysicalSchema; @@ -41,6 +41,7 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Optional; +import java.util.concurrent.LinkedBlockingQueue; import java.util.function.Consumer; import java.util.stream.Collectors; import org.apache.kafka.streams.KafkaStreams; @@ -77,7 +78,7 @@ public class QueryDescriptionFactoryTest { @Mock(name = TOPOLOGY_TEXT) private TopologyDescription topologyDescription; @Mock - private BlockingRowQueue queryQueue; + private Consumer limitHandler; @Mock private KsqlTopic sinkTopic; private QueryMetadata transientQuery; @@ -94,8 +95,9 @@ public void setUp() { queryStreams, SOME_SCHEMA, SOURCE_NAMES, + limitHandler, "execution plan", - queryQueue, + new LinkedBlockingQueue<>(), "app id", topology, STREAMS_PROPS, @@ -205,8 +207,9 @@ public void shouldHandleRowTimeInValueSchemaForTransientQuery() { queryStreams, schema, SOURCE_NAMES, + limitHandler, "execution plan", - queryQueue, + new LinkedBlockingQueue<>(), "app id", topology, STREAMS_PROPS, @@ -237,8 +240,9 @@ 
public void shouldHandleRowKeyInValueSchemaForTransientQuery() { queryStreams, schema, SOURCE_NAMES, + limitHandler, "execution plan", - queryQueue, + new LinkedBlockingQueue<>(), "app id", topology, STREAMS_PROPS, diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java index 1a2f73cf58c9..1ac874e28b9a 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/QueryStreamWriterTest.java @@ -32,7 +32,6 @@ import io.confluent.ksql.engine.KsqlEngine; import io.confluent.ksql.json.JsonMapper; import io.confluent.ksql.name.ColumnName; -import io.confluent.ksql.query.BlockingRowQueue; import io.confluent.ksql.query.LimitHandler; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.schema.ksql.types.SqlTypes; @@ -43,6 +42,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.apache.kafka.streams.KafkaStreams; @@ -59,7 +59,7 @@ import org.junit.rules.Timeout; import org.junit.runner.RunWith; -@SuppressWarnings("unchecked") +@SuppressWarnings({"unchecked", "ConstantConditions"}) @RunWith(EasyMockRunner.class) public class QueryStreamWriterTest { @@ -74,7 +74,7 @@ public class QueryStreamWriterTest { @Mock(MockType.NICE) private TransientQueryMetadata queryMetadata; @Mock(MockType.NICE) - private BlockingRowQueue rowQueue; + private BlockingQueue> rowQueue; private Capture ehCapture; private Capture>> drainCapture; private Capture limitHandlerCapture; @@ -115,11 +115,10 @@ public void setUp() { } @Test - public void shouldWriteAnyPendingRowsBeforeReportingException() { + public void shouldWriteAnyPendingRowsBeforeReportingException() throws Exception { // Given: expect(queryMetadata.isRunning()).andReturn(true).anyTimes(); - rowQueue.drainTo(capture(drainCapture)); - expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); + expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); @@ -137,11 +136,10 @@ public void shouldWriteAnyPendingRowsBeforeReportingException() { } @Test - public void shouldExitAndDrainIfQueryStopsRunning() { + public void shouldExitAndDrainIfQueryStopsRunning() throws Exception { // Given: expect(queryMetadata.isRunning()).andReturn(true).andReturn(false); - rowQueue.drainTo(capture(drainCapture)); - expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); + expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); @@ -157,11 +155,10 @@ public void shouldExitAndDrainIfQueryStopsRunning() { } @Test - public void shouldExitAndDrainIfLimitReached() { + public void shouldExitAndDrainIfLimitReached() throws Exception { // Given: expect(queryMetadata.isRunning()).andReturn(true).anyTimes(); - rowQueue.drainTo(capture(drainCapture)); - expectLastCall().andAnswer(rows("Row1", "Row2", "Row3")); + expect(rowQueue.drainTo(capture(drainCapture))).andAnswer(rows("Row1", "Row2", "Row3")); createWriter(); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java 
b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java index e5bfed96aa27..e21246e54641 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/streaming/StreamedQueryResourceTest.java @@ -23,6 +23,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; @@ -46,8 +47,6 @@ import io.confluent.ksql.parser.tree.PrintTopic; import io.confluent.ksql.parser.tree.Query; import io.confluent.ksql.parser.tree.Statement; -import io.confluent.ksql.query.BlockingRowQueue; -import io.confluent.ksql.query.LimitHandler; import io.confluent.ksql.rest.Errors; import io.confluent.ksql.rest.entity.KsqlErrorMessage; import io.confluent.ksql.rest.entity.KsqlRequest; @@ -71,15 +70,12 @@ import java.io.PipedInputStream; import java.io.PipedOutputStream; import java.time.Duration; -import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Scanner; import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -375,8 +371,9 @@ public void shouldStreamRowsCorrectly() throws Throwable { mockKafkaStreams, SOME_SCHEMA, Collections.emptySet(), + limitHandler -> {}, "", - new TestRowQueue(rowQueue), + rowQueue, "", mock(Topology.class), Collections.emptyMap(), @@ -656,41 +653,4 @@ public void shouldSuggestAlternativesIfPrintTopicDoesNotExist() { new KsqlRequest(PRINT_TOPIC, Collections.emptyMap(), null) ); } - - private static class TestRowQueue implements BlockingRowQueue { - - private final SynchronousQueue> rowQueue; - - TestRowQueue( - final SynchronousQueue> rowQueue - ) { - this.rowQueue = Objects.requireNonNull(rowQueue, "rowQueue"); - } - - @Override - public void setLimitHandler(final LimitHandler limitHandler) { - - } - - @Override - public KeyValue poll(final long timeout, final TimeUnit unit) - throws InterruptedException { - return rowQueue.poll(timeout, unit); - } - - @Override - public void drainTo(final Collection> collection) { - rowQueue.drainTo(collection); - } - - @Override - public int size() { - return rowQueue.size(); - } - - @Override - public void close() { - - } - } } From c7fb07f33943e362b84facb59d0358b57d848701 Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Tue, 14 Jan 2020 13:51:00 -0800 Subject: [PATCH 109/123] docs: add docs for COUNT_DISTINCT (#4300) --- .../ksqldb-reference/aggregate-functions.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs-md/developer-guide/ksqldb-reference/aggregate-functions.md b/docs-md/developer-guide/ksqldb-reference/aggregate-functions.md index 1c2b908488e4..4ef6f2889d48 100644 --- a/docs-md/developer-guide/ksqldb-reference/aggregate-functions.md +++ b/docs-md/developer-guide/ksqldb-reference/aggregate-functions.md @@ -89,6 +89,18 @@ returned will be the number of rows where `col1` is non-null. When `*` is specified, the count returned will be the total number of rows. 
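As a quick illustration of the distinction drawn above (a sketch, assuming a hypothetical `pageviews` stream with a nullable `user_id` column):

    SELECT page_id,
           COUNT(user_id) AS non_null_views,
           COUNT(*) AS total_views
    FROM pageviews
    GROUP BY page_id
    EMIT CHANGES;

`COUNT(user_id)` skips rows where `user_id` is NULL, while `COUNT(*)` counts every row in the group.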
+COUNT_DISTINCT
+--------------
+
+`COUNT_DISTINCT(col1)`
+
+Stream, Table
+
+Returns the _approximate_ number of unique values of `col1` in a group.
+The function implementation uses [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog)
+to estimate cardinalities of 10^9 with a typical standard error of 2%.
+
+
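A usage sketch for the new function (again assuming the hypothetical `pageviews` stream):

    SELECT page_id, COUNT_DISTINCT(user_id) AS approx_unique_users
    FROM pageviews
    GROUP BY page_id
    EMIT CHANGES;

Because the implementation is HyperLogLog based, `approx_unique_users` is an estimate rather than an exact count.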
 HISTOGRAM
 ---------

From 2d0bfe8c860c63a71fda07a986d93f659e9ccc97 Mon Sep 17 00:00:00 2001
From: Almog Gavra
Date: Tue, 14 Jan 2020 16:18:04 -0800
Subject: [PATCH 110/123] feat: expression support in JOINs (#4278)

---
 .../io/confluent/ksql/analyzer/Analysis.java  |  20 +-
 .../io/confluent/ksql/analyzer/Analyzer.java  | 142 ++++------
 .../ksql/analyzer/ExpressionAnalyzer.java     | 111 ++------
 .../ksql/planner/LogicalPlanner.java          |  19 +-
 .../confluent/ksql/planner/plan/JoinNode.java | 162 ++---------
 .../ksql/structured/SchemaKStream.java        |   2 +-
 .../ksql/structured/SchemaKTable.java         |  15 +
 .../ksql/analyzer/AnalyzerFunctionalTest.java | 115 +++++++-
 .../ksql/analyzer/ExpressionAnalyzerTest.java |  23 ++
 .../physical/PhysicalPlanBuilderTest.java     |  14 +-
 .../ksql/planner/LogicalPlannerTest.java      |   9 +-
 .../ksql/planner/plan/JoinNodeTest.java       | 260 ++----------------
 .../ksql/structured/SchemaKStreamTest.java    |  15 +
 .../query-validation-tests/joins.json         | 199 +++++---------
 14 files changed, 385 insertions(+), 721 deletions(-)

diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analysis.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analysis.java
index 3521d22c9f9b..66b9b0b8c848 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analysis.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analysis.java
@@ -281,30 +281,30 @@ public DataSource getDataSource() {
 
   @Immutable
   public static final class JoinInfo {
 
-    private final ColumnRef leftJoinField;
-    private final ColumnRef rightJoinField;
+    private final Expression leftJoinExpression;
+    private final Expression rightJoinExpression;
     private final JoinNode.JoinType type;
     private final Optional<WithinExpression> withinExpression;
 
     JoinInfo(
-        final ColumnRef leftJoinField,
-        final ColumnRef rightJoinField,
+        final Expression leftJoinExpression,
+        final Expression rightJoinExpression,
         final JoinType type,
         final Optional<WithinExpression> withinExpression
     ) {
-      this.leftJoinField = requireNonNull(leftJoinField, "leftJoinField");
-      this.rightJoinField = requireNonNull(rightJoinField, "rightJoinField");
+      this.leftJoinExpression = requireNonNull(leftJoinExpression, "leftJoinExpression");
+      this.rightJoinExpression = requireNonNull(rightJoinExpression, "rightJoinExpression");
       this.type = requireNonNull(type, "type");
       this.withinExpression = requireNonNull(withinExpression, "withinExpression");
     }
 
-    public ColumnRef getLeftJoinField() {
-      return leftJoinField;
+    public Expression getLeftJoinExpression() {
+      return leftJoinExpression;
     }
 
-    public ColumnRef getRightJoinField() {
-      return rightJoinField;
+    public Expression getRightJoinExpression() {
+      return rightJoinExpression;
     }
 
     public JoinType getType() {
diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java
index 7f0c174529ce..1cffda3855da 100644
--- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java
+++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/Analyzer.java
@@ -18,7 +18,9 @@
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.MoreCollectors;
 import io.confluent.ksql.analyzer.Analysis.AliasedDataSource;
 import io.confluent.ksql.analyzer.Analysis.Into;
 import io.confluent.ksql.analyzer.Analysis.JoinInfo;
@@ -67,7 +69,6 @@
 import io.confluent.ksql.util.SchemaUtil;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -349,21 +350,28 @@ protected AstNode visitJoin(final Join node, final Void context) {
       );
     }
 
-    final ColumnRef leftJoinField = getJoinFieldName(
-        comparisonExpression,
-        left.getAlias(),
-        left.getDataSource().getSchema()
-    );
+    final Set<ColumnRef> colsUsedInLeft = new ExpressionAnalyzer(analysis.getFromSourceSchemas())
+        .analyzeExpression(comparisonExpression.getLeft(), false);
+    final Set<ColumnRef> colsUsedInRight = new ExpressionAnalyzer(analysis.getFromSourceSchemas())
+        .analyzeExpression(comparisonExpression.getRight(), false);
 
-    final ColumnRef rightJoinField = getJoinFieldName(
-        comparisonExpression,
-        right.getAlias(),
-        right.getDataSource().getSchema()
-    );
+    final SourceName leftSourceName = getOnlySourceForJoin(
+        comparisonExpression.getLeft(), comparisonExpression, colsUsedInLeft);
+    final SourceName rightSourceName = getOnlySourceForJoin(
+        comparisonExpression.getRight(), comparisonExpression, colsUsedInRight);
+
+    if (!validJoin(left.getAlias(), right.getAlias(), leftSourceName, rightSourceName)) {
+      throw new KsqlException(
+          "Each side of the join must reference exactly one source and not the same source. "
+              + "Left side references " + leftSourceName
+              + " and right references " + rightSourceName
+      );
+    }
 
+    final boolean flipped = leftSourceName.equals(right.getAlias());
     analysis.setJoin(new JoinInfo(
-        leftJoinField,
-        rightJoinField,
+        flipped ? comparisonExpression.getRight() : comparisonExpression.getLeft(),
+        flipped ? comparisonExpression.getLeft() : comparisonExpression.getRight(),
         joinType,
         node.getWithinExpression()
     ));
@@ -371,6 +379,34 @@ protected AstNode visitJoin(final Join node, final Void context) {
     return null;
   }
 
+  private boolean validJoin(
+      final SourceName leftName,
+      final SourceName rightName,
+      final SourceName leftExpressionSource,
+      final SourceName rightExpressionSource
+  ) {
+    return ImmutableSet.of(leftExpressionSource, rightExpressionSource)
+        .containsAll(ImmutableList.of(leftName, rightName));
+  }
+
+  private SourceName getOnlySourceForJoin(
+      final Expression exp,
+      final ComparisonExpression join,
+      final Set<ColumnRef> columnRefs
+  ) {
+    try {
+      return columnRefs.stream()
+          .map(ColumnRef::source)
+          .filter(Optional::isPresent)
+          .map(Optional::get)
+          .collect(MoreCollectors.onlyElement());
+    } catch (final Exception e) {
+      throw new KsqlException("Invalid comparison expression '" + exp + "' in join '" + join
+          + "'. Each side of the join comparison must contain references from exactly one "
+          + "source.");
+    }
+  }
+
   private JoinNode.JoinType getJoinType(final Join node) {
     final JoinNode.JoinType joinType;
     switch (node.getType()) {
@@ -389,86 +425,6 @@ private JoinNode.JoinType getJoinType(final Join node) {
     return joinType;
   }
 
-  private ColumnReferenceExp checkExpressionType(
-      final ComparisonExpression comparisonExpression,
-      final Expression subExpression) {
-
-    if (!(subExpression instanceof ColumnReferenceExp)) {
-      throw new KsqlException(
-          String.format(
-              "%s : Invalid comparison expression '%s' in join '%s'. 
Joins must only contain a " - + "field comparison.", - comparisonExpression.getLocation().map(Objects::toString).orElse(""), - subExpression, - comparisonExpression - ) - ); - } - return (ColumnReferenceExp) subExpression; - } - - private ColumnRef getJoinFieldName( - final ComparisonExpression comparisonExpression, - final SourceName sourceAlias, - final LogicalSchema sourceSchema - ) { - final ColumnReferenceExp left = - checkExpressionType(comparisonExpression, comparisonExpression.getLeft()); - - Optional joinFieldName = getJoinFieldNameFromExpr(left, sourceAlias); - - if (!joinFieldName.isPresent()) { - final ColumnReferenceExp right = - checkExpressionType(comparisonExpression, comparisonExpression.getRight()); - - joinFieldName = getJoinFieldNameFromExpr(right, sourceAlias); - - if (!joinFieldName.isPresent()) { - // Should never happen as only QualifiedNameReference are allowed - throw new IllegalStateException("Cannot find join field name"); - } - } - - final ColumnRef fieldName = joinFieldName.get(); - - final Optional joinField = - getJoinFieldNameFromSource(fieldName.withoutSource(), sourceAlias, sourceSchema); - - return joinField - .orElseThrow(() -> new KsqlException( - String.format( - "%s : Invalid join criteria %s. Column %s.%s does not exist.", - comparisonExpression.getLocation().map(Objects::toString).orElse(""), - comparisonExpression, - sourceAlias.name(), - fieldName.name().toString(FormatOptions.noEscape()) - ) - )); - } - - private Optional getJoinFieldNameFromExpr( - final ColumnReferenceExp nameRef, - final SourceName sourceAlias - ) { - if (nameRef.getReference().source().isPresent() - && !nameRef.getReference().source().get().equals(sourceAlias)) { - return Optional.empty(); - } - - final ColumnRef fieldName = nameRef.getReference(); - return Optional.of(fieldName); - } - - private Optional getJoinFieldNameFromSource( - final ColumnRef fieldName, - final SourceName sourceAlias, - final LogicalSchema sourceSchema - ) { - return sourceSchema.findColumn(fieldName) - .map(Column::ref) - .map(ref -> ref.withSource(sourceAlias)); - } - @Override protected AstNode visitAliasedRelation(final AliasedRelation node, final Void context) { final SourceName structuredDataSourceName = ((Table) node.getRelation()).getName(); diff --git a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/ExpressionAnalyzer.java b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/ExpressionAnalyzer.java index e10ee0908c2f..0374ea0954e9 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/analyzer/ExpressionAnalyzer.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/analyzer/ExpressionAnalyzer.java @@ -15,24 +15,16 @@ package io.confluent.ksql.analyzer; -import io.confluent.ksql.execution.expression.tree.ArithmeticBinaryExpression; -import io.confluent.ksql.execution.expression.tree.Cast; +import com.google.common.collect.Iterables; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; -import io.confluent.ksql.execution.expression.tree.ComparisonExpression; -import io.confluent.ksql.execution.expression.tree.DereferenceExpression; import io.confluent.ksql.execution.expression.tree.Expression; -import io.confluent.ksql.execution.expression.tree.FunctionCall; -import io.confluent.ksql.execution.expression.tree.IsNotNullPredicate; -import io.confluent.ksql.execution.expression.tree.IsNullPredicate; -import io.confluent.ksql.execution.expression.tree.LikePredicate; -import io.confluent.ksql.execution.expression.tree.LogicalBinaryExpression; -import 
io.confluent.ksql.execution.expression.tree.NotExpression; -import io.confluent.ksql.execution.expression.tree.VisitParentExpressionVisitor; +import io.confluent.ksql.execution.expression.tree.TraversalExpressionVisitor; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.SchemaUtil; +import java.util.HashSet; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -48,99 +40,46 @@ class ExpressionAnalyzer { this.sourceSchemas = Objects.requireNonNull(sourceSchemas, "sourceSchemas"); } - void analyzeExpression(final Expression expression, final boolean allowWindowMetaFields) { - final Visitor visitor = new Visitor(allowWindowMetaFields); - visitor.process(expression, null); + Set analyzeExpression( + final Expression expression, + final boolean allowWindowMetaFields + ) { + final Set referencedColumns = new HashSet<>(); + final ColumnExtractor extractor = new ColumnExtractor(allowWindowMetaFields, referencedColumns); + extractor.process(expression, null); + return referencedColumns; } - private final class Visitor extends VisitParentExpressionVisitor { + private final class ColumnExtractor extends TraversalExpressionVisitor { + private final Set referencedColumns; private final boolean allowWindowMetaFields; - Visitor(final boolean allowWindowMetaFields) { + ColumnExtractor( + final boolean allowWindowMetaFields, + final Set referencedColumns + ) { this.allowWindowMetaFields = allowWindowMetaFields; - } - - public Object visitLikePredicate(final LikePredicate node, final Object context) { - process(node.getValue(), null); - return null; - } - - public Object visitFunctionCall(final FunctionCall node, final Object context) { - for (final Expression argExpr : node.getArguments()) { - process(argExpr, null); - } - return null; - } - - public Object visitArithmeticBinary( - final ArithmeticBinaryExpression node, - final Object context) { - process(node.getLeft(), null); - process(node.getRight(), null); - return null; - } - - public Object visitIsNotNullPredicate(final IsNotNullPredicate node, final Object context) { - return process(node.getValue(), context); - } - - public Object visitIsNullPredicate(final IsNullPredicate node, final Object context) { - return process(node.getValue(), context); - } - - public Object visitLogicalBinaryExpression( - final LogicalBinaryExpression node, - final Object context) { - process(node.getLeft(), null); - process(node.getRight(), null); - return null; + this.referencedColumns = referencedColumns; } @Override - public Object visitComparisonExpression( - final ComparisonExpression node, - final Object context) { - process(node.getLeft(), null); - process(node.getRight(), null); - return null; - } - - @Override - public Object visitNotExpression(final NotExpression node, final Object context) { - return process(node.getValue(), null); - } - - @Override - public Object visitCast(final Cast node, final Object context) { - process(node.getExpression(), context); - return null; - } - - @Override - public Object visitColumnReference( + public Void visitColumnReference( final ColumnReferenceExp node, final Object context ) { - throwOnUnknownOrAmbiguousColumn(node.getReference()); + final ColumnRef reference = node.getReference(); + referencedColumns.add(getQualifiedColumnRef(reference)); return null; } - @Override - public Object visitDereferenceExpression( - final 
DereferenceExpression node, - final Object context - ) { - process(node.getBase(), context); - return null; - } - - private void throwOnUnknownOrAmbiguousColumn(final ColumnRef name) { + private ColumnRef getQualifiedColumnRef(final ColumnRef name) { final Set sourcesWithField = sourceSchemas.sourcesWithField(name); if (sourcesWithField.isEmpty()) { if (allowWindowMetaFields && name.name().equals(SchemaUtil.WINDOWSTART_NAME)) { - return; + // window start doesn't need a qualifier as it's a special hacky column + return name; } throw new KsqlException("Column '" + name.toString(FormatOptions.noEscape()) @@ -156,6 +95,8 @@ private void throwOnUnknownOrAmbiguousColumn(final ColumnRef name) { throw new KsqlException("Column '" + name.name().name() + "' is ambiguous. " + "Could be any of: " + possibilities); } + + return name.withSource(Iterables.getOnlyElement(sourcesWithField)); } } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java index b6488ff65f79..e95cf32c886c 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/LogicalPlanner.java @@ -84,7 +84,8 @@ public OutputNode buildPlan() { } if (analysis.getPartitionBy().isPresent()) { - currentNode = buildRepartitionNode(currentNode, analysis.getPartitionBy().get()); + currentNode = buildRepartitionNode( + "PartitionBy", currentNode, analysis.getPartitionBy().get()); } if (!analysis.getTableFunctions().isEmpty()) { @@ -207,6 +208,7 @@ private static FilterNode buildFilterNode( } private RepartitionNode buildRepartitionNode( + final String planId, final PlanNode sourceNode, final Expression partitionBy ) { @@ -219,7 +221,7 @@ private RepartitionNode buildRepartitionNode( final LogicalSchema sourceSchema = sourceNode.getSchema(); final Column proposedKey = sourceSchema - .findValueColumn(columnRef) + .findColumn(columnRef) .orElseThrow(() -> new KsqlException("Invalid identifier for PARTITION BY clause: '" + columnRef.name().toString(FormatOptions.noEscape()) + "' Only columns from the " + "source schema can be referenced in the PARTITION BY clause.")); @@ -240,7 +242,7 @@ private RepartitionNode buildRepartitionNode( final LogicalSchema schema = buildRepartitionedSchema(sourceNode, partitionBy); return new RepartitionNode( - new PlanNodeId("PartitionBy"), + new PlanNodeId(planId), sourceNode, schema, partitionBy, @@ -286,10 +288,13 @@ private PlanNode buildSourceNode() { new PlanNodeId("Join"), analysis.getSelectExpressions(), joinInfo.get().getType(), - leftSourceNode, - rightSourceNode, - joinInfo.get().getLeftJoinField(), - joinInfo.get().getRightJoinField(), + // it is always safe to build the repartition node - this operation will be + // a no-op if a repartition is not required. 
if the source is a table, and + // a repartition is needed, then an exception will be thrown + buildRepartitionNode( + "LeftSourceKeyed", leftSourceNode, joinInfo.get().getLeftJoinExpression()), + buildRepartitionNode( + "RightSourceKeyed", rightSourceNode, joinInfo.get().getRightJoinExpression()), joinInfo.get().getWithinExpression() ); } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java index 5f07e36f585a..a1b0ce8dfd0a 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/JoinNode.java @@ -19,17 +19,11 @@ import com.google.common.collect.ImmutableMap; import io.confluent.ksql.execution.builder.KsqlQueryBuilder; import io.confluent.ksql.execution.context.QueryContext; -import io.confluent.ksql.execution.context.QueryContext.Stacker; -import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.plan.SelectExpression; import io.confluent.ksql.execution.streams.JoinParamsFactory; import io.confluent.ksql.metastore.model.DataSource.DataSourceType; import io.confluent.ksql.metastore.model.KeyField; import io.confluent.ksql.parser.tree.WithinExpression; -import io.confluent.ksql.schema.ksql.Column; -import io.confluent.ksql.schema.ksql.Column.Namespace; -import io.confluent.ksql.schema.ksql.ColumnRef; -import io.confluent.ksql.schema.ksql.FormatOptions; import io.confluent.ksql.schema.ksql.LogicalSchema; import io.confluent.ksql.serde.ValueFormat; import io.confluent.ksql.services.KafkaTopicClient; @@ -37,7 +31,6 @@ import io.confluent.ksql.structured.SchemaKTable; import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.Pair; -import io.confluent.ksql.util.SchemaUtil; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -54,11 +47,9 @@ public enum JoinType { } private final JoinType joinType; - private final DataSourceNode left; - private final DataSourceNode right; + private final PlanNode left; + private final PlanNode right; private final LogicalSchema schema; - private final ColumnRef leftJoinFieldName; - private final ColumnRef rightJoinFieldName; private final KeyField keyField; private final Optional withinExpression; private final ImmutableList selectExpressions; @@ -67,32 +58,23 @@ public JoinNode( final PlanNodeId id, final List selectExpressions, final JoinType joinType, - final DataSourceNode left, - final DataSourceNode right, - final ColumnRef leftJoinFieldName, - final ColumnRef rightJoinFieldName, + final PlanNode left, + final PlanNode right, final Optional withinExpression ) { super(id, calculateSinkType(left, right)); this.joinType = Objects.requireNonNull(joinType, "joinType"); this.left = Objects.requireNonNull(left, "left"); this.right = Objects.requireNonNull(right, "right"); - this.leftJoinFieldName = Objects.requireNonNull(leftJoinFieldName, "leftJoinFieldName"); - this.rightJoinFieldName = Objects.requireNonNull(rightJoinFieldName, "rightJoinFieldName"); this.withinExpression = Objects.requireNonNull(withinExpression, "withinExpression"); this.selectExpressions = ImmutableList .copyOf(Objects.requireNonNull(selectExpressions, "selectExpressions")); - final Column leftKeyCol = validateSchemaColumn(leftJoinFieldName, left.getSchema()); - validateSchemaColumn(rightJoinFieldName, right.getSchema()); - this.keyField = joinType == JoinType.OUTER ? 
KeyField.none() // Both source key columns can be null, hence neither can be the keyField - : left.getSchema().isKeyColumn(leftKeyCol.name()) - ? left.getKeyField() - : KeyField.of(leftKeyCol.ref()); + : left.getKeyField(); - this.schema = buildJoinSchema(left, leftJoinFieldName, right, rightJoinFieldName); + this.schema = buildJoinSchema(left, right); } @Override @@ -120,11 +102,11 @@ public List getSelectExpressions() { return selectExpressions; } - public DataSourceNode getLeft() { + public PlanNode getLeft() { return left; } - public DataSourceNode getRight() { + public PlanNode getRight() { return right; } @@ -138,7 +120,7 @@ public SchemaKStream buildStream(final KsqlQueryBuilder builder) { this, builder.buildNodeContext(getId().toString())); - return joinerFactory.getJoiner(left.getDataSourceType(), right.getDataSourceType()).join(); + return joinerFactory.getJoiner(left.getNodeOutputType(), right.getNodeOutputType()).join(); } @Override @@ -161,14 +143,8 @@ private void ensureMatchingPartitionCounts(final KafkaTopicClient kafkaTopicClie } } - private static String getSourceName(final DataSourceNode node) { - return node.getDataSource().getName().name(); - } - - private static Column validateSchemaColumn(final ColumnRef column, final LogicalSchema schema) { - return schema.findValueColumn(column) - .orElseThrow(() -> new IllegalArgumentException( - "Invalid join field, not found in schema: " + column)); + private static String getSourceName(final PlanNode node) { + return node.getTheSourceNode().getAlias().name(); } private static class JoinerFactory { @@ -220,15 +196,9 @@ private abstract static class Joiner { public abstract SchemaKStream join(); - SchemaKStream buildStream( - final PlanNode node, - final ColumnRef joinFieldName - ) { - return maybeRePartitionByKey( - node.buildStream(builder), - joinFieldName, - contextStacker - ); + @SuppressWarnings("unchecked") + SchemaKStream buildStream(final PlanNode node) { + return (SchemaKStream) node.buildStream(builder); } @SuppressWarnings("unchecked") @@ -246,17 +216,9 @@ SchemaKTable buildTable(final PlanNode node) { return (SchemaKTable) schemaKStream; } - @SuppressWarnings("unchecked") - static SchemaKStream maybeRePartitionByKey( - final SchemaKStream stream, - final ColumnRef joinFieldName, - final Stacker contextStacker - ) { - return stream.selectKey(new ColumnReferenceExp(joinFieldName), contextStacker); - } - - static ValueFormat getFormatForSource(final DataSourceNode sourceNode) { - return sourceNode.getDataSource() + static ValueFormat getFormatForSource(final PlanNode sourceNode) { + return sourceNode.getTheSourceNode() + .getDataSource() .getKsqlTopic() .getValueFormat(); } @@ -282,10 +244,10 @@ public SchemaKStream join() { } final SchemaKStream leftStream = buildStream( - joinNode.getLeft(), joinNode.leftJoinFieldName); + joinNode.getLeft()); final SchemaKStream rightStream = buildStream( - joinNode.getRight(), joinNode.rightJoinFieldName); + joinNode.getRight()); switch (joinNode.joinType) { case LEFT: @@ -342,7 +304,7 @@ public SchemaKStream join() { final SchemaKTable rightTable = buildTable(joinNode.getRight()); final SchemaKStream leftStream = buildStream( - joinNode.getLeft(), joinNode.leftJoinFieldName); + joinNode.getLeft()); switch (joinNode.joinType) { case LEFT: @@ -414,90 +376,20 @@ public SchemaKTable join() { } private static DataSourceType calculateSinkType( - final DataSourceNode left, - final DataSourceNode right + final PlanNode left, + final PlanNode right ) { - final DataSourceType leftType = 
left.getDataSourceType(); - final DataSourceType rightType = right.getDataSourceType(); + final DataSourceType leftType = left.getNodeOutputType(); + final DataSourceType rightType = right.getNodeOutputType(); return leftType == DataSourceType.KTABLE && rightType == DataSourceType.KTABLE ? DataSourceType.KTABLE : DataSourceType.KSTREAM; } private static LogicalSchema buildJoinSchema( - final DataSourceNode left, - final ColumnRef leftJoinFieldName, - final DataSourceNode right, - final ColumnRef rightJoinFieldName - ) { - final LogicalSchema leftSchema = selectKey(left, leftJoinFieldName); - final LogicalSchema rightSchema = selectKey(right, rightJoinFieldName); - - return JoinParamsFactory.createSchema(leftSchema, rightSchema); - } - - /** - * Adjust the schema to take into account any change in key columns. - * - * @param source the source node - * @param joinColumnRef the join column - * @return the true source schema after any change of key columns. - */ - private static LogicalSchema selectKey( - final DataSourceNode source, - final ColumnRef joinColumnRef + final PlanNode left, + final PlanNode right ) { - final LogicalSchema sourceSchema = source.getSchema(); - - final Column joinCol = sourceSchema.findColumn(joinColumnRef) - .orElseThrow(() -> new KsqlException("Unknown join column: " + joinColumnRef)); - - if (sourceSchema.key().size() != 1) { - throw new UnsupportedOperationException("Only single key columns supported"); - } - - if (joinCol.namespace() == Namespace.KEY) { - // Join column is only key column, so no change of key columns required: - return sourceSchema; - } - - final Optional keyColumn = source - .getKeyField() - .resolve(sourceSchema); - - if (keyColumn.isPresent() && keyColumn.get().equals(joinCol)) { - // Join column is KEY field, which is an alias for the only key column, so no change of key - // columns required: - return sourceSchema; - } - - // Change of key columns required - - if (source.getDataSourceType() == DataSourceType.KTABLE) { - // Tables do not support rekey: - final String sourceName = source.getDataSource().getName().toString(FormatOptions.noEscape()); - - if (!keyColumn.isPresent()) { - throw new KsqlException( - "Invalid join criteria: Source table (" + sourceName + ") has no key column " - + "defined. Only 'ROWKEY' is supported in the join criteria for a TABLE." - ); - } - - throw new KsqlException( - "Invalid join criteria: Source table " - + "(" + sourceName + ") key column " - + "(" + keyColumn.get().ref().toString(FormatOptions.noEscape()) + ") " - + "is not the column used in the join criteria (" - + joinCol.ref().toString(FormatOptions.noEscape()) + "). " - + "Only the table's key column or 'ROWKEY' is supported in the join criteria " - + "for a TABLE." - ); - } - - return LogicalSchema.builder() - .keyColumn(source.getAlias(), SchemaUtil.ROWKEY_NAME, joinCol.type()) - .valueColumns(sourceSchema.value()) - .build(); + return JoinParamsFactory.createSchema(left.getSchema(), right.getSchema()); } } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java index 28762d95e2c8..9018192fc8ad 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKStream.java @@ -360,7 +360,7 @@ private KeyField getNewKeyField(final Expression expression) { return getSchema().isMetaColumn(columnRef.name()) ? 
KeyField.none() : newKeyField; } - private boolean needsRepartition(final Expression expression) { + protected boolean needsRepartition(final Expression expression) { if (!(expression instanceof ColumnReferenceExp)) { return true; } diff --git a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKTable.java b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKTable.java index 68268f839070..b28976cddf73 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKTable.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/structured/SchemaKTable.java @@ -42,6 +42,7 @@ import java.util.List; import java.util.Optional; import java.util.Set; +import org.apache.kafka.connect.data.Struct; // CHECKSTYLE_RULES.OFF: ClassDataAbstractionCoupling public class SchemaKTable extends SchemaKStream { @@ -134,6 +135,20 @@ public SchemaKTable select( ); } + @SuppressWarnings("unchecked") + @Override + public SchemaKStream selectKey(final Expression keyExpression, + final Stacker contextStacker) { + if (!needsRepartition(keyExpression)) { + return (SchemaKStream) this; + } + + throw new UnsupportedOperationException("Cannot repartition a TABLE source. " + + "If this is a join, make sure that the criteria uses the TABLE key " + + this.keyField.ref().map(ColumnRef::toString).orElse("ROWKEY") + " instead of " + + keyExpression); + } + @Override public ExecutionStep getSourceStep() { return sourceTableStep; diff --git a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java index 4c8587a61d2c..a6f4ee46f244 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/AnalyzerFunctionalTest.java @@ -38,6 +38,8 @@ import io.confluent.ksql.execution.ddl.commands.KsqlTopic; import io.confluent.ksql.execution.expression.tree.BooleanLiteral; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; +import io.confluent.ksql.execution.expression.tree.FunctionCall; +import io.confluent.ksql.execution.expression.tree.IntegerLiteral; import io.confluent.ksql.execution.expression.tree.Literal; import io.confluent.ksql.execution.expression.tree.StringLiteral; import io.confluent.ksql.execution.plan.SelectExpression; @@ -47,6 +49,7 @@ import io.confluent.ksql.metastore.model.KeyField; import io.confluent.ksql.metastore.model.KsqlStream; import io.confluent.ksql.name.ColumnName; +import io.confluent.ksql.name.FunctionName; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.parser.KsqlParser.PreparedStatement; import io.confluent.ksql.parser.properties.with.CreateSourceAsProperties; @@ -171,8 +174,47 @@ public void testSimpleLeftJoinAnalysis() { assertThat(analysis.getFromDataSources().get(1).getAlias(), is(SourceName.of("T2"))); assertThat(analysis.getJoin(), is(not(Optional.empty()))); - assertThat(analysis.getJoin().get().getLeftJoinField(), is(ColumnRef.of(SourceName.of("T1"),ColumnName.of("COL1")))); - assertThat(analysis.getJoin().get().getRightJoinField(), is(ColumnRef.of(SourceName.of("T2"),ColumnName.of("COL1")))); + assertThat(analysis.getJoin().get().getLeftJoinExpression(), is(new ColumnReferenceExp(ColumnRef.of(SourceName.of("T1"),ColumnName.of("COL1"))))); + assertThat(analysis.getJoin().get().getRightJoinExpression(), is(new ColumnReferenceExp(ColumnRef.of(SourceName.of("T2"),ColumnName.of("COL1"))))); + + final List selects = 
analysis.getSelectExpressions().stream() + .map(SelectExpression::getExpression) + .map(Objects::toString) + .collect(Collectors.toList()); + + assertThat(selects, contains("T1.COL1", "T2.COL1", "T2.COL4", "T1.COL5", "T2.COL2")); + + final List aliases = analysis.getSelectExpressions().stream() + .map(SelectExpression::getAlias) + .collect(Collectors.toList()); + + assertThat(aliases.stream().map(ColumnName::name).collect(Collectors.toList()), + contains("T1_COL1", "T2_COL1", "T2_COL4", "COL5", "T2_COL2")); + } + + @Test + public void testExpressionLeftJoinAnalysis() { + // When: + final Analysis analysis = analyzeQuery( + "SELECT t1.col1, t2.col1, t2.col4, col5, t2.col2 " + + "FROM test1 t1 LEFT JOIN test2 t2 " + + "ON t1.col1 = SUBSTRING(t2.col1, 2) EMIT CHANGES;", jsonMetaStore); + + // Then: + assertThat(analysis.getFromDataSources(), hasSize(2)); + assertThat(analysis.getFromDataSources().get(0).getAlias(), is(SourceName.of("T1"))); + assertThat(analysis.getFromDataSources().get(1).getAlias(), is(SourceName.of("T2"))); + + assertThat(analysis.getJoin(), is(not(Optional.empty()))); + assertThat(analysis.getJoin().get().getLeftJoinExpression(), is(new ColumnReferenceExp(ColumnRef.of(SourceName.of("T1"),ColumnName.of("COL1"))))); + assertThat( + analysis.getJoin().get().getRightJoinExpression(), + is(new FunctionCall( + FunctionName.of("SUBSTRING"), + ImmutableList.of( + new ColumnReferenceExp(ColumnRef.of(SourceName.of("T2"),ColumnName.of("COL1"))), + new IntegerLiteral(2) + )))); final List selects = analysis.getSelectExpressions().stream() .map(SelectExpression::getExpression) @@ -200,8 +242,8 @@ public void shouldHandleJoinOnRowKey() { // Then: assertThat(join, is(not(Optional.empty()))); assertThat(join.get().getType(), is(JoinType.LEFT)); - assertThat(join.get().getLeftJoinField(), is(ColumnRef.of(SourceName.of("T1"),ColumnName.of("ROWKEY")))); - assertThat(join.get().getRightJoinField(), is(ColumnRef.of(SourceName.of("T2"), ColumnName.of("ROWKEY")))); + assertThat(join.get().getLeftJoinExpression(), is(new ColumnReferenceExp(ColumnRef.of(SourceName.of("T1"),ColumnName.of("ROWKEY"))))); + assertThat(join.get().getRightJoinExpression(), is(new ColumnReferenceExp(ColumnRef.of(SourceName.of("T2"), ColumnName.of("ROWKEY"))))); } @Test @@ -504,6 +546,71 @@ public void shouldThrowOnSelfJoin() { analyzer.analyze(query, Optional.of(createStreamAsSelect.getSink())); } + @Test + public void shouldFailOnJoinWithoutSource() { + // Given: + final CreateStreamAsSelect createStreamAsSelect = parseSingle( + "CREATE STREAM FOO AS " + + "SELECT * FROM test1 t1 JOIN test2 t2 ON t1.rowkey = 'foo';" + ); + + final Query query = createStreamAsSelect.getQuery(); + + final Analyzer analyzer = new Analyzer(jsonMetaStore, "", DEFAULT_SERDE_OPTIONS); + + // Expect: + expectedException.expect(KsqlException.class); + expectedException.expectMessage("Invalid comparison expression ''foo'' in join " + + "'(T1.ROWKEY = 'foo')'. 
Each side of the join comparision must contain references " + + "from exactly one source."); + + // When: + analyzer.analyze(query, Optional.of(createStreamAsSelect.getSink())); + } + + @Test + public void shouldFailOnJoinOnOverlappingSources() { + // Given: + final CreateStreamAsSelect createStreamAsSelect = parseSingle( + "CREATE STREAM FOO AS " + + "SELECT * FROM test1 t1 JOIN test2 t2 ON t1.rowkey + t2.rowkey = t1.rowkey;" + ); + + final Query query = createStreamAsSelect.getQuery(); + + final Analyzer analyzer = new Analyzer(jsonMetaStore, "", DEFAULT_SERDE_OPTIONS); + + // Expect: + expectedException.expect(KsqlException.class); + expectedException.expectMessage("Invalid comparison expression '(T1.ROWKEY + T2.ROWKEY)' in " + + "join '((T1.ROWKEY + T2.ROWKEY) = T1.ROWKEY)'. Each side of the join comparision must " + + "contain references from exactly one source."); + + // When: + analyzer.analyze(query, Optional.of(createStreamAsSelect.getSink())); + } + + @Test + public void shouldFailOnSelfJoinInCondition() { + // Given: + final CreateStreamAsSelect createStreamAsSelect = parseSingle( + "CREATE STREAM FOO AS " + + "SELECT * FROM test1 t1 JOIN test2 t2 ON t1.rowkey = t1.rowkey;" + ); + + final Query query = createStreamAsSelect.getQuery(); + + final Analyzer analyzer = new Analyzer(jsonMetaStore, "", DEFAULT_SERDE_OPTIONS); + + // Expect: + expectedException.expect(KsqlException.class); + expectedException.expectMessage("Each side of the join must reference exactly one source " + + "and not the same source. Left side references `T1` and right references `T1`"); + + // When: + analyzer.analyze(query, Optional.of(createStreamAsSelect.getSink())); + } + @SuppressWarnings("unchecked") private T parseSingle(final String simpleQuery) { return (T) Iterables.getOnlyElement(parse(simpleQuery, jsonMetaStore)); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/ExpressionAnalyzerTest.java b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/ExpressionAnalyzerTest.java index a39383f6ef1e..187f9e0b4493 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/analyzer/ExpressionAnalyzerTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/analyzer/ExpressionAnalyzerTest.java @@ -15,11 +15,14 @@ package io.confluent.ksql.analyzer; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.expression.tree.ComparisonExpression; import io.confluent.ksql.execution.expression.tree.ComparisonExpression.Type; @@ -33,6 +36,7 @@ import java.util.Arrays; import java.util.Set; import java.util.stream.Collectors; +import org.hamcrest.CoreMatchers; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -129,6 +133,25 @@ public void shouldThrowOnMultipleSources() { analyzer.analyzeExpression(expression, true); } + @Test + public void shouldAddQualifier() { + // Given: + final Expression expression = new ColumnReferenceExp( + ColumnRef.withoutSource(ColumnName.of("else")) + ); + + when(sourceSchemas.sourcesWithField(any())) + .thenReturn(ImmutableSet.of(SourceName.of("something"))); + + // When: + final Set columnRefs = analyzer.analyzeExpression(expression, true); + + // Then: + assertThat( + 
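
The shouldAddQualifier test above pins down how an unqualified column reference is resolved: the analyzer looks up which sources define the column and, when exactly one does, rewrites the reference with that source as its qualifier. A plain-JDK sketch of the rule, with made-up types and an ambiguity message that is illustrative only:

    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    final class QualifierResolution {
      static String qualify(final String column, final Map<String, Set<String>> columnsBySource) {
        final List<String> owners = columnsBySource.entrySet().stream()
            .filter(e -> e.getValue().contains(column))
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());

        if (owners.isEmpty()) {
          throw new IllegalArgumentException("Column '" + column + "' cannot be resolved.");
        }
        if (owners.size() > 1) {
          throw new IllegalArgumentException("Column '" + column + "' is ambiguous.");
        }
        return owners.get(0) + "." + column;
      }
    }

For example, qualify("else", Map.of("something", Set.of("else"))) returns "something.else", which mirrors the ColumnRef the test expects.
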
Iterables.getOnlyElement(columnRefs), + is(ColumnRef.of(SourceName.of("something"), ColumnName.of("else")))); + } + @Test public void shouldThrowOnNoSources() { // Given: diff --git a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java index 6ef10d5f24d1..eef1046a9c51 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/physical/PhysicalPlanBuilderTest.java @@ -76,11 +76,11 @@ public class PhysicalPlanBuilderTest { + " WITH (KAFKA_TOPIC = 'test3', VALUE_FORMAT = 'JSON', KEY='ID');"; private static final String CREATE_TABLE_TEST4 = "CREATE TABLE TEST4 " - + "(ROWKEY BIGINT KEY, ID BIGINT, COL0 VARCHAR, COL1 DOUBLE) " + + "(ROWKEY BIGINT KEY, ID BIGINT, COL0 BIGINT, COL1 DOUBLE) " + " WITH (KAFKA_TOPIC = 'test4', VALUE_FORMAT = 'JSON', KEY='ID');"; private static final String CREATE_TABLE_TEST5 = "CREATE TABLE TEST5 " - + "(ROWKEY BIGINT KEY, ID BIGINT, COL0 VARCHAR, COL1 DOUBLE) " + + "(ROWKEY BIGINT KEY, ID BIGINT, COL0 BIGINT, COL1 DOUBLE) " + " WITH (KAFKA_TOPIC = 'test5', VALUE_FORMAT = 'JSON', KEY='ID');"; private static final String CREATE_STREAM_TEST6 = "CREATE STREAM TEST6 " @@ -343,9 +343,8 @@ public void shouldThrowIfLeftTableNotJoiningOnTableKey() { // Then: expectedException.expect(KsqlException.class); - expectedException.expectMessage( - "Source table (TEST4) key column (TEST4.ID) is not the column " - + "used in the join criteria (TEST4.COL0)."); + expectedException.expectMessage("Cannot repartition a TABLE source. If this is a join, make " + + "sure that the criteria uses the TABLE key TEST4.ID instead of TEST4.COL0"); // When: execute("CREATE TABLE t1 AS " @@ -361,9 +360,8 @@ public void shouldThrowIfRightTableNotJoiningOnTableKey() { // Then: expectedException.expect(KsqlException.class); - expectedException.expectMessage( - "Source table (TEST5) key column (TEST5.ID) is not the column " - + "used in the join criteria (TEST5.COL0)."); + expectedException.expectMessage("Cannot repartition a TABLE source. 
If this is a join, make " + + "sure that the criteria uses the TABLE key TEST5.ID instead of TEST5.COL0"); // When: execute("CREATE TABLE t1 AS " diff --git a/ksql-engine/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java b/ksql-engine/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java index c3d85e440171..e3d29f588810 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/planner/LogicalPlannerTest.java @@ -32,6 +32,7 @@ import io.confluent.ksql.planner.plan.JoinNode; import io.confluent.ksql.planner.plan.PlanNode; import io.confluent.ksql.planner.plan.ProjectNode; +import io.confluent.ksql.planner.plan.RepartitionNode; import io.confluent.ksql.schema.ksql.ColumnRef; import io.confluent.ksql.schema.ksql.types.SqlTypes; import io.confluent.ksql.testutils.AnalysisTestUtil; @@ -93,9 +94,9 @@ public void testSimpleLeftJoinLogicalPlan() { assertThat(logicalPlan.getSources().get(0), instanceOf(ProjectNode.class)); assertThat(logicalPlan.getSources().get(0).getSources().get(0), instanceOf(JoinNode.class)); assertThat(logicalPlan.getSources().get(0).getSources().get(0).getSources() - .get(0), instanceOf(DataSourceNode.class)); + .get(0), instanceOf(RepartitionNode.class)); assertThat(logicalPlan.getSources().get(0).getSources().get(0).getSources() - .get(1), instanceOf(DataSourceNode.class)); + .get(1), instanceOf(RepartitionNode.class)); assertThat(logicalPlan.getSchema().value().size(), equalTo(4)); @@ -121,8 +122,8 @@ public void testSimpleLeftJoinFilterLogicalPlan() { assertThat(filterNode.getSources().get(0), instanceOf(JoinNode.class)); final JoinNode joinNode = (JoinNode) filterNode.getSources().get(0); - assertThat(joinNode.getSources().get(0), instanceOf(DataSourceNode.class)); - assertThat(joinNode.getSources().get(1), instanceOf(DataSourceNode.class)); + assertThat(joinNode.getSources().get(0), instanceOf(RepartitionNode.class)); + assertThat(joinNode.getSources().get(1), instanceOf(RepartitionNode.class)); } @Test diff --git a/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/JoinNodeTest.java b/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/JoinNodeTest.java index 3a5ecc64bd94..ed6d6c77ce43 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/JoinNodeTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/planner/plan/JoinNodeTest.java @@ -33,7 +33,6 @@ import io.confluent.ksql.execution.builder.KsqlQueryBuilder; import io.confluent.ksql.execution.context.QueryContext; import io.confluent.ksql.execution.ddl.commands.KsqlTopic; -import io.confluent.ksql.execution.expression.tree.ColumnReferenceExp; import io.confluent.ksql.execution.streams.KSPlanBuilder; import io.confluent.ksql.function.FunctionRegistry; import io.confluent.ksql.function.InternalFunctionRegistry; @@ -172,9 +171,6 @@ public void setUp() { new QueryContext.Stacker() .push(inv.getArgument(0).toString())); - when(left.getDataSourceType()).thenReturn(DataSourceType.KSTREAM); - when(right.getDataSourceType()).thenReturn(DataSourceType.KTABLE); - when(left.getSchema()).thenReturn(LEFT_NODE_SCHEMA); when(right.getSchema()).thenReturn(RIGHT_NODE_SCHEMA); @@ -182,50 +178,11 @@ public void setUp() { when(right.getPartitions(mockKafkaTopicClient)).thenReturn(2); when(left.getKeyField()).thenReturn(KeyField.of(LEFT_JOIN_FIELD_REF)); - when(right.getKeyField()).thenReturn(KeyField.of(RIGHT_JOIN_FIELD_REF)); setUpSource(left, VALUE_FORMAT, leftSource, "Foobar1"); setUpSource(right, 
OTHER_FORMAT, rightSource, "Foobar2"); } - @Test - public void shouldThrowIfLeftKeyFieldNotInLeftSchema() { - // Then: - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("Invalid join field"); - - // When: - new JoinNode( - nodeId, - Collections.emptyList(), - JoinNode.JoinType.LEFT, - left, - right, - ColumnRef.withoutSource(ColumnName.of("won't find me")), - RIGHT_JOIN_FIELD_REF, - Optional.empty() - ); - } - - @Test - public void shouldThrowIfRightKeyFieldNotInRightSchema() { - // Then: - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("Invalid join field"); - - // When: - new JoinNode( - nodeId, - Collections.emptyList(), - JoinNode.JoinType.LEFT, - left, - right, - LEFT_JOIN_FIELD_REF, - ColumnRef.withoutSource(ColumnName.of("won't find me")), - Optional.empty() - ); - } - @Test public void shouldReturnLeftJoinKeyAsKeyField() { // When: @@ -235,8 +192,6 @@ public void shouldReturnLeftJoinKeyAsKeyField() { JoinType.LEFT, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -275,8 +230,8 @@ public void shouldThrowOnPartitionMismatch() { // Then: expectedException.expect(KsqlException.class); expectedException.expectMessage( - "Can't join TEST1 with TEST2 since the number of partitions don't match. TEST1 " - + "partitions = 1; TEST2 partitions = 2. Please repartition either one so that the " + "Can't join T1 with T2 since the number of partitions don't match. T1 " + + "partitions = 1; T2 partitions = 2. Please repartition either one so that the " + "number of partitions match." ); @@ -299,8 +254,6 @@ public void shouldPerformStreamToStreamLeftJoin() { JoinNode.JoinType.LEFT, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, WITHIN_EXPRESSION ); @@ -330,8 +283,6 @@ public void shouldPerformStreamToStreamInnerJoin() { JoinNode.JoinType.INNER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, WITHIN_EXPRESSION ); @@ -361,8 +312,6 @@ public void shouldPerformStreamToStreamOuterJoin() { JoinNode.JoinType.OUTER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, WITHIN_EXPRESSION ); @@ -383,8 +332,8 @@ public void shouldPerformStreamToStreamOuterJoin() { @Test public void shouldNotPerformStreamStreamJoinWithoutJoinWindow() { // Given: - when(left.getDataSourceType()).thenReturn(DataSourceType.KSTREAM); - when(right.getDataSourceType()).thenReturn(DataSourceType.KSTREAM); + when(left.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM); + when(right.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM); final JoinNode joinNode = new JoinNode( nodeId, @@ -392,8 +341,6 @@ public void shouldNotPerformStreamStreamJoinWithoutJoinWindow() { JoinNode.JoinType.INNER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -410,6 +357,10 @@ public void shouldNotPerformStreamStreamJoinWithoutJoinWindow() { @Test public void shouldNotPerformJoinIfInputPartitionsMisMatch() { // Given: + when(left.getTheSourceNode()).thenReturn(left); + when(right.getTheSourceNode()).thenReturn(right); + when(left.getAlias()).thenReturn(LEFT_ALIAS); + when(right.getAlias()).thenReturn(RIGHT_ALIAS); when(left.getPartitions(mockKafkaTopicClient)).thenReturn(3); final JoinNode joinNode = new JoinNode( @@ -418,81 +369,24 @@ public void shouldNotPerformJoinIfInputPartitionsMisMatch() { JoinNode.JoinType.OUTER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, WITHIN_EXPRESSION ); // Then: expectedException.expect(KsqlException.class); 
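
The partition-mismatch tests in this file all assert the standard co-partitioning precondition for joins: both inputs must have the same partition count, otherwise records with equal keys can land on different partitions and never meet. Reduced to a self-contained sketch (a hypothetical helper, not ksqlDB's implementation):

    final class CoPartitioning {
      static void validate(
          final String leftName, final int leftPartitions,
          final String rightName, final int rightPartitions) {
        if (leftPartitions != rightPartitions) {
          throw new IllegalStateException(String.format(
              "Can't join %s with %s since the number of partitions don't match. "
                  + "%s partitions = %d; %s partitions = %d. Please repartition "
                  + "either one so that the number of partitions match.",
              leftName, rightName,
              leftName, leftPartitions,
              rightName, rightPartitions));
        }
      }
    }

The only change to the expected messages above is the name used for each side: the node's alias (T1, T2, left, right) rather than the underlying source name.
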
expectedException.expectMessage( - "Can't join Foobar1 with Foobar2 since the number of partitions don't match." + "Can't join left with right since the number of partitions don't match." ); // When: joinNode.buildStream(ksqlStreamBuilder); } - @Test - public void shouldFailJoinIfTableCriteriaColumnIsNotKey() { - // Given: - setupStream(left, leftSchemaKStream); - setupTable(right, rightSchemaKTable); - - final ColumnRef rightCriteriaColumn = - getNonKeyColumn(RIGHT_SOURCE_SCHEMA, RIGHT_ALIAS, RIGHT_JOIN_FIELD_REF); - - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage( - "Source table (Foobar2) key column (right.R1) is not the column used in the join criteria (right.C0). " - + "Only the table's key column or 'ROWKEY' is supported in the join criteria for a TABLE." - ); - - // When: - new JoinNode( - nodeId, - Collections.emptyList(), - JoinType.LEFT, - left, - right, - LEFT_JOIN_FIELD_REF, - rightCriteriaColumn, - Optional.empty() - ); - } - - @Test - public void shouldFailJoinIfTableHasNoKeyAndJoinFieldIsNotRowKey() { - // Given: - setupStream(left, leftSchemaKStream); - setupTable(right, rightSchemaKTable, NO_KEY_FIELD); - - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage( - "Source table (Foobar2) has no key column defined. " - + "Only 'ROWKEY' is supported in the join criteria for a TABLE." - ); - - // When: - new JoinNode( - nodeId, - Collections.emptyList(), - JoinNode.JoinType.LEFT, - left, - right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, - Optional.empty() - ); - } - @Test public void shouldHandleJoinIfTableHasNoKeyAndJoinFieldIsRowKey() { // Given: setupStream(left, leftSchemaKStream); - setupTable(right, rightSchemaKTable, NO_KEY_FIELD); + setupTable(right, rightSchemaKTable); final JoinNode joinNode = new JoinNode( nodeId, @@ -500,8 +394,6 @@ public void shouldHandleJoinIfTableHasNoKeyAndJoinFieldIsRowKey() { JoinNode.JoinType.LEFT, left, right, - LEFT_JOIN_FIELD_REF, - ColumnRef.of(SourceName.of("right"), ColumnName.of("ROWKEY")), Optional.empty() ); @@ -529,8 +421,6 @@ public void shouldPerformStreamToTableLeftJoin() { JoinNode.JoinType.LEFT, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -558,8 +448,6 @@ public void shouldPerformStreamToTableInnerJoin() { JoinNode.JoinType.INNER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -587,8 +475,6 @@ public void shouldNotAllowStreamToTableOuterJoin() { JoinNode.JoinType.OUTER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -605,8 +491,8 @@ public void shouldNotAllowStreamToTableOuterJoin() { @Test public void shouldNotPerformStreamToTableJoinIfJoinWindowIsSpecified() { // Given: - when(left.getDataSourceType()).thenReturn(DataSourceType.KSTREAM); - when(right.getDataSourceType()).thenReturn(DataSourceType.KTABLE); + when(left.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM); + when(right.getNodeOutputType()).thenReturn(DataSourceType.KTABLE); final WithinExpression withinExpression = new WithinExpression(10, TimeUnit.SECONDS); @@ -616,8 +502,6 @@ public void shouldNotPerformStreamToTableJoinIfJoinWindowIsSpecified() { JoinNode.JoinType.OUTER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.of(withinExpression) ); @@ -631,64 +515,6 @@ public void shouldNotPerformStreamToTableJoinIfJoinWindowIsSpecified() { joinNode.buildStream(ksqlStreamBuilder); } - @Test - public void 
shouldFailTableTableJoinIfLeftCriteriaColumnIsNotKey() { - // Given: - setupTable(left, leftSchemaKTable); - setupTable(right, rightSchemaKTable); - - final ColumnRef leftCriteriaColumn = - getNonKeyColumn(LEFT_SOURCE_SCHEMA, LEFT_ALIAS, LEFT_JOIN_FIELD_REF); - - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage( - "Source table (Foobar1) key column (left.C0) is not the column used in the join criteria (left.L1). " - + "Only the table's key column or 'ROWKEY' is supported in the join criteria for a TABLE." - ); - - // When: - new JoinNode( - nodeId, - Collections.emptyList(), - JoinNode.JoinType.LEFT, - left, - right, - leftCriteriaColumn, - RIGHT_JOIN_FIELD_REF, - Optional.empty() - ); - } - - @Test - public void shouldFailTableTableJoinIfRightCriteriaColumnIsNotKey() { - // Given: - setupTable(left, leftSchemaKTable); - setupTable(right, rightSchemaKTable); - - final ColumnRef rightCriteriaColumn = - getNonKeyColumn(RIGHT_SOURCE_SCHEMA, RIGHT_ALIAS, RIGHT_JOIN_FIELD_REF); - - // Then: - expectedException.expect(KsqlException.class); - expectedException.expectMessage( - "Source table (Foobar2) key column (right.R1) is not the column used in the join criteria (right.C0). " - + "Only the table's key column or 'ROWKEY' is supported in the join criteria for a TABLE." - ); - - // When: - new JoinNode( - nodeId, - Collections.emptyList(), - JoinNode.JoinType.LEFT, - left, - right, - LEFT_JOIN_FIELD_REF, - rightCriteriaColumn, - Optional.empty() - ); - } - @Test public void shouldPerformTableToTableInnerJoin() { // Given: @@ -701,8 +527,6 @@ public void shouldPerformTableToTableInnerJoin() { JoinNode.JoinType.INNER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -728,8 +552,6 @@ public void shouldPerformTableToTableLeftJoin() { JoinNode.JoinType.LEFT, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -755,8 +577,6 @@ public void shouldPerformTableToTableOuterJoin() { JoinNode.JoinType.OUTER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -773,8 +593,8 @@ public void shouldPerformTableToTableOuterJoin() { @Test public void shouldNotPerformTableToTableJoinIfJoinWindowIsSpecified() { // Given: - when(left.getDataSourceType()).thenReturn(DataSourceType.KTABLE); - when(right.getDataSourceType()).thenReturn(DataSourceType.KTABLE); + when(left.getNodeOutputType()).thenReturn(DataSourceType.KTABLE); + when(right.getNodeOutputType()).thenReturn(DataSourceType.KTABLE); final WithinExpression withinExpression = new WithinExpression(10, TimeUnit.SECONDS); @@ -784,8 +604,6 @@ public void shouldNotPerformTableToTableJoinIfJoinWindowIsSpecified() { JoinNode.JoinType.OUTER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.of(withinExpression) ); @@ -808,8 +626,6 @@ public void shouldHaveFullyQualifiedJoinSchema() { JoinNode.JoinType.OUTER, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, Optional.empty() ); @@ -828,33 +644,6 @@ public void shouldHaveFullyQualifiedJoinSchema() { )); } - @Test - public void shouldSelectLeftKeyField() { - // Given: - setupStream(left, leftSchemaKStream); - setupStream(right, rightSchemaKStream); - - final JoinNode joinNode = new JoinNode( - nodeId, - Collections.emptyList(), - JoinNode.JoinType.OUTER, - left, - right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, - WITHIN_EXPRESSION - ); - - // When: - joinNode.buildStream(ksqlStreamBuilder); - - // Then: - verify(leftSchemaKStream).selectKey( - eq(new 
ColumnReferenceExp(LEFT_JOIN_FIELD_REF)), - any() - ); - } - @Test public void shouldNotUseSourceSerdeOptionsForInternalTopics() { // Given: @@ -867,8 +656,6 @@ public void shouldNotUseSourceSerdeOptionsForInternalTopics() { JoinNode.JoinType.LEFT, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, WITHIN_EXPRESSION ); @@ -889,8 +676,6 @@ public void shouldReturnCorrectSchema() { JoinNode.JoinType.LEFT, left, right, - LEFT_JOIN_FIELD_REF, - RIGHT_JOIN_FIELD_REF, WITHIN_EXPRESSION ); @@ -908,17 +693,7 @@ private void setupTable( final SchemaKTable table ) { when(node.buildStream(ksqlStreamBuilder)).thenReturn((SchemaKTable) table); - when(node.getDataSourceType()).thenReturn(DataSourceType.KTABLE); - } - - private void setupTable( - final DataSourceNode node, - final SchemaKTable table, - final Optional keyFieldName - ) { - setupTable(node, table); - - when(node.getKeyField()).thenReturn(KeyField.of(keyFieldName)); + when(node.getNodeOutputType()).thenReturn(DataSourceType.KTABLE); } @SuppressWarnings("unchecked") @@ -927,8 +702,8 @@ private void setupStream( final SchemaKStream stream ) { when(node.buildStream(ksqlStreamBuilder)).thenReturn(stream); - when(stream.selectKey(any(), any())).thenReturn(stream); - when(node.getDataSourceType()).thenReturn(DataSourceType.KSTREAM); + when(node.getTheSourceNode()).thenReturn(node); + when(node.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM); } private void buildJoin() { @@ -1015,7 +790,6 @@ private static void setUpSource( final DataSource dataSource, final String name ) { - when(dataSource.getName()).thenReturn(SourceName.of(name)); when(node.getDataSource()).thenReturn((DataSource)dataSource); final KsqlTopic ksqlTopic = mock(KsqlTopic.class); diff --git a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java index 88453d24e37b..e1d89e5e6edf 100644 --- a/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java +++ b/ksql-engine/src/test/java/io/confluent/ksql/structured/SchemaKStreamTest.java @@ -391,6 +391,21 @@ public void shouldHandleSourceWithoutKey() { assertThat(result.getKeyField(), is(KeyField.none())); } + @Test(expected = UnsupportedOperationException.class) + public void shouldFailRepartitionTable() { + // Given: + final PlanNode planNode = givenInitialKStreamOf("SELECT * FROM test2 EMIT CHANGES;"); + final RepartitionNode repartitionNode = new RepartitionNode( + planNode.getId(), + planNode, + schemaKTable.schema, + new ColumnReferenceExp(ColumnRef.withoutSource(ColumnName.of("COL2"))), + KeyField.none()); + + // When: + schemaKTable.selectKey(repartitionNode.getPartitionBy(), childContextStacker); + } + @Test public void testSelectWithExpression() { // Given: diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json index 125764a25f8f..fff0cfac10de 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/joins.json @@ -1286,7 +1286,7 @@ ], "expectedException": { "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Source table (NO_KEY) has no key column defined. Only 'ROWKEY' is supported in the join criteria for a TABLE." + "message": "Cannot repartition a TABLE source. 
If this is a join, make sure that the criteria uses the TABLE key ROWKEY instead of T.ID" } }, { @@ -1534,7 +1534,7 @@ ], "expectedException": { "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '0' in join '(T.ID = 0)'. Joins must only contain a field comparison." + "message": "Invalid comparison expression '0' in join '(T.ID = 0)'. Each side of the join comparision must contain references from exactly one source." } }, { @@ -1546,176 +1546,113 @@ ], "expectedException": { "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '0' in join '(0 = T.ID)'. Joins must only contain a field comparison." + "message": "Invalid comparison expression '0' in join '(0 = T.ID)'. Each side of the join comparision must contain references from exactly one source." } }, { - "name": "stream stream left join - invalid join field - contains function", + "name": "stream stream join - contains function", "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = test_udf(tt.id);" + "CREATE STREAM TEST1 (ID varchar) WITH (kafka_topic='left_topic', value_format='JSON');", + "CREATE STREAM TEST2 (ID varchar) WITH (kafka_topic='right_topic', value_format='JSON');", + "CREATE STREAM OUTPUT as SELECT T.ID FROM test1 t join test2 tt WITHIN 30 SECONDS ON t.id = SUBSTRING(tt.id, 2);" ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression 'TEST_UDF(TT.ID)' in join '(T.ID = TEST_UDF(TT.ID))'. Joins must only contain a field comparison." - } - }, - { - "name": "stream stream left join - invalid join field - contains CAST", - "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = CAST(tt.id AS BIGINT);" + "inputs": [ + {"topic": "left_topic", "key": "foo", "value": {"id": "foo"}, "timestamp": 0}, + {"topic": "right_topic", "key": "!foo", "value": {"id": "!foo"}, "timestamp": 10} ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression 'CAST(TT.ID AS BIGINT)' in join '(T.ID = CAST(TT.ID AS BIGINT))'. Joins must only contain a field comparison." - } + "outputs": [ + {"topic": "OUTPUT", "key": "foo", "value": {"T_ID": "foo"}, "timestamp": 10} + ] }, { - "name": "stream stream left join - invalid join field - contains subscript", + "name": "stream stream join - contains CAST", "statements": [ "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = tt.id[0];" + "CREATE STREAM TEST2 (ID int) WITH (kafka_topic='right_topic', value_format='JSON');", + "CREATE STREAM OUTPUT as SELECT t.ID FROM test1 t JOIN test2 tt WITHIN 30 seconds ON t.id = CAST(tt.id AS BIGINT);" ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression 'TT.ID[0]' in join '(T.ID = TT.ID[0])'. 
Joins must only contain a field comparison." - } - }, - { - "name": "stream stream left join - invalid join field - contains subexpression", - "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (tt.id = 0);" + "inputs": [ + {"topic": "left_topic", "key": "1", "value": {"id": 1}, "timestamp": 10}, + {"topic": "right_topic", "key": "1", "value": {"id": 1}, "timestamp": 10} ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(TT.ID = 0)' in join '(T.ID = (TT.ID = 0))'. Joins must only contain a field comparison." - } + "outputs": [ + {"topic": "OUTPUT", "key": 1, "value": {"T_ID": 1}, "timestamp": 10} + ] }, { - "name": "stream stream left join - invalid join field - contains arithmetic binary expression", + "name": "stream stream join - contains subscript", "statements": [ "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = tt.id + 1;" + "CREATE STREAM TEST2 (ID ARRAY) WITH (kafka_topic='right_topic', value_format='JSON');", + "CREATE STREAM OUTPUT as SELECT T.ID FROM test1 t JOIN test2 tt WITHIN 30 SECONDS ON t.id = tt.id[1];" ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(TT.ID + 1)' in join '(T.ID = (TT.ID + 1))'. Joins must only contain a field comparison." - } - }, - { - "name": "stream stream left join - invalid join field - contains IS NULL expression", - "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (tt.id IS NULL);" + "inputs": [ + {"topic": "left_topic", "key": "1", "value": {"id": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "1", "value": {"id": [1]}, "timestamp": 10} ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(TT.ID IS NULL)' in join '(T.ID = (TT.ID IS NULL))'. Joins must only contain a field comparison." - } + "outputs": [ + {"topic": "OUTPUT", "key": 1, "value": {"T_ID": 1}, "timestamp": 10} + ] }, { - "name": "stream stream left join - invalid join field - contains IS NOT NULL expression", + "name": "stream stream join - contains arithmetic binary expression", "statements": [ "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (tt.id IS NOT NULL);" + "CREATE STREAM OUTPUT as SELECT T.ID FROM test1 t join test2 tt WITHIN 30 seconds ON t.id = tt.id + 1;" ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(TT.ID IS NOT NULL)' in join '(T.ID = (TT.ID IS NOT NULL))'. Joins must only contain a field comparison." 
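
The rewritten cases in this file replace blanket rejections ("Joins must only contain a field comparison.") with real coverage: a join criterion may now be an expression over one source per side, and each side is re-keyed by its evaluated expression before matching. A minimal in-memory model of that behavior, written against the plain JDK and assuming nothing about ksqlDB internals:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;

    final class ExpressionJoin {
      // re-key each side by its join expression, then match rows on the computed key
      static <L, R, K> List<Map.Entry<L, R>> join(
          final List<L> left, final Function<L, K> leftKeyExpr,
          final List<R> right, final Function<R, K> rightKeyExpr) {
        final Map<K, List<R>> rightByKey = new HashMap<>();
        for (final R r : right) {
          rightByKey.computeIfAbsent(rightKeyExpr.apply(r), k -> new ArrayList<>()).add(r);
        }

        final List<Map.Entry<L, R>> joined = new ArrayList<>();
        for (final L l : left) {
          for (final R r : rightByKey.getOrDefault(leftKeyExpr.apply(l), List.of())) {
            joined.add(Map.entry(l, r));
          }
        }
        return joined;
      }
    }

For the SUBSTRING case above, join(List.of("foo"), id -> id, List.of("!foo"), id -> id.substring(1)) pairs "foo" with "!foo", just as the expected OUTPUT row does.
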
- } - }, - { - "name": "stream stream left join - invalid join field - contains logical binary expression", - "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (tt.id AND tt.f1);" + "inputs": [ + {"topic": "left_topic", "key": "1", "value": {"id": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "0", "value": {"id": 0}, "timestamp": 10} ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(TT.ID AND TT.F1)' in join '(T.ID = (TT.ID AND TT.F1))'. Joins must only contain a field comparison." - } + "outputs": [ + {"topic": "OUTPUT", "key": 1, "value": {"T_ID": 1}, "timestamp": 10} + ] }, { - "name": "stream stream left join - invalid join field - contains not expression", + "name": "stream stream join - contains arithmetic unary expression", "statements": [ "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (NOT tt.id);" + "CREATE STREAM OUTPUT as SELECT T.ID FROM test1 t join test2 tt WITHIN 30 seconds ON t.id = -tt.id;" ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(NOT TT.ID)' in join '(T.ID = (NOT TT.ID))'. Joins must only contain a field comparison." - } - }, - { - "name": "stream stream left join - invalid join field - contains arithmetic unary expression", - "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = -tt.id;" + "inputs": [ + {"topic": "left_topic", "key": "1", "value": {"id": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "1", "value": {"id": -1}, "timestamp": 10} ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '-TT.ID' in join '(T.ID = -TT.ID)'. Joins must only contain a field comparison." - } + "outputs": [ + {"topic": "OUTPUT", "key": 1, "value": {"T_ID": 1}, "timestamp": 10} + ] }, { - "name": "stream stream left join - invalid join field - contains LIKE expression", + "name": "stream stream join - contains CASE expression", "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (tt.id LIKE '%x');" + "CREATE STREAM TEST1 (ID int) WITH (kafka_topic='left_topic', value_format='JSON');", + "CREATE STREAM TEST2 (ID int) WITH (kafka_topic='right_topic', value_format='JSON');", + "CREATE STREAM OUTPUT as SELECT T.ID FROM test1 t join test2 tt WITHIN 30 SECONDS ON t.id = (CASE WHEN tt.id = 2 THEN 1 ELSE 3 END);" ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(TT.ID LIKE '%x')' in join '(T.ID = (TT.ID LIKE '%x'))'. 
Joins must only contain a field comparison." - } - }, - { - "name": "stream stream left join - invalid join field - contains CASE expression", - "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (CASE WHEN 1 THEN 2 END);" + "inputs": [ + {"topic": "left_topic", "key": "1", "value": {"id": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "1", "value": {"id": 2}, "timestamp": 10} ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(CASE WHEN 1 THEN 2 END)' in join '(T.ID = (CASE WHEN 1 THEN 2 END))'. Joins must only contain a field comparison." - } + "outputs": [ + {"topic": "OUTPUT", "key": 1, "value": {"T_ID": 1}, "timestamp": 10} + ] }, { - "name": "stream stream left join - invalid join field - contains IN expression", + "name": "stream stream join - contains arithmetic unary expression flipped sides", "statements": [ "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (tt.id IN (1, 2, 3));" + "CREATE STREAM OUTPUT as SELECT T.ID FROM test1 t join test2 tt WITHIN 30 seconds ON -tt.id = t.id;" ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(TT.ID IN (1, 2, 3))' in join '(T.ID = (TT.ID IN (1, 2, 3)))'. Joins must only contain a field comparison." - } - }, - { - "name": "stream stream left join - invalid join field - contains BETWEEN expression", - "statements": [ - "CREATE STREAM TEST1 (ID bigint) WITH (kafka_topic='left_topic', value_format='JSON');", - "CREATE STREAM TEST2 (ID bigint) WITH (kafka_topic='right_topic', value_format='JSON');", - "CREATE STREAM LEFT_OUTER_JOIN as SELECT * FROM test1 t left join test2 tt ON t.id = (tt.id BETWEEN 1 AND 3);" + "inputs": [ + {"topic": "left_topic", "key": "1", "value": {"id": 1}, "timestamp": 0}, + {"topic": "right_topic", "key": "1", "value": {"id": -1}, "timestamp": 10} ], - "expectedException": { - "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid comparison expression '(TT.ID BETWEEN 1 AND 3)' in join '(T.ID = (TT.ID BETWEEN 1 AND 3))'. Joins must only contain a field comparison." - } + "outputs": [ + {"topic": "OUTPUT", "key": 1, "value": {"T_ID": 1}, "timestamp": 10} + ] }, { "name": "stream stream left join - invalid left join expression - field does not exist", @@ -1726,7 +1663,7 @@ ], "expectedException": { "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid join criteria (T.IID = TT.ID). Column T.IID does not exist." + "message": "Column 'T.IID' cannot be resolved." } }, { @@ -1738,7 +1675,7 @@ ], "expectedException": { "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid join criteria (T.ID = TT.IID). Column TT.IID does not exist." + "message": "Column 'TT.IID' cannot be resolved." } }, { @@ -1836,7 +1773,7 @@ ], "expectedException": { "type": "io.confluent.ksql.util.KsqlStatementException", - "message": "Invalid join criteria: Source table (INPUT_TABLE) has no key column defined. Only 'ROWKEY' is supported in the join criteria for a TABLE." 
+ "message": "Cannot repartition a TABLE source. If this is a join, make sure that the criteria uses the TABLE key ROWKEY instead of T.ID" } }, { From 51f8f7c2f5121e2265d8139ec69fa6bcd27739cb Mon Sep 17 00:00:00 2001 From: Confluent Jenkins Bot Date: Wed, 15 Jan 2020 02:46:19 +0000 Subject: [PATCH 111/123] Bump Confluent to 5.4.1-SNAPSHOT, Kafka to 5.4.1-SNAPSHOT --- build-tools/pom.xml | 2 +- docs/conf.py | 2 +- ksql-benchmark/pom.xml | 2 +- ksql-cli/pom.xml | 2 +- ksql-clickstream-demo/pom.xml | 2 +- ksql-common/pom.xml | 2 +- ksql-console-scripts/pom.xml | 2 +- ksql-engine/pom.xml | 2 +- ksql-etc/pom.xml | 2 +- ksql-examples/pom.xml | 2 +- ksql-execution/pom.xml | 2 +- ksql-functional-tests/pom.xml | 2 +- ksql-metastore/pom.xml | 2 +- ksql-package/pom.xml | 2 +- ksql-parser/pom.xml | 2 +- ksql-rest-app/pom.xml | 2 +- ksql-rest-client/pom.xml | 2 +- ksql-rest-model/pom.xml | 2 +- ksql-rocksdb-config-setter/pom.xml | 2 +- ksql-serde/pom.xml | 2 +- ksql-streams/pom.xml | 2 +- ksql-test-util/pom.xml | 2 +- ksql-tools/pom.xml | 2 +- ksql-udf-quickstart/pom.xml | 2 +- ksql-udf/pom.xml | 2 +- ksql-version-metrics-client/pom.xml | 2 +- licenses/licenses.html | 12 ++++++------ pom.xml | 4 ++-- 28 files changed, 34 insertions(+), 34 deletions(-) diff --git a/build-tools/pom.xml b/build-tools/pom.xml index 2622ad368bcc..788f1833b470 100644 --- a/build-tools/pom.xml +++ b/build-tools/pom.xml @@ -19,6 +19,6 @@ 4.0.0 io.confluent build-tools - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT Build Tools diff --git a/docs/conf.py b/docs/conf.py index 430a6ad12a4f..f954bdf6b9bd 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -56,7 +56,7 @@ def setup(app): # The short X.Y version. version = '5.4' # The full version, including alpha/beta/rc tags. -release = '5.4.0-SNAPSHOT' +release = '5.4.1-SNAPSHOT' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/ksql-benchmark/pom.xml b/ksql-benchmark/pom.xml index 1551e5f56c45..9791862d56c2 100644 --- a/ksql-benchmark/pom.xml +++ b/ksql-benchmark/pom.xml @@ -47,7 +47,7 @@ questions. 
io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-benchmark diff --git a/ksql-cli/pom.xml b/ksql-cli/pom.xml index 6bb1a00a74cb..1cd398a6e404 100644 --- a/ksql-cli/pom.xml +++ b/ksql-cli/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-cli diff --git a/ksql-clickstream-demo/pom.xml b/ksql-clickstream-demo/pom.xml index 54f28a966969..13185320ff84 100644 --- a/ksql-clickstream-demo/pom.xml +++ b/ksql-clickstream-demo/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT io.confluent.ksql diff --git a/ksql-common/pom.xml b/ksql-common/pom.xml index 78affa0a0da4..a8f32fe1f039 100644 --- a/ksql-common/pom.xml +++ b/ksql-common/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-common diff --git a/ksql-console-scripts/pom.xml b/ksql-console-scripts/pom.xml index c1152d3b111b..17d9936cb153 100644 --- a/ksql-console-scripts/pom.xml +++ b/ksql-console-scripts/pom.xml @@ -22,7 +22,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT io.confluent.ksql diff --git a/ksql-engine/pom.xml b/ksql-engine/pom.xml index ecef4f9c1781..1021f98b9d7a 100644 --- a/ksql-engine/pom.xml +++ b/ksql-engine/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-engine diff --git a/ksql-etc/pom.xml b/ksql-etc/pom.xml index 6b1ff175473f..31056f97d1b0 100644 --- a/ksql-etc/pom.xml +++ b/ksql-etc/pom.xml @@ -22,7 +22,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT io.confluent.ksql diff --git a/ksql-examples/pom.xml b/ksql-examples/pom.xml index ffcf96b127d4..7bf69a88b313 100644 --- a/ksql-examples/pom.xml +++ b/ksql-examples/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-examples diff --git a/ksql-execution/pom.xml b/ksql-execution/pom.xml index cfb83030eade..0e1895fb585d 100644 --- a/ksql-execution/pom.xml +++ b/ksql-execution/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-execution diff --git a/ksql-functional-tests/pom.xml b/ksql-functional-tests/pom.xml index c1f7cc813d4a..8a4caecb448d 100644 --- a/ksql-functional-tests/pom.xml +++ b/ksql-functional-tests/pom.xml @@ -21,7 +21,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT 4.0.0 diff --git a/ksql-metastore/pom.xml b/ksql-metastore/pom.xml index e11aeb3b6deb..2d7d9df11620 100644 --- a/ksql-metastore/pom.xml +++ b/ksql-metastore/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-metastore diff --git a/ksql-package/pom.xml b/ksql-package/pom.xml index 5d15789cd406..c8045c90ffd4 100644 --- a/ksql-package/pom.xml +++ b/ksql-package/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-package diff --git a/ksql-parser/pom.xml b/ksql-parser/pom.xml index 7f659a747375..730066c6086a 100644 --- a/ksql-parser/pom.xml +++ b/ksql-parser/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-parser diff --git a/ksql-rest-app/pom.xml b/ksql-rest-app/pom.xml index 7afdd6f74831..af1cba507af8 100644 --- a/ksql-rest-app/pom.xml +++ b/ksql-rest-app/pom.xml @@ -23,7 +23,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-rest-app diff --git a/ksql-rest-client/pom.xml b/ksql-rest-client/pom.xml index 
2a4f27562c68..1ceea268c166 100644 --- a/ksql-rest-client/pom.xml +++ b/ksql-rest-client/pom.xml @@ -23,7 +23,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-rest-client diff --git a/ksql-rest-model/pom.xml b/ksql-rest-model/pom.xml index c2a1ba6f6b58..a7059ef980f7 100644 --- a/ksql-rest-model/pom.xml +++ b/ksql-rest-model/pom.xml @@ -23,7 +23,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-rest-model diff --git a/ksql-rocksdb-config-setter/pom.xml b/ksql-rocksdb-config-setter/pom.xml index de5d625c7e83..78b8b8ccca78 100644 --- a/ksql-rocksdb-config-setter/pom.xml +++ b/ksql-rocksdb-config-setter/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-rocksdb-config-setter diff --git a/ksql-serde/pom.xml b/ksql-serde/pom.xml index 5cf9292115de..c53646450ae7 100644 --- a/ksql-serde/pom.xml +++ b/ksql-serde/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-serde diff --git a/ksql-streams/pom.xml b/ksql-streams/pom.xml index 40d9b26c9be5..0de376a78fe2 100644 --- a/ksql-streams/pom.xml +++ b/ksql-streams/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-streams diff --git a/ksql-test-util/pom.xml b/ksql-test-util/pom.xml index abebddcf37a0..e8e93766385e 100644 --- a/ksql-test-util/pom.xml +++ b/ksql-test-util/pom.xml @@ -20,7 +20,7 @@ ksql-parent io.confluent.ksql - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT 4.0.0 diff --git a/ksql-tools/pom.xml b/ksql-tools/pom.xml index f4a34f94ffbd..1e6efd7f9718 100644 --- a/ksql-tools/pom.xml +++ b/ksql-tools/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-tools diff --git a/ksql-udf-quickstart/pom.xml b/ksql-udf-quickstart/pom.xml index c4dc1908ac3d..2e690fed307b 100644 --- a/ksql-udf-quickstart/pom.xml +++ b/ksql-udf-quickstart/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-udf-quickstart diff --git a/ksql-udf/pom.xml b/ksql-udf/pom.xml index 773a2d1ef312..3ed81b0fe16b 100644 --- a/ksql-udf/pom.xml +++ b/ksql-udf/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-udf diff --git a/ksql-version-metrics-client/pom.xml b/ksql-version-metrics-client/pom.xml index 26d539c4684d..82f6d3aaa3ce 100644 --- a/ksql-version-metrics-client/pom.xml +++ b/ksql-version-metrics-client/pom.xml @@ -22,7 +22,7 @@ io.confluent.ksql ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT ksql-version-metrics-client diff --git a/licenses/licenses.html b/licenses/licenses.html index ba684079d4ff..cc1f7b86d415 100644 --- a/licenses/licenses.html +++ b/licenses/licenses.html @@ -67,15 +67,15 @@

slice-0.29jar0.29 -common-config-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +common-config-5.4.1-SNAPSHOTjar5.4.1-SNAPSHOT -common-utils-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +common-utils-5.4.1-SNAPSHOTjar5.4.1-SNAPSHOT -kafka-avro-serializer-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +kafka-avro-serializer-5.4.1-SNAPSHOTjar5.4.1-SNAPSHOT -kafka-connect-avro-converter-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +kafka-connect-avro-converter-5.4.1-SNAPSHOTjar5.4.1-SNAPSHOT -kafka-schema-registry-client-5.4.0-SNAPSHOTjar5.4.0-SNAPSHOT +kafka-schema-registry-client-5.4.1-SNAPSHOTjar5.4.1-SNAPSHOT ksql-engine-0.1-SNAPSHOTjar0.1-SNAPSHOT @@ -123,7 +123,7 @@

kafka-streams-0.11.0.0-cp1jarincluded file -kafka_2.11-5.4.0-ccs-SNAPSHOTjarincluded file +kafka_2.11-5.4.1-ccs-SNAPSHOTjarincluded file lz4-1.3.0jar1.3.0 diff --git a/pom.xml b/pom.xml index e746ce812706..3be34d91c23e 100644 --- a/pom.xml +++ b/pom.xml @@ -22,14 +22,14 @@ io.confluent rest-utils-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT io.confluent.ksql ksql-parent pom ksql-parent - 5.4.0-SNAPSHOT + 5.4.1-SNAPSHOT Confluent Community License From cfe6821aa590a40a087ee64793384df1f0be5f54 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Wed, 15 Jan 2020 10:30:25 +0000 Subject: [PATCH 112/123] test: push queries work with non-windowed primitive keys (#4310) * test: push queries work with non-windowed primitive keys Fixes: https://github.com/confluentinc/ksql/issues/4123 --- .../ksql/test/rest/RestTestExecutor.java | 5 ++ .../push-queries.json | 87 +++++++++++++++++++ 2 files changed, 92 insertions(+) create mode 100644 ksql-functional-tests/src/test/resources/rest-query-validation-tests/push-queries.json diff --git a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java index 307c2792e748..e840a5c4f782 100644 --- a/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java +++ b/ksql-functional-tests/src/test/java/io/confluent/ksql/test/rest/RestTestExecutor.java @@ -463,6 +463,11 @@ private void waitForWarmStateStore( ) { // Special handling for pull queries is required, as they depend on materialized state stores // being warmed up. Initial requests may return no rows. + + if (querySql.contains("EMIT CHANGES")) { + // Push, not pull query: + return; + } final ImmutableList expectedResponse = ImmutableList.of(queryResponse); final ImmutableList statements = ImmutableList.of(querySql); diff --git a/ksql-functional-tests/src/test/resources/rest-query-validation-tests/push-queries.json b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/push-queries.json new file mode 100644 index 000000000000..d32d953aa656 --- /dev/null +++ b/ksql-functional-tests/src/test/resources/rest-query-validation-tests/push-queries.json @@ -0,0 +1,87 @@ +{ + "comments": [ + "Tests covering Push queries" + ], + "tests": [ + { + "name": "non-windowed transient stream query - STRING key", + "statements": [ + "CREATE STREAM INPUT (ID INT) WITH (kafka_topic='test_topic', value_format='JSON');", + "SELECT * FROM INPUT EMIT CHANGES LIMIT 2;" + ], + "inputs": [ + {"topic": "test_topic", "timestamp": 12345, "key": "11", "value": {"id": 100}}, + {"topic": "test_topic", "timestamp": 12365, "key": "11", "value": {"id": 101}} + ], + "responses": [ + {"admin": {"@type": "currentStatus"}}, + {"query": [ + {"header":{"schema":"`ROWTIME` BIGINT, `ROWKEY` STRING, `ID` INTEGER"}}, + {"row":{"columns":[12345, "11", 100]}}, + {"row":{"columns":[12365, "11", 101]}}, + {"finalMessage":"Limit Reached"} + ]} + ] + }, + { + "name": "non-windowed transient stream query - INT key", + "statements": [ + "CREATE STREAM INPUT (ROWKEY INT KEY, ID INT) WITH (kafka_topic='test_topic', value_format='JSON');", + "SELECT * FROM INPUT EMIT CHANGES LIMIT 2;" + ], + "inputs": [ + {"topic": "test_topic", "timestamp": 12345, "key": 11, "value": {"id": 100}}, + {"topic": "test_topic", "timestamp": 12365, "key": 11, "value": {"id": 101}} + ], + "responses": [ + {"admin": {"@type": "currentStatus"}}, + {"query": [ + {"header":{"schema":"`ROWTIME` 
BIGINT, `ROWKEY` INTEGER, `ID` INTEGER"}}, + {"row":{"columns":[12345, 11, 100]}}, + {"row":{"columns":[12365, 11, 101]}}, + {"finalMessage":"Limit Reached"} + ]} + ] + }, + { + "name": "non-windowed transient stream query - BIGINT key", + "statements": [ + "CREATE STREAM INPUT (ROWKEY BIGINT KEY, ID INT) WITH (kafka_topic='test_topic', value_format='JSON');", + "SELECT * FROM INPUT EMIT CHANGES LIMIT 2;" + ], + "inputs": [ + {"topic": "test_topic", "timestamp": 12345, "key": 11, "value": {"id": 100}}, + {"topic": "test_topic", "timestamp": 12365, "key": 11, "value": {"id": 101}} + ], + "responses": [ + {"admin": {"@type": "currentStatus"}}, + {"query": [ + {"header":{"schema":"`ROWTIME` BIGINT, `ROWKEY` BIGINT, `ID` INTEGER"}}, + {"row":{"columns":[12345, 11, 100]}}, + {"row":{"columns":[12365, 11, 101]}}, + {"finalMessage":"Limit Reached"} + ]} + ] + }, + { + "name": "non-windowed transient stream query - DOUBLE key", + "statements": [ + "CREATE STREAM INPUT (ROWKEY DOUBLE KEY, ID INT) WITH (kafka_topic='test_topic', value_format='JSON');", + "SELECT * FROM INPUT EMIT CHANGES LIMIT 2;" + ], + "inputs": [ + {"topic": "test_topic", "timestamp": 12345, "key": 11.0, "value": {"id": 100}}, + {"topic": "test_topic", "timestamp": 12365, "key": 11.0, "value": {"id": 101}} + ], + "responses": [ + {"admin": {"@type": "currentStatus"}}, + {"query": [ + {"header":{"schema":"`ROWTIME` BIGINT, `ROWKEY` DOUBLE, `ID` INTEGER"}}, + {"row":{"columns":[12345, 11.0, 100]}}, + {"row":{"columns":[12365, 11.0, 101]}}, + {"finalMessage":"Limit Reached"} + ]} + ] + } + ] +} \ No newline at end of file From a0ca68839f9fa0b9427562db93254b6105aa2fc0 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Wed, 15 Jan 2020 10:31:52 +0000 Subject: [PATCH 113/123] test: ensure test cases for group by and partition by primitive key (#4311) Fixes: https://github.com/confluentinc/ksql/issues/4107 --- .../query-validation-tests/group-by.json | 33 +++++++++++++++++++ .../query-validation-tests/partition-by.json | 12 ++++++- 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json index ffb6cb89b766..dd6abe149b20 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/group-by.json @@ -1373,6 +1373,39 @@ ], "outputs": [ ] + }, + { + "name": "by non-STRING key", + "statements": [ + "CREATE STREAM INPUT (f0 INT) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT AS SELECT COUNT(1) FROM INPUT GROUP BY f0;" + ], + "inputs": [ + {"topic": "test_topic", "value": "2"}, + {"topic": "test_topic", "value": "3"}, + {"topic": "test_topic", "value": "2"} + ], + "outputs": [ + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-GroupBy-repartition", "key": 2, "value": "2,1"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-GroupBy-repartition", "key": 3, "value": "3,1"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-GroupBy-repartition", "key": 2, "value": "2,1"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 2, "value": "2,1"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", 
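
This "by non-STRING key" test makes the internal plumbing visible: a GROUP BY over a non-STRING value first writes to a repartition topic keyed by the grouping value, and the aggregation's running state is then published to a changelog topic. The aggregation itself is just a keyed counter; as a plain-Java sketch with illustrative names:

    import java.util.HashMap;
    import java.util.Map;

    final class GroupByCount {
      private final Map<Integer, Long> counts = new HashMap<>();

      // returns the updated count for the (repartitioned) key, i.e. the
      // value that would be emitted to the aggregate changelog and OUTPUT
      long apply(final int key) {
        return counts.merge(key, 1L, Long::sum);
      }
    }

Feeding the test's keys 2, 3, 2 through apply yields 1, 1, 2, matching the three OUTPUT rows.
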
"key": 3, "value": "3,1"}, + {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_OUTPUT_0-Aggregate-Aggregate-Materialize-changelog", "key": 2, "value": "2,2"}, + {"topic": "OUTPUT", "key": 2, "value": "1"}, + {"topic": "OUTPUT", "key": 3, "value": "1"}, + {"topic": "OUTPUT", "key": 2, "value": "2"} + ], + "post": { + "sources": [ + { + "name": "OUTPUT", + "type": "table", + "keyFormat": {"format": "KAFKA"}, + "schema": "`ROWKEY` INTEGER KEY, `KSQL_COL_0` BIGINT" + } + ] + } } ] } \ No newline at end of file diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json b/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json index 1fbf48f9d1f4..cad4ee7b0fa5 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/partition-by.json @@ -11,7 +11,17 @@ ], "outputs": [ {"topic": "REPARTITIONED", "key": "zero", "value": "zero,0"} - ] + ], + "post": { + "sources": [ + { + "name": "REPARTITIONED", + "type": "stream", + "keyFormat": {"format": "KAFKA"}, + "schema": "ROWKEY STRING KEY, NAME STRING, ID BIGINT" + } + ] + } }, { "name": "int column", From 5554913a890b5bab2dae4004905aafa74acb5e1a Mon Sep 17 00:00:00 2001 From: Robin Moffatt Date: Wed, 15 Jan 2020 16:29:02 +0000 Subject: [PATCH 114/123] Add note re. logs folder for server output (#4318) I followed the instructions to build and run locally, and fatal errors weren't written to stdout - took me a while to realise why the process was just exiting :) Found the error in the `ksql.log` log in the logs folder. --- CONTRIBUTING.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3de353d60cdc..b6178b107488 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,7 +18,8 @@ $ ./bin/ksql ``` This will start the KSQL server in the background and the KSQL CLI in the -foreground. +foreground. Check the `logs` folder for the log files that the server writes +including any errors. If you would rather have the KSQL server logs spool to the console, then drop the `-daemon` switch, and start the CLI in a second console. From 67ee038f1dee968ccf7d8936efa00801cbac1f5d Mon Sep 17 00:00:00 2001 From: Jim Galasyn Date: Wed, 15 Jan 2020 17:46:49 -0800 Subject: [PATCH 115/123] docs: fix broken link on dev guide index page (DOCS-3243) (#4327) --- docs-md/developer-guide/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-md/developer-guide/index.md b/docs-md/developer-guide/index.md index 1d10479f5b80..fb1d5f8967d3 100644 --- a/docs-md/developer-guide/index.md +++ b/docs-md/developer-guide/index.md @@ -7,7 +7,7 @@ description: Learn to create ksqlDB applications These topics show how to develop ksqlDB applications. -- [Configure ksqlDB CLI](../installation/cli-config) +- [Configure ksqlDB CLI](../operate-and-deploy/installation/cli-config.md) - [Create a ksqlDB Stream](create-a-stream.md) - [Create a ksqlDB Table](create-a-table.md) - [Aggregate Streaming Data With ksqlDB](aggregate-streaming-data.md) From 7640b434b2f43728f599fdeff472c7d3a8dd0b74 Mon Sep 17 00:00:00 2001 From: Rohan Date: Thu, 16 Jan 2020 02:03:47 -0800 Subject: [PATCH 116/123] fix: pin the jetty client version (#4324) This patch pins the jetty client version to the jetty version specified by confluent/common. 
--- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index 99fc03c77281..5026448e689e 100644 --- a/pom.xml +++ b/pom.xml @@ -379,6 +379,12 @@ ${clearspring-analytics.version} + + org.eclipse.jetty + jetty-client + ${jetty.version} + + junit From 934011c1646ee8d841996c9bcec65420ba22b5bf Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Thu, 16 Jan 2020 11:07:55 +0000 Subject: [PATCH 117/123] test: added more table related test cases (#4329) and fixed small bug in test framework --- .../ksql/test/tools/TestExecutor.java | 4 +- .../query-validation-tests/count.json | 38 +++++++++++++++++++ .../query-validation-tests/table.json | 21 ++++++++++ 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java index 810e68d6195c..da6229ad7d6b 100644 --- a/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java +++ b/ksql-functional-tests/src/main/java/io/confluent/ksql/test/tools/TestExecutor.java @@ -365,9 +365,9 @@ private void processRecordsForTopic( final TopologyTestDriver topologyTestDriver, final Topic sinkTopic ) { - final int idx = 0; + int idx = 0; while (true) { - final ProducerRecord producerRecord = readOutput(topologyTestDriver, sinkTopic, idx); + final ProducerRecord producerRecord = readOutput(topologyTestDriver, sinkTopic, idx++); if (producerRecord == null) { break; } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/count.json b/ksql-functional-tests/src/test/resources/query-validation-tests/count.json index dfc5383a832d..2cda27891eed 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/count.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/count.json @@ -75,6 +75,44 @@ {"topic": "OUTPUT", "key": "john", "value": "2"}, {"topic": "OUTPUT", "key": "john", "value": "1"} ] + }, + { + "name": "should count back to zero", + "statements": [ + "CREATE TABLE INPUT (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT as SELECT COUNT() FROM INPUT GROUP BY ID;" + ], + "inputs": [ + {"topic": "test_topic", "key": "1", "value": "3"}, + {"topic": "test_topic", "key": "2", "value": "3"}, + {"topic": "test_topic", "key": "1", "value": null}, + {"topic": "test_topic", "key": "2", "value": null} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3, "value": "1"}, + {"topic": "OUTPUT", "key": 3, "value": "2"}, + {"topic": "OUTPUT", "key": 3, "value": "1"}, + {"topic": "OUTPUT", "key": 3, "value": "0"} + ] + }, + { + "name": "should support removing zero counts from table", + "statements": [ + "CREATE TABLE INPUT (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT as SELECT COUNT() FROM INPUT GROUP BY ID HAVING COUNT() > 0;" + ], + "inputs": [ + {"topic": "test_topic", "key": "1", "value": "3"}, + {"topic": "test_topic", "key": "2", "value": "3"}, + {"topic": "test_topic", "key": "1", "value": null}, + {"topic": "test_topic", "key": "2", "value": null} + ], + "outputs": [ + {"topic": "OUTPUT", "key": 3, "value": "1"}, + {"topic": "OUTPUT", "key": 3, "value": "2"}, + {"topic": "OUTPUT", "key": 3, "value": "1"}, + {"topic": "OUTPUT", "key": 3, "value": null} + ] } ] } diff --git a/ksql-functional-tests/src/test/resources/query-validation-tests/table.json 
b/ksql-functional-tests/src/test/resources/query-validation-tests/table.json index f0b224cb9918..b10e34d09aac 100644 --- a/ksql-functional-tests/src/test/resources/query-validation-tests/table.json +++ b/ksql-functional-tests/src/test/resources/query-validation-tests/table.json @@ -53,6 +53,27 @@ {"topic": "_confluent-ksql-some.ksql.service.idquery_CTAS_T1_0-KsqlTopic-Reduce-changelog", "key": "1", "value": "2"}, {"topic": "T1", "key": "1", "value": "2"} ] + }, + { + "name": "should forward nulls in changelog when table not materialized", + "comment": [ + "OUTPUT does not cause the INPUT table to be materialized", + "This test is more about testing current, rather than required, behaviour" + ], + "statements": [ + "CREATE TABLE INPUT (ID bigint) WITH (kafka_topic='test_topic', value_format='DELIMITED');", + "CREATE TABLE OUTPUT as SELECT * FROM INPUT;" + ], + "inputs": [ + {"topic": "test_topic", "key": "1", "value": "1"}, + {"topic": "test_topic", "key": "1", "value": null}, + {"topic": "test_topic", "key": "1", "value": "2"} + ], + "outputs": [ + {"topic": "OUTPUT", "key": "1", "value": "1"}, + {"topic": "OUTPUT", "key": "1", "value": null}, + {"topic": "OUTPUT", "key": "1", "value": "2"} + ] } ] } From 0a74151feacc8deff67d464addc1e3db57b1a24d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20Pe=C3=B1a?= Date: Thu, 16 Jan 2020 08:01:23 -0600 Subject: [PATCH 118/123] fix: add ksql-test-runner deps to ksql package lib (#4272) --- ksql-package/pom.xml | 27 +++++++++++++++++++++++++++ ksql-package/src/assembly/package.xml | 1 - 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/ksql-package/pom.xml b/ksql-package/pom.xml index 2f756caa4baa..c32519b0cefd 100644 --- a/ksql-package/pom.xml +++ b/ksql-package/pom.xml @@ -80,6 +80,33 @@ ${project.version} + + + + org.hamcrest + hamcrest-all + compile + + + + junit + junit + compile + + + + org.apache.kafka + kafka-clients + test + compile + + + + com.fasterxml.jackson.core + jackson-annotations + compile + + diff --git a/ksql-package/src/assembly/package.xml b/ksql-package/src/assembly/package.xml index 6970d99a87f1..b1e9e0909673 100644 --- a/ksql-package/src/assembly/package.xml +++ b/ksql-package/src/assembly/package.xml @@ -83,7 +83,6 @@ io.confluent:rest-utils io.confluent:common-config - io.confluent:common-utils io.confluent:common-metrics From ca9368a1494d7608895e2a35cc1efca28dd8afb7 Mon Sep 17 00:00:00 2001 From: Andy Coates <8012398+big-andy-coates@users.noreply.github.com> Date: Thu, 16 Jan 2020 14:07:20 +0000 Subject: [PATCH 119/123] fix: report window type and query status better from API (#4313) This commit: 1. exposes the window type of the key of a query/source, i.e. `HOPPING`, `TUMBLING` `SESSION` or none. 2. makes the status of a query easier to find. 3. fixes a bug that meant the statement text of a query was not displayed in the CLI. BREAKING CHANGE: The response from the RESTful API has changed for some commands with this commit: the `SourceDescription` type no longer has a `format` field. Instead it has `keyFormat` and `valueFormat` fields. ## `SHOW QUERY` changes: Response now includes a `state` property for each query that indicates the state of the query. e.g. 
```json
{
  "queryString" : "create table OUTPUT as select * from INPUT;",
  "sinks" : [ "OUTPUT" ],
  "id" : "CSAS_OUTPUT_0",
  "state" : "Running"
}
```

The CLI output was:

```
ksql> show queries;

Query ID | Kafka Topic | Query String

CSAS_OUTPUT_0 | OUTPUT | CREATE STREAM OUTPUT WITH (KAFKA_TOPIC='OUTPUT', PARTITIONS=1, REPLICAS=1) AS SELECT * FROM INPUT INPUT EMIT CHANGES;
CTAS_CLICK_USER_SESSIONS_5 | CLICK_USER_SESSIONS | CREATE TABLE CLICK_USER_SESSIONS WITH (KAFKA_TOPIC='CLICK_USER_SESSIONS', PARTITIONS=1, REPLICAS=1) AS SELECT CLICKSTREAM.USERID USERID, COUNT(*) COUNT FROM CLICKSTREAM CLICKSTREAM WINDOW SESSION ( 300 SECONDS ) GROUP BY CLICKSTREAM.USERID EMIT CHANGES;

For detailed information on a Query run: EXPLAIN ;
```

and is now:

```
Query ID | Status | Kafka Topic | Query String

CSAS_OUTPUT_0 | RUNNING | OUTPUT | CREATE STREAM OUTPUT WITH (KAFKA_TOPIC='OUTPUT', PARTITIONS=1, REPLICAS=1) AS SELECT *FROM INPUT INPUTEMIT CHANGES;

For detailed information on a Query run: EXPLAIN ;
```

Note the addition of the `Status` column and the fact that the `Query String` is no longer written across multiple lines.

## `DESCRIBE ;` changes:

old CLI output:

```
ksql> describe CLICK_USER_SESSIONS;

Name : CLICK_USER_SESSIONS

Field | Type
ROWTIME | BIGINT (system)
ROWKEY | INTEGER (system)
USERID | INTEGER
COUNT | BIGINT

For runtime statistics and query details run: DESCRIBE EXTENDED ;
```

New CLI output:

```
ksql> describe CLICK_USER_SESSIONS;

Name : CLICK_USER_SESSIONS

Field | Type
ROWTIME | BIGINT (system)
ROWKEY | INTEGER (system) (Window type: SESSION)
USERID | INTEGER
COUNT | BIGINT

For runtime statistics and query details run: DESCRIBE EXTENDED ;
```

Note the addition of the `Window Type` information.

The extended version of the command has also changed.

Old output:

```
ksql> describe extended CLICK_USER_SESSIONS;

Name : CLICK_USER_SESSIONS
Type : TABLE
Key field : USERID
Key format : STRING
Timestamp field : Not set - using
Value Format : JSON
Kafka topic : CLICK_USER_SESSIONS (partitions: 1, replication: 1)
Statement : CREATE TABLE CLICK_USER_SESSIONS WITH (KAFKA_TOPIC='CLICK_USER_SESSIONS', PARTITIONS=1, REPLICAS=1) AS SELECT CLICKSTREAM.USERID USERID, COUNT(*) COUNT FROM CLICKSTREAM CLICKSTREAM WINDOW SESSION ( 300 SECONDS ) GROUP BY CLICKSTREAM.USERID EMIT CHANGES;

Field | Type
ROWTIME | BIGINT (system)
ROWKEY | INTEGER (system)
USERID | INTEGER
COUNT | BIGINT

Queries that write from this TABLE
-----------------------------------
CTAS_CLICK_USER_SESSIONS_5 (RUNNING) : CREATE TABLE CLICK_USER_SESSIONS WITH (KAFKA_TOPIC='CLICK_USER_SESSIONS', PARTITIONS=1, REPLICAS=1) AS SELECT CLICKSTREAM.USERID USERID, COUNT(*) COUNT FROM CLICKSTREAM CLICKSTREAM WINDOW SESSION ( 300 SECONDS ) GROUP BY CLICKSTREAM.USERID EMIT CHANGES;

For query topology and execution plan please run: EXPLAIN

Local runtime statistics
------------------------
(Statistics of the local KSQL server interaction with the Kafka topic CLICK_USER_SESSIONS)
```

New output:

```
ksql> describe extended CLICK_USER_SESSIONS;

Name : CLICK_USER_SESSIONS
Type : TABLE
Key field : USERID
Timestamp field : Not set - using
Key format : KAFKA
Value format : JSON
Kafka topic : CLICK_USER_SESSIONS (partitions: 1, replication: 1)
Statement : CREATE TABLE CLICK_USER_SESSIONS WITH (KAFKA_TOPIC='CLICK_USER_SESSIONS', PARTITIONS=1, REPLICAS=1) AS SELECT CLICKSTREAM.USERID USERID, COUNT(*) COUNT FROM CLICKSTREAM CLICKSTREAM WINDOW SESSION ( 300 SECONDS ) GROUP BY CLICKSTREAM.USERID EMIT CHANGES;

Field | Type
ROWTIME | BIGINT (system)
ROWKEY | INTEGER (system) (Window type: SESSION)
USERID | INTEGER
COUNT | BIGINT

Queries that write from this TABLE
-----------------------------------
CTAS_CLICK_USER_SESSIONS_5 (RUNNING) : CREATE TABLE CLICK_USER_SESSIONS WITH (KAFKA_TOPIC='CLICK_USER_SESSIONS', PARTITIONS=1, REPLICAS=1) AS SELECT CLICKSTREAM.USERID USERID, COUNT(*) COUNTFROM CLICKSTREAM CLICKSTREAMWINDOW SESSION ( 300 SECONDS ) GROUP BY CLICKSTREAM.USERIDEMIT CHANGES;

For query topology and execution plan please run: EXPLAIN

Local runtime statistics
------------------------
(Statistics of the local KSQL server interaction with the Kafka topic CLICK_USER_SESSIONS)
```

Note the change of `Key format` from `STRING` to `KAFKA`, the output of `Window Type` information for windowed schemas, and that SQL statements are now output on a single line.
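For programmatic clients, the same information surfaces in the RESTful API. A trimmed sketch of the reshaped `sourceDescription` payload, where the field names follow the updated `SourceDescription` serialization in this patch and the values shown are illustrative only:

```json
{
  "sourceDescription" : {
    "name" : "CLICK_USER_SESSIONS",
    "windowType" : "SESSION",
    "keyFormat" : "KAFKA",
    "valueFormat" : "JSON",
    "readQueries" : [ ],
    "writeQueries" : [ {
      "queryString" : "CREATE TABLE CLICK_USER_SESSIONS ...",
      "sinks" : [ "CLICK_USER_SESSIONS" ],
      "id" : "CTAS_CLICK_USER_SESSIONS_5",
      "state" : "RUNNING"
    } ]
  }
}
```

Clients that previously read the removed `format` field should read `keyFormat` and `valueFormat` instead.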
--- .../confluent/ksql/cli/console/Console.java | 47 ++++-- .../table/builder/QueriesTableBuilder.java | 7 +- .../ksql/cli/console/ConsoleTest.java | 58 ++++--- .../rest/entity/QueryDescriptionFactory.java | 5 + .../rest/entity/SourceDescriptionFactory.java | 5 +- .../execution/DescribeConnectorExecutor.java | 1 - .../server/execution/ListQueriesExecutor.java | 7 +- .../server/execution/ListSourceExecutor.java | 4 +- .../entity/QueryDescriptionFactoryTest.java | 5 + .../entity/SourceDescriptionFactoryTest.java | 3 - .../server/execution/ExplainExecutorTest.java | 13 +- .../execution/ListQueriesExecutorTest.java | 17 +- .../execution/ListSourceExecutorTest.java | 35 ++-- .../server/resources/KsqlResourceTest.java | 13 +- .../ksql/rest/entity/QueryDescription.java | 31 +++- .../ksql/rest/entity/RunningQuery.java | 30 +++- .../ksql/rest/entity/SourceDescription.java | 94 +++++------ .../rest/entity/SourceDescriptionTest.java | 154 ++++++++++++++++-- 18 files changed, 384 insertions(+), 145 deletions(-) diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java index 7cc90f5c8799..c2c6d39d0d27 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/Console.java @@ -44,6 +44,7 @@ import io.confluent.ksql.cli.console.table.builder.TopicDescriptionTableBuilder; import io.confluent.ksql.cli.console.table.builder.TypeListTableBuilder; import io.confluent.ksql.json.JsonMapper; +import io.confluent.ksql.model.WindowType; import io.confluent.ksql.rest.entity.ArgumentInfo; import io.confluent.ksql.rest.entity.CommandStatusEntity; import io.confluent.ksql.rest.entity.ConnectorDescription; @@ -448,22 +449,40 @@ private void printWarnings(final KsqlEntity entity) { } } - private static String formatFieldType(final FieldInfo field, final String keyField) { - if (field.getName().equals("ROWTIME") || field.getName().equals("ROWKEY")) { + private static String formatFieldType( + final FieldInfo field, + final Optional windowType, + final String keyField + ) { + if (field.getName().equals("ROWTIME")) { return String.format("%-16s %s", field.getSchema().toTypeString(), "(system)"); - } else if (keyField != null && keyField.contains("." + field.getName())) { + } + + if (field.getName().equals("ROWKEY")) { + final String wt = windowType + .map(v -> " (Window type: " + v + ")") + .orElse(""); + + return String.format("%-16s %s%s", field.getSchema().toTypeString(), "(system)", wt); + } + + if (keyField != null && keyField.contains("." 
+ field.getName())) { return String.format("%-16s %s", field.getSchema().toTypeString(), "(key)"); - } else { - return field.getSchema().toTypeString(); } + + return field.getSchema().toTypeString(); } - private void printSchema(final List fields, final String keyField) { + private void printSchema( + final Optional windowType, + final List fields, + final String keyField + ) { final Table.Builder tableBuilder = new Table.Builder(); if (!fields.isEmpty()) { tableBuilder.withColumnHeaders("Field", "Type"); fields.forEach( - f -> tableBuilder.withRow(f.getName(), formatFieldType(f, keyField))); + f -> tableBuilder.withRow(f.getName(), formatFieldType(f, windowType, keyField))); tableBuilder.build().print(this); } } @@ -474,9 +493,9 @@ private void printTopicInfo(final SourceDescription source) { : source.getTimestamp(); writer().println(String.format("%-20s : %s", "Key field", source.getKey())); - writer().println(String.format("%-20s : %s", "Key format", "STRING")); writer().println(String.format("%-20s : %s", "Timestamp field", timestamp)); - writer().println(String.format("%-20s : %s", "Value format", source.getFormat())); + writer().println(String.format("%-20s : %s", "Key format", source.getKeyFormat())); + writer().println(String.format("%-20s : %s", "Value format", source.getValueFormat())); if (!source.getTopic().isEmpty()) { String topicInformation = String.format("%-20s : %s", @@ -509,7 +528,9 @@ private void printQueries( "-----------------------------------" )); for (final RunningQuery writeQuery : queries) { - writer().println(writeQuery.getId().getId() + " : " + writeQuery.getQueryString()); + writer().println(writeQuery.getId().getId() + + " (" + writeQuery.getState().orElse("N/A") + + ") : " + writeQuery.getQuerySingleLine()); } writer().println("\nFor query topology and execution plan please run: EXPLAIN "); } @@ -562,7 +583,7 @@ private void printOverriddenProperties(final QueryDescription queryDescription) private void printSourceDescription(final SourceDescription source) { writer().println(String.format("%-20s : %s", "Name", source.getName())); if (!source.isExtended()) { - printSchema(source.getFields(), source.getKey()); + printSchema(source.getWindowType(), source.getFields(), source.getKey()); writer().println( "For runtime statistics and query details run: DESCRIBE EXTENDED ;"); return; @@ -573,7 +594,7 @@ private void printSourceDescription(final SourceDescription source) { writer().println(String.format("%-20s : %s", "Statement", source.getStatement())); writer().println(""); - printSchema(source.getFields(), source.getKey()); + printSchema(source.getWindowType(), source.getFields(), source.getKey()); printQueries(source.getReadQueries(), source.getType(), "read"); @@ -638,7 +659,7 @@ private void printQueryDescription(final QueryDescription query) { writer().println(String.format("%-20s : %s", "Status", query.getState().get())); } writer().println(); - printSchema(query.getFields(), ""); + printSchema(query.getWindowType(), query.getFields(), ""); printQuerySources(query); printQuerySinks(query); printExecutionPlan(query); diff --git a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/QueriesTableBuilder.java b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/QueriesTableBuilder.java index 079f507f9ece..52d74297a78e 100644 --- a/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/QueriesTableBuilder.java +++ b/ksql-cli/src/main/java/io/confluent/ksql/cli/console/table/builder/QueriesTableBuilder.java @@ -25,14 
+25,17 @@ public class QueriesTableBuilder implements TableBuilder { private static final List HEADERS = - ImmutableList.of("Query ID", "Kafka Topic", "Query String"); + ImmutableList.of("Query ID", "Status", "Kafka Topic", "Query String"); @Override public Table buildTable(final Queries entity) { final Stream> rows = entity.getQueries().stream() .map(r -> ImmutableList.of( r.getId().getId(), - String.join(",", r.getSinks()), r.getQueryString())); + r.getState().orElse("N/A"), + String.join(",", r.getSinks()), + r.getQuerySingleLine() + )); return new Builder() .withColumnHeaders(HEADERS) diff --git a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java index aa8f29638bfd..ea63de882e3a 100644 --- a/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java +++ b/ksql-cli/src/test/java/io/confluent/ksql/cli/console/ConsoleTest.java @@ -80,6 +80,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Supplier; import org.apache.commons.lang3.StringUtils; import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo; @@ -109,6 +110,7 @@ public class ConsoleTest { private final CliSpecificCommand cliCommand; private final SourceDescription sourceDescription = new SourceDescription( "TestSource", + Optional.empty(), Collections.emptyList(), Collections.emptyList(), buildTestSchema(SqlTypes.INTEGER, SqlTypes.STRING), @@ -118,6 +120,7 @@ public class ConsoleTest { "stats", "errors", true, + "kafka", "avro", "kadka-topic", 2, @@ -285,7 +288,7 @@ public void testPrintQueries() { final List queries = new ArrayList<>(); queries.add( new RunningQuery( - "select * from t1", Collections.singleton("Test"), new QueryId("0"))); + "select * from t1", Collections.singleton("Test"), new QueryId("0"), Optional.of("Running"))); final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of( new Queries("e", queries) @@ -301,18 +304,19 @@ public void testPrintQueries() { + " \"@type\" : \"queries\",\n" + " \"statementText\" : \"e\",\n" + " \"queries\" : [ {\n" + + " \"queryString\" : \"select * from t1\",\n" + " \"sinks\" : [ \"Test\" ],\n" + " \"id\" : \"0\",\n" - + " \"queryString\" : \"select * from t1\"\n" + + " \"state\" : \"Running\"\n" + " } ],\n" + " \"warnings\" : [ ]\n" + "} ]\n")); } else { assertThat(output, is("\n" - + " Query ID | Kafka Topic | Query String \n" - + "-------------------------------------------\n" - + " 0 | Test | select * from t1 \n" - + "-------------------------------------------\n" + + " Query ID | Status | Kafka Topic | Query String \n" + + "-----------------------------------------------------\n" + + " 0 | Running | Test | select * from t1 \n" + + "-----------------------------------------------------\n" + "For detailed information on a Query run: EXPLAIN ;\n")); } } @@ -334,10 +338,10 @@ public void testPrintSourceDescription() { ); final List readQueries = ImmutableList.of( - new RunningQuery("read query", ImmutableSet.of("sink1"), new QueryId("readId")) + new RunningQuery("read query", ImmutableSet.of("sink1"), new QueryId("readId"), Optional.of("Running")) ); final List writeQueries = ImmutableList.of( - new RunningQuery("write query", ImmutableSet.of("sink2"), new QueryId("writeId")) + new RunningQuery("write query", ImmutableSet.of("sink2"), new QueryId("writeId"), Optional.of("Running")) ); final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of( @@ -345,6 
+349,7 @@ public void testPrintSourceDescription() { "some sql", new SourceDescription( "TestSource", + Optional.empty(), readQueries, writeQueries, fields, @@ -354,6 +359,7 @@ public void testPrintSourceDescription() { "stats", "errors", false, + "kafka", "avro", "kadka-topic", 1, @@ -375,15 +381,18 @@ public void testPrintSourceDescription() { + " \"statementText\" : \"some sql\",\n" + " \"sourceDescription\" : {\n" + " \"name\" : \"TestSource\",\n" + + " \"windowType\" : null,\n" + " \"readQueries\" : [ {\n" + + " \"queryString\" : \"read query\",\n" + " \"sinks\" : [ \"sink1\" ],\n" + " \"id\" : \"readId\",\n" - + " \"queryString\" : \"read query\"\n" + + " \"state\" : \"Running\"\n" + " } ],\n" + " \"writeQueries\" : [ {\n" + + " \"queryString\" : \"write query\",\n" + " \"sinks\" : [ \"sink2\" ],\n" + " \"id\" : \"writeId\",\n" - + " \"queryString\" : \"write query\"\n" + + " \"state\" : \"Running\"\n" + " } ],\n" + " \"fields\" : [ {\n" + " \"name\" : \"ROWTIME\",\n" @@ -477,7 +486,8 @@ public void testPrintSourceDescription() { + " \"statistics\" : \"stats\",\n" + " \"errorStats\" : \"errors\",\n" + " \"extended\" : false,\n" - + " \"format\" : \"avro\",\n" + + " \"keyFormat\" : \"kafka\",\n" + + " \"valueFormat\" : \"avro\",\n" + " \"topic\" : \"kadka-topic\",\n" + " \"partitions\" : 1,\n" + " \"replication\" : 1,\n" @@ -583,6 +593,7 @@ public void testPrintConnectorDescription() { + " },\n" + " \"sources\" : [ {\n" + " \"name\" : \"TestSource\",\n" + + " \"windowType\" : null,\n" + " \"readQueries\" : [ ],\n" + " \"writeQueries\" : [ ],\n" + " \"fields\" : [ {\n" @@ -620,7 +631,8 @@ public void testPrintConnectorDescription() { + " \"statistics\" : \"stats\",\n" + " \"errorStats\" : \"errors\",\n" + " \"extended\" : true,\n" - + " \"format\" : \"avro\",\n" + + " \"keyFormat\" : \"kafka\",\n" + + " \"valueFormat\" : \"avro\",\n" + " \"topic\" : \"kadka-topic\",\n" + " \"partitions\" : 2,\n" + " \"replication\" : 1,\n" @@ -979,10 +991,10 @@ public void testPrintExecuptionPlan() { public void shouldPrintTopicDescribeExtended() { // Given: final List readQueries = ImmutableList.of( - new RunningQuery("read query", ImmutableSet.of("sink1"), new QueryId("readId")) + new RunningQuery("read query", ImmutableSet.of("sink1"), new QueryId("readId"), Optional.of("Running")) ); final List writeQueries = ImmutableList.of( - new RunningQuery("write query", ImmutableSet.of("sink2"), new QueryId("writeId")) + new RunningQuery("write query", ImmutableSet.of("sink2"), new QueryId("writeId"), Optional.of("Running")) ); final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of( @@ -990,6 +1002,7 @@ public void shouldPrintTopicDescribeExtended() { "e", new SourceDescription( "TestSource", + Optional.empty(), readQueries, writeQueries, buildTestSchema(SqlTypes.STRING), @@ -999,6 +1012,7 @@ public void shouldPrintTopicDescribeExtended() { "stats", "errors", true, + "kafka", "avro", "kadka-topic", 2, 1, @@ -1019,15 +1033,18 @@ public void shouldPrintTopicDescribeExtended() { + " \"statementText\" : \"e\",\n" + " \"sourceDescription\" : {\n" + " \"name\" : \"TestSource\",\n" + + " \"windowType\" : null,\n" + " \"readQueries\" : [ {\n" + + " \"queryString\" : \"read query\",\n" + " \"sinks\" : [ \"sink1\" ],\n" + " \"id\" : \"readId\",\n" - + " \"queryString\" : \"read query\"\n" + + " \"state\" : \"Running\"\n" + " } ],\n" + " \"writeQueries\" : [ {\n" + + " \"queryString\" : \"write query\",\n" + " \"sinks\" : [ \"sink2\" ],\n" + " \"id\" : \"writeId\",\n" - + " \"queryString\" : \"write 
query\"\n" + + " \"state\" : \"Running\"\n" + " } ],\n" + " \"fields\" : [ {\n" + " \"name\" : \"ROWTIME\",\n" @@ -1057,7 +1074,8 @@ public void shouldPrintTopicDescribeExtended() { + " \"statistics\" : \"stats\",\n" + " \"errorStats\" : \"errors\",\n" + " \"extended\" : true,\n" - + " \"format\" : \"avro\",\n" + + " \"keyFormat\" : \"kafka\",\n" + + " \"valueFormat\" : \"avro\",\n" + " \"topic\" : \"kadka-topic\",\n" + " \"partitions\" : 2,\n" + " \"replication\" : 1,\n" @@ -1070,8 +1088,8 @@ public void shouldPrintTopicDescribeExtended() { + "Name : TestSource\n" + "Type : TABLE\n" + "Key field : key\n" - + "Key format : STRING\n" + "Timestamp field : 2000-01-01\n" + + "Key format : kafka\n" + "Value format : avro\n" + "Kafka topic : kadka-topic (partitions: 2, replication: 1)\n" + "Statement : sql statement text\n" @@ -1085,13 +1103,13 @@ public void shouldPrintTopicDescribeExtended() { + "\n" + "Queries that read from this TABLE\n" + "-----------------------------------\n" - + "readId : read query\n" + + "readId (Running) : read query\n" + "\n" + "For query topology and execution plan please run: EXPLAIN \n" + "\n" + "Queries that write from this TABLE\n" + "-----------------------------------\n" - + "writeId : write query\n" + + "writeId (Running) : write query\n" + "\n" + "For query topology and execution plan please run: EXPLAIN \n" + "\n" diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/QueryDescriptionFactory.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/QueryDescriptionFactory.java index 869f2ad9d715..d323904d819e 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/QueryDescriptionFactory.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/QueryDescriptionFactory.java @@ -16,6 +16,7 @@ package io.confluent.ksql.rest.entity; import com.google.common.collect.ImmutableSet; +import io.confluent.ksql.model.WindowType; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.rest.util.EntityUtil; @@ -37,6 +38,7 @@ public static QueryDescription forQueryMetadata(final QueryMetadata queryMetadat return create( persistentQuery.getQueryId(), persistentQuery, + persistentQuery.getResultTopic().getKeyFormat().getWindowType(), ImmutableSet.of(persistentQuery.getSinkName()), Optional.of(persistentQuery.getState()) ); @@ -45,6 +47,7 @@ public static QueryDescription forQueryMetadata(final QueryMetadata queryMetadat return create( new QueryId(""), queryMetadata, + Optional.empty(), Collections.emptySet(), Optional.empty() ); @@ -53,12 +56,14 @@ public static QueryDescription forQueryMetadata(final QueryMetadata queryMetadat private static QueryDescription create( final QueryId id, final QueryMetadata queryMetadata, + final Optional windowType, final Set sinks, final Optional state ) { return new QueryDescription( id, queryMetadata.getStatementString(), + windowType, EntityUtil.buildSourceSchemaEntity(queryMetadata.getLogicalSchema()), queryMetadata.getSourceNames().stream().map(SourceName::name).collect(Collectors.toSet()), sinks.stream().map(SourceName::name).collect(Collectors.toSet()), diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescriptionFactory.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescriptionFactory.java index ea357831049a..c7ac042aad7a 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescriptionFactory.java +++ 
b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/entity/SourceDescriptionFactory.java @@ -32,13 +32,13 @@ private SourceDescriptionFactory() { public static SourceDescription create( final DataSource dataSource, final boolean extended, - final String format, final List readQueries, final List writeQueries, final Optional topicDescription ) { return new SourceDescription( dataSource.getName().toString(FormatOptions.noEscape()), + dataSource.getKsqlTopic().getKeyFormat().getWindowType(), readQueries, writeQueries, EntityUtil.buildSourceSchemaEntity(dataSource.getSchema()), @@ -54,7 +54,8 @@ public static SourceDescription create( ? MetricCollectors.getAndFormatStatsFor( dataSource.getKafkaTopicName(), true) : ""), extended, - format, + dataSource.getKsqlTopic().getKeyFormat().getFormat().name(), + dataSource.getKsqlTopic().getValueFormat().getFormat().name(), dataSource.getKafkaTopicName(), topicDescription.map(td -> td.partitions().size()).orElse(0), topicDescription.map(td -> td.partitions().get(0).replicas().size()).orElse(0), diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutor.java index fa1385c27b37..293ceff571cd 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/DescribeConnectorExecutor.java @@ -106,7 +106,6 @@ public Optional execute( .map(source -> SourceDescriptionFactory.create( source, false, - source.getKsqlTopic().getValueFormat().getFormat().name(), ImmutableList.of(), ImmutableList.of(), Optional.empty())) diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListQueriesExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListQueriesExecutor.java index 90f1265d0a81..e57caf60ef06 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListQueriesExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListQueriesExecutor.java @@ -51,11 +51,12 @@ public static Optional execute( statement.getStatementText(), executionContext.getPersistentQueries() .stream() - .map( - q -> new RunningQuery( + .map(q -> new RunningQuery( q.getStatementString(), ImmutableSet.of(q.getSinkName().name()), - q.getQueryId())) + q.getQueryId(), + Optional.of(q.getState()) + )) .collect(Collectors.toList()))); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListSourceExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListSourceExecutor.java index 6b2b03b6bb99..7b474dd3e76f 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListSourceExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListSourceExecutor.java @@ -205,7 +205,6 @@ private static SourceDescriptionWithWarnings describeSource( SourceDescriptionFactory.create( dataSource, extended, - dataSource.getKsqlTopic().getValueFormat().getFormat().name(), getQueries(ksqlEngine, q -> q.getSourceNames().contains(dataSource.getName())), getQueries(ksqlEngine, q -> q.getSinkName().equals(dataSource.getName())), topicDescription @@ -223,7 +222,8 @@ private static List getQueries( .map(q -> new RunningQuery( q.getStatementString(), ImmutableSet.of(q.getSinkName().name()), - q.getQueryId() + q.getQueryId(), + 
Optional.of(q.getState()) )) .collect(Collectors.toList()); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java index 79c11ad3f9ff..f4ac5003f630 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/QueryDescriptionFactoryTest.java @@ -32,6 +32,9 @@ import io.confluent.ksql.schema.ksql.PhysicalSchema; import io.confluent.ksql.schema.ksql.SqlBaseType; import io.confluent.ksql.schema.ksql.types.SqlTypes; +import io.confluent.ksql.serde.Format; +import io.confluent.ksql.serde.FormatInfo; +import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.util.KsqlConfig; import io.confluent.ksql.util.PersistentQueryMetadata; @@ -97,6 +100,8 @@ public void setUp() { when(topology.describe()).thenReturn(topologyDescription); when(queryStreams.state()).thenReturn(State.RUNNING); + when(sinkTopic.getKeyFormat()).thenReturn(KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA))); + transientQuery = new TransientQueryMetadata( SQL_TEXT, queryStreams, diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionFactoryTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionFactoryTest.java index 27fcacf6c4f0..a0f4da4c9552 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionFactoryTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionFactoryTest.java @@ -118,7 +118,6 @@ public void shouldReturnStatsBasedOnKafkaTopic() { final SourceDescription sourceDescription = SourceDescriptionFactory.create( dataSource, true, - "json", Collections.emptyList(), Collections.emptyList(), Optional.empty()); @@ -142,7 +141,6 @@ public void shouldReturnEmptyTimestampColumn() { final SourceDescription sourceDescription = SourceDescriptionFactory.create( dataSource, true, - "json", Collections.emptyList(), Collections.emptyList(), Optional.empty()); @@ -165,7 +163,6 @@ public void shouldReturnTimestampColumnIfPresent() { final SourceDescription sourceDescription = SourceDescriptionFactory.create( dataSource, true, - "json", Collections.emptyList(), Collections.emptyList(), Optional.empty()); diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ExplainExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ExplainExecutorTest.java index a4bf771c84f7..a55116ca5de5 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ExplainExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ExplainExecutorTest.java @@ -26,11 +26,15 @@ import com.google.common.collect.ImmutableMap; import io.confluent.ksql.engine.KsqlEngine; +import io.confluent.ksql.execution.ddl.commands.KsqlTopic; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.rest.entity.QueryDescriptionEntity; import io.confluent.ksql.rest.entity.QueryDescriptionFactory; import io.confluent.ksql.rest.server.TemporaryEngine; +import io.confluent.ksql.serde.Format; +import io.confluent.ksql.serde.FormatInfo; +import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.KsqlException; import 
io.confluent.ksql.util.PersistentQueryMetadata; @@ -54,7 +58,6 @@ public void shouldExplainQueryId() { // Given: final ConfiguredStatement explain = engine.configure("EXPLAIN id;"); final PersistentQueryMetadata metadata = givenPersistentQuery("id"); - when(metadata.getState()).thenReturn("Running"); final KsqlEngine engine = mock(KsqlEngine.class); when(engine.getPersistentQuery(metadata.getQueryId())).thenReturn(Optional.of(metadata)); @@ -153,6 +156,14 @@ public static PersistentQueryMetadata givenPersistentQuery(final String id) { when(metadata.getQueryId()).thenReturn(new QueryId(id)); when(metadata.getSinkName()).thenReturn(SourceName.of(id)); when(metadata.getLogicalSchema()).thenReturn(TemporaryEngine.SCHEMA); + when(metadata.getState()).thenReturn("Running"); + when(metadata.getTopologyDescription()).thenReturn("topology"); + when(metadata.getExecutionPlan()).thenReturn("plan"); + when(metadata.getStatementString()).thenReturn("sql"); + + final KsqlTopic sinkTopic = mock(KsqlTopic.class); + when(sinkTopic.getKeyFormat()).thenReturn(KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA))); + when(metadata.getResultTopic()).thenReturn(sinkTopic); return metadata; } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListQueriesExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListQueriesExecutorTest.java index b5fe79efddf4..579fd7b14959 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListQueriesExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListQueriesExecutorTest.java @@ -26,6 +26,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import io.confluent.ksql.engine.KsqlEngine; +import io.confluent.ksql.execution.ddl.commands.KsqlTopic; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.query.QueryId; import io.confluent.ksql.rest.entity.Queries; @@ -33,8 +34,12 @@ import io.confluent.ksql.rest.entity.QueryDescriptionList; import io.confluent.ksql.rest.entity.RunningQuery; import io.confluent.ksql.rest.server.TemporaryEngine; +import io.confluent.ksql.serde.Format; +import io.confluent.ksql.serde.FormatInfo; +import io.confluent.ksql.serde.KeyFormat; import io.confluent.ksql.statement.ConfiguredStatement; import io.confluent.ksql.util.PersistentQueryMetadata; +import java.util.Optional; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; @@ -79,7 +84,9 @@ public void shouldListQueriesBasic() { new RunningQuery( metadata.getStatementString(), ImmutableSet.of(metadata.getSinkName().name()), - metadata.getQueryId()))); + metadata.getQueryId(), + Optional.of(metadata.getState()) + ))); } @Test @@ -107,9 +114,17 @@ public void shouldListQueriesExtended() { @SuppressWarnings("SameParameterValue") public static PersistentQueryMetadata givenPersistentQuery(final String id) { final PersistentQueryMetadata metadata = mock(PersistentQueryMetadata.class); + when(metadata.getStatementString()).thenReturn("sql"); when(metadata.getQueryId()).thenReturn(new QueryId(id)); when(metadata.getSinkName()).thenReturn(SourceName.of(id)); when(metadata.getLogicalSchema()).thenReturn(TemporaryEngine.SCHEMA); + when(metadata.getState()).thenReturn("Running"); + when(metadata.getTopologyDescription()).thenReturn("topology"); + when(metadata.getExecutionPlan()).thenReturn("plan"); + + final KsqlTopic sinkTopic = mock(KsqlTopic.class); + 
when(sinkTopic.getKeyFormat()).thenReturn(KeyFormat.nonWindowed(FormatInfo.of(Format.KAFKA))); + when(metadata.getResultTopic()).thenReturn(sinkTopic); return metadata; } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListSourceExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListSourceExecutorTest.java index 7cbbe411bd07..baa8890122f8 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListSourceExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListSourceExecutorTest.java @@ -22,9 +22,11 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -56,10 +58,15 @@ import java.util.Arrays; import java.util.Optional; import java.util.stream.Collectors; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartitionInfo; +import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; +import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) @@ -70,6 +77,17 @@ public class ListSourceExecutorTest { @Rule public final ExpectedException expectedException = ExpectedException.none(); + @Mock + private TopicDescription topicWith1PartitionAndRfOf1; + + @Before + public void setUp() { + final Node node = mock(Node.class); + final TopicPartitionInfo topicInfo = mock(TopicPartitionInfo.class); + when(topicInfo.replicas()).thenReturn(ImmutableList.of(node)); + when(topicWith1PartitionAndRfOf1.partitions()).thenReturn(ImmutableList.of(topicInfo)); + } + @Test public void shouldShowStreams() { // Given: @@ -122,17 +140,15 @@ public void shouldShowStreamsExtended() { SourceDescriptionFactory.create( stream1, true, - "JSON", ImmutableList.of(), ImmutableList.of(), - Optional.empty()), + Optional.of(topicWith1PartitionAndRfOf1)), SourceDescriptionFactory.create( stream2, true, - "JSON", ImmutableList.of(), ImmutableList.of(), - Optional.empty()) + Optional.of(topicWith1PartitionAndRfOf1)) )); } @@ -191,7 +207,6 @@ public void shouldShowTablesExtended() { SourceDescriptionFactory.create( table1, true, - "JSON", ImmutableList.of(), ImmutableList.of(), Optional.of(client.describeTopic(table1.getKafkaTopicName())) @@ -199,7 +214,6 @@ public void shouldShowTablesExtended() { SourceDescriptionFactory.create( table2, true, - "JSON", ImmutableList.of(), ImmutableList.of(), Optional.of(client.describeTopic(table1.getKafkaTopicName())) @@ -239,12 +253,13 @@ public void shouldShowColumnsSource() { equalTo(SourceDescriptionFactory.create( stream, false, - "JSON", ImmutableList.of(), ImmutableList.of(new RunningQuery( metadata.getStatementString(), ImmutableSet.of(metadata.getSinkName().toString(FormatOptions.noEscape())), - metadata.getQueryId())), + metadata.getQueryId(), + Optional.of(metadata.getState()) + )), Optional.empty()))); } @@ -288,7 +303,7 @@ public void shouldNotCallTopicClientForExtendedDescription() { verify(spyTopicClient, never()).describeTopic(anyString()); } - private 
void assertSourceListWithWarning( + private static void assertSourceListWithWarning( final KsqlEntity entity, final DataSource... sources) { assertThat(entity, instanceOf(SourceDescriptionList.class)); @@ -302,7 +317,6 @@ private void assertSourceListWithWarning( SourceDescriptionFactory.create( s, true, - "JSON", ImmutableList.of(), ImmutableList.of(), Optional.empty() @@ -389,7 +403,6 @@ public void shouldAddWarningOnClientExceptionForDescription() { SourceDescriptionFactory.create( stream1, true, - "JSON", ImmutableList.of(), ImmutableList.of(), Optional.empty() diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java index ca0a2e48258c..d73a51925300 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/KsqlResourceTest.java @@ -473,11 +473,11 @@ public void shouldShowStreamsExtended() { assertThat(descriptionList.getSourceDescriptions(), containsInAnyOrder( SourceDescriptionFactory.create( ksqlEngine.getMetaStore().getSource(SourceName.of("TEST_STREAM")), - true, "JSON", Collections.emptyList(), Collections.emptyList(), + true, Collections.emptyList(), Collections.emptyList(), Optional.of(kafkaTopicClient.describeTopic("KAFKA_TOPIC_2"))), SourceDescriptionFactory.create( ksqlEngine.getMetaStore().getSource(SourceName.of("new_stream")), - true, "JSON", Collections.emptyList(), Collections.emptyList(), + true, Collections.emptyList(), Collections.emptyList(), Optional.of(kafkaTopicClient.describeTopic("new_topic")))) ); } @@ -502,11 +502,11 @@ public void shouldShowTablesExtended() { assertThat(descriptionList.getSourceDescriptions(), containsInAnyOrder( SourceDescriptionFactory.create( ksqlEngine.getMetaStore().getSource(SourceName.of("TEST_TABLE")), - true, "JSON", Collections.emptyList(), Collections.emptyList(), + true, Collections.emptyList(), Collections.emptyList(), Optional.of(kafkaTopicClient.describeTopic("KAFKA_TOPIC_1"))), SourceDescriptionFactory.create( ksqlEngine.getMetaStore().getSource(SourceName.of("new_table")), - true, "JSON", Collections.emptyList(), Collections.emptyList(), + true, Collections.emptyList(), Collections.emptyList(), Optional.of(kafkaTopicClient.describeTopic("new_topic")))) ); } @@ -547,7 +547,6 @@ public void shouldDescribeStatement() { final SourceDescription expectedDescription = SourceDescriptionFactory.create( ksqlEngine.getMetaStore().getSource(SourceName.of("DESCRIBED_STREAM")), false, - "JSON", Collections.singletonList(queries.get(1)), Collections.singletonList(queries.get(0)), Optional.empty() @@ -1959,7 +1958,9 @@ private List createRunningQueries( .map(md -> new RunningQuery( md.getStatementString(), ImmutableSet.of(md.getSinkName().toString(FormatOptions.noEscape())), - md.getQueryId())) + md.getQueryId(), + Optional.of(md.getState()) + )) .collect(Collectors.toList()); } diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/QueryDescription.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/QueryDescription.java index d62ec29b6894..cd6bbe599a50 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/QueryDescription.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/QueryDescription.java @@ -18,8 +18,11 @@ import com.fasterxml.jackson.annotation.JsonCreator; import 
com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import io.confluent.ksql.model.WindowType; import io.confluent.ksql.query.QueryId; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -31,6 +34,7 @@ public class QueryDescription { private final QueryId id; private final String statementText; + private final Optional windowType; private final List fields; private final Set sources; private final Set sinks; @@ -44,6 +48,7 @@ public class QueryDescription { public QueryDescription( @JsonProperty("id") final QueryId id, @JsonProperty("statementText") final String statementText, + @JsonProperty("windowType") final Optional windowType, @JsonProperty("fields") final List fields, @JsonProperty("sources") final Set sources, @JsonProperty("sinks") final Set sinks, @@ -52,14 +57,16 @@ public QueryDescription( @JsonProperty("overriddenProperties") final Map overriddenProperties, @JsonProperty("state") final Optional state ) { - this.id = id; - this.statementText = statementText; - this.fields = Collections.unmodifiableList(fields); - this.sources = Collections.unmodifiableSet(sources); - this.sinks = Collections.unmodifiableSet(sinks); - this.topology = topology; - this.executionPlan = executionPlan; - this.overriddenProperties = Collections.unmodifiableMap(overriddenProperties); + this.id = Objects.requireNonNull(id, "id"); + this.statementText = Objects.requireNonNull(statementText, "statementText"); + this.windowType = Objects.requireNonNull(windowType, "windowType"); + this.fields = ImmutableList.copyOf(Objects.requireNonNull(fields, "fields")); + this.sources = ImmutableSet.copyOf(Objects.requireNonNull(sources, "sources")); + this.sinks = ImmutableSet.copyOf(Objects.requireNonNull(sinks, "sinks")); + this.topology = Objects.requireNonNull(topology, "topology"); + this.executionPlan = Objects.requireNonNull(executionPlan, "executionPlan"); + this.overriddenProperties = ImmutableMap.copyOf(Objects + .requireNonNull(overriddenProperties, "overriddenProperties")); this.state = Objects.requireNonNull(state, "state"); } @@ -71,6 +78,10 @@ public String getStatementText() { return statementText; } + public Optional getWindowType() { + return windowType; + } + public List getFields() { return fields; } @@ -112,6 +123,7 @@ public boolean equals(final Object o) { final QueryDescription that = (QueryDescription) o; return Objects.equals(id, that.id) && Objects.equals(statementText, that.statementText) + && Objects.equals(windowType, that.windowType) && Objects.equals(fields, that.fields) && Objects.equals(topology, that.topology) && Objects.equals(executionPlan, that.executionPlan) @@ -126,6 +138,7 @@ public int hashCode() { return Objects.hash( id, statementText, + windowType, fields, topology, executionPlan, diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/RunningQuery.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/RunningQuery.java index fb03cafbae2d..6b28978686a6 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/RunningQuery.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/RunningQuery.java @@ -16,33 +16,44 @@ package io.confluent.ksql.rest.entity; import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; import 
com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; import io.confluent.ksql.query.QueryId; import java.util.Objects; +import java.util.Optional; import java.util.Set; @JsonIgnoreProperties(ignoreUnknown = true) public class RunningQuery { + private final String queryString; private final Set sinks; private final QueryId id; + private final Optional state; @JsonCreator public RunningQuery( - @JsonProperty("statementText") final String queryString, + @JsonProperty("queryString") final String queryString, @JsonProperty("sinks") final Set sinks, - @JsonProperty("id") final QueryId id + @JsonProperty("id") final QueryId id, + @JsonProperty("state") final Optional state ) { - this.queryString = queryString; - this.sinks = sinks; - this.id = id; + this.queryString = Objects.requireNonNull(queryString, "queryString"); + this.sinks = Objects.requireNonNull(sinks, "sinks"); + this.id = Objects.requireNonNull(id, "id"); + this.state = Objects.requireNonNull(state, "state"); } public String getQueryString() { return queryString; } + @JsonIgnore + public String getQuerySingleLine() { + return queryString.replaceAll(System.lineSeparator(), ""); + } + public Set getSinks() { return sinks; } @@ -51,6 +62,10 @@ public QueryId getId() { return id; } + public Optional getState() { + return state; + } + @Override public boolean equals(final Object o) { if (this == o) { @@ -62,11 +77,12 @@ public boolean equals(final Object o) { final RunningQuery that = (RunningQuery) o; return Objects.equals(id, that.id) && Objects.equals(queryString, that.queryString) - && Objects.equals(sinks, that.sinks); + && Objects.equals(sinks, that.sinks) + && Objects.equals(state, that.state); } @Override public int hashCode() { - return Objects.hash(id, queryString, id); + return Objects.hash(id, queryString, id, state); } } diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java index 19416348a798..6affc6eeba41 100644 --- a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java +++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/SourceDescription.java @@ -20,9 +20,11 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeName; +import io.confluent.ksql.model.WindowType; import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.Optional; @JsonIgnoreProperties(ignoreUnknown = true) @JsonTypeName("description") @@ -30,6 +32,7 @@ public class SourceDescription { private final String name; + private final Optional windowType; private final List readQueries; private final List writeQueries; private final List fields; @@ -39,7 +42,8 @@ public class SourceDescription { private final String statistics; private final String errorStats; private final boolean extended; - private final String format; + private final String keyFormat; + private final String valueFormat; private final String topic; private final int partitions; private final int replication; @@ -49,6 +53,7 @@ public class SourceDescription { @JsonCreator public SourceDescription( @JsonProperty("name") final String name, + @JsonProperty("windowType") final Optional windowType, @JsonProperty("readQueries") final List readQueries, @JsonProperty("writeQueries") final List writeQueries, @JsonProperty("fields") final List 
fields, @@ -58,7 +63,8 @@ public SourceDescription( @JsonProperty("statistics") final String statistics, @JsonProperty("errorStats") final String errorStats, @JsonProperty("extended") final boolean extended, - @JsonProperty("format") final String format, + @JsonProperty("keyFormat") final String keyFormat, + @JsonProperty("valueFormat") final String valueFormat, @JsonProperty("topic") final String topic, @JsonProperty("partitions") final int partitions, @JsonProperty("replication") final int replication, @@ -66,6 +72,7 @@ public SourceDescription( ) { // CHECKSTYLE_RULES.ON: ParameterNumberCheck this.name = Objects.requireNonNull(name, "name"); + this.windowType = Objects.requireNonNull(windowType, "windowType"); this.readQueries = Collections.unmodifiableList(Objects.requireNonNull(readQueries, "readQueries")); this.writeQueries = @@ -77,8 +84,9 @@ public SourceDescription( this.timestamp = Objects.requireNonNull(timestamp, "timestamp"); this.statistics = Objects.requireNonNull(statistics, "statistics"); this.errorStats = Objects.requireNonNull(errorStats, "errorStats"); - this.extended = Objects.requireNonNull(extended, "extended"); - this.format = Objects.requireNonNull(format, "format"); + this.extended = extended; + this.keyFormat = Objects.requireNonNull(keyFormat, "keyFormat"); + this.valueFormat = Objects.requireNonNull(valueFormat, "valueFormat"); this.topic = Objects.requireNonNull(topic, "topic"); this.partitions = partitions; this.replication = replication; @@ -89,6 +97,10 @@ public String getStatement() { return statement; } + public Optional getWindowType() { + return windowType; + } + public int getPartitions() { return partitions; } @@ -113,8 +125,12 @@ public String getType() { return type; } - public String getFormat() { - return format; + public String getKeyFormat() { + return keyFormat; + } + + public String getValueFormat() { + return valueFormat; } public String getTopic() { @@ -145,62 +161,41 @@ public String getErrorStats() { return errorStats; } - private boolean equals2(final SourceDescription that) { - if (!Objects.equals(topic, that.topic)) { - return false; - } - if (!Objects.equals(key, that.key)) { - return false; - } - if (!Objects.equals(writeQueries, that.writeQueries)) { - return false; - } - if (!Objects.equals(readQueries, that.readQueries)) { - return false; - } - if (!Objects.equals(timestamp, that.timestamp)) { - return false; - } - if (!Objects.equals(statistics, that.statistics)) { - return false; - } - if (!Objects.equals(errorStats, that.errorStats)) { - return false; - } - return Objects.equals(statement, that.statement); - } - + // CHECKSTYLE_RULES.OFF: CyclomaticComplexity @Override public boolean equals(final Object o) { + // CHECKSTYLE_RULES.ON: CyclomaticComplexity if (this == o) { return true; } - if (!(o instanceof SourceDescription)) { + if (o == null || getClass() != o.getClass()) { return false; } final SourceDescription that = (SourceDescription) o; - if (!Objects.equals(name, that.name)) { - return false; - } - if (!Objects.equals(fields, that.fields)) { - return false; - } - if (!Objects.equals(extended, that.extended)) { - return false; - } - if (!Objects.equals(type, that.type)) { - return false; - } - if (!Objects.equals(format, that.format)) { - return false; - } - return equals2(that); + return extended == that.extended + && partitions == that.partitions + && replication == that.replication + && Objects.equals(name, that.name) + && Objects.equals(windowType, that.windowType) + && Objects.equals(readQueries, that.readQueries) + 
&& Objects.equals(writeQueries, that.writeQueries) + && Objects.equals(fields, that.fields) + && Objects.equals(type, that.type) + && Objects.equals(key, that.key) + && Objects.equals(timestamp, that.timestamp) + && Objects.equals(statistics, that.statistics) + && Objects.equals(errorStats, that.errorStats) + && Objects.equals(keyFormat, that.keyFormat) + && Objects.equals(valueFormat, that.valueFormat) + && Objects.equals(topic, that.topic) + && Objects.equals(statement, that.statement); } @Override public int hashCode() { return Objects.hash( name, + windowType, readQueries, writeQueries, fields, @@ -210,7 +205,8 @@ public int hashCode() { statistics, errorStats, extended, - format, + keyFormat, + valueFormat, topic, partitions, replication, diff --git a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java index 5027bbd9b229..6e793f28ad39 100644 --- a/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java +++ b/ksql-rest-model/src/test/java/io/confluent/ksql/rest/entity/SourceDescriptionTest.java @@ -15,8 +15,12 @@ package io.confluent.ksql.rest.entity; +import com.google.common.collect.ImmutableList; import com.google.common.testing.EqualsTester; +import io.confluent.ksql.model.WindowType; import java.util.Collections; +import java.util.List; +import java.util.Optional; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -28,26 +32,146 @@ public class SourceDescriptionTest { private static final String SOME_STRING = "some string"; private static final int SOME_INT = 3; private static final boolean SOME_BOOL = true; - - @Mock - private RunningQuery runningQuery; + @Mock + private RunningQuery query1; + @Mock + private RunningQuery query2; @Mock private FieldInfo fieldInfo; + @SuppressWarnings("UnstableApiUsage") @Test public void shouldImplementHashCodeAndEqualsProperty() { - new EqualsTester() - .addEqualityGroup( - new SourceDescription( - SOME_STRING, Collections.singletonList(runningQuery), Collections.singletonList(runningQuery), - Collections.singletonList(fieldInfo), SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, - SOME_STRING, SOME_BOOL, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, SOME_STRING), - new SourceDescription( - SOME_STRING, Collections.singletonList(runningQuery), Collections.singletonList(runningQuery), - Collections.singletonList(fieldInfo), SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, - SOME_STRING, SOME_BOOL, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, SOME_STRING) - ) - .testEquals(); + final List readQueries = Collections.singletonList(query1); + final List writeQueries = Collections.singletonList(query2); + final List fields = Collections.singletonList(fieldInfo); + + new EqualsTester() + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING), + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + "diff", Optional.of(WindowType.SESSION), readQueries, writeQueries, fields, + SOME_STRING, SOME_STRING, SOME_STRING, 
SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), ImmutableList.of(), writeQueries, fields, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, ImmutableList.of(), fields, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, ImmutableList.of(), + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, "diff", + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + "diff", SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, "diff", SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, "diff", SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, "diff", + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, "diff", SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + !SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, "diff", SOME_STRING, SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, "diff", SOME_INT, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + 
SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT + 1, SOME_INT, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT + 1, + SOME_STRING) + ) + .addEqualityGroup( + new SourceDescription( + SOME_STRING, Optional.empty(), readQueries, writeQueries, fields, SOME_STRING, + SOME_STRING, SOME_STRING, SOME_STRING, SOME_STRING, + SOME_BOOL, SOME_STRING, SOME_STRING, SOME_STRING, SOME_INT, SOME_INT, + "diff") + ) + .testEquals(); } } From 4ff9e6e8d0878974db08f7eb3956e6d7b76a06c9 Mon Sep 17 00:00:00 2001 From: Almog Gavra Date: Thu, 16 Jan 2020 08:38:34 -0800 Subject: [PATCH 120/123] chore: change RUNNING to WARNING when all connector tasks fail (#4323) --- .../execution/ListConnectorsExecutor.java | 6 ++- .../execution/ListConnectorsExecutorTest.java | 40 +++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListConnectorsExecutor.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListConnectorsExecutor.java index a1162e5d1395..420c703a85b4 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListConnectorsExecutor.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/execution/ListConnectorsExecutor.java @@ -129,7 +129,11 @@ private static String summarizeState(final ConnectorStateInfo connectorState) { .filter(State.RUNNING.name()::equals) .count(); - return String.format("RUNNING (%s/%s tasks RUNNING)", + final String status = connectorState.tasks().size() > 0 && numRunningTasks == 0 + ? 
"WARNING" + : "RUNNING"; + return String.format("%s (%s/%s tasks RUNNING)", + status, numRunningTasks, connectorState.tasks().size()); } diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListConnectorsExecutorTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListConnectorsExecutorTest.java index 4891f49906ad..2ebf49b0716c 100644 --- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListConnectorsExecutorTest.java +++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/execution/ListConnectorsExecutorTest.java @@ -71,6 +71,16 @@ public class ListConnectorsExecutorTest { ConnectorType.SOURCE ); + private static final ConnectorStateInfo STATUS_WARNING = new ConnectorStateInfo( + "connector", + new ConnectorState("RUNNING", "foo", "bar"), + ImmutableList.of( + new TaskState(0, "FAILED", "", ""), + new TaskState(1, "FAILED", "", "") + ), + ConnectorType.SOURCE + ); + @Mock private KsqlExecutionContext engine; @Mock @@ -117,6 +127,36 @@ public void shouldListValidConnector() { ))); } + @Test + public void shouldLabelConnectorsWithNoRunningTasksAsWarning() { + // Given: + when(connectClient.status("connector")) + .thenReturn(ConnectResponse.success(STATUS_WARNING, HttpStatus.SC_OK)); + when(connectClient.connectors()) + .thenReturn(ConnectResponse.success(ImmutableList.of("connector"), HttpStatus.SC_OK)); + final ConfiguredStatement statement = ConfiguredStatement.of( + PreparedStatement.of("", new ListConnectors(Optional.empty(), Scope.ALL)), + ImmutableMap.of(), + new KsqlConfig(ImmutableMap.of()) + ); + + // When: + final Optional entity = ListConnectorsExecutor + .execute(statement, ImmutableMap.of(), engine, serviceContext); + + // Then: + assertThat("expected response!", entity.isPresent()); + final ConnectorList connectorList = (ConnectorList) entity.get(); + + assertThat(connectorList, is(new ConnectorList( + "", + ImmutableList.of(), + ImmutableList.of( + new SimpleConnectorInfo("connector", ConnectorType.SOURCE, CONNECTOR_CLASS, "WARNING (0/2 tasks RUNNING)") + ) + ))); + } + @Test public void shouldFilterNonMatchingConnectors() { // Given: From 3946f7317705cdb01201b1317fd90d6d6e7875c8 Mon Sep 17 00:00:00 2001 From: Rohan Date: Thu, 16 Jan 2020 09:51:13 -0800 Subject: [PATCH 121/123] chore: clean up LogicalSchemaWithMetaAndKeyFields (#4188) --- .../ksql/planner/plan/DataSourceNode.java | 10 ++- .../execution/plan/AbstractStreamSource.java | 6 -- .../LogicalSchemaWithMetaAndKeyFields.java | 61 ------------------- ...LogicalSchemaWithMetaAndKeyFieldsTest.java | 48 --------------- 4 files changed, 4 insertions(+), 121 deletions(-) delete mode 100644 ksql-execution/src/main/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFields.java delete mode 100644 ksql-execution/src/test/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFieldsTest.java diff --git a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/DataSourceNode.java b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/DataSourceNode.java index 399787de615c..9347cf613421 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/DataSourceNode.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/planner/plan/DataSourceNode.java @@ -22,9 +22,7 @@ import io.confluent.ksql.execution.builder.KsqlQueryBuilder; import io.confluent.ksql.execution.context.QueryContext; import io.confluent.ksql.execution.context.QueryContext.Stacker; -import 
io.confluent.ksql.execution.plan.LogicalSchemaWithMetaAndKeyFields; import io.confluent.ksql.execution.plan.SelectExpression; -import io.confluent.ksql.execution.plan.StreamSource; import io.confluent.ksql.metastore.model.DataSource; import io.confluent.ksql.metastore.model.DataSource.DataSourceType; import io.confluent.ksql.metastore.model.KeyField; @@ -42,7 +40,7 @@ public class DataSourceNode extends PlanNode { private final DataSource dataSource; private final SourceName alias; - private final LogicalSchemaWithMetaAndKeyFields schema; + private final LogicalSchema schema; private final KeyField keyField; private final SchemaKStreamFactory schemaKStreamFactory; private final ImmutableList selectExpressions; @@ -72,18 +70,18 @@ public DataSourceNode( // DataSourceNode copies implicit and key fields into the value schema // It users a KS valueMapper to add the key fields // and a KS transformValues to add the implicit fields - this.schema = StreamSource.getSchemaWithMetaAndKeyFields(alias, dataSource.getSchema()); + this.schema = dataSource.getSchema().withAlias(alias).withMetaAndKeyColsInValue(); this.keyField = dataSource.getKeyField() .withAlias(alias) - .validateKeyExistsIn(schema.getSchema()); + .validateKeyExistsIn(schema); this.schemaKStreamFactory = requireNonNull(schemaKStreamFactory, "schemaKStreamFactory"); } @Override public LogicalSchema getSchema() { - return schema.getSchema(); + return schema; } @Override diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java index 72d4fb6ca1b6..fc481184e292 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/AbstractStreamSource.java @@ -34,12 +34,6 @@ public abstract class AbstractStreamSource implements ExecutionStep { final LogicalSchema sourceSchema; final SourceName alias; - public static LogicalSchemaWithMetaAndKeyFields getSchemaWithMetaAndKeyFields( - final SourceName alias, - final LogicalSchema schema) { - return LogicalSchemaWithMetaAndKeyFields.fromOriginal(alias, schema); - } - @VisibleForTesting public AbstractStreamSource( final ExecutionStepPropertiesV1 properties, diff --git a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFields.java b/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFields.java deleted file mode 100644 index e87f41131d49..000000000000 --- a/ksql-execution/src/main/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFields.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2019 Confluent Inc. - * - * Licensed under the Confluent Community License; you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -package io.confluent.ksql.execution.plan; - -import com.google.errorprone.annotations.Immutable; -import io.confluent.ksql.name.SourceName; -import io.confluent.ksql.schema.ksql.LogicalSchema; -import java.util.Objects; - -@Immutable -public final class LogicalSchemaWithMetaAndKeyFields { - private final LogicalSchema schema; - - private LogicalSchemaWithMetaAndKeyFields(final LogicalSchema schema) { - this.schema = schema; - } - - static LogicalSchemaWithMetaAndKeyFields fromOriginal( - final SourceName alias, - final LogicalSchema schema) { - return new LogicalSchemaWithMetaAndKeyFields( - schema.withAlias(alias).withMetaAndKeyColsInValue()); - } - - public LogicalSchema getSchema() { - return schema; - } - - public LogicalSchema getOriginalSchema() { - return schema.withoutMetaAndKeyColsInValue().withoutAlias(); - } - - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - final LogicalSchemaWithMetaAndKeyFields that = (LogicalSchemaWithMetaAndKeyFields) o; - return Objects.equals(schema, that.schema); - } - - @Override - public int hashCode() { - return Objects.hash(schema); - } -} diff --git a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFieldsTest.java b/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFieldsTest.java deleted file mode 100644 index b0868b2f047b..000000000000 --- a/ksql-execution/src/test/java/io/confluent/ksql/execution/plan/LogicalSchemaWithMetaAndKeyFieldsTest.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2018 Confluent Inc. - * - * Licensed under the Confluent Community License (the "License"); you may not use - * this file except in compliance with the License. You may obtain a copy of the - * License at - * - * http://www.confluent.io/confluent-community-license - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */
-
-package io.confluent.ksql.execution.plan;
-
-import static org.hamcrest.Matchers.equalTo;
-import static org.junit.Assert.assertThat;
-
-import io.confluent.ksql.name.ColumnName;
-import io.confluent.ksql.name.SourceName;
-import io.confluent.ksql.schema.ksql.LogicalSchema;
-import io.confluent.ksql.schema.ksql.types.SqlTypes;
-import org.junit.Test;
-
-public class LogicalSchemaWithMetaAndKeyFieldsTest {
- private static final SourceName ALIAS = SourceName.of("alias");
- private static final LogicalSchema ORIGINAL = LogicalSchema.builder()
- .valueColumn(ColumnName.of("field1"), SqlTypes.STRING)
- .valueColumn(ColumnName.of("field2"), SqlTypes.BIGINT)
- .build();
-
- private final LogicalSchemaWithMetaAndKeyFields schema
- = LogicalSchemaWithMetaAndKeyFields.fromOriginal(ALIAS, ORIGINAL);
-
- @Test
- public void shouldTransformSchemaCorrectly() {
- assertThat(
- schema.getSchema(),
- equalTo(ORIGINAL.withAlias(ALIAS).withMetaAndKeyColsInValue()));
- }
-
- @Test
- public void shouldReturnoriginalSchema() {
- assertThat(schema.getOriginalSchema(), equalTo(ORIGINAL));
- }
-}
\ No newline at end of file
From 5ee79046ce8416bc51bb0d64c5d4c0103f49bf15 Mon Sep 17 00:00:00 2001
From: Vicky Papavasileiou
Date: Sun, 8 Dec 2019 16:29:05 -0800
Subject: [PATCH 122/123] initial implementation of heartbeat and cluster
 status

fixed tests
use application_server config to determine local host address
fixed compile issues
added extra tests
test
fixed failing test
added debug logging, made critical section smaller
addressed almogs comments
added return
---
 .../io/confluent/ksql/util/QueryMetadata.java | 12 +
 .../ksql/services/DisabledKsqlClient.java | 16 +
 .../ksql/services/SimpleKsqlClient.java | 21 +
 .../ksql/rest/server/HeartbeatAgent.java | 497 ++++++++++++++++++
 .../ksql/rest/server/KsqlRestApplication.java | 66 ++-
 .../ksql/rest/server/KsqlRestConfig.java | 76 +++
 .../resources/ClusterStatusResource.java | 52 ++
 .../server/resources/HeartbeatResource.java | 57 ++
 .../server/services/DefaultKsqlClient.java | 28 +
 .../services/ServerInternalKsqlClient.java | 16 +
 .../HeartbeatAgentFunctionalTest.java | 262 +++++++++
 .../ksql/rest/server/HeartbeatAgentTest.java | 266 ++++++++++
 .../rest/server/KsqlRestApplicationTest.java | 25 +-
 .../ksql/rest/server/TestKsqlRestApp.java | 2 +-
 .../resources/ClusterStatusResourceTest.java | 53 ++
 .../resources/HeartbeatResourceTest.java | 55 ++
 .../ksql/rest/client/KsqlRestClient.java | 15 +
 .../ksql/rest/client/KsqlTarget.java | 45 ++
 .../rest/entity/ClusterStatusResponse.java | 62 +++
 .../ksql/rest/entity/HeartbeatMessage.java | 72 +++
 .../ksql/rest/entity/HeartbeatResponse.java | 57 ++
 .../ksql/rest/entity/HostInfoEntity.java | 70 +++
 .../ksql/rest/entity/HostStatusEntity.java | 89 ++++
 23 files changed, 1896 insertions(+), 18 deletions(-)
 create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/HeartbeatAgent.java
 create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/ClusterStatusResource.java
 create mode 100644 ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/HeartbeatResource.java
 create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/HeartbeatAgentFunctionalTest.java
 create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/HeartbeatAgentTest.java
 create mode 100644 ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/ClusterStatusResourceTest.java
 create mode 100644 
ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/HeartbeatResourceTest.java create mode 100644 ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/ClusterStatusResponse.java create mode 100644 ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HeartbeatMessage.java create mode 100644 ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HeartbeatResponse.java create mode 100644 ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HostInfoEntity.java create mode 100644 ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HostStatusEntity.java diff --git a/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java b/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java index 3d24c3c53c40..5e3e0f8c8774 100644 --- a/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java +++ b/ksql-engine/src/main/java/io/confluent/ksql/util/QueryMetadata.java @@ -15,12 +15,14 @@ package io.confluent.ksql.util; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import io.confluent.ksql.internal.QueryStateListener; import io.confluent.ksql.name.SourceName; import io.confluent.ksql.schema.ksql.LogicalSchema; import java.lang.Thread.UncaughtExceptionHandler; import java.time.Duration; +import java.util.Collection; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -28,6 +30,7 @@ import java.util.function.Consumer; import org.apache.kafka.streams.KafkaStreams; import org.apache.kafka.streams.Topology; +import org.apache.kafka.streams.state.StreamsMetadata; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -129,6 +132,15 @@ public Topology getTopology() { return topology; } + public Collection getAllMetadata() { + try { + return kafkaStreams.allMetadata(); + } catch (IllegalStateException e) { + LOG.error(e.getMessage()); + } + return ImmutableList.of(); + } + public Map getStreamsProperties() { return streamsProperties; } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/services/DisabledKsqlClient.java b/ksql-execution/src/main/java/io/confluent/ksql/services/DisabledKsqlClient.java index f6a41cfb5807..634c3cff98ea 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/services/DisabledKsqlClient.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/services/DisabledKsqlClient.java @@ -16,10 +16,12 @@ package io.confluent.ksql.services; import io.confluent.ksql.rest.client.RestResponse; +import io.confluent.ksql.rest.entity.ClusterStatusResponse; import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.StreamedRow; import java.net.URI; import java.util.List; +import org.apache.kafka.streams.state.HostInfo; /** * A KSQL client implementation for use when communication with other nodes is not supported. 
@@ -45,5 +47,19 @@ public RestResponse> makeQueryRequest( ) { throw new UnsupportedOperationException("KSQL client is disabled"); } + + @Override + public void makeAsyncHeartbeatRequest( + final URI serverEndPoint, + final HostInfo host, + final long timestamp + ) { + throw new UnsupportedOperationException("KSQL client is disabled"); + } + + @Override + public RestResponse makeClusterStatusRequest(final URI serverEndPoint) { + throw new UnsupportedOperationException("KSQL client is disabled"); + } } diff --git a/ksql-execution/src/main/java/io/confluent/ksql/services/SimpleKsqlClient.java b/ksql-execution/src/main/java/io/confluent/ksql/services/SimpleKsqlClient.java index e6948275ff7c..3df0bbcbf000 100644 --- a/ksql-execution/src/main/java/io/confluent/ksql/services/SimpleKsqlClient.java +++ b/ksql-execution/src/main/java/io/confluent/ksql/services/SimpleKsqlClient.java @@ -16,11 +16,13 @@ package io.confluent.ksql.services; import io.confluent.ksql.rest.client.RestResponse; +import io.confluent.ksql.rest.entity.ClusterStatusResponse; import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.StreamedRow; import java.net.URI; import java.util.List; import javax.annotation.concurrent.ThreadSafe; +import org.apache.kafka.streams.state.HostInfo; @ThreadSafe public interface SimpleKsqlClient { @@ -34,4 +36,23 @@ RestResponse> makeQueryRequest( URI serverEndPoint, String sql ); + + /** + * Send heartbeat to remote Ksql server. + * @param serverEndPoint the remote destination. + * @param host the host information of the sender. + * @param timestamp the timestamp the heartbeat is sent. + */ + void makeAsyncHeartbeatRequest( + URI serverEndPoint, + HostInfo host, + long timestamp + ); + + /** + * Send a request to remote Ksql server to inquire about its view of the status of the cluster. + * @param serverEndPoint the remote destination. + * @return response containing the cluster status. + */ + RestResponse makeClusterStatusRequest(URI serverEndPoint); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/HeartbeatAgent.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/HeartbeatAgent.java new file mode 100644 index 000000000000..9430fe9dba77 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/HeartbeatAgent.java @@ -0,0 +1,497 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"; you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package io.confluent.ksql.rest.server; + +import static java.util.Objects.requireNonNull; +import static org.apache.kafka.common.utils.Utils.getHost; +import static org.apache.kafka.common.utils.Utils.getPort; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.AbstractScheduledService; +import com.google.common.util.concurrent.ServiceManager; +import io.confluent.ksql.engine.KsqlEngine; +import io.confluent.ksql.rest.entity.HostInfoEntity; +import io.confluent.ksql.rest.entity.HostStatusEntity; +import io.confluent.ksql.services.ServiceContext; +import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.PersistentQueryMetadata; +import io.confluent.ksql.util.QueryMetadata; +import java.net.URI; +import java.net.URL; +import java.time.Clock; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; +import org.apache.kafka.streams.processor.internals.StreamsMetadataState; +import org.apache.kafka.streams.state.HostInfo; +import org.apache.kafka.streams.state.StreamsMetadata; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The heartbeat mechanism consists of three periodic tasks running at configurable time intervals: + * 1. Cluster membership: Discover the Ksql hosts that are part of the cluster. + * 2. Send heartbeats: Broadcast heartbeats to remote Ksql hosts. + * 3. Process received heartbeats: Determine which remote host is alive or dead. + * + *
+ * <p>The services are started in the following order by defining their startup delay:
+ * First, the cluster membership service starts, then the sending of the heartbeats and last the
+ * processing of the received heartbeats. This provides some buffer for the cluster to be
+ * discovered before the processing of heartbeats starts. However, it does not guarantee that a
+ * remote server will not be classified as dead immediately after being discovered (although we
+ * optimistically consider all newly discovered servers as alive) if there is lag in the
+ * sending/receiving of heartbeats. That's why the service that sends heartbeats sends to both
+ * alive and dead servers: this avoids situations where a remote server is classified as down
+ * prematurely.</p>
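+ *
+ * <p>As a worked example, with the default values this patch later defines in KsqlRestConfig
+ * (send interval 100ms, window 2000ms, missed threshold 3), a remote server that falls silent
+ * accrues one missed heartbeat per 100ms slot, so it is marked as dead after roughly 300ms
+ * without heartbeats, and flips back to alive as soon as its heartbeats resume within the
+ * window.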
+ * + */ +// CHECKSTYLE_RULES.OFF: ClassDataAbstractionCoupling +public final class HeartbeatAgent { + // CHECKSTYLE_RULES.ON: ClassDataAbstractionCoupling + + private static final int SERVICE_TIMEOUT_SEC = 2; + private static final int CHECK_HEARTBEAT_DELAY_MS = 1000; + private static final int SEND_HEARTBEAT_DELAY_MS = 100; + private static final Logger LOG = LoggerFactory.getLogger(HeartbeatAgent.class); + + private final KsqlEngine engine; + private final ServiceContext serviceContext; + private final HeartbeatConfig config; + private final ConcurrentHashMap> receivedHeartbeats; + private final ConcurrentHashMap hostsStatus; + private final ScheduledExecutorService scheduledExecutorService; + private final ServiceManager serviceManager; + private final Clock clock; + private HostInfo localHostInfo; + private String localHostString; + private URL localURL; + + public static HeartbeatAgent.Builder builder() { + return new HeartbeatAgent.Builder(); + } + + private HeartbeatAgent(final KsqlEngine engine, + final ServiceContext serviceContext, + final HeartbeatConfig config) { + + this.engine = requireNonNull(engine, "engine"); + this.serviceContext = requireNonNull(serviceContext, "serviceContext"); + this.config = requireNonNull(config, "configuration parameters"); + this.scheduledExecutorService = Executors.newScheduledThreadPool(config.threadPoolSize); + this.serviceManager = new ServiceManager(Arrays.asList( + new DiscoverClusterService(), new SendHeartbeatService(), new CheckHeartbeatService())); + this.receivedHeartbeats = new ConcurrentHashMap<>(); + this.hostsStatus = new ConcurrentHashMap<>(); + this.clock = Clock.systemUTC(); + } + + /** + * Stores the heartbeats received from a remote Ksql server. + * @param hostInfo The host information of the remote Ksql server. + * @param timestamp The timestamp the heartbeat was sent. + */ + public void receiveHeartbeat(final HostInfo hostInfo, final long timestamp) { + final String hostKey = hostInfo.toString(); + final TreeMap heartbeats = receivedHeartbeats.computeIfAbsent( + hostKey, key -> new TreeMap<>()); + synchronized (heartbeats) { + LOG.debug("Receive heartbeat at: {} from host: {} ", timestamp, hostKey); + heartbeats.put(timestamp, new HeartbeatInfo(timestamp)); + } + } + + /** + * Returns the current view of the cluster containing all hosts discovered (whether alive or dead) + * @return status of discovered hosts + */ + public Map getHostsStatus() { + return Collections.unmodifiableMap(hostsStatus); + } + + @VisibleForTesting + void setHostsStatus(final Map status) { + hostsStatus.putAll(status); + } + + void startAgent() { + try { + serviceManager.startAsync().awaitHealthy(SERVICE_TIMEOUT_SEC, TimeUnit.SECONDS); + } catch (TimeoutException | IllegalStateException e) { + LOG.error("Failed to start heartbeat services with exception " + e.getMessage(), e); + } + } + + void stopAgent() { + try { + serviceManager.stopAsync().awaitStopped(SERVICE_TIMEOUT_SEC, TimeUnit.SECONDS); + } catch (TimeoutException | IllegalStateException e) { + LOG.error("Failed to stop heartbeat services with exception " + e.getMessage(), e); + } finally { + scheduledExecutorService.shutdownNow(); + } + } + + void setLocalAddress(final String applicationServer) { + + this.localHostInfo = parseHostInfo(applicationServer); + this.localHostString = localHostInfo.toString(); + try { + this.localURL = new URL(applicationServer); + } catch (final Exception e) { + throw new IllegalStateException("Failed to convert remote host info to URL." 
+ + " remoteInfo: " + localHostInfo.host() + ":" + + localHostInfo.host()); + } + this.hostsStatus.putIfAbsent(localHostString, new HostStatusEntity( + new HostInfoEntity(localHostInfo.host(), localHostInfo.port()), + true, + clock.millis())); + } + + private static HostInfo parseHostInfo(final String endPoint) { + if (endPoint == null || endPoint.trim().isEmpty()) { + return StreamsMetadataState.UNKNOWN_HOST; + } + final String host = getHost(endPoint); + final Integer port = getPort(endPoint); + + if (host == null || port == null) { + throw new KsqlException(String.format( + "Error parsing host address %s. Expected format host:port.", endPoint)); + } + + return new HostInfo(host, port); + } + + + /** + * Check the heartbeats received from remote hosts and apply policy to determine whether a host + * is alive or not. + */ + class CheckHeartbeatService extends AbstractScheduledService { + + @Override + protected void runOneIteration() { + final long now = clock.millis(); + final long windowStart = now - config.heartbeatWindowMs; + runWithWindow(windowStart, now); + } + + @VisibleForTesting + void runWithWindow(final long windowStart, final long windowEnd) { + try { + processHeartbeats(windowStart, windowEnd); + } catch (Throwable t) { + LOG.error("Failed to process heartbeats for window start = " + windowStart + " end = " + + windowEnd + " with exception " + t.getMessage(), t); + } + } + + @Override + protected Scheduler scheduler() { + return Scheduler.newFixedRateSchedule(CHECK_HEARTBEAT_DELAY_MS, + config.heartbeatCheckIntervalMs, + TimeUnit.MILLISECONDS); + } + + @Override + protected ScheduledExecutorService executor() { + return scheduledExecutorService; + } + + /** + * If no heartbeats have been received, all previously discovered hosts are marked as dead. + * If a previously discovered host has not received any heartbeat in this window, it is + * marked as dead. + * For all other hosts that received heartbeats, they are processed to determine whether the + * host is alive or dead based on how many consecutive heartbeats it missed. + * @param windowStart the start time in ms of the current window + * @param windowEnd the end time in ms of the current window + */ + private void processHeartbeats(final long windowStart, final long windowEnd) { + // No heartbeats received -> mark all hosts as dead + if (receivedHeartbeats.isEmpty()) { + hostsStatus.forEach((host, status) -> { + if (!host.equals(localHostString)) { + status.setHostAlive(false); + } + }); + } + + for (String host: hostsStatus.keySet()) { + if (host.equals(localHostString)) { + continue; + } + final TreeMap heartbeats = receivedHeartbeats.get(host); + //For previously discovered hosts, if they have not received any heartbeats, mark them dead + if (heartbeats == null || heartbeats.isEmpty()) { + hostsStatus.get(host).setHostAlive(false); + } else { + final TreeMap copy; + synchronized (heartbeats) { + LOG.debug("Process heartbeats: {} of host: {}", heartbeats, host); + // 1. remove heartbeats older than window + heartbeats.headMap(windowStart).clear(); + copy = new TreeMap<>(heartbeats.subMap(windowStart, true, windowEnd, true)); + } + // 2. 
+ final boolean isAlive = decideStatus(host, windowStart, windowEnd, copy);
+ final HostStatusEntity status = hostsStatus.get(host);
+ status.setHostAlive(isAlive);
+ status.setLastStatusUpdateMs(windowEnd);
+ }
+ }
+ }
+
+ private boolean decideStatus(final String host, final long windowStart, final long windowEnd,
+ final TreeMap<Long, HeartbeatInfo> heartbeats) {
+ long missedCount = 0;
+ long prev = windowStart;
+ // No heartbeat received in this window
+ if (heartbeats.isEmpty()) {
+ return false;
+ }
+ // We want to count consecutive missed heartbeats and reset the count when we have received
+ // heartbeats. It's not enough to just count how many heartbeats we missed in the window, as a
+ // host may have missed > THRESHOLD heartbeats overall but not consecutively, which does not
+ // make it dead.
+ for (long ts : heartbeats.keySet()) {
+ // Don't count heartbeats after the window end
+ if (ts >= windowEnd) {
+ break;
+ }
+ if (ts - config.heartbeatSendIntervalMs > prev) {
+ missedCount = (ts - prev - 1) / config.heartbeatSendIntervalMs;
+ } else {
+ // Reset the missed count when we receive a heartbeat
+ missedCount = 0;
+ }
+ prev = ts;
+ }
+ // Check frame from last received heartbeat to window end
+ if (windowEnd - prev - 1 > 0) {
+ missedCount = (windowEnd - prev - 1) / config.heartbeatSendIntervalMs;
+ }
+
+ LOG.debug("Host: {} has {} missing heartbeats", host, missedCount);
+ return (missedCount < config.heartbeatMissedThreshold);
+ }
+ }
+
+ /**
+ * Broadcast heartbeats to remote hosts whether they are alive or not.
+ * We are sending to hosts that might be dead because, at startup, a host may be marked as dead
+ * only because the sending of heartbeats has not started yet.
+ *
+ * <p>This is an asynchronous RPC and we do not handle the response returned from the remote
+ * server.</p>
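+ *
+ * <p>Note that each send is wrapped in its own try/catch below, so a single unreachable
+ * server is only logged and does not stop heartbeats from reaching the remaining hosts.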
+ */ + class SendHeartbeatService extends AbstractScheduledService { + + @Override + protected void runOneIteration() { + for (Entry hostStatusEntry: hostsStatus.entrySet()) { + final String host = hostStatusEntry.getKey(); + final HostStatusEntity status = hostStatusEntry.getValue(); + try { + if (!host.equals(localHostString)) { + final URI remoteUri = buildLocation(localURL, status.getHostInfoEntity().getHost(), + status.getHostInfoEntity().getPort()); + LOG.debug("Send heartbeat to host {} at {}", status.getHostInfoEntity().getHost(), + clock.millis()); + serviceContext.getKsqlClient().makeAsyncHeartbeatRequest(remoteUri, localHostInfo, + clock.millis()); + } + } catch (Throwable t) { + LOG.error("Request to server: " + status.getHostInfoEntity().getHost() + ":" + + status.getHostInfoEntity().getPort() + + " failed with exception: " + t.getMessage(), t); + } + } + } + + @Override + protected Scheduler scheduler() { + return Scheduler.newFixedRateSchedule(SEND_HEARTBEAT_DELAY_MS, + config.heartbeatSendIntervalMs, + TimeUnit.MILLISECONDS); + } + + @Override + protected ScheduledExecutorService executor() { + return scheduledExecutorService; + } + + private URI buildLocation(final URL localHost, final String host, final int port) { + try { + return new URL(localHost.getProtocol(), host, port, "/").toURI(); + } catch (final Exception e) { + throw new IllegalStateException("Failed to convert remote host info to URL." + + " remoteInfo: " + host + ":" + port); + } + } + } + + /** + * Discovers remote hosts in the cluster through the metadata of currently running + * persistent queries. + */ + class DiscoverClusterService extends AbstractScheduledService { + + @Override + protected void runOneIteration() { + try { + final List currentQueries = engine.getPersistentQueries(); + if (currentQueries.isEmpty()) { + return; + } + + final Set uniqueHosts = currentQueries.stream() + .map(queryMetadata -> ((QueryMetadata) queryMetadata).getAllMetadata()) + .filter(Objects::nonNull) + .flatMap(Collection::stream) + .map(StreamsMetadata::hostInfo) + .filter(hostInfo -> !(hostInfo.host().equals(localHostInfo.host()) + && hostInfo.port() == (localHostInfo.port()))) + .collect(Collectors.toSet()); + + for (HostInfo hostInfo : uniqueHosts) { + // Only add to map if it is the first time it is discovered. Design decision to + // optimistically consider every newly discovered server as alive to avoid situations of + // unavailability until the heartbeating kicks in. 
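+ // Note: computeIfAbsent is atomic on ConcurrentHashMap, so if several discovery passes
+ // race, a host that is already being tracked keeps its existing status.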
+ hostsStatus.computeIfAbsent(hostInfo.toString(), key -> new HostStatusEntity( + new HostInfoEntity(hostInfo.host(), hostInfo.port()), + true, + clock.millis())); + } + } catch (Throwable t) { + LOG.error("Failed to discover cluster with exception " + t.getMessage(), t); + } + } + + @Override + protected Scheduler scheduler() { + return Scheduler.newFixedRateSchedule(0, config.discoverClusterIntervalMs, + TimeUnit.MILLISECONDS); + } + + @Override + protected ScheduledExecutorService executor() { + return scheduledExecutorService; + } + } + + public static class Builder { + + private int nestedThreadPoolSize; + private long nestedHeartbeatSendIntervalMs; + private long nestedHeartbeatCheckIntervalMs; + private long nestedDiscoverClusterIntervalMs; + private long nestedHeartbeatWindowMs; + private long nestedHeartbeatMissedThreshold; + + HeartbeatAgent.Builder threadPoolSize(final int size) { + nestedThreadPoolSize = size; + return this; + } + + HeartbeatAgent.Builder heartbeatSendInterval(final long interval) { + nestedHeartbeatSendIntervalMs = interval; + return this; + } + + HeartbeatAgent.Builder heartbeatCheckInterval(final long interval) { + nestedHeartbeatCheckIntervalMs = interval; + return this; + } + + HeartbeatAgent.Builder heartbeatWindow(final long window) { + nestedHeartbeatWindowMs = window; + return this; + } + + HeartbeatAgent.Builder heartbeatMissedThreshold(final long missed) { + nestedHeartbeatMissedThreshold = missed; + return this; + } + + HeartbeatAgent.Builder discoverClusterInterval(final long interval) { + nestedDiscoverClusterIntervalMs = interval; + return this; + } + + public HeartbeatAgent build(final KsqlEngine engine, + final ServiceContext serviceContext) { + + return new HeartbeatAgent(engine, + serviceContext, + new HeartbeatConfig(nestedThreadPoolSize, + nestedHeartbeatSendIntervalMs, + nestedHeartbeatCheckIntervalMs, + nestedHeartbeatWindowMs, + nestedHeartbeatMissedThreshold, + nestedDiscoverClusterIntervalMs)); + } + } + + static class HeartbeatConfig { + private final int threadPoolSize; + private final long heartbeatSendIntervalMs; + private final long heartbeatCheckIntervalMs; + private final long heartbeatWindowMs; + private final long heartbeatMissedThreshold; + private final long discoverClusterIntervalMs; + + HeartbeatConfig(final int threadPoolSize, final long heartbeatSendIntervalMs, + final long heartbeatCheckIntervalMs, final long heartbeatWindowMs, + final long heartbeatMissedThreshold, final long discoverClusterIntervalMs) { + this.threadPoolSize = threadPoolSize; + this.heartbeatSendIntervalMs = heartbeatSendIntervalMs; + this.heartbeatCheckIntervalMs = heartbeatCheckIntervalMs; + this.heartbeatWindowMs = heartbeatWindowMs; + this.heartbeatMissedThreshold = heartbeatMissedThreshold; + this.discoverClusterIntervalMs = discoverClusterIntervalMs; + } + } + + public static class HeartbeatInfo { + private final long timestamp; + + public HeartbeatInfo(final long timestamp) { + this.timestamp = timestamp; + } + + public long getTimestamp() { + return timestamp; + } + + @Override + public String toString() { + return String.valueOf(timestamp); + } + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java index 4a96777fed9b..f0c6d9873854 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestApplication.java 
@@ -46,12 +46,15 @@ import io.confluent.ksql.rest.client.RestResponse; import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.KsqlErrorMessage; +import io.confluent.ksql.rest.server.HeartbeatAgent.Builder; import io.confluent.ksql.rest.server.computation.CommandRunner; import io.confluent.ksql.rest.server.computation.CommandStore; import io.confluent.ksql.rest.server.computation.InteractiveStatementExecutor; import io.confluent.ksql.rest.server.context.KsqlSecurityContextBinder; import io.confluent.ksql.rest.server.filters.KsqlAuthorizationFilter; +import io.confluent.ksql.rest.server.resources.ClusterStatusResource; import io.confluent.ksql.rest.server.resources.HealthCheckResource; +import io.confluent.ksql.rest.server.resources.HeartbeatResource; import io.confluent.ksql.rest.server.resources.KsqlConfigurable; import io.confluent.ksql.rest.server.resources.KsqlExceptionMapper; import io.confluent.ksql.rest.server.resources.KsqlResource; @@ -155,6 +158,7 @@ public final class KsqlRestApplication extends ExecutableApplication preconditions; private final List configurables; private final Consumer rocksDBConfigSetterHandler; + private final Optional heartbeatAgent; public static SourceName getCommandsStreamName() { return COMMANDS_STREAM_NAME; @@ -181,7 +185,8 @@ public static SourceName getCommandsStreamName() { final ProcessingLogContext processingLogContext, final List preconditions, final List configurables, - final Consumer rocksDBConfigSetterHandler + final Consumer rocksDBConfigSetterHandler, + final Optional heartbeatAgent ) { super(restConfig); @@ -205,6 +210,7 @@ public static SourceName getCommandsStreamName() { this.configurables = requireNonNull(configurables, "configurables"); this.rocksDBConfigSetterHandler = requireNonNull(rocksDBConfigSetterHandler, "rocksDBConfigSetterHandler"); + this.heartbeatAgent = requireNonNull(heartbeatAgent, "heartbeatAgent"); } @Override @@ -216,6 +222,10 @@ public void setupResources(final Configurable config, final KsqlRestConfig ap config.register(ksqlResource); config.register(streamedQueryResource); config.register(HealthCheckResource.create(ksqlResource, serviceContext, this.config)); + if (heartbeatAgent.isPresent()) { + config.register(new HeartbeatResource(heartbeatAgent.get())); + config.register(new ClusterStatusResource(heartbeatAgent.get())); + } config.register(new KsqlExceptionMapper()); config.register(new ServerStateDynamicBinding(serverState)); } @@ -225,7 +235,7 @@ public void startAsync() { log.info("KSQL RESTful API listening on {}", StringUtils.join(getListeners(), ", ")); final KsqlConfig ksqlConfigWithPort = buildConfigWithPort(); configurables.forEach(c -> c.configure(ksqlConfigWithPort)); - startKsql(); + startKsql(ksqlConfigWithPort); final Properties metricsProperties = new Properties(); metricsProperties.putAll(getConfiguration().getOriginals()); if (versionCheckerAgent != null) { @@ -235,9 +245,9 @@ public void startAsync() { } @VisibleForTesting - void startKsql() { + void startKsql(final KsqlConfig ksqlConfigWithPort) { waitForPreconditions(); - initialize(); + initialize(ksqlConfigWithPort); } @VisibleForTesting @@ -278,7 +288,7 @@ private void waitForPreconditions() { ); } - private void initialize() { + private void initialize(final KsqlConfig configWithApplicationServer) { rocksDBConfigSetterHandler.accept(ksqlConfigNoPort); registerCommandTopic(); @@ -300,6 +310,12 @@ private void initialize() { serviceContext ); + if (heartbeatAgent.isPresent()) { + 
heartbeatAgent.get().setLocalAddress((String)configWithApplicationServer
+ .getKsqlStreamConfigProps().get(StreamsConfig.APPLICATION_SERVER_CONFIG));
+ heartbeatAgent.get().startAgent();
+ }
+
serverState.setReady();
}
@@ -328,6 +344,14 @@ public void triggerShutdown() {
} catch (final Exception e) {
log.error("Exception while closing security extension", e);
}
+
+ if (heartbeatAgent.isPresent()) {
+ try {
+ heartbeatAgent.get().stopAgent();
+ } catch (final Exception e) {
+ log.error("Exception while shutting down HeartbeatAgent", e);
+ }
+ }
}
@Override
@@ -602,6 +626,9 @@ static KsqlRestApplication buildApplication(
final Consumer<KsqlConfig> rocksDBConfigSetterHandler =
RocksDBConfigSetterHandler::maybeConfigureRocksDBConfigSetter;
+ final Optional<HeartbeatAgent> heartbeatAgent =
+ initializeHeartbeatAgent(restConfig, ksqlEngine, serviceContext);
+
return new KsqlRestApplication(
serviceContext,
ksqlEngine,
@@ -620,10 +647,37 @@ static KsqlRestApplication buildApplication(
processingLogContext,
preconditions,
configurables,
- rocksDBConfigSetterHandler
+ rocksDBConfigSetterHandler,
+ heartbeatAgent
);
}
+ private static Optional<HeartbeatAgent> initializeHeartbeatAgent(
+ final KsqlRestConfig restConfig,
+ final KsqlEngine ksqlEngine,
+ final ServiceContext serviceContext
+ ) {
+ if (restConfig.getBoolean(KsqlRestConfig.KSQL_HEARTBEAT_ENABLE_CONFIG)) {
+ final Builder builder = HeartbeatAgent.builder();
+ return Optional.of(
+ builder
+ .heartbeatSendInterval(restConfig.getLong(
+ KsqlRestConfig.KSQL_HEARTBEAT_SEND_INTERVAL_MS_CONFIG))
+ .heartbeatCheckInterval(restConfig.getLong(
+ KsqlRestConfig.KSQL_HEARTBEAT_CHECK_INTERVAL_MS_CONFIG))
+ .heartbeatMissedThreshold(restConfig.getLong(
+ KsqlRestConfig.KSQL_HEARTBEAT_MISSED_THRESHOLD_CONFIG))
+ .heartbeatWindow(restConfig.getLong(
+ KsqlRestConfig.KSQL_HEARTBEAT_WINDOW_MS_CONFIG))
+ .discoverClusterInterval(restConfig.getLong(
+ KsqlRestConfig.KSQL_HEARTBEAT_DISCOVER_CLUSTER_MS_CONFIG))
+ .threadPoolSize(restConfig.getInt(
+ KsqlRestConfig.KSQL_HEARTBEAT_THREAD_POOL_SIZE_CONFIG))
+ .build(ksqlEngine, serviceContext));
+ }
+ return Optional.empty();
+ }
+
private void registerCommandTopic() {
final String commandTopic = commandStore.getCommandTopicName();
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java
index ad021684623f..66af6e8e4c78 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/KsqlRestConfig.java
@@ -116,6 +116,40 @@ public class KsqlRestConfig extends RestConfig {
private static final String KSQL_COMMAND_RUNNER_BLOCKED_THRESHHOLD_ERROR_MS_DOC =
"How long to wait for the command runner to process a command from the command topic "
+ "before reporting an error metric.";
+ public static final String KSQL_HEARTBEAT_ENABLE_CONFIG =
+ KSQL_CONFIG_PREFIX + "heartbeat.enable";
+ private static final String KSQL_HEARTBEAT_ENABLE_DOC =
+ "Whether the heartbeat mechanism is enabled or not. 
It is disabled by default."; + + public static final String KSQL_HEARTBEAT_SEND_INTERVAL_MS_CONFIG = + KSQL_CONFIG_PREFIX + "heartbeat.send.interval.ms"; + private static final String KSQL_HEARTBEAT_SEND_INTERVAL_MS_DOC = + "Interval at which heartbeats are broadcasted to servers."; + + public static final String KSQL_HEARTBEAT_CHECK_INTERVAL_MS_CONFIG = + KSQL_CONFIG_PREFIX + "heartbeat.check.interval.ms"; + private static final String KSQL_HEARTBEAT_CHECK_INTERVAL_MS_DOC = + "Interval at which server processes received heartbeats."; + + public static final String KSQL_HEARTBEAT_WINDOW_MS_CONFIG = + KSQL_CONFIG_PREFIX + "heartbeat.window.ms"; + private static final String KSQL_HEARTBEAT_WINDOW_MS_DOC = + "Size of time window across which to count missed heartbeats."; + + public static final String KSQL_HEARTBEAT_MISSED_THRESHOLD_CONFIG = + KSQL_CONFIG_PREFIX + "heartbeat.missed.threshold.ms"; + private static final String KSQL_HEARTBEAT_MISSED_THRESHOLD_DOC = + "Minimum number of consecutive missed heartbeats that flag a server as down."; + + public static final String KSQL_HEARTBEAT_DISCOVER_CLUSTER_MS_CONFIG = + KSQL_CONFIG_PREFIX + "heartbeat.discover.interval.ms"; + private static final String KSQL_HEARTBEAT_DISCOVER_CLUSTER_MS_DOC = + "Interval at which server attempts to discover what other ksql servers exist in the cluster."; + + public static final String KSQL_HEARTBEAT_THREAD_POOL_SIZE_CONFIG = + KSQL_CONFIG_PREFIX + "heartbeat.thread.pool.size"; + private static final String KSQL_HEARTBEAT_THREAD_POOL_SIZE_CONFIG_DOC = + "Size of thread pool used for sending / processing heartbeats and cluster discovery."; private static final ConfigDef CONFIG_DEF; @@ -182,6 +216,48 @@ public class KsqlRestConfig extends RestConfig { DefaultErrorMessages.class, Importance.LOW, KSQL_SERVER_ERRORS_DOC + ).define( + KSQL_HEARTBEAT_ENABLE_CONFIG, + Type.BOOLEAN, + false, + Importance.MEDIUM, + KSQL_HEARTBEAT_ENABLE_DOC + ).define( + KSQL_HEARTBEAT_SEND_INTERVAL_MS_CONFIG, + Type.LONG, + 100L, + Importance.MEDIUM, + KSQL_HEARTBEAT_SEND_INTERVAL_MS_DOC + ).define( + KSQL_HEARTBEAT_CHECK_INTERVAL_MS_CONFIG, + Type.LONG, + 200L, + Importance.MEDIUM, + KSQL_HEARTBEAT_CHECK_INTERVAL_MS_DOC + ).define( + KSQL_HEARTBEAT_WINDOW_MS_CONFIG, + Type.LONG, + 2000L, + Importance.MEDIUM, + KSQL_HEARTBEAT_WINDOW_MS_DOC + ).define( + KSQL_HEARTBEAT_MISSED_THRESHOLD_CONFIG, + Type.LONG, + 3L, + Importance.MEDIUM, + KSQL_HEARTBEAT_MISSED_THRESHOLD_DOC + ).define( + KSQL_HEARTBEAT_DISCOVER_CLUSTER_MS_CONFIG, + Type.LONG, + 2000L, + Importance.MEDIUM, + KSQL_HEARTBEAT_DISCOVER_CLUSTER_MS_DOC + ).define( + KSQL_HEARTBEAT_THREAD_POOL_SIZE_CONFIG, + Type.INT, + 3, + Importance.MEDIUM, + KSQL_HEARTBEAT_THREAD_POOL_SIZE_CONFIG_DOC ); } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/ClusterStatusResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/ClusterStatusResource.java new file mode 100644 index 000000000000..53819c6ac13e --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/ClusterStatusResource.java @@ -0,0 +1,52 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"; you may not use + * this file except in compliance with the License. 
You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.rest.server.resources; + +import io.confluent.ksql.rest.entity.ClusterStatusResponse; +import io.confluent.ksql.rest.entity.Versions; +import io.confluent.ksql.rest.server.HeartbeatAgent; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +/** + * Endpoint that reports the view of the cluster that this server has. + * Returns every host that has been discovered by this server along side with information about its + * status such as whether it is alive or dead and the last time its status got updated. + */ + +@Path("/clusterStatus") +@Produces({Versions.KSQL_V1_JSON, MediaType.APPLICATION_JSON}) +public class ClusterStatusResource { + + private final HeartbeatAgent heartbeatAgent; + + public ClusterStatusResource(final HeartbeatAgent heartbeatAgent) { + this.heartbeatAgent = heartbeatAgent; + } + + @GET + public Response checkClusterStatus() { + final ClusterStatusResponse response = getResponse(); + return Response.ok(response).build(); + } + + private ClusterStatusResponse getResponse() { + return new ClusterStatusResponse(heartbeatAgent.getHostsStatus()); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/HeartbeatResource.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/HeartbeatResource.java new file mode 100644 index 000000000000..6286be3d0382 --- /dev/null +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/resources/HeartbeatResource.java @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Confluent Inc. + * + * Licensed under the Confluent Community License (the "License"; you may not use + * this file except in compliance with the License. You may obtain a copy of the + * License at + * + * http://www.confluent.io/confluent-community-license + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package io.confluent.ksql.rest.server.resources; + +import io.confluent.ksql.rest.entity.HeartbeatMessage; +import io.confluent.ksql.rest.entity.HeartbeatResponse; +import io.confluent.ksql.rest.entity.HostInfoEntity; +import io.confluent.ksql.rest.entity.Versions; +import io.confluent.ksql.rest.server.HeartbeatAgent; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import org.apache.kafka.streams.state.HostInfo; + +/** + * Endpoint for registering heartbeats received from remote servers. The heartbeats are used + * to determine the status of the remote servers, i.e. whether they are alive or dead. 
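+ *
+ * <p>A rough sketch of the expected payload, assuming the JSON field names simply mirror the
+ * HeartbeatMessage/HostInfoEntity property names (this patch does not pin them down here):
+ * POST /heartbeat {"hostInfo": {"host": "localhost", "port": 8088}, "timestamp": 1575849600000}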
+ */ + +@Path("/heartbeat") +@Produces({Versions.KSQL_V1_JSON, MediaType.APPLICATION_JSON}) +public class HeartbeatResource { + + private final HeartbeatAgent heartbeatAgent; + + public HeartbeatResource(final HeartbeatAgent heartbeatAgent) { + this.heartbeatAgent = heartbeatAgent; + } + + @POST + public Response registerHeartbeat(final HeartbeatMessage request) { + handleHeartbeat(request); + return Response.ok(new HeartbeatResponse(true)).build(); + } + + private void handleHeartbeat(final HeartbeatMessage request) { + final HostInfoEntity hostInfoEntity = request.getHostInfo(); + final HostInfo hostInfo = new HostInfo(hostInfoEntity.getHost(), hostInfoEntity.getPort()); + final long timestamp = request.getTimestamp(); + heartbeatAgent.receiveHeartbeat(hostInfo, timestamp); + } +} diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/DefaultKsqlClient.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/DefaultKsqlClient.java index 10f3be0dc2ca..75a1745d0eaa 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/DefaultKsqlClient.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/DefaultKsqlClient.java @@ -26,12 +26,15 @@ import io.confluent.ksql.rest.client.KsqlTarget; import io.confluent.ksql.rest.client.QueryStream; import io.confluent.ksql.rest.client.RestResponse; +import io.confluent.ksql.rest.entity.ClusterStatusResponse; +import io.confluent.ksql.rest.entity.HostInfoEntity; import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.StreamedRow; import io.confluent.ksql.services.SimpleKsqlClient; import java.net.URI; import java.util.List; import java.util.Optional; +import org.apache.kafka.streams.state.HostInfo; final class DefaultKsqlClient implements SimpleKsqlClient { @@ -98,4 +101,29 @@ public RestResponse> makeQueryRequest( return RestResponse.successful(resp.getStatusCode(), rows.build()); } + + @Override + public void makeAsyncHeartbeatRequest( + final URI serverEndPoint, + final HostInfo host, + final long timestamp) { + final KsqlTarget target = sharedClient + .target(serverEndPoint); + + authHeader + .map(target::authorizationHeader) + .orElse(target) + .postAsyncHeartbeatRequest(new HostInfoEntity(host.host(), host.port()), timestamp); + } + + @Override + public RestResponse makeClusterStatusRequest(final URI serverEndPoint) { + final KsqlTarget target = sharedClient + .target(serverEndPoint); + + return authHeader + .map(target::authorizationHeader) + .orElse(target) + .getClusterStatus(); + } } diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java index d83a2191c0c6..efd76d739f21 100644 --- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java +++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java @@ -19,6 +19,7 @@ import io.confluent.ksql.rest.client.KsqlClientUtil; import io.confluent.ksql.rest.client.RestResponse; +import io.confluent.ksql.rest.entity.ClusterStatusResponse; import io.confluent.ksql.rest.entity.KsqlEntityList; import io.confluent.ksql.rest.entity.KsqlRequest; import io.confluent.ksql.rest.entity.StreamedRow; @@ -29,6 +30,7 @@ import java.util.Collections; import java.util.List; import javax.ws.rs.core.Response; +import org.apache.kafka.streams.state.HostInfo; /** * A KSQL 
diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java
index d83a2191c0c6..efd76d739f21 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/services/ServerInternalKsqlClient.java
@@ -19,6 +19,7 @@
 
 import io.confluent.ksql.rest.client.KsqlClientUtil;
 import io.confluent.ksql.rest.client.RestResponse;
+import io.confluent.ksql.rest.entity.ClusterStatusResponse;
 import io.confluent.ksql.rest.entity.KsqlEntityList;
 import io.confluent.ksql.rest.entity.KsqlRequest;
 import io.confluent.ksql.rest.entity.StreamedRow;
@@ -29,6 +30,7 @@
 import java.util.Collections;
 import java.util.List;
 import javax.ws.rs.core.Response;
+import org.apache.kafka.streams.state.HostInfo;
 
 /**
  * A KSQL client implementation that sends requests to KsqlResource directly, rather than going
@@ -71,4 +73,18 @@ public RestResponse<List<StreamedRow>> makeQueryRequest(
   ) {
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public void makeAsyncHeartbeatRequest(
+      final URI serverEndPoint,
+      final HostInfo host,
+      final long timestamp
+  ) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public RestResponse<ClusterStatusResponse> makeClusterStatusRequest(final URI serverEndPoint) {
+    throw new UnsupportedOperationException();
+  }
 }
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/HeartbeatAgentFunctionalTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/HeartbeatAgentFunctionalTest.java
new file mode 100644
index 000000000000..90b6ac5c5e75
--- /dev/null
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/integration/HeartbeatAgentFunctionalTest.java
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.integration;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+
+import io.confluent.common.utils.IntegrationTest;
+import io.confluent.ksql.integration.IntegrationTestHarness;
+import io.confluent.ksql.integration.Retry;
+import io.confluent.ksql.rest.client.KsqlRestClient;
+import io.confluent.ksql.rest.client.RestResponse;
+import io.confluent.ksql.rest.entity.ClusterStatusResponse;
+import io.confluent.ksql.rest.entity.HostInfoEntity;
+import io.confluent.ksql.rest.entity.HostStatusEntity;
+import io.confluent.ksql.rest.server.KsqlRestConfig;
+import io.confluent.ksql.rest.server.TestKsqlRestApp;
+import io.confluent.ksql.serde.Format;
+import io.confluent.ksql.test.util.secure.ClientTrustStore;
+import io.confluent.ksql.util.PageViewDataProvider;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import kafka.zookeeper.ZooKeeperClientException;
+import org.apache.kafka.streams.state.HostInfo;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.RuleChain;
+
+@Category({IntegrationTest.class})
+public class HeartbeatAgentFunctionalTest {
+
+  private static final PageViewDataProvider PAGE_VIEWS_PROVIDER = new PageViewDataProvider();
+  private static final String PAGE_VIEW_TOPIC = PAGE_VIEWS_PROVIDER.topicName();
+  private static final String PAGE_VIEW_STREAM = PAGE_VIEWS_PROVIDER.kstreamName();
+
+  private static final HostInfo host0 = new HostInfo("localhost", 8088);
+  private static final HostInfo host1 = new HostInfo("localhost", 8089);
+  private static final IntegrationTestHarness TEST_HARNESS = IntegrationTestHarness.build();
+  private static final TestKsqlRestApp REST_APP_0 = TestKsqlRestApp
+      .builder(TEST_HARNESS::kafkaBootstrapServers)
+      .withProperty(KsqlRestConfig.LISTENERS_CONFIG, "http://localhost:8088")
+      .withProperty(KsqlRestConfig.KSQL_HEARTBEAT_ENABLE_CONFIG, true)
+      .withProperty(KsqlRestConfig.KSQL_HEARTBEAT_SEND_INTERVAL_MS_CONFIG, 600000)
+      .withProperty(KsqlRestConfig.KSQL_HEARTBEAT_CHECK_INTERVAL_MS_CONFIG, 200)
+      .withProperty(KsqlRestConfig.KSQL_HEARTBEAT_DISCOVER_CLUSTER_MS_CONFIG, 2000)
+      .withProperties(ClientTrustStore.trustStoreProps())
+      .build();
+  private static final TestKsqlRestApp REST_APP_1 = TestKsqlRestApp
+      .builder(TEST_HARNESS::kafkaBootstrapServers)
+      .withProperty(KsqlRestConfig.LISTENERS_CONFIG, "http://localhost:8089")
+      .withProperty(KsqlRestConfig.KSQL_HEARTBEAT_ENABLE_CONFIG, true)
+      .withProperty(KsqlRestConfig.KSQL_HEARTBEAT_SEND_INTERVAL_MS_CONFIG, 600000)
+      .withProperty(KsqlRestConfig.KSQL_HEARTBEAT_CHECK_INTERVAL_MS_CONFIG, 200)
+      .withProperty(KsqlRestConfig.KSQL_HEARTBEAT_DISCOVER_CLUSTER_MS_CONFIG, 2000)
+      .withProperties(ClientTrustStore.trustStoreProps())
+      .build();
+
+  @ClassRule
+  public static final RuleChain CHAIN = RuleChain
+      .outerRule(Retry.of(3, ZooKeeperClientException.class, 3, TimeUnit.SECONDS))
+      .around(TEST_HARNESS)
+      .around(REST_APP_0)
+      .around(REST_APP_1);
+
+  @BeforeClass
+  public static void setUpClass() {
+    TEST_HARNESS.ensureTopics(2, PAGE_VIEW_TOPIC);
+    TEST_HARNESS.produceRows(PAGE_VIEW_TOPIC, PAGE_VIEWS_PROVIDER, Format.JSON);
+    RestIntegrationTestUtil.createStream(REST_APP_0, PAGE_VIEWS_PROVIDER);
+    RestIntegrationTestUtil.makeKsqlRequest(
+        REST_APP_0,
+        "CREATE STREAM S AS SELECT * FROM " + PAGE_VIEW_STREAM + ";"
+    );
+  }
+
+  @Before
+  public void setup() {
+    REST_APP_0.start();
+    REST_APP_1.start();
+  }
+
+  @After
+  public void tearDown() {
+    REST_APP_0.stop();
+    REST_APP_1.stop();
+  }
+
+  @Test(timeout = 60000)
+  public void shouldMarkServersAsUp() {
+    // Given:
+    waitForClusterToBeDiscovered();
+    waitForRemoteServerToChangeStatus(this::remoteServerIsDown);
+
+    // When:
+    sendHeartbeatsEveryIntervalForWindowLength(100, 3000);
+    final ClusterStatusResponse clusterStatusResponseUp = waitForRemoteServerToChangeStatus(
+        this::remoteServerIsUp);
+
+    // Then:
+    assertThat(clusterStatusResponseUp.getClusterStatus().get(host0.toString()).getHostAlive(), is(true));
+    assertThat(clusterStatusResponseUp.getClusterStatus().get(host1.toString()).getHostAlive(), is(true));
+  }
+
+  @Test(timeout = 60000)
+  public void shouldMarkRemoteServerAsDown() {
+    // Given:
+    waitForClusterToBeDiscovered();
+
+    // When:
+    final ClusterStatusResponse clusterStatusResponse = waitForRemoteServerToChangeStatus(
+        this::remoteServerIsDown);
+
+    // Then:
+    assertThat(clusterStatusResponse.getClusterStatus().get(host0.toString()).getHostAlive(), is(true));
+    assertThat(clusterStatusResponse.getClusterStatus().get(host1.toString()).getHostAlive(), is(false));
+  }
+
+  @Test(timeout = 60000)
+  public void shouldMarkRemoteServerAsUpThenDownThenUp() {
+    // Given:
+    waitForClusterToBeDiscovered();
+    sendHeartbeatsEveryIntervalForWindowLength(100, 2000);
+
+    // When:
+    final ClusterStatusResponse clusterStatusResponseUp1 = waitForRemoteServerToChangeStatus(
+        this::remoteServerIsUp);
+
+    // Then:
+    assertThat(clusterStatusResponseUp1.getClusterStatus().get(host0.toString()).getHostAlive(), is(true));
+    assertThat(clusterStatusResponseUp1.getClusterStatus().get(host1.toString()).getHostAlive(), is(true));
+
+    // When:
+    final ClusterStatusResponse clusterStatusResponseDown = waitForRemoteServerToChangeStatus(
+        this::remoteServerIsDown);
+
+    // Then:
+    assertThat(clusterStatusResponseDown.getClusterStatus().get(host0.toString()).getHostAlive(), is(true));
+    assertThat(clusterStatusResponseDown.getClusterStatus().get(host1.toString()).getHostAlive(), is(false));
+
+    // When:
+    sendHeartbeatsEveryIntervalForWindowLength(100, 2000);
+    final ClusterStatusResponse clusterStatusResponseUp2 = waitForRemoteServerToChangeStatus(
+        this::remoteServerIsUp);
+
+    // Then:
+    assertThat(clusterStatusResponseUp2.getClusterStatus().get(host0.toString()).getHostAlive(), is(true));
+    assertThat(clusterStatusResponseUp2.getClusterStatus().get(host1.toString()).getHostAlive(), is(true));
+  }
+
+  private void waitForClusterToBeDiscovered() {
+    while (true) {
+      final ClusterStatusResponse clusterStatusResponse = sendClusterStatusRequest(REST_APP_0);
+      if (allServersDiscovered(clusterStatusResponse.getClusterStatus())) {
+        break;
+      }
+      try {
+        Thread.sleep(200);
+      } catch (final Exception e) {
+        // Meh
+      }
+    }
+  }
+
+  private boolean allServersDiscovered(final Map<String, HostStatusEntity> clusterStatus) {
+    return clusterStatus.size() >= 2;
+  }
+
+  private void sendHeartbeatsEveryIntervalForWindowLength(final long interval, final long window) {
+    final long start = System.currentTimeMillis();
+    while (System.currentTimeMillis() - start < window) {
+      sendHeartbeatRequest(REST_APP_0, host1, System.currentTimeMillis());
+      try {
+        Thread.sleep(interval);
+      } catch (final Exception e) {
+        // Meh
+      }
+    }
+  }
+
+  private ClusterStatusResponse waitForRemoteServerToChangeStatus(
+      final Function<Map<String, HostStatusEntity>, Boolean> function) {
+    while (true) {
+      final ClusterStatusResponse clusterStatusResponse = sendClusterStatusRequest(REST_APP_0);
+      if (function.apply(clusterStatusResponse.getClusterStatus())) {
+        return clusterStatusResponse;
+      }
+      try {
+        Thread.sleep(200);
+      } catch (final Exception e) {
+        // Meh
+      }
+    }
+  }
+
+  private boolean remoteServerIsDown(final Map<String, HostStatusEntity> clusterStatus) {
+    if (!clusterStatus.containsKey(host1.toString())) {
+      return true;
+    }
+    for (final Entry<String, HostStatusEntity> entry : clusterStatus.entrySet()) {
+      if (entry.getKey().contains("8089") && !entry.getValue().getHostAlive()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean remoteServerIsUp(final Map<String, HostStatusEntity> clusterStatus) {
+    for (final Entry<String, HostStatusEntity> entry : clusterStatus.entrySet()) {
+      if (entry.getKey().contains("8089") && entry.getValue().getHostAlive()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private static void sendHeartbeatRequest(
+      final TestKsqlRestApp restApp,
+      final HostInfo host,
+      final long timestamp) {
+
+    try (final KsqlRestClient restClient = restApp.buildKsqlClient()) {
+      restClient.makeAsyncHeartbeatRequest(new HostInfoEntity(host.host(), host.port()), timestamp);
+    }
+  }
+
+  private static ClusterStatusResponse sendClusterStatusRequest(final TestKsqlRestApp restApp) {
+
+    try (final KsqlRestClient restClient = restApp.buildKsqlClient()) {
+
+      final RestResponse<ClusterStatusResponse> res = restClient.makeClusterStatusRequest();
+
+      if (res.isErroneous()) {
+        throw new AssertionError("Erroneous result: " + res.getErrorMessage());
+      }
+
+      return res.getResponse();
+    }
+  }
+}
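A note on the timing constants above: the 600000 ms send interval effectively silences the agent's own heartbeating for the duration of a test, so every heartbeat REST_APP_0 sees comes from the test itself via sendHeartbeatRequest, while the 200 ms check interval keeps liveness decisions fast. The same trick works for manual experiments; a sketch, assuming restClient targets http://localhost:8088 and with interrupt handling elided:

    // Sketch: manually driving heartbeats for 3 seconds, as the tests do.
    final long end = System.currentTimeMillis() + 3000;
    while (System.currentTimeMillis() < end) {
      restClient.makeAsyncHeartbeatRequest(
          new HostInfoEntity("localhost", 8089),   // claimed sender identity (example)
          System.currentTimeMillis());
      Thread.sleep(100);                           // heartbeat cadence (example)
    }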
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/HeartbeatAgentTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/HeartbeatAgentTest.java
new file mode 100644
index 000000000000..6ca019020180
--- /dev/null
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/HeartbeatAgentTest.java
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.server;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableList;
+import io.confluent.ksql.engine.KsqlEngine;
+import io.confluent.ksql.rest.entity.HostInfoEntity;
+import io.confluent.ksql.rest.entity.HostStatusEntity;
+import io.confluent.ksql.rest.server.HeartbeatAgent.Builder;
+import io.confluent.ksql.rest.server.HeartbeatAgent.CheckHeartbeatService;
+import io.confluent.ksql.rest.server.HeartbeatAgent.DiscoverClusterService;
+import io.confluent.ksql.services.ServiceContext;
+import io.confluent.ksql.util.PersistentQueryMetadata;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.kafka.streams.state.HostInfo;
+import org.apache.kafka.streams.state.StreamsMetadata;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class HeartbeatAgentTest {
+  @Mock
+  private PersistentQueryMetadata query0;
+  @Mock
+  private PersistentQueryMetadata query1;
+  @Mock
+  private StreamsMetadata streamsMetadata0;
+  @Mock
+  private StreamsMetadata streamsMetadata1;
+  @Mock
+  private ServiceContext serviceContext;
+  @Mock
+  private KsqlEngine ksqlEngine;
+
+  private HeartbeatAgent heartbeatAgent;
+  private HostInfo localHostInfo;
+  private HostInfo remoteHostInfo;
+  private List<StreamsMetadata> allMetadata0;
+  private List<StreamsMetadata> allMetadata1;
+  private static final String LOCALHOST_URL = "http://localhost:8088";
+
+  @Before
+  public void setUp() {
+    localHostInfo = new HostInfo("localhost", 8088);
+    remoteHostInfo = new HostInfo("localhost", 8089);
+
+    final Builder builder = HeartbeatAgent.builder();
+    heartbeatAgent = builder
+        .heartbeatSendInterval(1)
+        .heartbeatMissedThreshold(2)
+        .build(ksqlEngine, serviceContext);
+    heartbeatAgent.setLocalAddress(LOCALHOST_URL);
+    final Map<String, HostStatusEntity> hostsStatus = new ConcurrentHashMap<>();
+    hostsStatus.put(localHostInfo.toString(), new HostStatusEntity(
+        new HostInfoEntity(localHostInfo.host(), localHostInfo.port()), true, 0L));
+    hostsStatus.put(remoteHostInfo.toString(), new HostStatusEntity(
+        new HostInfoEntity(remoteHostInfo.host(), remoteHostInfo.port()), true, 0L));
+    heartbeatAgent.setHostsStatus(hostsStatus);
+    allMetadata0 = ImmutableList.of(streamsMetadata0);
+    allMetadata1 = ImmutableList.of(streamsMetadata1);
+  }
+
+  @Test
+  public void shouldDiscoverServersInCluster() {
+    // Given:
+    when(ksqlEngine.getPersistentQueries()).thenReturn(ImmutableList.of(query0, query1));
+
+    when(query0.getAllMetadata()).thenReturn(allMetadata0);
+    when(streamsMetadata0.hostInfo()).thenReturn(localHostInfo);
+
+    when(query1.getAllMetadata()).thenReturn(allMetadata1);
+    when(streamsMetadata1.hostInfo()).thenReturn(remoteHostInfo);
+
+    final DiscoverClusterService discoverService = heartbeatAgent.new DiscoverClusterService();
+
+    // When:
+    discoverService.runOneIteration();
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().keySet().contains(remoteHostInfo.toString()), is(true));
+  }
+
+  @Test
+  public void shouldMarkServerAsUpNoMissingHeartbeat() {
+    // Given:
+    final long windowStart = 0;
+    final long windowEnd = 5;
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 0L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 1L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 2L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 3L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 4L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 5L);
+    final CheckHeartbeatService processService = heartbeatAgent.new CheckHeartbeatService();
+
+    // When:
+    processService.runWithWindow(windowStart, windowEnd);
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().entrySet(), hasSize(2));
+    assertThat(heartbeatAgent.getHostsStatus().get(remoteHostInfo.toString()).getHostAlive(), is(true));
+  }
+
+  @Test
+  public void shouldMarkServerAsUpMissOneHeartbeat() {
+    // Given:
+    final long windowStart = 1;
+    final long windowEnd = 10;
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 0L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 2L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 4L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 6L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 8L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 10L);
+    final CheckHeartbeatService processService = heartbeatAgent.new CheckHeartbeatService();
+
+    // When:
+    processService.runWithWindow(windowStart, windowEnd);
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().entrySet(), hasSize(2));
+    assertThat(heartbeatAgent.getHostsStatus().get(remoteHostInfo.toString()).getHostAlive(), is(true));
+  }
+
+  @Test
+  public void shouldMarkServerAsUpMissAtBeginning() {
+    // Given:
+    final long windowStart = 0;
+    final long windowEnd = 10;
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 8L);
+    final CheckHeartbeatService processService = heartbeatAgent.new CheckHeartbeatService();
+
+    // When:
+    processService.runWithWindow(windowStart, windowEnd);
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().entrySet(), hasSize(2));
+    assertThat(heartbeatAgent.getHostsStatus().get(remoteHostInfo.toString()).getHostAlive(), is(true));
+  }
+
+  @Test
+  public void shouldMarkServerAsUpMissInterleaved() {
+    // Given:
+    final long windowStart = 0;
+    final long windowEnd = 10;
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 0L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 2L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 5L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 8L);
+    final CheckHeartbeatService processService = heartbeatAgent.new CheckHeartbeatService();
+
+    // When:
+    processService.runWithWindow(windowStart, windowEnd);
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().entrySet(), hasSize(2));
+    assertThat(heartbeatAgent.getHostsStatus().get(remoteHostInfo.toString()).getHostAlive(), is(true));
+  }
+
+  @Test
+  public void shouldMarkServerAsUpOutOfOrderHeartbeats() {
+    // Given:
+    final long windowStart = 0;
+    final long windowEnd = 10;
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 8L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 0L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 5L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 2L);
+    final CheckHeartbeatService processService = heartbeatAgent.new CheckHeartbeatService();
+
+    // When:
+    processService.runWithWindow(windowStart, windowEnd);
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().entrySet(), hasSize(2));
+    assertThat(heartbeatAgent.getHostsStatus().get(remoteHostInfo.toString()).getHostAlive(), is(true));
+  }
+
+  @Test
+  public void shouldMarkServerAsDownMissAtEnd() {
+    // Given:
+    final long windowStart = 0;
+    final long windowEnd = 10;
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 0L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 2L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 4L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 6L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 7L);
+    final CheckHeartbeatService processService = heartbeatAgent.new CheckHeartbeatService();
+
+    // When:
+    processService.runWithWindow(windowStart, windowEnd);
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().entrySet(), hasSize(2));
+    assertThat(heartbeatAgent.getHostsStatus().get(remoteHostInfo.toString()).getHostAlive(), is(false));
+  }
+
+  @Test
+  public void shouldMarkServerAsDownIgnoreHeartbeatsOutOfWindow() {
+    // Given:
+    final long windowStart = 5;
+    final long windowEnd = 8;
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 0L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 1L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 2L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 3L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 4L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 9L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 10L);
+    final CheckHeartbeatService processService = heartbeatAgent.new CheckHeartbeatService();
+
+    // When:
+    processService.runWithWindow(windowStart, windowEnd);
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().entrySet(), hasSize(2));
+    assertThat(heartbeatAgent.getHostsStatus().get(remoteHostInfo.toString()).getHostAlive(), is(false));
+  }
+
+  @Test
+  public void shouldMarkServerAsDownOutOfOrderHeartbeats() {
+    // Given:
+    final long windowStart = 5;
+    final long windowEnd = 8;
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 10L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 9L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 0L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 4L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 2L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 3L);
+    heartbeatAgent.receiveHeartbeat(remoteHostInfo, 1L);
+    final CheckHeartbeatService processService = heartbeatAgent.new CheckHeartbeatService();
+
+    // When:
+    processService.runWithWindow(windowStart, windowEnd);
+
+    // Then:
+    assertThat(heartbeatAgent.getHostsStatus().entrySet(), hasSize(2));
+    assertThat(heartbeatAgent.getHostsStatus().get(remoteHostInfo.toString()).getHostAlive(), is(false));
+  }
+
+}
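Taken together, the window tests above pin down the liveness rule: with a send interval of 1 and a missed threshold of 2, a server stays alive exactly when its most recent in-window heartbeat is at most two send intervals older than the window end. That is why a long gap at the start of the window is forgiven (shouldMarkServerAsUpMissAtBeginning) while a gap at the end is not (shouldMarkServerAsDownMissAtEnd). A compact paraphrase of that rule, inferred from the test expectations rather than copied from HeartbeatAgent:

    // Inferred model of CheckHeartbeatService.runWithWindow, for intuition only.
    // `beats` is a java.util.NavigableSet of received heartbeat timestamps.
    static boolean alive(final NavigableSet<Long> beats, final long windowStart,
        final long windowEnd, final long sendInterval, final long missedThreshold) {
      final Long last = beats.floor(windowEnd);   // newest beat at or before window end
      if (last == null || last < windowStart) {
        return false;                             // no heartbeat inside the window
      }
      return windowEnd - last <= missedThreshold * sendInterval;
    }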
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java
index 889711990e59..1e8d1a4ffc2c 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/KsqlRestApplicationTest.java
@@ -126,6 +126,8 @@ public class KsqlRestApplicationTest {
   private PreparedStatement preparedStatement;
   @Mock
   private Consumer<KsqlConfig> rocksDBConfigSetterHandler;
+  @Mock
+  private HeartbeatAgent heartbeatAgent;
   @Mock
   private SchemaRegistryClient schemaRegistryClient;
 
@@ -219,7 +221,7 @@ public void shouldRegisterAuthorizationFilterWithAuthorizationProvider() {
   @Test
   public void shouldCreateLogStreamThroughKsqlResource() {
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     verify(ksqlResource).handleKsqlStatements(
@@ -237,7 +239,7 @@ public void shouldNotCreateLogStreamIfAutoCreateNotConfigured() {
         .thenReturn(false);
 
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     verify(ksqlResource, never()).handleKsqlStatements(
@@ -249,7 +251,7 @@ public void shouldNotCreateLogStreamIfAutoCreateNotConfigured() {
   @Test
   public void shouldStartCommandStoreAndCommandRunnerBeforeCreatingLogStream() {
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     final InOrder inOrder = Mockito.inOrder(commandQueue, commandRunner, ksqlResource);
@@ -267,7 +269,7 @@ public void shouldStartCommandStoreAndCommandRunnerBeforeCreatingLogStream() {
   @Test
   public void shouldCreateLogTopicBeforeSendingCreateStreamRequest() {
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     final InOrder inOrder = Mockito.inOrder(topicClient, ksqlResource);
@@ -283,7 +285,7 @@ public void shouldCreateLogTopicBeforeSendingCreateStreamRequest() {
   @Test
   public void shouldInitializeCommandStoreCorrectly() {
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     final InOrder inOrder = Mockito.inOrder(topicClient, commandQueue, commandRunner);
@@ -295,7 +297,7 @@ public void shouldInitializeCommandStoreCorrectly() {
   @Test
   public void shouldReplayCommandsBeforeSettingReady() {
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     final InOrder inOrder = Mockito.inOrder(commandRunner, serverState);
@@ -306,7 +308,7 @@ public void shouldReplayCommandsBeforeSettingReady() {
   @Test
   public void shouldSendCreateStreamRequestBeforeSettingReady() {
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     final InOrder inOrder = Mockito.inOrder(ksqlResource, serverState);
@@ -328,7 +330,7 @@ public void shouldCheckPreconditionsBeforeUsingServiceContext() {
     });
 
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     final InOrder inOrder = Mockito.inOrder(precondition1, precondition2, serviceContext);
@@ -350,7 +352,7 @@ public void shouldNotInitializeUntilPreconditionsChecked() {
     });
 
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     final InOrder inOrder = Mockito.inOrder(precondition1, precondition2, serverState);
@@ -367,7 +369,7 @@ public void shouldNotInitializeUntilPreconditionsChecked() {
   @Test
   public void shouldConfigureRocksDBConfigSetter() {
     // When:
-    app.startKsql();
+    app.startKsql(ksqlConfig);
 
     // Then:
     verify(rocksDBConfigSetterHandler).accept(ksqlConfig);
@@ -431,7 +433,8 @@ private void givenAppWithRestConfig(final Map<String, Object> restConfigMap) {
         processingLogContext,
         ImmutableList.of(precondition1, precondition2),
         ImmutableList.of(ksqlResource, streamedQueryResource),
-        rocksDBConfigSetterHandler
+        rocksDBConfigSetterHandler,
+        Optional.of(heartbeatAgent)
     );
   }
 }
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java
index 7c2dd5cbafbd..6bbc31a530c0 100644
--- a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/TestKsqlRestApp.java
@@ -413,7 +413,7 @@ private static KsqlRestConfig buildConfig(
     configMap.put(KsqlConfig.KSQL_STREAMS_PREFIX + "cache.max.bytes.buffering", 0);
     configMap.put(KsqlConfig.KSQL_STREAMS_PREFIX + "auto.offset.reset", "earliest");
     configMap.put(KsqlConfig.KSQL_ENABLE_UDFS, false);
-
+    configMap.put(KsqlRestConfig.KSQL_HEARTBEAT_ENABLE_CONFIG, false);
     configMap.putAll(additionalProps);
     return configMap;
   }
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/ClusterStatusResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/ClusterStatusResourceTest.java
new file mode 100644
index 000000000000..7c4c655d51b2
--- /dev/null
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/ClusterStatusResourceTest.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.server.resources;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+import io.confluent.ksql.rest.entity.ClusterStatusResponse;
+import io.confluent.ksql.rest.server.HeartbeatAgent;
+import javax.ws.rs.core.Response;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class ClusterStatusResourceTest {
+
+  @Mock
+  private HeartbeatAgent heartbeatAgent;
+
+  private ClusterStatusResource clusterStatusResource;
+
+  @Before
+  public void setUp() {
+    clusterStatusResource = new ClusterStatusResource(heartbeatAgent);
+  }
+
+  @Test
+  public void shouldReturnClusterStatus() {
+    // When:
+    final Response response = clusterStatusResource.checkClusterStatus();
+
+    // Then:
+    assertThat(response.getStatus(), is(200));
+    assertThat(response.getEntity(), instanceOf(ClusterStatusResponse.class));
+  }
+}
diff --git a/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/HeartbeatResourceTest.java b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/HeartbeatResourceTest.java
new file mode 100644
index 000000000000..e56945484370
--- /dev/null
+++ b/ksql-rest-app/src/test/java/io/confluent/ksql/rest/server/resources/HeartbeatResourceTest.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.server.resources;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+import io.confluent.ksql.rest.entity.HeartbeatMessage;
+import io.confluent.ksql.rest.entity.HeartbeatResponse;
+import io.confluent.ksql.rest.entity.HostInfoEntity;
+import io.confluent.ksql.rest.server.HeartbeatAgent;
+import javax.ws.rs.core.Response;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class HeartbeatResourceTest {
+
+  @Mock
+  private HeartbeatAgent heartbeatAgent;
+  private HeartbeatResource heartbeatResource;
+
+  @Before
+  public void setUp() {
+    heartbeatResource = new HeartbeatResource(heartbeatAgent);
+  }
+
+  @Test
+  public void shouldSendHeartbeat() {
+    // When:
+    final HeartbeatMessage request = new HeartbeatMessage(new HostInfoEntity("localhost", 8080), 1);
+    final Response response = heartbeatResource.registerHeartbeat(request);
+
+    // Then:
+    assertThat(response.getStatus(), is(200));
+    assertThat(response.getEntity(), instanceOf(HeartbeatResponse.class));
+  }
+}
diff --git a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlRestClient.java b/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlRestClient.java
index 5585746be264..1b9d2bba6d06 100644
--- a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlRestClient.java
+++ b/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlRestClient.java
@@ -20,9 +20,11 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 import io.confluent.ksql.properties.LocalProperties;
+import io.confluent.ksql.rest.entity.ClusterStatusResponse;
 import io.confluent.ksql.rest.entity.CommandStatus;
 import io.confluent.ksql.rest.entity.CommandStatuses;
 import io.confluent.ksql.rest.entity.HealthCheckResponse;
+import io.confluent.ksql.rest.entity.HostInfoEntity;
 import io.confluent.ksql.rest.entity.KsqlEntityList;
 import io.confluent.ksql.rest.entity.ServerInfo;
 import java.io.Closeable;
@@ -33,7 +35,9 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.concurrent.Future;
 import java.util.stream.Collectors;
+import javax.ws.rs.core.Response;
 
 public class KsqlRestClient implements Closeable {
 
@@ -86,6 +90,17 @@ public RestResponse<HealthCheckResponse> getServerHealth() {
     return target().getServerHealth();
   }
 
+  public Future<Response> makeAsyncHeartbeatRequest(
+      final HostInfoEntity host,
+      final long timestamp
+  ) {
+    return target().postAsyncHeartbeatRequest(host, timestamp);
+  }
+
+  public RestResponse<ClusterStatusResponse> makeClusterStatusRequest() {
+    return target().getClusterStatus();
+  }
+
   public RestResponse<KsqlEntityList> makeKsqlRequest(final String ksql) {
     return target().postKsqlRequest(ksql, Optional.empty());
   }
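Note the asymmetry in the two public client methods just added: the cluster-status call is synchronous and can fail, so callers should branch on the RestResponse, while the heartbeat call returns a Future that may simply be dropped. A brief sketch against a KsqlRestClient named client (the printing is illustrative only):

    // Sketch: handling the synchronous cluster-status call.
    final RestResponse<ClusterStatusResponse> res = client.makeClusterStatusRequest();
    if (res.isErroneous()) {
      System.err.println("clusterStatus failed: " + res.getErrorMessage());
    } else {
      System.out.println("known hosts: " + res.getResponse().getClusterStatus().keySet());
    }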
diff --git a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlTarget.java b/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlTarget.java
index d4ee0bc7e5f7..d3411a323f21 100644
--- a/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlTarget.java
+++ b/ksql-rest-client/src/main/java/io/confluent/ksql/rest/client/KsqlTarget.java
@@ -18,15 +18,19 @@
 import static java.util.Objects.requireNonNull;
 
 import io.confluent.ksql.properties.LocalProperties;
+import io.confluent.ksql.rest.entity.ClusterStatusResponse;
 import io.confluent.ksql.rest.entity.CommandStatus;
 import io.confluent.ksql.rest.entity.CommandStatuses;
 import io.confluent.ksql.rest.entity.HealthCheckResponse;
+import io.confluent.ksql.rest.entity.HeartbeatMessage;
+import io.confluent.ksql.rest.entity.HostInfoEntity;
 import io.confluent.ksql.rest.entity.KsqlEntityList;
 import io.confluent.ksql.rest.entity.KsqlRequest;
 import io.confluent.ksql.rest.entity.ServerInfo;
 import java.io.InputStream;
 import java.net.SocketTimeoutException;
 import java.util.Optional;
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import javax.ws.rs.ProcessingException;
@@ -47,6 +51,8 @@ public final class KsqlTarget {
 
   private static final String STATUS_PATH = "/status";
   private static final String KSQL_PATH = "/ksql";
   private static final String QUERY_PATH = "/query";
+  private static final String HEARTBEAT_PATH = "/heartbeat";
+  private static final String CLUSTERSTATUS_PATH = "/clusterStatus";
 
   private final WebTarget target;
   private final LocalProperties localProperties;
@@ -74,6 +80,21 @@ public RestResponse<HealthCheckResponse> getServerHealth() {
     return get("/healthcheck", HealthCheckResponse.class);
   }
 
+  public Future<Response> postAsyncHeartbeatRequest(
+      final HostInfoEntity host,
+      final long timestamp
+  ) {
+    return postAsync(
+        HEARTBEAT_PATH,
+        new HeartbeatMessage(host, timestamp),
+        Optional.empty()
+    );
+  }
+
+  public RestResponse<ClusterStatusResponse> getClusterStatus() {
+    return get(CLUSTERSTATUS_PATH, ClusterStatusResponse.class);
+  }
+
   public RestResponse<CommandStatuses> getStatuses() {
     return get(STATUS_PATH, CommandStatuses.class);
   }
@@ -177,6 +198,30 @@ private <T> RestResponse<T> post(
     }
   }
 
+  private Future<Response> postAsync(
+      final String path,
+      final Object jsonEntity,
+      final Optional<Integer> readTimeoutMs
+  ) {
+    try {
+      // Performs an asynchronous request
+      return target
+          .path(path)
+          .request(MediaType.APPLICATION_JSON_TYPE)
+          .property(ClientProperties.READ_TIMEOUT, readTimeoutMs.orElse(0))
+          .headers(headers())
+          .async()
+          .post(Entity.json(jsonEntity));
+    } catch (final ProcessingException e) {
+      if (shouldRetry(readTimeoutMs, e)) {
+        return postAsync(path, jsonEntity, calcReadTimeout(readTimeoutMs));
+      }
+      throw new KsqlRestClientException("Error issuing POST to KSQL server. path:" + path, e);
+    } catch (final Exception e) {
+      throw new KsqlRestClientException("Error issuing POST to KSQL server. path:" + path, e);
+    }
+  }
+
   private MultivaluedMap<String, Object> headers() {
     final MultivaluedMap<String, Object> headers = new MultivaluedHashMap<>();
     authHeader.ifPresent(v -> headers.add(HttpHeaders.AUTHORIZATION, v));
diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/ClusterStatusResponse.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/ClusterStatusResponse.java
new file mode 100644
index 000000000000..9cc7dc8ea20f
--- /dev/null
+++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/ClusterStatusResponse.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.entity;
+
+import static java.util.Objects.requireNonNull;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.ImmutableMap;
+import com.google.errorprone.annotations.Immutable;
+import java.util.Map;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@Immutable
+public final class ClusterStatusResponse {
+
+  private final ImmutableMap<String, HostStatusEntity> clusterStatus;
+
+  @JsonCreator
+  public ClusterStatusResponse(
+      @JsonProperty("clusterStatus") final Map<String, HostStatusEntity> clusterStatus) {
+    this.clusterStatus = ImmutableMap.copyOf(requireNonNull(clusterStatus, "clusterStatus"));
+  }
+
+  public Map<String, HostStatusEntity> getClusterStatus() {
+    return clusterStatus;
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    final ClusterStatusResponse that = (ClusterStatusResponse) o;
+    return Objects.equals(clusterStatus, that.clusterStatus);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(clusterStatus);
+  }
+}
diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HeartbeatMessage.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HeartbeatMessage.java
new file mode 100644
index 000000000000..85f4f7ca20e2
--- /dev/null
+++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HeartbeatMessage.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.entity;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonSubTypes;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSubTypes({})
+public class HeartbeatMessage {
+
+  private final HostInfoEntity hostInfo;
+  private final long timestamp;
+
+  @JsonCreator
+  public HeartbeatMessage(@JsonProperty("hostInfo") final HostInfoEntity hostInfo,
+                          @JsonProperty("timestamp") final long timestamp) {
+    this.hostInfo = hostInfo;
+    this.timestamp = timestamp;
+  }
+
+  public HostInfoEntity getHostInfo() {
+    return hostInfo;
+  }
+
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  @Override
+  public boolean equals(final Object other) {
+    if (this == other) {
+      return true;
+    }
+
+    if (!(other instanceof HeartbeatMessage)) {
+      return false;
+    }
+
+    final HeartbeatMessage that = (HeartbeatMessage) other;
+    return this.timestamp == that.timestamp && Objects.equals(hostInfo, that.hostInfo);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(hostInfo, timestamp);
+  }
+
+  @Override
+  public String toString() {
+    return "HeartbeatMessage{"
+        + "hostInfo='" + hostInfo + '\''
+        + ", timestamp='" + timestamp + '\''
+        + '}';
+  }
+}
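For reviewers wondering what actually crosses the wire: HeartbeatMessage serializes to a small JSON object keyed by the @JsonProperty names above. A quick way to see it, assuming a default Jackson ObjectMapper and with the checked JsonProcessingException elided (the server's configured mapper may differ in details):

    // Prints something like:
    // {"hostInfo":{"host":"localhost","port":8088},"timestamp":1575907286000}
    final ObjectMapper mapper = new ObjectMapper();   // com.fasterxml.jackson.databind
    System.out.println(mapper.writeValueAsString(
        new HeartbeatMessage(new HostInfoEntity("localhost", 8088), 1575907286000L)));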
diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HeartbeatResponse.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HeartbeatResponse.java
new file mode 100644
index 000000000000..3c69d41e3bf6
--- /dev/null
+++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HeartbeatResponse.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.entity;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class HeartbeatResponse {
+
+  private final boolean isOk;
+
+  @JsonCreator
+  public HeartbeatResponse(
+      @JsonProperty("ok") final boolean ok
+  ) {
+    this.isOk = ok;
+  }
+
+  public boolean getIsOk() {
+    return isOk;
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    final HeartbeatResponse that = (HeartbeatResponse) o;
+    return isOk == that.isOk;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(isOk);
+  }
+}
diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HostInfoEntity.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HostInfoEntity.java
new file mode 100644
index 000000000000..e0e95f7f99fb
--- /dev/null
+++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HostInfoEntity.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.entity;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class HostInfoEntity {
+
+  private final String host;
+  private final int port;
+
+  @JsonCreator
+  public HostInfoEntity(
+      @JsonProperty("host") final String host,
+      @JsonProperty("port") final int port
+  ) {
+    this.host = Objects.requireNonNull(host, "host");
+    this.port = port;
+  }
+
+  public String getHost() {
+    return host;
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    final HostInfoEntity that = (HostInfoEntity) o;
+    return Objects.equals(host, that.host)
+        && port == that.port;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(host, port);
+  }
+
+  @Override
+  public String toString() {
+    return host + "," + port;
+  }
+}
diff --git a/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HostStatusEntity.java b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HostStatusEntity.java
new file mode 100644
index 000000000000..8565177c01bc
--- /dev/null
+++ b/ksql-rest-model/src/main/java/io/confluent/ksql/rest/entity/HostStatusEntity.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 Confluent Inc.
+ *
+ * Licensed under the Confluent Community License (the "License"); you may not use
+ * this file except in compliance with the License. You may obtain a copy of the
+ * License at
+ *
+ * http://www.confluent.io/confluent-community-license
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+package io.confluent.ksql.rest.entity;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class HostStatusEntity {
+
+  private HostInfoEntity hostInfoEntity;
+  private boolean hostAlive;
+  private long lastStatusUpdateMs;
+
+  @JsonCreator
+  public HostStatusEntity(
+      @JsonProperty("hostInfoEntity") final HostInfoEntity hostInfoEntity,
+      @JsonProperty("hostAlive") final boolean hostAlive,
+      @JsonProperty("lastStatusUpdateMs") final long lastStatusUpdateMs
+  ) {
+    this.hostInfoEntity = Objects.requireNonNull(hostInfoEntity, "hostInfoEntity");
+    this.hostAlive = hostAlive;
+    this.lastStatusUpdateMs = lastStatusUpdateMs;
+  }
+
+  public HostInfoEntity getHostInfoEntity() {
+    return hostInfoEntity;
+  }
+
+  public boolean getHostAlive() {
+    return hostAlive;
+  }
+
+  public long getLastStatusUpdateMs() {
+    return lastStatusUpdateMs;
+  }
+
+  public void setHostInfoEntity(final HostInfoEntity hostInfoEntity) {
+    this.hostInfoEntity = hostInfoEntity;
+  }
+
+  public void setHostAlive(final boolean hostAlive) {
+    this.hostAlive = hostAlive;
+  }
+
+  public void setLastStatusUpdateMs(final long lastStatusUpdateMs) {
+    this.lastStatusUpdateMs = lastStatusUpdateMs;
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    final HostStatusEntity that = (HostStatusEntity) o;
+    return Objects.equals(hostInfoEntity, that.hostInfoEntity)
+        && hostAlive == that.hostAlive && lastStatusUpdateMs == that.lastStatusUpdateMs;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(hostInfoEntity, hostAlive, lastStatusUpdateMs);
+  }
+
+  @Override
+  public String toString() {
+    return hostInfoEntity + "," + hostAlive + "," + lastStatusUpdateMs;
+  }
+}

From 43dcdbe931159739fd2c591488aaffafac5c1fce Mon Sep 17 00:00:00 2001
From: Vicky Papavasileiou
Date: Thu, 16 Jan 2020 11:22:30 -0800
Subject: [PATCH 123/123] fix typo in comment

---
 .../java/io/confluent/ksql/rest/server/HeartbeatAgent.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/HeartbeatAgent.java b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/HeartbeatAgent.java
index 9430fe9dba77..4255bfb12335 100644
--- a/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/HeartbeatAgent.java
+++ b/ksql-rest-app/src/main/java/io/confluent/ksql/rest/server/HeartbeatAgent.java
@@ -62,11 +62,11 @@
  *
  * The services are started in the following order by defining their startup delay:
  * First, the cluster membership service starts, then the sending of the heartbeats and last the
  * processing of the received heartbeats. This provides some buffer for the cluster to be discovered
- * before the processing of heartbeats starts. However, it doesn't not guarantee that a remote
+ * before the processing of heartbeats starts. However, it does not guarantee that a remote
  * server will not be classified as dead immediately after discovered (although we optimistically
  * consider all newly discovered servers as alive) if there is lag in the sending/receiving of
  * heartbeats. That's why the service that sends heartbeats sends to both alive and dead servers:
- * avoid situations where a remote server is classified as down prematurely.
+ * avoid situations where a remote server is classified as dead prematurely.
  *
  */
 // CHECKSTYLE_RULES.OFF: ClassDataAbstractionCoupling