diff --git a/aggregating-count/flinksql/build.gradle b/aggregating-count/flinksql/build.gradle index 052f45a3..f9ac8e4b 100644 --- a/aggregating-count/flinksql/build.gradle +++ b/aggregating-count/flinksql/build.gradle @@ -26,13 +26,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/aggregating-minmax/flinksql/build.gradle b/aggregating-minmax/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/aggregating-minmax/flinksql/build.gradle +++ b/aggregating-minmax/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 
'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/common/build.gradle b/common/build.gradle index 9e87c185..5dd03d3f 100644 --- a/common/build.gradle +++ b/common/build.gradle @@ -48,15 +48,15 @@ dependencies { testImplementation 'org.hamcrest:hamcrest:2.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 
'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.9.2' testRuntimeOnly 'org.junit.platform:junit-platform-launcher' } diff --git a/common/src/test/java/io/confluent/developer/AbstractFlinkKafkaTest.java b/common/src/test/java/io/confluent/developer/AbstractFlinkKafkaTest.java index c97dc749..5e3e6445 100644 --- a/common/src/test/java/io/confluent/developer/AbstractFlinkKafkaTest.java +++ b/common/src/test/java/io/confluent/developer/AbstractFlinkKafkaTest.java @@ -3,8 +3,9 @@ import com.google.common.io.Resources; import org.apache.commons.lang3.exception.ExceptionUtils; -import org.apache.flink.api.common.restartstrategy.RestartStrategies; -import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.configuration.RestartStrategyOptions; +import 
org.apache.flink.configuration.StateBackendOptions; import org.apache.flink.runtime.client.JobCancellationException; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.EnvironmentSettings; @@ -42,10 +43,11 @@ public class AbstractFlinkKafkaTest { @BeforeClass public static void setup() { // create Flink table environment that test subclasses will use to execute SQL statements - StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + Configuration config = new Configuration(); + config.set(RestartStrategyOptions.RESTART_STRATEGY, "none"); + config.set(StateBackendOptions.STATE_BACKEND, "hashmap"); + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config); env.setParallelism(1); - env.getConfig().setRestartStrategy(RestartStrategies.noRestart()); - env.setStateBackend(new EmbeddedRocksDBStateBackend()); streamTableEnv = StreamTableEnvironment.create(env, EnvironmentSettings.newInstance().inStreamingMode().build()); diff --git a/cumulating-windows/flinksql/build.gradle b/cumulating-windows/flinksql/build.gradle index f4649fe4..f412f3d9 100644 --- a/cumulating-windows/flinksql/build.gradle +++ b/cumulating-windows/flinksql/build.gradle @@ -26,13 +26,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 
'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/deduplication-windowed/flinksql/build.gradle b/deduplication-windowed/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/deduplication-windowed/flinksql/build.gradle +++ b/deduplication-windowed/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' 
+ testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/deduplication/flinksql/build.gradle b/deduplication/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/deduplication/flinksql/build.gradle +++ b/deduplication/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + 
testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/docker/docker-compose-flinksql.yml b/docker/docker-compose-flinksql.yml index 1d50e551..e752b26b 100644 --- a/docker/docker-compose-flinksql.yml +++ b/docker/docker-compose-flinksql.yml @@ -34,7 +34,7 @@ services: SCHEMA_REGISTRY_HOST_NAME: schema-registry SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: broker:9092 flink-sql-client: - image: cnfldemos/flink-sql-client-kafka:1.16.0-scala_2.12-java11 + image: cnfldemos/flink-sql-client-kafka:1.19.1-scala_2.12-java17 hostname: flink-sql-client container_name: flink-sql-client depends_on: @@ -44,7 +44,7 @@ services: volumes: - ./settings/:/settings flink-jobmanager: - image: cnfldemos/flink-kafka:1.16.0-scala_2.12-java11 + image: cnfldemos/flink-kafka:1.19.1-scala_2.12-java17 hostname: flink-jobmanager container_name: flink-jobmanager ports: @@ -56,7 +56,7 @@ services: jobmanager.rpc.address: flink-jobmanager rest.bind-port: 9081 flink-taskmanager: - image: cnfldemos/flink-kafka:1.16.0-scala_2.12-java11 + image: cnfldemos/flink-kafka:1.19.1-scala_2.12-java17 hostname: flink-taskmanager container_name: flink-taskmanager depends_on: diff --git a/docker/flink/Dockerfile.flink-kafka b/docker/flink/Dockerfile.flink-kafka new file mode 100644 index 00000000..1dfc3c6f --- /dev/null +++ b/docker/flink/Dockerfile.flink-kafka @@ -0,0 +1,9 @@ +FROM flink:1.19.1-scala_2.12-java17 + +# Download connector libraries +RUN wget -P /opt/flink/lib/ https://repo.maven.apache.org/maven2/org/apache/flink/flink-sql-connector-kafka/3.2.0-1.19/flink-sql-connector-kafka-3.2.0-1.19.jar; \ + wget -P /opt/flink/lib/ https://repo.maven.apache.org/maven2/org/apache/flink/flink-json/1.19.1/flink-json-1.19.1.jar; \ + wget -P /opt/flink/lib/ 
https://repo.maven.apache.org/maven2/org/apache/flink/flink-sql-avro-confluent-registry/1.19.1/flink-sql-avro-confluent-registry-1.19.1.jar; + +# Copy configuration +COPY ./conf/* /opt/flink/conf/ diff --git a/docker/flink/Dockerfile.flink-sql-client-kafka b/docker/flink/Dockerfile.flink-sql-client-kafka new file mode 100644 index 00000000..8a0b0033 --- /dev/null +++ b/docker/flink/Dockerfile.flink-sql-client-kafka @@ -0,0 +1,22 @@ +FROM flink:1.19.1-scala_2.12-java17 + +# Create CLI lib folder +#COPY ./bin/* /opt/sql-client/ +RUN mkdir -p /opt/sql-client/lib + +# Download connector libraries +RUN wget -P /opt/sql-client/lib/ https://repo.maven.apache.org/maven2/org/apache/flink/flink-sql-connector-kafka/3.2.0-1.19/flink-sql-connector-kafka-3.2.0-1.19.jar; \ + wget -P /opt/sql-client/lib/ https://repo.maven.apache.org/maven2/org/apache/flink/flink-json/1.19.1/flink-json-1.19.1.jar; \ + wget -P /opt/sql-client/lib/ https://repo.maven.apache.org/maven2/org/apache/flink/flink-sql-avro-confluent-registry/1.19.1/flink-sql-avro-confluent-registry-1.19.1.jar; + +# Also copy to Flink lib so that, e.g., other catalog types can be used if desired +RUN cp /opt/sql-client/lib/* /opt/flink/lib/ + +# Copy configuration +COPY ./conf/* /opt/flink/conf/ + +WORKDIR /opt/sql-client +ENV SQL_CLIENT_HOME=/opt/sql-client + +COPY ./docker-entrypoint.sh / +ENTRYPOINT ["/docker-entrypoint.sh"] diff --git a/docker/flink/Makefile b/docker/flink/Makefile new file mode 100644 index 00000000..2a947a57 --- /dev/null +++ b/docker/flink/Makefile @@ -0,0 +1,15 @@ +FLINK_VERSION ?= 1.19.1-scala_2.12-java17 +BUILD_PLATFORM ?= linux/amd64 +PUSH_PREFIX ?= cnfldemos + +build-flink-kafka: + @docker buildx build --load --platform $(BUILD_PLATFORM) -t flink-kafka:$(FLINK_VERSION) -f Dockerfile.flink-kafka . + +build-flink-sql-client-kafka: + @docker buildx build --load --platform $(BUILD_PLATFORM) -t flink-sql-client-kafka:$(FLINK_VERSION) -f Dockerfile.flink-sql-client-kafka . 
+ +push-flink-kafka: + @docker buildx build --push --platform linux/amd64,linux/arm64 -t $(PUSH_PREFIX)/flink-kafka:$(FLINK_VERSION) -f Dockerfile.flink-kafka . + +push-flink-sql-client-kafka: + @docker buildx build --push --platform linux/amd64,linux/arm64 -t $(PUSH_PREFIX)/flink-sql-client-kafka:$(FLINK_VERSION) -f Dockerfile.flink-sql-client-kafka . diff --git a/docker/flink/conf/flink-conf.yaml b/docker/flink/conf/flink-conf.yaml new file mode 100644 index 00000000..8a6c8b0a --- /dev/null +++ b/docker/flink/conf/flink-conf.yaml @@ -0,0 +1,312 @@ +################################################################################ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + + +#============================================================================== +# Common +#============================================================================== + +# The external address of the host on which the JobManager runs and can be +# reached by the TaskManagers and any clients which want to connect. 
This setting +# is only used in Standalone mode and may be overwritten on the JobManager side +# by specifying the --host parameter of the bin/jobmanager.sh executable. +# In high availability mode, if you use the bin/start-cluster.sh script and setup +# the conf/masters file, this will be taken care of automatically. Yarn +# automatically configure the host name based on the hostname of the node where the +# JobManager runs. + +jobmanager.rpc.address: flink-jobmanager + +# The RPC port where the JobManager is reachable. + +jobmanager.rpc.port: 6123 + +# The host interface the JobManager will bind to. By default, this is localhost, and will prevent +# the JobManager from communicating outside the machine/container it is running on. +# On YARN this setting will be ignored if it is set to 'localhost', defaulting to 0.0.0.0. +# On Kubernetes this setting will be ignored, defaulting to 0.0.0.0. +# +# To enable this, set the bind-host address to one that has access to an outside facing network +# interface, such as 0.0.0.0. + +jobmanager.bind-host: 0.0.0.0 + + +# The total process memory size for the JobManager. +# +# Note this accounts for all memory usage within the JobManager process, including JVM metaspace and other overhead. + +jobmanager.memory.process.size: 1600m + +# The host interface the TaskManager will bind to. By default, this is localhost, and will prevent +# the TaskManager from communicating outside the machine/container it is running on. +# On YARN this setting will be ignored if it is set to 'localhost', defaulting to 0.0.0.0. +# On Kubernetes this setting will be ignored, defaulting to 0.0.0.0. +# +# To enable this, set the bind-host address to one that has access to an outside facing network +# interface, such as 0.0.0.0. + +taskmanager.bind-host: 0.0.0.0 + +# The address of the host on which the TaskManager runs and can be reached by the JobManager and +# other TaskManagers. 
If not specified, the TaskManager will try different strategies to identify +# the address. +# +# Note this address needs to be reachable by the JobManager and forward traffic to one of +# the interfaces the TaskManager is bound to (see 'taskmanager.bind-host'). +# +# Note also that unless all TaskManagers are running on the same machine, this address needs to be +# configured separately for each TaskManager. + + +# The total process memory size for the TaskManager. +# +# Note this accounts for all memory usage within the TaskManager process, including JVM metaspace and other overhead. + +taskmanager.memory.process.size: 1728m + +# To exclude JVM metaspace and overhead, please, use total Flink memory size instead of 'taskmanager.memory.process.size'. +# It is not recommended to set both 'taskmanager.memory.process.size' and Flink memory. +# +# taskmanager.memory.flink.size: 1280m + +# The number of task slots that each TaskManager offers. Each slot runs one parallel pipeline. + +taskmanager.numberOfTaskSlots: 1 + +# The parallelism used for programs that did not specify and other parallelism. + +parallelism.default: 1 + +# The default file system scheme and authority. +# +# By default file paths without scheme are interpreted relative to the local +# root file system 'file:///'. Use this to override the default and interpret +# relative paths relative to a different file system, +# for example 'hdfs://mynamenode:12345' +# +# fs.default-scheme + +#============================================================================== +# High Availability +#============================================================================== + +# The high-availability mode. Possible options are 'NONE' or 'zookeeper'. +# +# high-availability: zookeeper + +# The path where metadata for master recovery is persisted. While ZooKeeper stores +# the small ground truth for checkpoint and leader election, this location stores +# the larger objects, like persisted dataflow graphs. 
+# +# Must be a durable file system that is accessible from all nodes +# (like HDFS, S3, Ceph, nfs, ...) +# +# high-availability.storageDir: hdfs:///flink/ha/ + +# The list of ZooKeeper quorum peers that coordinate the high-availability +# setup. This must be a list of the form: +# "host1:clientPort,host2:clientPort,..." (default clientPort: 2181) +# +# high-availability.zookeeper.quorum: localhost:2181 + + +# ACL options are based on https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_BuiltinACLSchemes +# It can be either "creator" (ZOO_CREATE_ALL_ACL) or "open" (ZOO_OPEN_ACL_UNSAFE) +# The default value is "open" and it can be changed to "creator" if ZK security is enabled +# +# high-availability.zookeeper.client.acl: open + +#============================================================================== +# Fault tolerance and checkpointing +#============================================================================== + +# The backend that will be used to store operator state checkpoints if +# checkpointing is enabled. Checkpointing is enabled when execution.checkpointing.interval > 0. +# +# Execution checkpointing related parameters. Please refer to CheckpointConfig and ExecutionCheckpointingOptions for more details. +# +# execution.checkpointing.interval: 3min +# execution.checkpointing.externalized-checkpoint-retention: [DELETE_ON_CANCELLATION, RETAIN_ON_CANCELLATION] +# execution.checkpointing.max-concurrent-checkpoints: 1 +# execution.checkpointing.min-pause: 0 +# execution.checkpointing.mode: [EXACTLY_ONCE, AT_LEAST_ONCE] +# execution.checkpointing.timeout: 10min +# execution.checkpointing.tolerable-failed-checkpoints: 0 +# execution.checkpointing.unaligned: false +# +# Supported backends are 'hashmap', 'rocksdb', or the +# . +# +# state.backend: hashmap + +# Directory for checkpoints filesystem, when using any of the default bundled +# state backends. 
+# +# state.checkpoints.dir: hdfs://namenode-host:port/flink-checkpoints + +# Default target directory for savepoints, optional. +# +# state.savepoints.dir: hdfs://namenode-host:port/flink-savepoints + +# Flag to enable/disable incremental checkpoints for backends that +# support incremental checkpoints (like the RocksDB state backend). +# +# state.backend.incremental: false + +# The failover strategy, i.e., how the job computation recovers from task failures. +# Only restart tasks that may have been affected by the task failure, which typically includes +# downstream tasks and potentially upstream tasks if their produced data is no longer available for consumption. + +jobmanager.execution.failover-strategy: region + +#============================================================================== +# Rest & web frontend +#============================================================================== + +# The port to which the REST client connects to. If rest.bind-port has +# not been specified, then the server will bind to this port as well. +# +rest.port: 9081 + +# The address to which the REST client will connect to +# +rest.address: flink-jobmanager + +# Port range for the REST and web server to bind to. +# +#rest.bind-port: 8080-8090 + +# The address that the REST & web server binds to +# By default, this is localhost, which prevents the REST & web server from +# being able to communicate outside of the machine/container it is running on. +# +# To enable this, set the bind address to one that has access to outside-facing +# network interface, such as 0.0.0.0. +# +rest.bind-address: 0.0.0.0 + +# Flag to specify whether job submission is enabled from the web-based +# runtime monitor. Uncomment to disable. + +#web.submit.enable: false + +# Flag to specify whether job cancellation is enabled from the web-based +# runtime monitor. Uncomment to disable. 
+ +#web.cancel.enable: false + +#============================================================================== +# Advanced +#============================================================================== + +# Override the directories for temporary files. If not specified, the +# system-specific Java temporary directory (java.io.tmpdir property) is taken. +# +# For framework setups on Yarn, Flink will automatically pick up the +# containers' temp directories without any need for configuration. +# +# Add a delimited list for multiple directories, using the system directory +# delimiter (colon ':' on unix) or a comma, e.g.: +# /data1/tmp:/data2/tmp:/data3/tmp +# +# Note: Each directory entry is read from and written to by a different I/O +# thread. You can include the same directory multiple times in order to create +# multiple I/O threads against that directory. This is for example relevant for +# high-throughput RAIDs. +# +# io.tmp.dirs: /tmp + +# The classloading resolve order. Possible values are 'child-first' (Flink's default) +# and 'parent-first' (Java's default). +# +# Child first classloading allows users to use different dependency/library +# versions in their application than those in the classpath. Switching back +# to 'parent-first' may help with debugging dependency issues. +# +# classloader.resolve-order: child-first + +# The amount of memory going to the network stack. These numbers usually need +# no tuning. Adjusting them may be necessary in case of an "Insufficient number +# of network buffers" error. The default min is 64MB, the default max is 1GB. 
+# +# taskmanager.memory.network.fraction: 0.1 +# taskmanager.memory.network.min: 64mb +# taskmanager.memory.network.max: 1gb + +#============================================================================== +# Flink Cluster Security Configuration +#============================================================================== + +# Kerberos authentication for various components - Hadoop, ZooKeeper, and connectors - +# may be enabled in four steps: +# 1. configure the local krb5.conf file +# 2. provide Kerberos credentials (either a keytab or a ticket cache w/ kinit) +# 3. make the credentials available to various JAAS login contexts +# 4. configure the connector to use JAAS/SASL + +# The below configure how Kerberos credentials are provided. A keytab will be used instead of +# a ticket cache if the keytab path and principal are set. + +# security.kerberos.login.use-ticket-cache: true +# security.kerberos.login.keytab: /path/to/kerberos/keytab +# security.kerberos.login.principal: flink-user + +# The configuration below defines which JAAS login contexts + +# security.kerberos.login.contexts: Client,KafkaClient + +#============================================================================== +# ZK Security Configuration +#============================================================================== + +# Below configurations are applicable if ZK ensemble is configured for security + +# Override below configuration to provide custom ZK service name if configured +# zookeeper.sasl.service-name: zookeeper + +# The configuration below must match one of the values set in "security.kerberos.login.contexts" +# zookeeper.sasl.login-context-name: Client + +#============================================================================== +# HistoryServer +#============================================================================== + +# The HistoryServer is started and stopped via bin/historyserver.sh (start|stop) + +# Directory to upload completed jobs to. 
Add this directory to the list of +# monitored directories of the HistoryServer as well (see below). +#jobmanager.archive.fs.dir: hdfs:///completed-jobs/ + +# The address under which the web-based HistoryServer listens. +#historyserver.web.address: 0.0.0.0 + +# The port under which the web-based HistoryServer listens. +#historyserver.web.port: 8082 + +# Comma separated list of directories to monitor for completed jobs. +#historyserver.archive.fs.dir: hdfs:///completed-jobs/ + +# Interval in milliseconds for refreshing the monitored directories. +#historyserver.archive.fs.refresh-interval: 10000 + +blob.server.port: 6124 +query.server.port: 6125 + +jobmanager.rpc.address: flink-jobmanager +taskmanager.numberOfTaskSlots: 10 \ No newline at end of file diff --git a/docker/flink/conf/sql-client-conf.yaml b/docker/flink/conf/sql-client-conf.yaml new file mode 100644 index 00000000..bd305b0a --- /dev/null +++ b/docker/flink/conf/sql-client-conf.yaml @@ -0,0 +1,30 @@ +# This file defines the default environment for Flink's SQL Client. +# Defaults might be overwritten by a session specific environment. + +#============================================================================== +# Execution properties +#============================================================================== + +# Execution properties allow for changing the behavior of a table program. 
+ +execution: + planner: blink # using the Blink planner + type: streaming # 'batch' or 'streaming' execution + result-mode: table # 'changelog' or 'table' presentation of results + parallelism: 1 # parallelism of the program + max-parallelism: 128 # maximum parallelism + min-idle-state-retention: 0 # minimum idle state retention in ms + max-idle-state-retention: 0 # maximum idle state retention in ms + +#============================================================================== +# Deployment properties +#============================================================================== + +# Deployment properties allow for describing the cluster to which table +# programs are submitted to. + +deployment: + type: standalone # only the 'standalone' deployment is supported + response-timeout: 5000 # general cluster communication timeout in ms + gateway-address: "" # (optional) address from cluster to gateway + gateway-port: 0 # (optional) port from cluster to gateway diff --git a/docker/flink/docker-entrypoint.sh b/docker/flink/docker-entrypoint.sh new file mode 100755 index 00000000..d624f3f8 --- /dev/null +++ b/docker/flink/docker-entrypoint.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +sleep infinity \ No newline at end of file diff --git a/filtering/flinksql/build.gradle b/filtering/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/filtering/flinksql/build.gradle +++ b/filtering/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 
'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/hopping-windows/flinksql/build.gradle b/hopping-windows/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/hopping-windows/flinksql/build.gradle +++ b/hopping-windows/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + 
testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/joining-stream-stream/flinksql/build.gradle b/joining-stream-stream/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/joining-stream-stream/flinksql/build.gradle +++ b/joining-stream-stream/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 
'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/merging/flinksql/build.gradle b/merging/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/merging/flinksql/build.gradle +++ b/merging/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 
'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/over-aggregations/flinksql/build.gradle b/over-aggregations/flinksql/build.gradle index ca46bc81..15d9a151 100644 --- a/over-aggregations/flinksql/build.gradle +++ b/over-aggregations/flinksql/build.gradle @@ -25,14 +25,14 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' - testImplementation 'org.apache.flink:flink-json:1.19.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' + testImplementation 'org.apache.flink:flink-json:1.19.1' } diff --git 
a/pattern-matching/flinksql/build.gradle b/pattern-matching/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/pattern-matching/flinksql/build.gradle +++ b/pattern-matching/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/splitting/flinksql/build.gradle b/splitting/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/splitting/flinksql/build.gradle +++ b/splitting/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 
'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/top-N/flinksql/build.gradle b/top-N/flinksql/build.gradle index e92a596a..15d9a151 100644 --- a/top-N/flinksql/build.gradle +++ b/top-N/flinksql/build.gradle @@ -25,15 +25,14 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - 
testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' - testImplementation 'org.apache.flink:flink-json:1.19.0' - + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' + testImplementation 'org.apache.flink:flink-json:1.19.1' } diff --git a/tumbling-windows/flinksql/build.gradle b/tumbling-windows/flinksql/build.gradle index 7a55db90..b2b95b81 100644 --- a/tumbling-windows/flinksql/build.gradle +++ b/tumbling-windows/flinksql/build.gradle @@ -25,13 +25,13 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 
'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' } diff --git a/windowed-top-N/flinksql/README.md b/windowed-top-N/flinksql/README.md index 71c48e3b..0ebb9b0b 100644 --- a/windowed-top-N/flinksql/README.md +++ b/windowed-top-N/flinksql/README.md @@ -148,10 +148,8 @@ Run the following command to execute [FlinkSqlTopNTest#testTopN](src/test/java/i (369, 'Casablanca', 'Romance', TO_TIMESTAMP('2024-04-23 20:26:00')), (321, 'The Shawshank Redemption', 'Drama', TO_TIMESTAMP('2024-04-23 20:20:00')), (654, 'Forrest Gump', 'Drama', TO_TIMESTAMP('2024-04-23 21:54:00')), - (987, 'Fight Club', 'Drama', TO_TIMESTAMP('2024-04-23 23:24:00')), (135, 'Pulp Fiction', 'Crime', TO_TIMESTAMP('2024-04-23 22:09:00')), (246, 'The Godfather: Part II', 'Crime', TO_TIMESTAMP('2024-04-23 19:28:00')), - (357, 'The Departed', 'Crime', TO_TIMESTAMP('2024-04-23 23:11:00')), (842, 'Toy Story 3', 'Animation', TO_TIMESTAMP('2024-04-23 23:12:00')), (931, 'Up', 'Animation', TO_TIMESTAMP('2024-04-23 22:17:00')), (624, 'The Lion King', 'Animation', TO_TIMESTAMP('2024-04-23 22:28:00')), @@ -159,7 +157,6 @@ Run the following command to execute 
[FlinkSqlTopNTest#testTopN](src/test/java/i (678, 'The Matrix', 'Sci-Fi', TO_TIMESTAMP('2024-04-23 19:25:00')), (753, 'Interstellar', 'Sci-Fi', TO_TIMESTAMP('2024-04-23 20:14:00')), (834, 'Titanic', 'Romance', TO_TIMESTAMP('2024-04-23 20:25:00')), - (675, 'Pride and Prejudice', 'Romance', TO_TIMESTAMP('2024-04-23 23:37:00')), (333, 'The Pride of Archbishop Carroll', 'History', TO_TIMESTAMP('2024-04-24 03:37:00')); ``` @@ -231,10 +228,8 @@ FROM ( (369, 'Casablanca', 'Romance', TO_TIMESTAMP('2024-04-23 20:26:00')), (321, 'The Shawshank Redemption', 'Drama', TO_TIMESTAMP('2024-04-23 20:20:00')), (654, 'Forrest Gump', 'Drama', TO_TIMESTAMP('2024-04-23 21:54:00')), - (987, 'Fight Club', 'Drama', TO_TIMESTAMP('2024-04-23 23:24:00')), (135, 'Pulp Fiction', 'Crime', TO_TIMESTAMP('2024-04-23 22:09:00')), (246, 'The Godfather: Part II', 'Crime', TO_TIMESTAMP('2024-04-23 19:28:00')), - (357, 'The Departed', 'Crime', TO_TIMESTAMP('2024-04-23 23:11:00')), (842, 'Toy Story 3', 'Animation', TO_TIMESTAMP('2024-04-23 23:12:00')), (931, 'Up', 'Animation', TO_TIMESTAMP('2024-04-23 22:17:00')), (624, 'The Lion King', 'Animation', TO_TIMESTAMP('2024-04-23 22:28:00')), @@ -242,7 +237,6 @@ FROM ( (678, 'The Matrix', 'Sci-Fi', TO_TIMESTAMP('2024-04-23 19:25:00')), (753, 'Interstellar', 'Sci-Fi', TO_TIMESTAMP('2024-04-23 20:14:00')), (834, 'Titanic', 'Romance', TO_TIMESTAMP('2024-04-23 20:25:00')), - (675, 'Pride and Prejudice', 'Romance', TO_TIMESTAMP('2024-04-23 23:37:00')), (333, 'The Pride of Archbishop Carroll', 'History', TO_TIMESTAMP('2024-04-24 03:37:00')); ``` diff --git a/windowed-top-N/flinksql/build.gradle b/windowed-top-N/flinksql/build.gradle index e92a596a..15d9a151 100644 --- a/windowed-top-N/flinksql/build.gradle +++ b/windowed-top-N/flinksql/build.gradle @@ -25,15 +25,14 @@ dependencies { testImplementation 'junit:junit:4.13.2' testImplementation 'org.testcontainers:testcontainers:1.19.3' testImplementation 'org.testcontainers:kafka:1.19.3' - testImplementation 
'org.apache.flink:flink-sql-connector-kafka:3.0.2-1.18' - testImplementation 'org.apache.flink:flink-connector-base:1.18.0' - testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils:1.18.0' - testImplementation 'org.apache.flink:flink-test-utils-junit:1.18.0' - testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0' - testImplementation 'org.apache.flink:flink-table-planner_2.12:1.18.0:tests' - testImplementation 'org.apache.flink:flink-statebackend-rocksdb:1.18.0' - testImplementation 'org.apache.flink:flink-json:1.19.0' - + testImplementation 'commons-codec:commons-codec:1.17.0' + testImplementation 'org.apache.flink:flink-sql-connector-kafka:3.2.0-1.19' + testImplementation 'org.apache.flink:flink-connector-base:1.19.1' + testImplementation 'org.apache.flink:flink-sql-avro-confluent-registry:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils:1.19.1' + testImplementation 'org.apache.flink:flink-test-utils-junit:1.19.1' + testImplementation 'org.apache.flink:flink-table-api-java-bridge:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1' + testImplementation 'org.apache.flink:flink-table-planner_2.12:1.19.1:tests' + testImplementation 'org.apache.flink:flink-json:1.19.1' } diff --git a/windowed-top-N/flinksql/src/test/resources/populate-movie-starts.sql b/windowed-top-N/flinksql/src/test/resources/populate-movie-starts.sql index 2bff434e..89036af3 100644 --- a/windowed-top-N/flinksql/src/test/resources/populate-movie-starts.sql +++ b/windowed-top-N/flinksql/src/test/resources/populate-movie-starts.sql @@ -7,15 +7,12 @@ VALUES (123, 'The Dark Knight', 'Action', TO_TIMESTAMP('2024-04-23 19:04:00')), (369, 'Casablanca', 'Romance', TO_TIMESTAMP('2024-04-23 20:26:00')), (321, 'The Shawshank Redemption', 'Drama', TO_TIMESTAMP('2024-04-23 20:20:00')), (654, 'Forrest 
Gump', 'Drama', TO_TIMESTAMP('2024-04-23 21:54:00')), - (987, 'Fight Club', 'Drama', TO_TIMESTAMP('2024-04-23 23:24:00')), (135, 'Pulp Fiction', 'Crime', TO_TIMESTAMP('2024-04-23 22:09:00')), (246, 'The Godfather: Part II', 'Crime', TO_TIMESTAMP('2024-04-23 19:28:00')), - (357, 'The Departed', 'Crime', TO_TIMESTAMP('2024-04-23 23:11:00')), (842, 'Toy Story 3', 'Animation', TO_TIMESTAMP('2024-04-23 23:12:00')), (931, 'Up', 'Animation', TO_TIMESTAMP('2024-04-23 22:17:00')), (624, 'The Lion King', 'Animation', TO_TIMESTAMP('2024-04-23 22:28:00')), (512, 'Star Wars: The Force Awakens', 'Sci-Fi', TO_TIMESTAMP('2024-04-23 20:42:00')), (678, 'The Matrix', 'Sci-Fi', TO_TIMESTAMP('2024-04-23 19:25:00')), (753, 'Interstellar', 'Sci-Fi', TO_TIMESTAMP('2024-04-23 20:14:00')), - (834, 'Titanic', 'Romance', TO_TIMESTAMP('2024-04-23 20:25:00')), - (675, 'Pride and Prejudice', 'Romance', TO_TIMESTAMP('2024-04-23 23:37:00')); \ No newline at end of file + (834, 'Titanic', 'Romance', TO_TIMESTAMP('2024-04-23 20:25:00')); \ No newline at end of file