From 18cba80dcd319a2c5be9e1070858cdb6692351f0 Mon Sep 17 00:00:00 2001
From: Jeff Xiang <jxiang@pinterest.com>
Date: Wed, 9 Oct 2024 11:50:04 -0400
Subject: [PATCH] WIP finish PscSourceReaderTest

---
 .../reader/PscTopicUriPartitionSplitReader.java |  8 ++------
 .../psc/source/reader/PscSourceReaderTest.java  | 14 ++++++++++----
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/psc-flink/src/main/java/com/pinterest/flink/connector/psc/source/reader/PscTopicUriPartitionSplitReader.java b/psc-flink/src/main/java/com/pinterest/flink/connector/psc/source/reader/PscTopicUriPartitionSplitReader.java
index 9479153..0d56502 100644
--- a/psc-flink/src/main/java/com/pinterest/flink/connector/psc/source/reader/PscTopicUriPartitionSplitReader.java
+++ b/psc-flink/src/main/java/com/pinterest/flink/connector/psc/source/reader/PscTopicUriPartitionSplitReader.java
@@ -29,7 +29,6 @@
 import com.pinterest.psc.consumer.PscConsumer;
 import com.pinterest.psc.consumer.PscConsumerMessage;
 import com.pinterest.psc.consumer.PscConsumerMessagesIterable;
-import com.pinterest.psc.consumer.PscConsumerPollMessageIterator;
 import com.pinterest.psc.exception.ClientException;
 import com.pinterest.psc.exception.consumer.ConsumerException;
 import com.pinterest.psc.exception.consumer.WakeupException;
@@ -169,7 +168,6 @@ private void markEmptySplitsAsFinished(PscPartitionSplitRecords recordsBySplits)
 
     @Override
     public void handleSplitsChanges(SplitsChange<PscTopicUriPartitionSplit> splitsChange) {
-        System.out.println("handling splits changes");
         // Get all the partition assignments and stopping offsets.
         if (!(splitsChange instanceof SplitsAddition)) {
             throw new UnsupportedOperationException(
@@ -215,7 +213,7 @@ public void handleSplitsChanges(SplitsChange<PscTopicUriPartitionSplit> splitsCh
 
         // Metric registration
         try {
-            maybeRegisterKafkaConsumerMetrics(props, pscSourceReaderMetrics, consumer);
+            maybeRegisterPscConsumerMetrics(props, pscSourceReaderMetrics, consumer);
             this.pscSourceReaderMetrics.registerNumBytesIn(consumer);
         } catch (ClientException e) {
             throw new RuntimeException("Failed to register metrics for PscConsumer", e);
@@ -254,7 +252,6 @@ public void close() throws Exception {
     public void notifyCheckpointComplete(
             Collection<MessageId> offsetsToCommit, OffsetCommitCallback offsetCommitCallback)
             throws ConfigurationException, ConsumerException {
-        System.out.println("commitAsync: " + offsetsToCommit);
         consumer.commitAsync(offsetsToCommit, offsetCommitCallback);
     }
 
@@ -452,7 +449,7 @@ private long getStoppingOffset(TopicUriPartition tp) {
         return stoppingOffsets.getOrDefault(tp, Long.MAX_VALUE);
     }
 
-    private void maybeRegisterKafkaConsumerMetrics(
+    private void maybeRegisterPscConsumerMetrics(
             Properties props,
             PscSourceReaderMetrics pscSourceReaderMetrics,
             PscConsumer<?, ?> consumer) throws ClientException {
@@ -548,7 +545,6 @@ public PscConsumerMessage<byte[], byte[]> nextRecordFromSplit() {
                     currentTopicPartition,
                     "Make sure nextSplit() did not return null before "
                             + "iterate over the records split.");
-            System.out.println("recordIteratorClass: " + recordIterator.getClass().getName());
             if (recordIterator.hasNext()) {
                 final PscConsumerMessage<byte[], byte[]> message = recordIterator.next();
                 // Only emit records before stopping offset
diff --git a/psc-flink/src/test/java/com/pinterest/flink/connector/psc/source/reader/PscSourceReaderTest.java b/psc-flink/src/test/java/com/pinterest/flink/connector/psc/source/reader/PscSourceReaderTest.java
index 592b588..2923b43 100644
--- a/psc-flink/src/test/java/com/pinterest/flink/connector/psc/source/reader/PscSourceReaderTest.java
+++ b/psc-flink/src/test/java/com/pinterest/flink/connector/psc/source/reader/PscSourceReaderTest.java
@@ -296,7 +296,7 @@ void testDisableOffsetCommit() throws Exception {
 
     @Test
     void testPscSourceMetrics() throws Exception {
         final MetricListener metricListener = new MetricListener();
-        final String groupId = "testKafkaSourceMetrics";
+        final String groupId = "testPscSourceMetrics";
         final TopicUriPartition tp0 = new TopicUriPartition(TOPIC_URI_STR, 0);
         final TopicUriPartition tp1 = new TopicUriPartition(TOPIC_URI_STR, 1);
@@ -320,7 +320,7 @@ void testPscSourceMetrics() throws Exception {
                 () -> output.getEmittedRecords().size() == NUM_RECORDS_PER_SPLIT * 2,
                 String.format(
                         "Failed to poll %d records until timeout", NUM_RECORDS_PER_SPLIT * 2));
-
+        Thread.sleep(100); // Wait for the metric to be updated
         // Metric "records-consumed-total" of KafkaConsumer should be NUM_RECORDS_PER_SPLIT
         assertThat(getPscConsumerMetric("records-consumed-total", metricListener))
                 .isEqualTo(NUM_RECORDS_PER_SPLIT * 2);
@@ -427,7 +427,7 @@ void testAssigningEmptySplitOnly() throws Exception {
                         (PscSourceReader<Integer>)
                                 createReader(
                                         Boundedness.BOUNDED,
-                                        "KafkaSourceReaderTestGroup",
+                                        "PscSourceReaderTestGroup",
                                         new TestingReaderContext(),
                                         splitFinishedHook)) {
             reader.addSplits(Arrays.asList(emptySplit0, emptySplit1));
@@ -437,8 +437,10 @@ void testAssigningEmptySplitOnly() throws Exception {
                 () -> reader.getNumAliveFetchers() == 0,
                 "The split fetcher did not exit before timeout.");
         assertThat(reader.getNumAliveFetchers()).isEqualTo(0);
+
+        // upstream asserts containsExactly (in order) but that is not necessary given that finishedSplits is a Set
         assertThat(finishedSplits)
-                .containsExactly(emptySplit0.splitId(), emptySplit1.splitId());
+                .contains(emptySplit0.splitId(), emptySplit1.splitId());
     }
 }
 
@@ -508,6 +510,10 @@ private SourceReader<Integer, PscTopicUriPartitionSplit> createReader(
             Properties props) throws Exception {
         props.setProperty(PscFlinkConfiguration.CLUSTER_URI_CONFIG,
                 PscTestEnvironmentWithKafkaAsPubSub.PSC_TEST_TOPIC_URI_PREFIX);
+        props.setProperty(PscConfiguration.PSC_METRICS_FREQUENCY_MS, "100");
+        if (!props.containsKey(PscConfiguration.PSC_CONSUMER_GROUP_ID)) {
+            props.setProperty(PscConfiguration.PSC_CONSUMER_GROUP_ID, "test-group-id");
+        }
         putDiscoveryProperties(props, PscSourceTestEnv.getBrokerConnectionStrings(),
                 PscTestEnvironmentWithKafkaAsPubSub.PSC_TEST_TOPIC_URI_PREFIX);
         PscSourceBuilder<Integer> builder = PscSource.<Integer>builder()
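
Note on testPscSourceMetrics: the added Thread.sleep(100) works together with
PSC_METRICS_FREQUENCY_MS being lowered to "100" in createReader, so one metrics
reporting interval normally elapses before the assertion runs. If the test still
flakes on slow CI hosts, a bounded poll is a more robust wait. The sketch below
is illustrative only, not part of the patch; it assumes the test's existing
getPscConsumerMetric(String, MetricListener) helper returns a long, and the
10-second budget is an arbitrary choice:

    // Illustrative sketch: poll until the metric reaches the expected value
    // or the deadline passes, instead of a single fixed 100 ms sleep.
    final long expected = NUM_RECORDS_PER_SPLIT * 2;
    final long deadline = System.currentTimeMillis() + 10_000L; // assumed budget
    while (getPscConsumerMetric("records-consumed-total", metricListener) != expected
            && System.currentTimeMillis() < deadline) {
        Thread.sleep(50); // re-check on a short interval
    }
    assertThat(getPscConsumerMetric("records-consumed-total", metricListener))
            .isEqualTo(expected);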
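
Note on testAssigningEmptySplitOnly: relaxing containsExactly to contains drops
the ordering check, which is right for a Set, but contains would also pass if
finishedSplits picked up extra split IDs. Since only ordering is the problem,
AssertJ's containsExactlyInAnyOrder would keep the exhaustiveness check while
still ignoring iteration order; a possible alternative assertion:

    // Ignores iteration order (finishedSplits is a Set) but still fails
    // if any unexpected split ID is present.
    assertThat(finishedSplits)
            .containsExactlyInAnyOrder(emptySplit0.splitId(), emptySplit1.splitId());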